1 diff --git a/.config b/.config
2 new file mode 100644
3 index 000000000000..2aaa55e07eb3
4 --- /dev/null
5 +++ b/.config
6 @@ -0,0 +1,11068 @@
7 +#
8 +# Automatically generated file; DO NOT EDIT.
9 +# Linux/x86 5.12.8 Kernel Configuration
11 +CONFIG_CC_VERSION_TEXT="gcc-11 (Debian 11.1.0-2) 11.1.0"
12 +CONFIG_CC_IS_GCC=y
13 +CONFIG_GCC_VERSION=110100
14 +CONFIG_CLANG_VERSION=0
15 +CONFIG_LD_IS_BFD=y
16 +CONFIG_LD_VERSION=23502
17 +CONFIG_LLD_VERSION=0
18 +CONFIG_CC_CAN_LINK=y
19 +CONFIG_CC_CAN_LINK_STATIC=y
20 +CONFIG_CC_HAS_ASM_GOTO=y
21 +CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
22 +CONFIG_CC_HAS_ASM_INLINE=y
23 +CONFIG_IRQ_WORK=y
24 +CONFIG_BUILDTIME_TABLE_SORT=y
25 +CONFIG_THREAD_INFO_IN_TASK=y
28 +# General setup
30 +CONFIG_INIT_ENV_ARG_LIMIT=32
31 +# CONFIG_COMPILE_TEST is not set
32 +CONFIG_LOCALVERSION=""
33 +# CONFIG_LOCALVERSION_AUTO is not set
34 +CONFIG_BUILD_SALT=""
35 +CONFIG_HAVE_KERNEL_GZIP=y
36 +CONFIG_HAVE_KERNEL_BZIP2=y
37 +CONFIG_HAVE_KERNEL_LZMA=y
38 +CONFIG_HAVE_KERNEL_XZ=y
39 +CONFIG_HAVE_KERNEL_LZO=y
40 +CONFIG_HAVE_KERNEL_LZ4=y
41 +CONFIG_HAVE_KERNEL_ZSTD=y
42 +# CONFIG_KERNEL_GZIP is not set
43 +# CONFIG_KERNEL_BZIP2 is not set
44 +# CONFIG_KERNEL_LZMA is not set
45 +# CONFIG_KERNEL_XZ is not set
46 +# CONFIG_KERNEL_LZO is not set
47 +# CONFIG_KERNEL_LZ4 is not set
48 +CONFIG_KERNEL_ZSTD=y
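# The kernel image is compressed with zstd here; building this config needs
# the userspace `zstd` tool on the host. A minimal pre-build sanity check,
# as a sketch:
#   $ command -v zstd || echo "zstd not installed"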
49 +CONFIG_DEFAULT_INIT=""
50 +CONFIG_DEFAULT_HOSTNAME="(none)"
51 +CONFIG_SWAP=y
52 +CONFIG_SYSVIPC=y
53 +CONFIG_SYSVIPC_SYSCTL=y
54 +CONFIG_POSIX_MQUEUE=y
55 +CONFIG_POSIX_MQUEUE_SYSCTL=y
56 +CONFIG_WATCH_QUEUE=y
57 +CONFIG_CROSS_MEMORY_ATTACH=y
58 +CONFIG_USELIB=y
59 +CONFIG_AUDIT=y
60 +CONFIG_HAVE_ARCH_AUDITSYSCALL=y
61 +CONFIG_AUDITSYSCALL=y
64 +# IRQ subsystem
66 +CONFIG_GENERIC_IRQ_PROBE=y
67 +CONFIG_GENERIC_IRQ_SHOW=y
68 +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
69 +CONFIG_GENERIC_PENDING_IRQ=y
70 +CONFIG_GENERIC_IRQ_MIGRATION=y
71 +CONFIG_HARDIRQS_SW_RESEND=y
72 +CONFIG_GENERIC_IRQ_CHIP=y
73 +CONFIG_IRQ_DOMAIN=y
74 +CONFIG_IRQ_DOMAIN_HIERARCHY=y
75 +CONFIG_GENERIC_MSI_IRQ=y
76 +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
77 +CONFIG_IRQ_MSI_IOMMU=y
78 +CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y
79 +CONFIG_GENERIC_IRQ_RESERVATION_MODE=y
80 +CONFIG_IRQ_FORCED_THREADING=y
81 +CONFIG_SPARSE_IRQ=y
82 +# CONFIG_GENERIC_IRQ_DEBUGFS is not set
83 +# end of IRQ subsystem
85 +CONFIG_CLOCKSOURCE_WATCHDOG=y
86 +CONFIG_ARCH_CLOCKSOURCE_INIT=y
87 +CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y
88 +CONFIG_GENERIC_TIME_VSYSCALL=y
89 +CONFIG_GENERIC_CLOCKEVENTS=y
90 +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
91 +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
92 +CONFIG_GENERIC_CMOS_UPDATE=y
93 +CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y
94 +CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y
97 +# Timers subsystem
99 +CONFIG_TICK_ONESHOT=y
100 +CONFIG_NO_HZ_COMMON=y
101 +# CONFIG_HZ_PERIODIC is not set
102 +# CONFIG_NO_HZ_IDLE is not set
103 +CONFIG_NO_HZ_FULL=y
104 +CONFIG_CONTEXT_TRACKING=y
105 +# CONFIG_CONTEXT_TRACKING_FORCE is not set
106 +# CONFIG_NO_HZ is not set
107 +CONFIG_HIGH_RES_TIMERS=y
108 +# end of Timers subsystem
110 +# CONFIG_PREEMPT_NONE is not set
111 +# CONFIG_PREEMPT_VOLUNTARY is not set
112 +CONFIG_PREEMPT=y
113 +CONFIG_PREEMPT_COUNT=y
114 +CONFIG_PREEMPTION=y
115 +CONFIG_PREEMPT_DYNAMIC=y
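# CONFIG_PREEMPT_DYNAMIC (new in 5.12) makes the preemption model a boot-time
# choice; the compiled-in default here is full preemption (CONFIG_PREEMPT=y).
# A sketch of overriding it on the kernel command line:
#   preempt=none    preempt=voluntary    preempt=full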
118 +# CPU/Task time and stats accounting
120 +CONFIG_VIRT_CPU_ACCOUNTING=y
121 +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y
122 +# CONFIG_IRQ_TIME_ACCOUNTING is not set
123 +CONFIG_BSD_PROCESS_ACCT=y
124 +CONFIG_BSD_PROCESS_ACCT_V3=y
125 +CONFIG_TASKSTATS=y
126 +CONFIG_TASK_DELAY_ACCT=y
127 +CONFIG_TASK_XACCT=y
128 +CONFIG_TASK_IO_ACCOUNTING=y
129 +CONFIG_PSI=y
130 +CONFIG_PSI_DEFAULT_DISABLED=y
131 +# end of CPU/Task time and stats accounting
133 +CONFIG_CPU_ISOLATION=y
136 +# RCU Subsystem
138 +CONFIG_TREE_RCU=y
139 +CONFIG_PREEMPT_RCU=y
140 +CONFIG_RCU_EXPERT=y
141 +CONFIG_SRCU=y
142 +CONFIG_TREE_SRCU=y
143 +CONFIG_TASKS_RCU_GENERIC=y
144 +CONFIG_TASKS_RCU=y
145 +CONFIG_TASKS_TRACE_RCU=y
146 +CONFIG_RCU_STALL_COMMON=y
147 +CONFIG_RCU_NEED_SEGCBLIST=y
148 +CONFIG_RCU_FANOUT=64
149 +CONFIG_RCU_FANOUT_LEAF=16
150 +# CONFIG_RCU_FAST_NO_HZ is not set
151 +CONFIG_RCU_BOOST=y
152 +CONFIG_RCU_BOOST_DELAY=0
153 +CONFIG_RCU_NOCB_CPU=y
154 +# CONFIG_TASKS_TRACE_RCU_READ_MB is not set
155 +# end of RCU Subsystem
157 +CONFIG_BUILD_BIN2C=y
158 +# CONFIG_IKCONFIG is not set
159 +CONFIG_IKHEADERS=m
160 +CONFIG_LOG_BUF_SHIFT=18
161 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12
162 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13
163 +CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
166 +# Scheduler features
168 +CONFIG_UCLAMP_TASK=y
169 +CONFIG_UCLAMP_BUCKETS_COUNT=5
170 +# end of Scheduler features
172 +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
173 +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y
174 +CONFIG_CC_HAS_INT128=y
175 +CONFIG_ARCH_SUPPORTS_INT128=y
176 +CONFIG_NUMA_BALANCING=y
177 +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y
178 +CONFIG_CGROUPS=y
179 +CONFIG_PAGE_COUNTER=y
180 +CONFIG_MEMCG=y
181 +CONFIG_MEMCG_SWAP=y
182 +CONFIG_MEMCG_KMEM=y
183 +CONFIG_BLK_CGROUP=y
184 +CONFIG_CGROUP_WRITEBACK=y
185 +CONFIG_CGROUP_SCHED=y
186 +CONFIG_FAIR_GROUP_SCHED=y
187 +CONFIG_CFS_BANDWIDTH=y
188 +# CONFIG_RT_GROUP_SCHED is not set
189 +CONFIG_UCLAMP_TASK_GROUP=y
190 +CONFIG_CGROUP_PIDS=y
191 +CONFIG_CGROUP_RDMA=y
192 +CONFIG_CGROUP_FREEZER=y
193 +CONFIG_CGROUP_HUGETLB=y
194 +CONFIG_CPUSETS=y
195 +CONFIG_PROC_PID_CPUSET=y
196 +CONFIG_CGROUP_DEVICE=y
197 +CONFIG_CGROUP_CPUACCT=y
198 +CONFIG_CGROUP_PERF=y
199 +CONFIG_CGROUP_BPF=y
200 +# CONFIG_CGROUP_DEBUG is not set
201 +CONFIG_SOCK_CGROUP_DATA=y
202 +CONFIG_NAMESPACES=y
203 +CONFIG_UTS_NS=y
204 +CONFIG_TIME_NS=y
205 +CONFIG_IPC_NS=y
206 +CONFIG_USER_NS=y
207 +CONFIG_PID_NS=y
208 +CONFIG_NET_NS=y
209 +CONFIG_CHECKPOINT_RESTORE=y
210 +CONFIG_SCHED_AUTOGROUP=y
211 +CONFIG_SCHED_AUTOGROUP_DEFAULT_ENABLED=y
212 +# CONFIG_SYSFS_DEPRECATED is not set
213 +CONFIG_RELAY=y
214 +CONFIG_BLK_DEV_INITRD=y
215 +CONFIG_INITRAMFS_SOURCE=""
216 +CONFIG_RD_GZIP=y
217 +CONFIG_RD_BZIP2=y
218 +CONFIG_RD_LZMA=y
219 +CONFIG_RD_XZ=y
220 +CONFIG_RD_LZO=y
221 +CONFIG_RD_LZ4=y
222 +CONFIG_RD_ZSTD=y
223 +CONFIG_BOOT_CONFIG=y
224 +# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set
225 +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y
226 +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
227 +CONFIG_LD_ORPHAN_WARN=y
228 +CONFIG_SYSCTL=y
229 +CONFIG_HAVE_UID16=y
230 +CONFIG_SYSCTL_EXCEPTION_TRACE=y
231 +CONFIG_HAVE_PCSPKR_PLATFORM=y
232 +CONFIG_BPF=y
233 +CONFIG_EXPERT=y
234 +CONFIG_UID16=y
235 +CONFIG_MULTIUSER=y
236 +CONFIG_SGETMASK_SYSCALL=y
237 +CONFIG_SYSFS_SYSCALL=y
238 +CONFIG_FHANDLE=y
239 +CONFIG_POSIX_TIMERS=y
240 +CONFIG_PRINTK=y
241 +CONFIG_PRINTK_NMI=y
242 +CONFIG_BUG=y
243 +CONFIG_ELF_CORE=y
244 +CONFIG_PCSPKR_PLATFORM=y
245 +CONFIG_BASE_FULL=y
246 +CONFIG_FUTEX=y
247 +CONFIG_FUTEX2=y
248 +CONFIG_FUTEX_PI=y
249 +CONFIG_EPOLL=y
250 +CONFIG_SIGNALFD=y
251 +CONFIG_TIMERFD=y
252 +CONFIG_EVENTFD=y
253 +CONFIG_SHMEM=y
254 +CONFIG_AIO=y
255 +CONFIG_IO_URING=y
256 +CONFIG_ADVISE_SYSCALLS=y
257 +CONFIG_HAVE_ARCH_USERFAULTFD_WP=y
258 +CONFIG_MEMBARRIER=y
259 +CONFIG_KALLSYMS=y
260 +CONFIG_KALLSYMS_ALL=y
261 +CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y
262 +CONFIG_KALLSYMS_BASE_RELATIVE=y
263 +CONFIG_BPF_SYSCALL=y
264 +CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y
265 +CONFIG_BPF_JIT_ALWAYS_ON=y
266 +CONFIG_BPF_JIT_DEFAULT_ON=y
267 +CONFIG_USERMODE_DRIVER=y
268 +# CONFIG_BPF_PRELOAD is not set
269 +CONFIG_USERFAULTFD=y
270 +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y
271 +CONFIG_KCMP=y
272 +CONFIG_RSEQ=y
273 +# CONFIG_DEBUG_RSEQ is not set
274 +# CONFIG_EMBEDDED is not set
275 +CONFIG_HAVE_PERF_EVENTS=y
276 +CONFIG_PC104=y
279 +# Kernel Performance Events And Counters
281 +CONFIG_PERF_EVENTS=y
282 +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
283 +# end of Kernel Performance Events And Counters
285 +CONFIG_VM_EVENT_COUNTERS=y
286 +CONFIG_SLUB_DEBUG=y
287 +# CONFIG_COMPAT_BRK is not set
288 +# CONFIG_SLAB is not set
289 +CONFIG_SLUB=y
290 +# CONFIG_SLOB is not set
291 +CONFIG_SLAB_MERGE_DEFAULT=y
292 +CONFIG_SLAB_FREELIST_RANDOM=y
293 +CONFIG_SLAB_FREELIST_HARDENED=y
294 +CONFIG_SHUFFLE_PAGE_ALLOCATOR=y
295 +CONFIG_SLUB_CPU_PARTIAL=y
296 +CONFIG_SYSTEM_DATA_VERIFICATION=y
297 +CONFIG_PROFILING=y
298 +# end of General setup
300 +CONFIG_64BIT=y
301 +CONFIG_X86_64=y
302 +CONFIG_X86=y
303 +CONFIG_INSTRUCTION_DECODER=y
304 +CONFIG_OUTPUT_FORMAT="elf64-x86-64"
305 +CONFIG_LOCKDEP_SUPPORT=y
306 +CONFIG_STACKTRACE_SUPPORT=y
307 +CONFIG_MMU=y
308 +CONFIG_ARCH_MMAP_RND_BITS_MIN=28
309 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32
310 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8
311 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16
312 +CONFIG_GENERIC_ISA_DMA=y
313 +CONFIG_GENERIC_BUG=y
314 +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
315 +CONFIG_ARCH_MAY_HAVE_PC_FDC=y
316 +CONFIG_GENERIC_CALIBRATE_DELAY=y
317 +CONFIG_ARCH_HAS_CPU_RELAX=y
318 +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
319 +CONFIG_ARCH_HAS_FILTER_PGPROT=y
320 +CONFIG_HAVE_SETUP_PER_CPU_AREA=y
321 +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
322 +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
323 +CONFIG_ARCH_HIBERNATION_POSSIBLE=y
324 +CONFIG_ARCH_SUSPEND_POSSIBLE=y
325 +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y
326 +CONFIG_ZONE_DMA32=y
327 +CONFIG_AUDIT_ARCH=y
328 +CONFIG_HAVE_INTEL_TXT=y
329 +CONFIG_X86_64_SMP=y
330 +CONFIG_ARCH_SUPPORTS_UPROBES=y
331 +CONFIG_FIX_EARLYCON_MEM=y
332 +CONFIG_DYNAMIC_PHYSICAL_MASK=y
333 +CONFIG_PGTABLE_LEVELS=5
334 +CONFIG_CC_HAS_SANE_STACKPROTECTOR=y
337 +# Processor type and features
339 +CONFIG_ZONE_DMA=y
340 +CONFIG_SMP=y
341 +CONFIG_X86_FEATURE_NAMES=y
342 +CONFIG_X86_X2APIC=y
343 +CONFIG_X86_MPPARSE=y
344 +# CONFIG_GOLDFISH is not set
345 +CONFIG_RETPOLINE=y
346 +CONFIG_X86_CPU_RESCTRL=y
347 +CONFIG_X86_EXTENDED_PLATFORM=y
348 +CONFIG_X86_NUMACHIP=y
349 +# CONFIG_X86_VSMP is not set
350 +CONFIG_X86_UV=y
351 +# CONFIG_X86_GOLDFISH is not set
352 +# CONFIG_X86_INTEL_MID is not set
353 +CONFIG_X86_INTEL_LPSS=y
354 +CONFIG_X86_AMD_PLATFORM_DEVICE=y
355 +CONFIG_IOSF_MBI=y
356 +CONFIG_IOSF_MBI_DEBUG=y
357 +CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y
358 +CONFIG_SCHED_OMIT_FRAME_POINTER=y
359 +CONFIG_HYPERVISOR_GUEST=y
360 +CONFIG_PARAVIRT=y
361 +CONFIG_PARAVIRT_XXL=y
362 +# CONFIG_PARAVIRT_DEBUG is not set
363 +CONFIG_PARAVIRT_SPINLOCKS=y
364 +CONFIG_X86_HV_CALLBACK_VECTOR=y
365 +CONFIG_XEN=y
366 +CONFIG_XEN_PV=y
367 +CONFIG_XEN_512GB=y
368 +CONFIG_XEN_PV_SMP=y
369 +CONFIG_XEN_DOM0=y
370 +CONFIG_XEN_PVHVM=y
371 +CONFIG_XEN_PVHVM_SMP=y
372 +CONFIG_XEN_PVHVM_GUEST=y
373 +CONFIG_XEN_SAVE_RESTORE=y
374 +# CONFIG_XEN_DEBUG_FS is not set
375 +CONFIG_XEN_PVH=y
376 +CONFIG_KVM_GUEST=y
377 +CONFIG_ARCH_CPUIDLE_HALTPOLL=y
378 +CONFIG_PVH=y
379 +# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set
380 +CONFIG_PARAVIRT_CLOCK=y
381 +CONFIG_JAILHOUSE_GUEST=y
382 +CONFIG_ACRN_GUEST=y
383 +# CONFIG_MK8 is not set
384 +# CONFIG_MK8SSE3 is not set
385 +# CONFIG_MK10 is not set
386 +# CONFIG_MBARCELONA is not set
387 +# CONFIG_MBOBCAT is not set
388 +# CONFIG_MJAGUAR is not set
389 +# CONFIG_MBULLDOZER is not set
390 +# CONFIG_MPILEDRIVER is not set
391 +# CONFIG_MSTEAMROLLER is not set
392 +# CONFIG_MEXCAVATOR is not set
393 +# CONFIG_MZEN is not set
394 +# CONFIG_MZEN2 is not set
395 +# CONFIG_MZEN3 is not set
396 +# CONFIG_MPSC is not set
397 +# CONFIG_MCORE2 is not set
398 +# CONFIG_MATOM is not set
399 +# CONFIG_MNEHALEM is not set
400 +# CONFIG_MWESTMERE is not set
401 +# CONFIG_MSILVERMONT is not set
402 +# CONFIG_MGOLDMONT is not set
403 +# CONFIG_MGOLDMONTPLUS is not set
404 +# CONFIG_MSANDYBRIDGE is not set
405 +# CONFIG_MIVYBRIDGE is not set
406 +# CONFIG_MHASWELL is not set
407 +# CONFIG_MBROADWELL is not set
408 +# CONFIG_MSKYLAKE is not set
409 +# CONFIG_MSKYLAKEX is not set
410 +# CONFIG_MCANNONLAKE is not set
411 +# CONFIG_MICELAKE is not set
412 +# CONFIG_MCASCADELAKE is not set
413 +# CONFIG_MCOOPERLAKE is not set
414 +# CONFIG_MTIGERLAKE is not set
415 +# CONFIG_MSAPPHIRERAPIDS is not set
416 +# CONFIG_MROCKETLAKE is not set
417 +# CONFIG_MALDERLAKE is not set
418 +CONFIG_GENERIC_CPU=y
419 +# CONFIG_GENERIC_CPU2 is not set
420 +# CONFIG_GENERIC_CPU3 is not set
421 +# CONFIG_GENERIC_CPU4 is not set
422 +# CONFIG_MNATIVE_INTEL is not set
423 +# CONFIG_MNATIVE_AMD is not set
424 +CONFIG_X86_INTERNODE_CACHE_SHIFT=6
425 +CONFIG_X86_L1_CACHE_SHIFT=6
426 +CONFIG_X86_TSC=y
427 +CONFIG_X86_CMPXCHG64=y
428 +CONFIG_X86_CMOV=y
429 +CONFIG_X86_MINIMUM_CPU_FAMILY=64
430 +CONFIG_X86_DEBUGCTLMSR=y
431 +CONFIG_IA32_FEAT_CTL=y
432 +CONFIG_X86_VMX_FEATURE_NAMES=y
433 +CONFIG_PROCESSOR_SELECT=y
434 +CONFIG_CPU_SUP_INTEL=y
435 +CONFIG_CPU_SUP_AMD=y
436 +CONFIG_CPU_SUP_HYGON=y
437 +CONFIG_CPU_SUP_CENTAUR=y
438 +CONFIG_CPU_SUP_ZHAOXIN=y
439 +CONFIG_HPET_TIMER=y
440 +CONFIG_HPET_EMULATE_RTC=y
441 +CONFIG_DMI=y
442 +CONFIG_GART_IOMMU=y
443 +# CONFIG_MAXSMP is not set
444 +CONFIG_NR_CPUS_RANGE_BEGIN=2
445 +CONFIG_NR_CPUS_RANGE_END=512
446 +CONFIG_NR_CPUS_DEFAULT=64
447 +CONFIG_NR_CPUS=512
448 +CONFIG_SCHED_SMT=y
449 +CONFIG_SCHED_MC=y
450 +CONFIG_SCHED_MC_PRIO=y
451 +CONFIG_X86_LOCAL_APIC=y
452 +CONFIG_X86_IO_APIC=y
453 +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
454 +CONFIG_X86_MCE=y
455 +CONFIG_X86_MCELOG_LEGACY=y
456 +CONFIG_X86_MCE_INTEL=y
457 +CONFIG_X86_MCE_AMD=y
458 +CONFIG_X86_MCE_THRESHOLD=y
459 +CONFIG_X86_MCE_INJECT=m
462 +# Performance monitoring
464 +CONFIG_PERF_EVENTS_INTEL_UNCORE=y
465 +CONFIG_PERF_EVENTS_INTEL_RAPL=m
466 +CONFIG_PERF_EVENTS_INTEL_CSTATE=m
467 +# CONFIG_PERF_EVENTS_AMD_POWER is not set
468 +# end of Performance monitoring
470 +CONFIG_X86_16BIT=y
471 +CONFIG_X86_ESPFIX64=y
472 +CONFIG_X86_VSYSCALL_EMULATION=y
473 +CONFIG_X86_IOPL_IOPERM=y
474 +CONFIG_I8K=m
475 +CONFIG_MICROCODE=y
476 +CONFIG_MICROCODE_INTEL=y
477 +CONFIG_MICROCODE_AMD=y
478 +CONFIG_MICROCODE_OLD_INTERFACE=y
479 +CONFIG_X86_MSR=m
480 +CONFIG_X86_CPUID=m
481 +CONFIG_X86_5LEVEL=y
482 +CONFIG_X86_DIRECT_GBPAGES=y
483 +# CONFIG_X86_CPA_STATISTICS is not set
484 +CONFIG_AMD_MEM_ENCRYPT=y
485 +# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set
486 +CONFIG_NUMA=y
487 +CONFIG_AMD_NUMA=y
488 +CONFIG_X86_64_ACPI_NUMA=y
489 +# CONFIG_NUMA_EMU is not set
490 +CONFIG_NODES_SHIFT=10
491 +CONFIG_ARCH_SPARSEMEM_ENABLE=y
492 +CONFIG_ARCH_SPARSEMEM_DEFAULT=y
493 +CONFIG_ARCH_SELECT_MEMORY_MODEL=y
494 +CONFIG_ARCH_MEMORY_PROBE=y
495 +CONFIG_ARCH_PROC_KCORE_TEXT=y
496 +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
497 +CONFIG_X86_PMEM_LEGACY_DEVICE=y
498 +CONFIG_X86_PMEM_LEGACY=y
499 +CONFIG_X86_CHECK_BIOS_CORRUPTION=y
500 +CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y
501 +CONFIG_X86_RESERVE_LOW=64
502 +CONFIG_MTRR=y
503 +CONFIG_MTRR_SANITIZER=y
504 +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
505 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
506 +CONFIG_X86_PAT=y
507 +CONFIG_ARCH_USES_PG_UNCACHED=y
508 +CONFIG_ARCH_RANDOM=y
509 +CONFIG_X86_SMAP=y
510 +CONFIG_X86_UMIP=y
511 +CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y
512 +# CONFIG_X86_INTEL_TSX_MODE_OFF is not set
513 +# CONFIG_X86_INTEL_TSX_MODE_ON is not set
514 +CONFIG_X86_INTEL_TSX_MODE_AUTO=y
515 +CONFIG_X86_SGX=y
516 +CONFIG_EFI=y
517 +CONFIG_EFI_STUB=y
518 +CONFIG_EFI_MIXED=y
519 +# CONFIG_HZ_100 is not set
520 +# CONFIG_HZ_250 is not set
521 +# CONFIG_HZ_300 is not set
522 +CONFIG_HZ_500=y
523 +# CONFIG_HZ_1000 is not set
524 +CONFIG_HZ=500
525 +CONFIG_SCHED_HRTICK=y
526 +CONFIG_KEXEC=y
527 +CONFIG_KEXEC_FILE=y
528 +CONFIG_ARCH_HAS_KEXEC_PURGATORY=y
529 +CONFIG_KEXEC_SIG=y
530 +# CONFIG_KEXEC_SIG_FORCE is not set
531 +CONFIG_KEXEC_BZIMAGE_VERIFY_SIG=y
532 +CONFIG_CRASH_DUMP=y
533 +CONFIG_KEXEC_JUMP=y
534 +CONFIG_PHYSICAL_START=0x1000000
535 +CONFIG_RELOCATABLE=y
536 +CONFIG_RANDOMIZE_BASE=y
537 +CONFIG_X86_NEED_RELOCS=y
538 +CONFIG_PHYSICAL_ALIGN=0x200000
539 +CONFIG_DYNAMIC_MEMORY_LAYOUT=y
540 +CONFIG_RANDOMIZE_MEMORY=y
541 +CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa
542 +CONFIG_HOTPLUG_CPU=y
543 +# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
544 +# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
545 +# CONFIG_COMPAT_VDSO is not set
546 +# CONFIG_LEGACY_VSYSCALL_EMULATE is not set
547 +CONFIG_LEGACY_VSYSCALL_XONLY=y
548 +# CONFIG_LEGACY_VSYSCALL_NONE is not set
549 +# CONFIG_CMDLINE_BOOL is not set
550 +CONFIG_MODIFY_LDT_SYSCALL=y
551 +CONFIG_HAVE_LIVEPATCH=y
552 +# end of Processor type and features
554 +CONFIG_ARCH_HAS_ADD_PAGES=y
555 +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
556 +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
557 +CONFIG_USE_PERCPU_NUMA_NODE_ID=y
558 +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y
559 +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y
560 +CONFIG_ARCH_ENABLE_THP_MIGRATION=y
563 +# Power management and ACPI options
565 +CONFIG_ARCH_HIBERNATION_HEADER=y
566 +CONFIG_SUSPEND=y
567 +CONFIG_SUSPEND_FREEZER=y
568 +# CONFIG_SUSPEND_SKIP_SYNC is not set
569 +CONFIG_HIBERNATE_CALLBACKS=y
570 +CONFIG_HIBERNATION=y
571 +CONFIG_HIBERNATION_SNAPSHOT_DEV=y
572 +CONFIG_PM_STD_PARTITION=""
573 +CONFIG_PM_SLEEP=y
574 +CONFIG_PM_SLEEP_SMP=y
575 +# CONFIG_PM_AUTOSLEEP is not set
576 +CONFIG_PM_WAKELOCKS=y
577 +CONFIG_PM_WAKELOCKS_LIMIT=100
578 +CONFIG_PM_WAKELOCKS_GC=y
579 +CONFIG_PM=y
580 +CONFIG_PM_DEBUG=y
581 +CONFIG_PM_ADVANCED_DEBUG=y
582 +# CONFIG_PM_TEST_SUSPEND is not set
583 +CONFIG_PM_SLEEP_DEBUG=y
584 +# CONFIG_DPM_WATCHDOG is not set
585 +CONFIG_PM_TRACE=y
586 +CONFIG_PM_TRACE_RTC=y
587 +CONFIG_PM_CLK=y
588 +CONFIG_PM_GENERIC_DOMAINS=y
589 +CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
590 +CONFIG_PM_GENERIC_DOMAINS_SLEEP=y
591 +CONFIG_ENERGY_MODEL=y
592 +CONFIG_ARCH_SUPPORTS_ACPI=y
593 +CONFIG_ACPI=y
594 +CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y
595 +CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y
596 +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y
597 +CONFIG_ACPI_DEBUGGER=y
598 +CONFIG_ACPI_DEBUGGER_USER=y
599 +CONFIG_ACPI_SPCR_TABLE=y
600 +CONFIG_ACPI_FPDT=y
601 +CONFIG_ACPI_LPIT=y
602 +CONFIG_ACPI_SLEEP=y
603 +CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y
604 +CONFIG_ACPI_EC_DEBUGFS=m
605 +CONFIG_ACPI_AC=y
606 +CONFIG_ACPI_BATTERY=y
607 +CONFIG_ACPI_BUTTON=y
608 +CONFIG_ACPI_VIDEO=m
609 +CONFIG_ACPI_FAN=y
610 +CONFIG_ACPI_TAD=m
611 +CONFIG_ACPI_DOCK=y
612 +CONFIG_ACPI_CPU_FREQ_PSS=y
613 +CONFIG_ACPI_PROCESSOR_CSTATE=y
614 +CONFIG_ACPI_PROCESSOR_IDLE=y
615 +CONFIG_ACPI_CPPC_LIB=y
616 +CONFIG_ACPI_PROCESSOR=y
617 +CONFIG_ACPI_IPMI=m
618 +CONFIG_ACPI_HOTPLUG_CPU=y
619 +CONFIG_ACPI_PROCESSOR_AGGREGATOR=m
620 +CONFIG_ACPI_THERMAL=y
621 +CONFIG_ACPI_PLATFORM_PROFILE=m
622 +CONFIG_ACPI_CUSTOM_DSDT_FILE=""
623 +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y
624 +CONFIG_ACPI_TABLE_UPGRADE=y
625 +CONFIG_ACPI_DEBUG=y
626 +CONFIG_ACPI_PCI_SLOT=y
627 +CONFIG_ACPI_CONTAINER=y
628 +CONFIG_ACPI_HOTPLUG_MEMORY=y
629 +CONFIG_ACPI_HOTPLUG_IOAPIC=y
630 +CONFIG_ACPI_SBS=m
631 +CONFIG_ACPI_HED=y
632 +# CONFIG_ACPI_CUSTOM_METHOD is not set
633 +CONFIG_ACPI_BGRT=y
634 +# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set
635 +CONFIG_ACPI_NFIT=m
636 +# CONFIG_NFIT_SECURITY_DEBUG is not set
637 +CONFIG_ACPI_NUMA=y
638 +CONFIG_ACPI_HMAT=y
639 +CONFIG_HAVE_ACPI_APEI=y
640 +CONFIG_HAVE_ACPI_APEI_NMI=y
641 +CONFIG_ACPI_APEI=y
642 +CONFIG_ACPI_APEI_GHES=y
643 +CONFIG_ACPI_APEI_PCIEAER=y
644 +CONFIG_ACPI_APEI_MEMORY_FAILURE=y
645 +CONFIG_ACPI_APEI_EINJ=m
646 +# CONFIG_ACPI_APEI_ERST_DEBUG is not set
647 +CONFIG_ACPI_DPTF=y
648 +CONFIG_DPTF_POWER=m
649 +CONFIG_DPTF_PCH_FIVR=m
650 +CONFIG_ACPI_WATCHDOG=y
651 +CONFIG_ACPI_EXTLOG=m
652 +CONFIG_ACPI_ADXL=y
653 +CONFIG_ACPI_CONFIGFS=m
654 +CONFIG_PMIC_OPREGION=y
655 +CONFIG_BYTCRC_PMIC_OPREGION=y
656 +CONFIG_CHTCRC_PMIC_OPREGION=y
657 +CONFIG_XPOWER_PMIC_OPREGION=y
658 +CONFIG_BXT_WC_PMIC_OPREGION=y
659 +CONFIG_CHT_WC_PMIC_OPREGION=y
660 +CONFIG_CHT_DC_TI_PMIC_OPREGION=y
661 +CONFIG_TPS68470_PMIC_OPREGION=y
662 +CONFIG_X86_PM_TIMER=y
665 +# CPU Frequency scaling
667 +CONFIG_CPU_FREQ=y
668 +CONFIG_CPU_FREQ_GOV_ATTR_SET=y
669 +CONFIG_CPU_FREQ_GOV_COMMON=y
670 +CONFIG_CPU_FREQ_STAT=y
671 +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
672 +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
673 +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
674 +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set
675 +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
676 +CONFIG_CPU_FREQ_GOV_POWERSAVE=y
677 +CONFIG_CPU_FREQ_GOV_USERSPACE=y
678 +CONFIG_CPU_FREQ_GOV_ONDEMAND=y
679 +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
680 +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
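# Every cpufreq governor is built in, with `performance` as the compiled-in
# default. A minimal runtime check and switch via sysfs, assuming cpufreq is
# active on cpu0:
#   $ cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
#   $ echo schedutil | sudo tee /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor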
683 +# CPU frequency scaling drivers
685 +CONFIG_X86_INTEL_PSTATE=y
686 +CONFIG_X86_PCC_CPUFREQ=y
687 +CONFIG_X86_ACPI_CPUFREQ=y
688 +CONFIG_X86_ACPI_CPUFREQ_CPB=y
689 +CONFIG_X86_POWERNOW_K8=y
690 +CONFIG_X86_AMD_FREQ_SENSITIVITY=m
691 +CONFIG_X86_SPEEDSTEP_CENTRINO=y
692 +CONFIG_X86_P4_CLOCKMOD=m
695 +# shared options
697 +CONFIG_X86_SPEEDSTEP_LIB=m
698 +# end of CPU Frequency scaling
701 +# CPU Idle
703 +CONFIG_CPU_IDLE=y
704 +CONFIG_CPU_IDLE_GOV_LADDER=y
705 +CONFIG_CPU_IDLE_GOV_MENU=y
706 +CONFIG_CPU_IDLE_GOV_TEO=y
707 +CONFIG_CPU_IDLE_GOV_HALTPOLL=y
708 +CONFIG_HALTPOLL_CPUIDLE=m
709 +# end of CPU Idle
711 +CONFIG_INTEL_IDLE=y
712 +# end of Power management and ACPI options
715 +# Bus options (PCI etc.)
717 +CONFIG_PCI_DIRECT=y
718 +CONFIG_PCI_MMCONFIG=y
719 +CONFIG_PCI_XEN=y
720 +CONFIG_MMCONF_FAM10H=y
721 +# CONFIG_PCI_CNB20LE_QUIRK is not set
722 +CONFIG_ISA_BUS=y
723 +CONFIG_ISA_DMA_API=y
724 +CONFIG_AMD_NB=y
725 +# CONFIG_X86_SYSFB is not set
726 +# end of Bus options (PCI etc.)
729 +# Binary Emulations
731 +CONFIG_IA32_EMULATION=y
732 +CONFIG_X86_X32=y
733 +CONFIG_COMPAT_32=y
734 +CONFIG_COMPAT=y
735 +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y
736 +CONFIG_SYSVIPC_COMPAT=y
737 +# end of Binary Emulations
740 +# Firmware Drivers
742 +CONFIG_EDD=y
743 +CONFIG_EDD_OFF=y
744 +CONFIG_FIRMWARE_MEMMAP=y
745 +CONFIG_DMIID=y
746 +CONFIG_DMI_SYSFS=m
747 +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y
748 +CONFIG_ISCSI_IBFT_FIND=y
749 +CONFIG_ISCSI_IBFT=m
750 +CONFIG_FW_CFG_SYSFS=m
751 +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set
752 +# CONFIG_GOOGLE_FIRMWARE is not set
755 +# EFI (Extensible Firmware Interface) Support
757 +CONFIG_EFI_VARS=y
758 +CONFIG_EFI_ESRT=y
759 +CONFIG_EFI_VARS_PSTORE=m
760 +# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set
761 +CONFIG_EFI_RUNTIME_MAP=y
762 +# CONFIG_EFI_FAKE_MEMMAP is not set
763 +CONFIG_EFI_SOFT_RESERVE=y
764 +CONFIG_EFI_RUNTIME_WRAPPERS=y
765 +CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER=y
766 +CONFIG_EFI_BOOTLOADER_CONTROL=m
767 +CONFIG_EFI_CAPSULE_LOADER=m
768 +CONFIG_EFI_TEST=m
769 +CONFIG_APPLE_PROPERTIES=y
770 +CONFIG_RESET_ATTACK_MITIGATION=y
771 +CONFIG_EFI_RCI2_TABLE=y
772 +# CONFIG_EFI_DISABLE_PCI_DMA is not set
773 +# end of EFI (Extensible Firmware Interface) Support
775 +CONFIG_EFI_EMBEDDED_FIRMWARE=y
776 +CONFIG_UEFI_CPER=y
777 +CONFIG_UEFI_CPER_X86=y
778 +CONFIG_EFI_DEV_PATH_PARSER=y
779 +CONFIG_EFI_EARLYCON=y
780 +CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y
783 +# Tegra firmware driver
785 +# end of Tegra firmware driver
786 +# end of Firmware Drivers
788 +CONFIG_HAVE_KVM=y
789 +CONFIG_HAVE_KVM_IRQCHIP=y
790 +CONFIG_HAVE_KVM_IRQFD=y
791 +CONFIG_HAVE_KVM_IRQ_ROUTING=y
792 +CONFIG_HAVE_KVM_EVENTFD=y
793 +CONFIG_KVM_MMIO=y
794 +CONFIG_KVM_ASYNC_PF=y
795 +CONFIG_HAVE_KVM_MSI=y
796 +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y
797 +CONFIG_KVM_VFIO=y
798 +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y
799 +CONFIG_KVM_COMPAT=y
800 +CONFIG_HAVE_KVM_IRQ_BYPASS=y
801 +CONFIG_HAVE_KVM_NO_POLL=y
802 +CONFIG_KVM_XFER_TO_GUEST_WORK=y
803 +CONFIG_VIRTUALIZATION=y
804 +CONFIG_KVM=m
805 +CONFIG_KVM_WERROR=y
806 +CONFIG_KVM_INTEL=m
807 +CONFIG_KVM_AMD=m
808 +CONFIG_KVM_AMD_SEV=y
809 +CONFIG_KVM_XEN=y
810 +CONFIG_AS_AVX512=y
811 +CONFIG_AS_SHA1_NI=y
812 +CONFIG_AS_SHA256_NI=y
813 +CONFIG_AS_TPAUSE=y
816 +# General architecture-dependent options
818 +CONFIG_CRASH_CORE=y
819 +CONFIG_KEXEC_CORE=y
820 +CONFIG_HOTPLUG_SMT=y
821 +CONFIG_GENERIC_ENTRY=y
822 +CONFIG_KPROBES=y
823 +CONFIG_JUMP_LABEL=y
824 +# CONFIG_STATIC_KEYS_SELFTEST is not set
825 +# CONFIG_STATIC_CALL_SELFTEST is not set
826 +CONFIG_OPTPROBES=y
827 +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
828 +CONFIG_ARCH_USE_BUILTIN_BSWAP=y
829 +CONFIG_KRETPROBES=y
830 +CONFIG_USER_RETURN_NOTIFIER=y
831 +CONFIG_HAVE_IOREMAP_PROT=y
832 +CONFIG_HAVE_KPROBES=y
833 +CONFIG_HAVE_KRETPROBES=y
834 +CONFIG_HAVE_OPTPROBES=y
835 +CONFIG_HAVE_KPROBES_ON_FTRACE=y
836 +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y
837 +CONFIG_HAVE_NMI=y
838 +CONFIG_HAVE_ARCH_TRACEHOOK=y
839 +CONFIG_HAVE_DMA_CONTIGUOUS=y
840 +CONFIG_GENERIC_SMP_IDLE_THREAD=y
841 +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y
842 +CONFIG_ARCH_HAS_SET_MEMORY=y
843 +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y
844 +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y
845 +CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y
846 +CONFIG_HAVE_ASM_MODVERSIONS=y
847 +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
848 +CONFIG_HAVE_RSEQ=y
849 +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y
850 +CONFIG_HAVE_HW_BREAKPOINT=y
851 +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
852 +CONFIG_HAVE_USER_RETURN_NOTIFIER=y
853 +CONFIG_HAVE_PERF_EVENTS_NMI=y
854 +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y
855 +CONFIG_HAVE_PERF_REGS=y
856 +CONFIG_HAVE_PERF_USER_STACK_DUMP=y
857 +CONFIG_HAVE_ARCH_JUMP_LABEL=y
858 +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y
859 +CONFIG_MMU_GATHER_TABLE_FREE=y
860 +CONFIG_MMU_GATHER_RCU_TABLE_FREE=y
861 +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
862 +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
863 +CONFIG_HAVE_CMPXCHG_LOCAL=y
864 +CONFIG_HAVE_CMPXCHG_DOUBLE=y
865 +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y
866 +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y
867 +CONFIG_HAVE_ARCH_SECCOMP=y
868 +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
869 +CONFIG_SECCOMP=y
870 +CONFIG_SECCOMP_FILTER=y
871 +# CONFIG_SECCOMP_CACHE_DEBUG is not set
872 +CONFIG_HAVE_ARCH_STACKLEAK=y
873 +CONFIG_HAVE_STACKPROTECTOR=y
874 +CONFIG_STACKPROTECTOR=y
875 +CONFIG_STACKPROTECTOR_STRONG=y
876 +CONFIG_ARCH_SUPPORTS_LTO_CLANG=y
877 +CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y
878 +CONFIG_LTO_NONE=y
879 +CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y
880 +CONFIG_HAVE_CONTEXT_TRACKING=y
881 +CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK=y
882 +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
883 +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
884 +CONFIG_HAVE_MOVE_PUD=y
885 +CONFIG_HAVE_MOVE_PMD=y
886 +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
887 +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y
888 +CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG=y
889 +CONFIG_HAVE_ARCH_HUGE_VMAP=y
890 +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
891 +CONFIG_HAVE_ARCH_SOFT_DIRTY=y
892 +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y
893 +CONFIG_MODULES_USE_ELF_RELA=y
894 +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y
895 +CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y
896 +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
897 +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y
898 +CONFIG_HAVE_EXIT_THREAD=y
899 +CONFIG_ARCH_MMAP_RND_BITS=28
900 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y
901 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8
902 +CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y
903 +CONFIG_HAVE_STACK_VALIDATION=y
904 +CONFIG_HAVE_RELIABLE_STACKTRACE=y
905 +CONFIG_ISA_BUS_API=y
906 +CONFIG_OLD_SIGSUSPEND3=y
907 +CONFIG_COMPAT_OLD_SIGACTION=y
908 +CONFIG_COMPAT_32BIT_TIME=y
909 +CONFIG_HAVE_ARCH_VMAP_STACK=y
910 +CONFIG_VMAP_STACK=y
911 +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y
912 +CONFIG_STRICT_KERNEL_RWX=y
913 +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
914 +CONFIG_STRICT_MODULE_RWX=y
915 +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y
916 +CONFIG_ARCH_USE_MEMREMAP_PROT=y
917 +# CONFIG_LOCK_EVENT_COUNTS is not set
918 +CONFIG_ARCH_HAS_MEM_ENCRYPT=y
919 +CONFIG_HAVE_STATIC_CALL=y
920 +CONFIG_HAVE_STATIC_CALL_INLINE=y
921 +CONFIG_HAVE_PREEMPT_DYNAMIC=y
922 +CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y
923 +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
924 +CONFIG_ARCH_HAS_ELFCORE_COMPAT=y
927 +# GCOV-based kernel profiling
929 +# CONFIG_GCOV_KERNEL is not set
930 +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
931 +# end of GCOV-based kernel profiling
933 +CONFIG_HAVE_GCC_PLUGINS=y
934 +# end of General architecture-dependent options
936 +CONFIG_RT_MUTEXES=y
937 +CONFIG_BASE_SMALL=0
938 +CONFIG_MODULE_SIG_FORMAT=y
939 +CONFIG_MODULES=y
940 +# CONFIG_MODULE_FORCE_LOAD is not set
941 +CONFIG_MODULE_UNLOAD=y
942 +# CONFIG_MODULE_FORCE_UNLOAD is not set
943 +CONFIG_MODVERSIONS=y
944 +CONFIG_ASM_MODVERSIONS=y
945 +CONFIG_MODULE_SRCVERSION_ALL=y
946 +CONFIG_MODULE_SIG=y
947 +# CONFIG_MODULE_SIG_FORCE is not set
948 +CONFIG_MODULE_SIG_ALL=y
949 +# CONFIG_MODULE_SIG_SHA1 is not set
950 +# CONFIG_MODULE_SIG_SHA224 is not set
951 +# CONFIG_MODULE_SIG_SHA256 is not set
952 +# CONFIG_MODULE_SIG_SHA384 is not set
953 +CONFIG_MODULE_SIG_SHA512=y
954 +CONFIG_MODULE_SIG_HASH="sha512"
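# Modules are signed at install time with SHA-512 (CONFIG_MODULE_SIG_ALL=y),
# but enforcement is off (CONFIG_MODULE_SIG_FORCE is not set): an unsigned
# module still loads and only taints the kernel. A sketch for inspecting the
# signature fields of an installed module (<module-name> is a placeholder):
#   $ modinfo -F sig_hashalgo <module-name>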
955 +# CONFIG_MODULE_COMPRESS is not set
956 +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set
957 +# CONFIG_TRIM_UNUSED_KSYMS is not set
958 +CONFIG_MODULES_TREE_LOOKUP=y
959 +CONFIG_BLOCK=y
960 +CONFIG_BLK_SCSI_REQUEST=y
961 +CONFIG_BLK_CGROUP_RWSTAT=y
962 +CONFIG_BLK_DEV_BSG=y
963 +CONFIG_BLK_DEV_BSGLIB=y
964 +CONFIG_BLK_DEV_INTEGRITY=y
965 +CONFIG_BLK_DEV_INTEGRITY_T10=y
966 +CONFIG_BLK_DEV_ZONED=y
967 +CONFIG_BLK_DEV_THROTTLING=y
968 +# CONFIG_BLK_DEV_THROTTLING_LOW is not set
969 +CONFIG_BLK_CMDLINE_PARSER=y
970 +CONFIG_BLK_WBT=y
971 +CONFIG_BLK_CGROUP_IOLATENCY=y
972 +# CONFIG_BLK_CGROUP_IOCOST is not set
973 +CONFIG_BLK_WBT_MQ=y
974 +CONFIG_BLK_DEBUG_FS=y
975 +CONFIG_BLK_DEBUG_FS_ZONED=y
976 +CONFIG_BLK_SED_OPAL=y
977 +CONFIG_BLK_INLINE_ENCRYPTION=y
978 +CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
981 +# Partition Types
983 +CONFIG_PARTITION_ADVANCED=y
984 +# CONFIG_ACORN_PARTITION is not set
985 +CONFIG_AIX_PARTITION=y
986 +CONFIG_OSF_PARTITION=y
987 +CONFIG_AMIGA_PARTITION=y
988 +CONFIG_ATARI_PARTITION=y
989 +CONFIG_MAC_PARTITION=y
990 +CONFIG_MSDOS_PARTITION=y
991 +CONFIG_BSD_DISKLABEL=y
992 +CONFIG_MINIX_SUBPARTITION=y
993 +CONFIG_SOLARIS_X86_PARTITION=y
994 +CONFIG_UNIXWARE_DISKLABEL=y
995 +CONFIG_LDM_PARTITION=y
996 +# CONFIG_LDM_DEBUG is not set
997 +CONFIG_SGI_PARTITION=y
998 +CONFIG_ULTRIX_PARTITION=y
999 +CONFIG_SUN_PARTITION=y
1000 +CONFIG_KARMA_PARTITION=y
1001 +CONFIG_EFI_PARTITION=y
1002 +CONFIG_SYSV68_PARTITION=y
1003 +CONFIG_CMDLINE_PARTITION=y
1004 +# end of Partition Types
1006 +CONFIG_BLOCK_COMPAT=y
1007 +CONFIG_BLK_MQ_PCI=y
1008 +CONFIG_BLK_MQ_VIRTIO=y
1009 +CONFIG_BLK_MQ_RDMA=y
1010 +CONFIG_BLK_PM=y
1013 +# IO Schedulers
1015 +CONFIG_MQ_IOSCHED_DEADLINE=m
1016 +CONFIG_MQ_IOSCHED_KYBER=m
1017 +CONFIG_IOSCHED_BFQ=y
1018 +CONFIG_BFQ_GROUP_IOSCHED=y
1019 +# CONFIG_BFQ_CGROUP_DEBUG is not set
1020 +# end of IO Schedulers
1022 +CONFIG_PREEMPT_NOTIFIERS=y
1023 +CONFIG_PADATA=y
1024 +CONFIG_ASN1=y
1025 +CONFIG_UNINLINE_SPIN_UNLOCK=y
1026 +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
1027 +CONFIG_MUTEX_SPIN_ON_OWNER=y
1028 +CONFIG_RWSEM_SPIN_ON_OWNER=y
1029 +CONFIG_LOCK_SPIN_ON_OWNER=y
1030 +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
1031 +CONFIG_QUEUED_SPINLOCKS=y
1032 +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
1033 +CONFIG_QUEUED_RWLOCKS=y
1034 +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y
1035 +CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y
1036 +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y
1037 +CONFIG_FREEZER=y
1040 +# Executable file formats
1042 +CONFIG_BINFMT_ELF=y
1043 +CONFIG_COMPAT_BINFMT_ELF=y
1044 +CONFIG_ELFCORE=y
1045 +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
1046 +CONFIG_BINFMT_SCRIPT=y
1047 +CONFIG_BINFMT_MISC=m
1048 +CONFIG_COREDUMP=y
1049 +# end of Executable file formats
1052 +# Memory Management options
1054 +CONFIG_SELECT_MEMORY_MODEL=y
1055 +CONFIG_SPARSEMEM_MANUAL=y
1056 +CONFIG_SPARSEMEM=y
1057 +CONFIG_NEED_MULTIPLE_NODES=y
1058 +CONFIG_SPARSEMEM_EXTREME=y
1059 +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
1060 +CONFIG_SPARSEMEM_VMEMMAP=y
1061 +CONFIG_CLEAN_LOW_KBYTES=524288
1062 +CONFIG_CLEAN_MIN_KBYTES=0
1063 +CONFIG_HAVE_FAST_GUP=y
1064 +CONFIG_NUMA_KEEP_MEMINFO=y
1065 +CONFIG_MEMORY_ISOLATION=y
1066 +CONFIG_HAVE_BOOTMEM_INFO_NODE=y
1067 +CONFIG_MEMORY_HOTPLUG=y
1068 +CONFIG_MEMORY_HOTPLUG_SPARSE=y
1069 +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
1070 +CONFIG_MEMORY_HOTREMOVE=y
1071 +CONFIG_SPLIT_PTLOCK_CPUS=4
1072 +CONFIG_MEMORY_BALLOON=y
1073 +CONFIG_BALLOON_COMPACTION=y
1074 +CONFIG_COMPACTION=y
1075 +CONFIG_PAGE_REPORTING=y
1076 +CONFIG_MIGRATION=y
1077 +CONFIG_CONTIG_ALLOC=y
1078 +CONFIG_PHYS_ADDR_T_64BIT=y
1079 +CONFIG_BOUNCE=y
1080 +CONFIG_VIRT_TO_BUS=y
1081 +CONFIG_MMU_NOTIFIER=y
1082 +CONFIG_KSM=y
1083 +CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
1084 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
1085 +CONFIG_MEMORY_FAILURE=y
1086 +CONFIG_HWPOISON_INJECT=m
1087 +CONFIG_TRANSPARENT_HUGEPAGE=y
1088 +# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set
1089 +CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
1090 +CONFIG_ARCH_WANTS_THP_SWAP=y
1091 +CONFIG_THP_SWAP=y
1092 +CONFIG_CLEANCACHE=y
1093 +CONFIG_FRONTSWAP=y
1094 +# CONFIG_CMA is not set
1095 +CONFIG_MEM_SOFT_DIRTY=y
1096 +CONFIG_ZSWAP=y
1097 +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set
1098 +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO is not set
1099 +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set
1100 +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4=y
1101 +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set
1102 +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set
1103 +CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lz4"
1104 +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD is not set
1105 +CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD=y
1106 +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set
1107 +CONFIG_ZSWAP_ZPOOL_DEFAULT="z3fold"
1108 +# CONFIG_ZSWAP_DEFAULT_ON is not set
1109 +CONFIG_ZPOOL=y
1110 +CONFIG_ZBUD=m
1111 +CONFIG_Z3FOLD=y
1112 +CONFIG_ZSMALLOC=m
1113 +# CONFIG_ZSMALLOC_STAT is not set
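# zswap is available with lz4 compression over a z3fold pool, but stays off
# by default (CONFIG_ZSWAP_DEFAULT_ON is not set). A sketch for turning it
# on, at boot or at runtime:
#   zswap.enabled=1                                   (kernel command line)
#   $ echo 1 | sudo tee /sys/module/zswap/parameters/enabled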
1114 +CONFIG_GENERIC_EARLY_IOREMAP=y
1115 +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set
1116 +CONFIG_IDLE_PAGE_TRACKING=y
1117 +CONFIG_ARCH_HAS_PTE_DEVMAP=y
1118 +CONFIG_ZONE_DEVICE=y
1119 +CONFIG_DEV_PAGEMAP_OPS=y
1120 +CONFIG_HMM_MIRROR=y
1121 +CONFIG_DEVICE_PRIVATE=y
1122 +CONFIG_VMAP_PFN=y
1123 +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y
1124 +CONFIG_ARCH_HAS_PKEYS=y
1125 +# CONFIG_PERCPU_STATS is not set
1126 +# CONFIG_GUP_TEST is not set
1127 +# CONFIG_READ_ONLY_THP_FOR_FS is not set
1128 +CONFIG_ARCH_HAS_PTE_SPECIAL=y
1129 +CONFIG_MAPPING_DIRTY_HELPERS=y
1130 +CONFIG_LRU_GEN=y
1131 +CONFIG_NR_LRU_GENS=4
1132 +CONFIG_TIERS_PER_GEN=2
1133 +# CONFIG_LRU_GEN_ENABLED is not set
1134 +# CONFIG_LRU_GEN_STATS is not set
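# CONFIG_LRU_GEN is the multigenerational LRU from the patchset this kernel
# carries; it is compiled in but disabled by default (CONFIG_LRU_GEN_ENABLED
# is not set). In kernels where the feature later landed upstream the runtime
# toggle is /sys/kernel/mm/lru_gen/enabled; whether this patchset revision
# uses the same path is an assumption.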
1135 +# end of Memory Management options
1137 +CONFIG_NET=y
1138 +CONFIG_WANT_COMPAT_NETLINK_MESSAGES=y
1139 +CONFIG_COMPAT_NETLINK_MESSAGES=y
1140 +CONFIG_NET_INGRESS=y
1141 +CONFIG_NET_EGRESS=y
1142 +CONFIG_NET_REDIRECT=y
1143 +CONFIG_SKB_EXTENSIONS=y
1146 +# Networking options
1148 +CONFIG_PACKET=y
1149 +CONFIG_PACKET_DIAG=m
1150 +CONFIG_UNIX=y
1151 +CONFIG_UNIX_SCM=y
1152 +CONFIG_UNIX_DIAG=m
1153 +CONFIG_TLS=m
1154 +CONFIG_TLS_DEVICE=y
1155 +# CONFIG_TLS_TOE is not set
1156 +CONFIG_XFRM=y
1157 +CONFIG_XFRM_OFFLOAD=y
1158 +CONFIG_XFRM_ALGO=m
1159 +CONFIG_XFRM_USER=m
1160 +CONFIG_XFRM_USER_COMPAT=m
1161 +CONFIG_XFRM_INTERFACE=m
1162 +# CONFIG_XFRM_SUB_POLICY is not set
1163 +# CONFIG_XFRM_MIGRATE is not set
1164 +CONFIG_XFRM_STATISTICS=y
1165 +CONFIG_XFRM_AH=m
1166 +CONFIG_XFRM_ESP=m
1167 +CONFIG_XFRM_IPCOMP=m
1168 +CONFIG_NET_KEY=m
1169 +# CONFIG_NET_KEY_MIGRATE is not set
1170 +CONFIG_XFRM_ESPINTCP=y
1171 +CONFIG_SMC=m
1172 +CONFIG_SMC_DIAG=m
1173 +CONFIG_XDP_SOCKETS=y
1174 +CONFIG_XDP_SOCKETS_DIAG=m
1175 +CONFIG_INET=y
1176 +CONFIG_IP_MULTICAST=y
1177 +CONFIG_IP_ADVANCED_ROUTER=y
1178 +CONFIG_IP_FIB_TRIE_STATS=y
1179 +CONFIG_IP_MULTIPLE_TABLES=y
1180 +CONFIG_IP_ROUTE_MULTIPATH=y
1181 +CONFIG_IP_ROUTE_VERBOSE=y
1182 +CONFIG_IP_ROUTE_CLASSID=y
1183 +# CONFIG_IP_PNP is not set
1184 +CONFIG_NET_IPIP=m
1185 +CONFIG_NET_IPGRE_DEMUX=m
1186 +CONFIG_NET_IP_TUNNEL=m
1187 +CONFIG_NET_IPGRE=m
1188 +CONFIG_NET_IPGRE_BROADCAST=y
1189 +CONFIG_IP_MROUTE_COMMON=y
1190 +CONFIG_IP_MROUTE=y
1191 +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
1192 +CONFIG_IP_PIMSM_V1=y
1193 +CONFIG_IP_PIMSM_V2=y
1194 +CONFIG_SYN_COOKIES=y
1195 +CONFIG_NET_IPVTI=m
1196 +CONFIG_NET_UDP_TUNNEL=m
1197 +CONFIG_NET_FOU=m
1198 +CONFIG_NET_FOU_IP_TUNNELS=y
1199 +CONFIG_INET_AH=m
1200 +CONFIG_INET_ESP=m
1201 +CONFIG_INET_ESP_OFFLOAD=m
1202 +CONFIG_INET_ESPINTCP=y
1203 +CONFIG_INET_IPCOMP=m
1204 +CONFIG_INET_XFRM_TUNNEL=m
1205 +CONFIG_INET_TUNNEL=m
1206 +CONFIG_INET_DIAG=m
1207 +CONFIG_INET_TCP_DIAG=m
1208 +CONFIG_INET_UDP_DIAG=m
1209 +CONFIG_INET_RAW_DIAG=m
1210 +CONFIG_INET_DIAG_DESTROY=y
1211 +CONFIG_TCP_CONG_ADVANCED=y
1212 +CONFIG_TCP_CONG_BIC=m
1213 +CONFIG_TCP_CONG_CUBIC=m
1214 +CONFIG_TCP_CONG_WESTWOOD=m
1215 +CONFIG_TCP_CONG_HTCP=m
1216 +CONFIG_TCP_CONG_HSTCP=m
1217 +CONFIG_TCP_CONG_HYBLA=m
1218 +CONFIG_TCP_CONG_VEGAS=m
1219 +CONFIG_TCP_CONG_NV=m
1220 +CONFIG_TCP_CONG_SCALABLE=m
1221 +CONFIG_TCP_CONG_LP=m
1222 +CONFIG_TCP_CONG_VENO=m
1223 +CONFIG_TCP_CONG_YEAH=m
1224 +CONFIG_TCP_CONG_ILLINOIS=m
1225 +CONFIG_TCP_CONG_DCTCP=m
1226 +CONFIG_TCP_CONG_CDG=m
1227 +CONFIG_TCP_CONG_BBR=m
1228 +CONFIG_TCP_CONG_BBR2=y
1229 +CONFIG_DEFAULT_BBR2=y
1230 +# CONFIG_DEFAULT_RENO is not set
1231 +CONFIG_DEFAULT_TCP_CONG="bbr2"
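# BBR2, the in-development successor to BBR carried as an out-of-tree patch,
# is both built in (CONFIG_TCP_CONG_BBR2=y) and the default. A runtime check
# on a booted kernel, assuming standard sysctl:
#   $ sysctl net.ipv4.tcp_congestion_control
#   net.ipv4.tcp_congestion_control = bbr2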
1232 +CONFIG_TCP_MD5SIG=y
1233 +CONFIG_IPV6=y
1234 +CONFIG_IPV6_ROUTER_PREF=y
1235 +CONFIG_IPV6_ROUTE_INFO=y
1236 +# CONFIG_IPV6_OPTIMISTIC_DAD is not set
1237 +CONFIG_INET6_AH=m
1238 +CONFIG_INET6_ESP=m
1239 +CONFIG_INET6_ESP_OFFLOAD=m
1240 +CONFIG_INET6_ESPINTCP=y
1241 +CONFIG_INET6_IPCOMP=m
1242 +CONFIG_IPV6_MIP6=m
1243 +CONFIG_IPV6_ILA=m
1244 +CONFIG_INET6_XFRM_TUNNEL=m
1245 +CONFIG_INET6_TUNNEL=m
1246 +CONFIG_IPV6_VTI=m
1247 +CONFIG_IPV6_SIT=m
1248 +CONFIG_IPV6_SIT_6RD=y
1249 +CONFIG_IPV6_NDISC_NODETYPE=y
1250 +CONFIG_IPV6_TUNNEL=m
1251 +CONFIG_IPV6_GRE=m
1252 +CONFIG_IPV6_FOU=m
1253 +CONFIG_IPV6_FOU_TUNNEL=m
1254 +CONFIG_IPV6_MULTIPLE_TABLES=y
1255 +CONFIG_IPV6_SUBTREES=y
1256 +CONFIG_IPV6_MROUTE=y
1257 +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
1258 +CONFIG_IPV6_PIMSM_V2=y
1259 +CONFIG_IPV6_SEG6_LWTUNNEL=y
1260 +CONFIG_IPV6_SEG6_HMAC=y
1261 +CONFIG_IPV6_SEG6_BPF=y
1262 +# CONFIG_IPV6_RPL_LWTUNNEL is not set
1263 +CONFIG_NETLABEL=y
1264 +CONFIG_MPTCP=y
1265 +CONFIG_INET_MPTCP_DIAG=m
1266 +CONFIG_MPTCP_IPV6=y
1267 +CONFIG_NETWORK_SECMARK=y
1268 +CONFIG_NET_PTP_CLASSIFY=y
1269 +CONFIG_NETWORK_PHY_TIMESTAMPING=y
1270 +CONFIG_NETFILTER=y
1271 +CONFIG_NETFILTER_ADVANCED=y
1272 +CONFIG_BRIDGE_NETFILTER=m
1275 +# Core Netfilter Configuration
1277 +CONFIG_NETFILTER_INGRESS=y
1278 +CONFIG_NETFILTER_NETLINK=m
1279 +CONFIG_NETFILTER_FAMILY_BRIDGE=y
1280 +CONFIG_NETFILTER_FAMILY_ARP=y
1281 +CONFIG_NETFILTER_NETLINK_ACCT=m
1282 +CONFIG_NETFILTER_NETLINK_QUEUE=m
1283 +CONFIG_NETFILTER_NETLINK_LOG=m
1284 +CONFIG_NETFILTER_NETLINK_OSF=m
1285 +CONFIG_NF_CONNTRACK=m
1286 +CONFIG_NF_LOG_COMMON=m
1287 +CONFIG_NF_LOG_NETDEV=m
1288 +CONFIG_NETFILTER_CONNCOUNT=m
1289 +CONFIG_NF_CONNTRACK_MARK=y
1290 +CONFIG_NF_CONNTRACK_SECMARK=y
1291 +CONFIG_NF_CONNTRACK_ZONES=y
1292 +# CONFIG_NF_CONNTRACK_PROCFS is not set
1293 +CONFIG_NF_CONNTRACK_EVENTS=y
1294 +CONFIG_NF_CONNTRACK_TIMEOUT=y
1295 +CONFIG_NF_CONNTRACK_TIMESTAMP=y
1296 +CONFIG_NF_CONNTRACK_LABELS=y
1297 +CONFIG_NF_CT_PROTO_DCCP=y
1298 +CONFIG_NF_CT_PROTO_GRE=y
1299 +CONFIG_NF_CT_PROTO_SCTP=y
1300 +CONFIG_NF_CT_PROTO_UDPLITE=y
1301 +CONFIG_NF_CONNTRACK_AMANDA=m
1302 +CONFIG_NF_CONNTRACK_FTP=m
1303 +CONFIG_NF_CONNTRACK_H323=m
1304 +CONFIG_NF_CONNTRACK_IRC=m
1305 +CONFIG_NF_CONNTRACK_BROADCAST=m
1306 +CONFIG_NF_CONNTRACK_NETBIOS_NS=m
1307 +CONFIG_NF_CONNTRACK_SNMP=m
1308 +CONFIG_NF_CONNTRACK_PPTP=m
1309 +CONFIG_NF_CONNTRACK_SANE=m
1310 +CONFIG_NF_CONNTRACK_SIP=m
1311 +CONFIG_NF_CONNTRACK_TFTP=m
1312 +CONFIG_NF_CT_NETLINK=m
1313 +CONFIG_NF_CT_NETLINK_TIMEOUT=m
1314 +CONFIG_NF_CT_NETLINK_HELPER=m
1315 +CONFIG_NETFILTER_NETLINK_GLUE_CT=y
1316 +CONFIG_NF_NAT=m
1317 +CONFIG_NF_NAT_AMANDA=m
1318 +CONFIG_NF_NAT_FTP=m
1319 +CONFIG_NF_NAT_IRC=m
1320 +CONFIG_NF_NAT_SIP=m
1321 +CONFIG_NF_NAT_TFTP=m
1322 +CONFIG_NF_NAT_REDIRECT=y
1323 +CONFIG_NF_NAT_MASQUERADE=y
1324 +CONFIG_NETFILTER_SYNPROXY=m
1325 +CONFIG_NF_TABLES=m
1326 +CONFIG_NF_TABLES_INET=y
1327 +CONFIG_NF_TABLES_NETDEV=y
1328 +CONFIG_NFT_NUMGEN=m
1329 +CONFIG_NFT_CT=m
1330 +CONFIG_NFT_FLOW_OFFLOAD=m
1331 +CONFIG_NFT_COUNTER=m
1332 +CONFIG_NFT_CONNLIMIT=m
1333 +CONFIG_NFT_LOG=m
1334 +CONFIG_NFT_LIMIT=m
1335 +CONFIG_NFT_MASQ=m
1336 +CONFIG_NFT_REDIR=m
1337 +CONFIG_NFT_NAT=m
1338 +CONFIG_NFT_TUNNEL=m
1339 +CONFIG_NFT_OBJREF=m
1340 +CONFIG_NFT_QUEUE=m
1341 +CONFIG_NFT_QUOTA=m
1342 +CONFIG_NFT_REJECT=m
1343 +CONFIG_NFT_REJECT_INET=m
1344 +CONFIG_NFT_COMPAT=m
1345 +CONFIG_NFT_HASH=m
1346 +CONFIG_NFT_FIB=m
1347 +CONFIG_NFT_FIB_INET=m
1348 +CONFIG_NFT_XFRM=m
1349 +CONFIG_NFT_SOCKET=m
1350 +CONFIG_NFT_OSF=m
1351 +CONFIG_NFT_TPROXY=m
1352 +CONFIG_NFT_SYNPROXY=m
1353 +CONFIG_NF_DUP_NETDEV=m
1354 +CONFIG_NFT_DUP_NETDEV=m
1355 +CONFIG_NFT_FWD_NETDEV=m
1356 +CONFIG_NFT_FIB_NETDEV=m
1357 +CONFIG_NFT_REJECT_NETDEV=m
1358 +CONFIG_NF_FLOW_TABLE_INET=m
1359 +CONFIG_NF_FLOW_TABLE=m
1360 +CONFIG_NETFILTER_XTABLES=m
1363 +# Xtables combined modules
1365 +CONFIG_NETFILTER_XT_MARK=m
1366 +CONFIG_NETFILTER_XT_CONNMARK=m
1367 +CONFIG_NETFILTER_XT_SET=m
1370 +# Xtables targets
1372 +CONFIG_NETFILTER_XT_TARGET_AUDIT=m
1373 +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
1374 +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
1375 +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
1376 +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
1377 +CONFIG_NETFILTER_XT_TARGET_CT=m
1378 +CONFIG_NETFILTER_XT_TARGET_DSCP=m
1379 +CONFIG_NETFILTER_XT_TARGET_HL=m
1380 +CONFIG_NETFILTER_XT_TARGET_HMARK=m
1381 +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
1382 +CONFIG_NETFILTER_XT_TARGET_LED=m
1383 +CONFIG_NETFILTER_XT_TARGET_LOG=m
1384 +CONFIG_NETFILTER_XT_TARGET_MARK=m
1385 +CONFIG_NETFILTER_XT_NAT=m
1386 +CONFIG_NETFILTER_XT_TARGET_NETMAP=m
1387 +CONFIG_NETFILTER_XT_TARGET_NFLOG=m
1388 +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
1389 +# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
1390 +CONFIG_NETFILTER_XT_TARGET_RATEEST=m
1391 +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
1392 +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m
1393 +CONFIG_NETFILTER_XT_TARGET_TEE=m
1394 +CONFIG_NETFILTER_XT_TARGET_TPROXY=m
1395 +CONFIG_NETFILTER_XT_TARGET_TRACE=m
1396 +CONFIG_NETFILTER_XT_TARGET_SECMARK=m
1397 +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
1398 +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
1401 +# Xtables matches
1403 +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
1404 +CONFIG_NETFILTER_XT_MATCH_BPF=m
1405 +CONFIG_NETFILTER_XT_MATCH_CGROUP=m
1406 +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
1407 +CONFIG_NETFILTER_XT_MATCH_COMMENT=m
1408 +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
1409 +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
1410 +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
1411 +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
1412 +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
1413 +CONFIG_NETFILTER_XT_MATCH_CPU=m
1414 +CONFIG_NETFILTER_XT_MATCH_DCCP=m
1415 +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
1416 +CONFIG_NETFILTER_XT_MATCH_DSCP=m
1417 +CONFIG_NETFILTER_XT_MATCH_ECN=m
1418 +CONFIG_NETFILTER_XT_MATCH_ESP=m
1419 +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
1420 +CONFIG_NETFILTER_XT_MATCH_HELPER=m
1421 +CONFIG_NETFILTER_XT_MATCH_HL=m
1422 +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
1423 +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
1424 +CONFIG_NETFILTER_XT_MATCH_IPVS=m
1425 +CONFIG_NETFILTER_XT_MATCH_L2TP=m
1426 +CONFIG_NETFILTER_XT_MATCH_LENGTH=m
1427 +CONFIG_NETFILTER_XT_MATCH_LIMIT=m
1428 +CONFIG_NETFILTER_XT_MATCH_MAC=m
1429 +CONFIG_NETFILTER_XT_MATCH_MARK=m
1430 +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
1431 +CONFIG_NETFILTER_XT_MATCH_NFACCT=m
1432 +CONFIG_NETFILTER_XT_MATCH_OSF=m
1433 +CONFIG_NETFILTER_XT_MATCH_OWNER=m
1434 +CONFIG_NETFILTER_XT_MATCH_POLICY=m
1435 +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
1436 +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
1437 +CONFIG_NETFILTER_XT_MATCH_QUOTA=m
1438 +CONFIG_NETFILTER_XT_MATCH_RATEEST=m
1439 +CONFIG_NETFILTER_XT_MATCH_REALM=m
1440 +CONFIG_NETFILTER_XT_MATCH_RECENT=m
1441 +CONFIG_NETFILTER_XT_MATCH_SCTP=m
1442 +CONFIG_NETFILTER_XT_MATCH_SOCKET=m
1443 +CONFIG_NETFILTER_XT_MATCH_STATE=m
1444 +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
1445 +CONFIG_NETFILTER_XT_MATCH_STRING=m
1446 +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
1447 +CONFIG_NETFILTER_XT_MATCH_TIME=m
1448 +CONFIG_NETFILTER_XT_MATCH_U32=m
1449 +# end of Core Netfilter Configuration
1451 +CONFIG_IP_SET=m
1452 +CONFIG_IP_SET_MAX=256
1453 +CONFIG_IP_SET_BITMAP_IP=m
1454 +CONFIG_IP_SET_BITMAP_IPMAC=m
1455 +CONFIG_IP_SET_BITMAP_PORT=m
1456 +CONFIG_IP_SET_HASH_IP=m
1457 +CONFIG_IP_SET_HASH_IPMARK=m
1458 +CONFIG_IP_SET_HASH_IPPORT=m
1459 +CONFIG_IP_SET_HASH_IPPORTIP=m
1460 +CONFIG_IP_SET_HASH_IPPORTNET=m
1461 +CONFIG_IP_SET_HASH_IPMAC=m
1462 +CONFIG_IP_SET_HASH_MAC=m
1463 +CONFIG_IP_SET_HASH_NETPORTNET=m
1464 +CONFIG_IP_SET_HASH_NET=m
1465 +CONFIG_IP_SET_HASH_NETNET=m
1466 +CONFIG_IP_SET_HASH_NETPORT=m
1467 +CONFIG_IP_SET_HASH_NETIFACE=m
1468 +CONFIG_IP_SET_LIST_SET=m
1469 +CONFIG_IP_VS=m
1470 +CONFIG_IP_VS_IPV6=y
1471 +# CONFIG_IP_VS_DEBUG is not set
1472 +CONFIG_IP_VS_TAB_BITS=12
1475 +# IPVS transport protocol load balancing support
1477 +CONFIG_IP_VS_PROTO_TCP=y
1478 +CONFIG_IP_VS_PROTO_UDP=y
1479 +CONFIG_IP_VS_PROTO_AH_ESP=y
1480 +CONFIG_IP_VS_PROTO_ESP=y
1481 +CONFIG_IP_VS_PROTO_AH=y
1482 +CONFIG_IP_VS_PROTO_SCTP=y
1485 +# IPVS scheduler
1487 +CONFIG_IP_VS_RR=m
1488 +CONFIG_IP_VS_WRR=m
1489 +CONFIG_IP_VS_LC=m
1490 +CONFIG_IP_VS_WLC=m
1491 +CONFIG_IP_VS_FO=m
1492 +CONFIG_IP_VS_OVF=m
1493 +CONFIG_IP_VS_LBLC=m
1494 +CONFIG_IP_VS_LBLCR=m
1495 +CONFIG_IP_VS_DH=m
1496 +CONFIG_IP_VS_SH=m
1497 +CONFIG_IP_VS_MH=m
1498 +CONFIG_IP_VS_SED=m
1499 +CONFIG_IP_VS_NQ=m
1500 +CONFIG_IP_VS_TWOS=m
1503 +# IPVS SH scheduler
1505 +CONFIG_IP_VS_SH_TAB_BITS=8
1508 +# IPVS MH scheduler
1510 +CONFIG_IP_VS_MH_TAB_INDEX=12
1513 +# IPVS application helper
1515 +CONFIG_IP_VS_FTP=m
1516 +CONFIG_IP_VS_NFCT=y
1517 +CONFIG_IP_VS_PE_SIP=m
1520 +# IP: Netfilter Configuration
1522 +CONFIG_NF_DEFRAG_IPV4=m
1523 +CONFIG_NF_SOCKET_IPV4=m
1524 +CONFIG_NF_TPROXY_IPV4=m
1525 +CONFIG_NF_TABLES_IPV4=y
1526 +CONFIG_NFT_REJECT_IPV4=m
1527 +CONFIG_NFT_DUP_IPV4=m
1528 +CONFIG_NFT_FIB_IPV4=m
1529 +CONFIG_NF_TABLES_ARP=y
1530 +CONFIG_NF_FLOW_TABLE_IPV4=m
1531 +CONFIG_NF_DUP_IPV4=m
1532 +CONFIG_NF_LOG_ARP=m
1533 +CONFIG_NF_LOG_IPV4=m
1534 +CONFIG_NF_REJECT_IPV4=m
1535 +CONFIG_NF_NAT_SNMP_BASIC=m
1536 +CONFIG_NF_NAT_PPTP=m
1537 +CONFIG_NF_NAT_H323=m
1538 +CONFIG_IP_NF_IPTABLES=m
1539 +CONFIG_IP_NF_MATCH_AH=m
1540 +CONFIG_IP_NF_MATCH_ECN=m
1541 +CONFIG_IP_NF_MATCH_RPFILTER=m
1542 +CONFIG_IP_NF_MATCH_TTL=m
1543 +CONFIG_IP_NF_FILTER=m
1544 +CONFIG_IP_NF_TARGET_REJECT=m
1545 +CONFIG_IP_NF_TARGET_SYNPROXY=m
1546 +CONFIG_IP_NF_NAT=m
1547 +CONFIG_IP_NF_TARGET_MASQUERADE=m
1548 +CONFIG_IP_NF_TARGET_NETMAP=m
1549 +CONFIG_IP_NF_TARGET_REDIRECT=m
1550 +CONFIG_IP_NF_MANGLE=m
1551 +CONFIG_IP_NF_TARGET_CLUSTERIP=m
1552 +CONFIG_IP_NF_TARGET_ECN=m
1553 +CONFIG_IP_NF_TARGET_TTL=m
1554 +CONFIG_IP_NF_RAW=m
1555 +CONFIG_IP_NF_SECURITY=m
1556 +CONFIG_IP_NF_ARPTABLES=m
1557 +CONFIG_IP_NF_ARPFILTER=m
1558 +CONFIG_IP_NF_ARP_MANGLE=m
1559 +# end of IP: Netfilter Configuration
1562 +# IPv6: Netfilter Configuration
1564 +CONFIG_NF_SOCKET_IPV6=m
1565 +CONFIG_NF_TPROXY_IPV6=m
1566 +CONFIG_NF_TABLES_IPV6=y
1567 +CONFIG_NFT_REJECT_IPV6=m
1568 +CONFIG_NFT_DUP_IPV6=m
1569 +CONFIG_NFT_FIB_IPV6=m
1570 +CONFIG_NF_FLOW_TABLE_IPV6=m
1571 +CONFIG_NF_DUP_IPV6=m
1572 +CONFIG_NF_REJECT_IPV6=m
1573 +CONFIG_NF_LOG_IPV6=m
1574 +CONFIG_IP6_NF_IPTABLES=m
1575 +CONFIG_IP6_NF_MATCH_AH=m
1576 +CONFIG_IP6_NF_MATCH_EUI64=m
1577 +CONFIG_IP6_NF_MATCH_FRAG=m
1578 +CONFIG_IP6_NF_MATCH_OPTS=m
1579 +CONFIG_IP6_NF_MATCH_HL=m
1580 +CONFIG_IP6_NF_MATCH_IPV6HEADER=m
1581 +CONFIG_IP6_NF_MATCH_MH=m
1582 +CONFIG_IP6_NF_MATCH_RPFILTER=m
1583 +CONFIG_IP6_NF_MATCH_RT=m
1584 +CONFIG_IP6_NF_MATCH_SRH=m
1585 +CONFIG_IP6_NF_TARGET_HL=m
1586 +CONFIG_IP6_NF_FILTER=m
1587 +CONFIG_IP6_NF_TARGET_REJECT=m
1588 +CONFIG_IP6_NF_TARGET_SYNPROXY=m
1589 +CONFIG_IP6_NF_MANGLE=m
1590 +CONFIG_IP6_NF_RAW=m
1591 +CONFIG_IP6_NF_SECURITY=m
1592 +CONFIG_IP6_NF_NAT=m
1593 +CONFIG_IP6_NF_TARGET_MASQUERADE=m
1594 +CONFIG_IP6_NF_TARGET_NPT=m
1595 +# end of IPv6: Netfilter Configuration
1597 +CONFIG_NF_DEFRAG_IPV6=m
1600 +# DECnet: Netfilter Configuration
1602 +CONFIG_DECNET_NF_GRABULATOR=m
1603 +# end of DECnet: Netfilter Configuration
1605 +CONFIG_NF_TABLES_BRIDGE=m
1606 +CONFIG_NFT_BRIDGE_META=m
1607 +CONFIG_NFT_BRIDGE_REJECT=m
1608 +CONFIG_NF_LOG_BRIDGE=m
1609 +CONFIG_NF_CONNTRACK_BRIDGE=m
1610 +CONFIG_BRIDGE_NF_EBTABLES=m
1611 +CONFIG_BRIDGE_EBT_BROUTE=m
1612 +CONFIG_BRIDGE_EBT_T_FILTER=m
1613 +CONFIG_BRIDGE_EBT_T_NAT=m
1614 +CONFIG_BRIDGE_EBT_802_3=m
1615 +CONFIG_BRIDGE_EBT_AMONG=m
1616 +CONFIG_BRIDGE_EBT_ARP=m
1617 +CONFIG_BRIDGE_EBT_IP=m
1618 +CONFIG_BRIDGE_EBT_IP6=m
1619 +CONFIG_BRIDGE_EBT_LIMIT=m
1620 +CONFIG_BRIDGE_EBT_MARK=m
1621 +CONFIG_BRIDGE_EBT_PKTTYPE=m
1622 +CONFIG_BRIDGE_EBT_STP=m
1623 +CONFIG_BRIDGE_EBT_VLAN=m
1624 +CONFIG_BRIDGE_EBT_ARPREPLY=m
1625 +CONFIG_BRIDGE_EBT_DNAT=m
1626 +CONFIG_BRIDGE_EBT_MARK_T=m
1627 +CONFIG_BRIDGE_EBT_REDIRECT=m
1628 +CONFIG_BRIDGE_EBT_SNAT=m
1629 +CONFIG_BRIDGE_EBT_LOG=m
1630 +CONFIG_BRIDGE_EBT_NFLOG=m
1631 +CONFIG_BPFILTER=y
1632 +CONFIG_BPFILTER_UMH=m
1633 +CONFIG_IP_DCCP=m
1634 +CONFIG_INET_DCCP_DIAG=m
1637 +# DCCP CCIDs Configuration
1639 +# CONFIG_IP_DCCP_CCID2_DEBUG is not set
1640 +# CONFIG_IP_DCCP_CCID3 is not set
1641 +# end of DCCP CCIDs Configuration
1644 +# DCCP Kernel Hacking
1646 +# CONFIG_IP_DCCP_DEBUG is not set
1647 +# end of DCCP Kernel Hacking
1649 +CONFIG_IP_SCTP=m
1650 +# CONFIG_SCTP_DBG_OBJCNT is not set
1651 +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set
1652 +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y
1653 +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set
1654 +CONFIG_SCTP_COOKIE_HMAC_MD5=y
1655 +CONFIG_SCTP_COOKIE_HMAC_SHA1=y
1656 +CONFIG_INET_SCTP_DIAG=m
1657 +CONFIG_RDS=m
1658 +CONFIG_RDS_RDMA=m
1659 +CONFIG_RDS_TCP=m
1660 +# CONFIG_RDS_DEBUG is not set
1661 +CONFIG_TIPC=m
1662 +CONFIG_TIPC_MEDIA_IB=y
1663 +CONFIG_TIPC_MEDIA_UDP=y
1664 +CONFIG_TIPC_CRYPTO=y
1665 +CONFIG_TIPC_DIAG=m
1666 +CONFIG_ATM=m
1667 +CONFIG_ATM_CLIP=m
1668 +# CONFIG_ATM_CLIP_NO_ICMP is not set
1669 +CONFIG_ATM_LANE=m
1670 +CONFIG_ATM_MPOA=m
1671 +CONFIG_ATM_BR2684=m
1672 +# CONFIG_ATM_BR2684_IPFILTER is not set
1673 +CONFIG_L2TP=m
1674 +CONFIG_L2TP_DEBUGFS=m
1675 +CONFIG_L2TP_V3=y
1676 +CONFIG_L2TP_IP=m
1677 +CONFIG_L2TP_ETH=m
1678 +CONFIG_STP=m
1679 +CONFIG_GARP=m
1680 +CONFIG_MRP=m
1681 +CONFIG_BRIDGE=m
1682 +CONFIG_BRIDGE_IGMP_SNOOPING=y
1683 +CONFIG_BRIDGE_VLAN_FILTERING=y
1684 +CONFIG_BRIDGE_MRP=y
1685 +CONFIG_BRIDGE_CFM=y
1686 +CONFIG_HAVE_NET_DSA=y
1687 +CONFIG_NET_DSA=m
1688 +CONFIG_NET_DSA_TAG_8021Q=m
1689 +CONFIG_NET_DSA_TAG_AR9331=m
1690 +CONFIG_NET_DSA_TAG_BRCM_COMMON=m
1691 +CONFIG_NET_DSA_TAG_BRCM=m
1692 +CONFIG_NET_DSA_TAG_BRCM_PREPEND=m
1693 +CONFIG_NET_DSA_TAG_HELLCREEK=m
1694 +CONFIG_NET_DSA_TAG_GSWIP=m
1695 +CONFIG_NET_DSA_TAG_DSA_COMMON=m
1696 +CONFIG_NET_DSA_TAG_DSA=m
1697 +CONFIG_NET_DSA_TAG_EDSA=m
1698 +CONFIG_NET_DSA_TAG_MTK=m
1699 +CONFIG_NET_DSA_TAG_KSZ=m
1700 +CONFIG_NET_DSA_TAG_RTL4_A=m
1701 +CONFIG_NET_DSA_TAG_OCELOT=m
1702 +CONFIG_NET_DSA_TAG_OCELOT_8021Q=m
1703 +CONFIG_NET_DSA_TAG_QCA=m
1704 +CONFIG_NET_DSA_TAG_LAN9303=m
1705 +CONFIG_NET_DSA_TAG_SJA1105=m
1706 +CONFIG_NET_DSA_TAG_TRAILER=m
1707 +CONFIG_NET_DSA_TAG_XRS700X=m
1708 +CONFIG_VLAN_8021Q=m
1709 +CONFIG_VLAN_8021Q_GVRP=y
1710 +CONFIG_VLAN_8021Q_MVRP=y
1711 +CONFIG_DECNET=m
1712 +# CONFIG_DECNET_ROUTER is not set
1713 +CONFIG_LLC=m
1714 +CONFIG_LLC2=m
1715 +CONFIG_ATALK=m
1716 +CONFIG_DEV_APPLETALK=m
1717 +# CONFIG_IPDDP is not set
1718 +CONFIG_X25=m
1719 +CONFIG_LAPB=m
1720 +CONFIG_PHONET=m
1721 +CONFIG_6LOWPAN=m
1722 +# CONFIG_6LOWPAN_DEBUGFS is not set
1723 +CONFIG_6LOWPAN_NHC=m
1724 +CONFIG_6LOWPAN_NHC_DEST=m
1725 +CONFIG_6LOWPAN_NHC_FRAGMENT=m
1726 +CONFIG_6LOWPAN_NHC_HOP=m
1727 +CONFIG_6LOWPAN_NHC_IPV6=m
1728 +CONFIG_6LOWPAN_NHC_MOBILITY=m
1729 +CONFIG_6LOWPAN_NHC_ROUTING=m
1730 +CONFIG_6LOWPAN_NHC_UDP=m
1731 +# CONFIG_6LOWPAN_GHC_EXT_HDR_HOP is not set
1732 +# CONFIG_6LOWPAN_GHC_UDP is not set
1733 +# CONFIG_6LOWPAN_GHC_ICMPV6 is not set
1734 +# CONFIG_6LOWPAN_GHC_EXT_HDR_DEST is not set
1735 +# CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG is not set
1736 +# CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE is not set
1737 +CONFIG_IEEE802154=m
1738 +# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set
1739 +CONFIG_IEEE802154_SOCKET=m
1740 +CONFIG_IEEE802154_6LOWPAN=m
1741 +CONFIG_MAC802154=m
1742 +CONFIG_NET_SCHED=y
1745 +# Queueing/Scheduling
1747 +CONFIG_NET_SCH_CBQ=m
1748 +CONFIG_NET_SCH_HTB=m
1749 +CONFIG_NET_SCH_HFSC=m
1750 +CONFIG_NET_SCH_ATM=m
1751 +CONFIG_NET_SCH_PRIO=m
1752 +CONFIG_NET_SCH_MULTIQ=m
1753 +CONFIG_NET_SCH_RED=m
1754 +CONFIG_NET_SCH_SFB=m
1755 +CONFIG_NET_SCH_SFQ=m
1756 +CONFIG_NET_SCH_TEQL=m
1757 +CONFIG_NET_SCH_TBF=m
1758 +CONFIG_NET_SCH_CBS=m
1759 +CONFIG_NET_SCH_ETF=m
1760 +CONFIG_NET_SCH_TAPRIO=m
1761 +CONFIG_NET_SCH_GRED=m
1762 +CONFIG_NET_SCH_DSMARK=m
1763 +CONFIG_NET_SCH_NETEM=m
1764 +CONFIG_NET_SCH_DRR=m
1765 +CONFIG_NET_SCH_MQPRIO=m
1766 +CONFIG_NET_SCH_SKBPRIO=m
1767 +CONFIG_NET_SCH_CHOKE=m
1768 +CONFIG_NET_SCH_QFQ=m
1769 +CONFIG_NET_SCH_CODEL=m
1770 +CONFIG_NET_SCH_FQ_CODEL=m
1771 +CONFIG_NET_SCH_CAKE=m
1772 +CONFIG_NET_SCH_FQ=m
1773 +CONFIG_NET_SCH_HHF=m
1774 +CONFIG_NET_SCH_PIE=y
1775 +CONFIG_NET_SCH_FQ_PIE=y
1776 +CONFIG_NET_SCH_INGRESS=m
1777 +CONFIG_NET_SCH_PLUG=m
1778 +CONFIG_NET_SCH_ETS=m
1779 +CONFIG_NET_SCH_DEFAULT=y
1780 +# CONFIG_DEFAULT_FQ is not set
1781 +# CONFIG_DEFAULT_CODEL is not set
1782 +# CONFIG_DEFAULT_FQ_CODEL is not set
1783 +CONFIG_DEFAULT_FQ_PIE=y
1784 +# CONFIG_DEFAULT_SFQ is not set
1785 +# CONFIG_DEFAULT_PFIFO_FAST is not set
1786 +CONFIG_DEFAULT_NET_SCH="fq_pie"
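# fq_pie is the default queueing discipline for new interfaces. A sketch for
# confirming it, or overriding it without a rebuild:
#   $ sysctl net.core.default_qdisc
#   $ sudo sysctl -w net.core.default_qdisc=fq_codel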
1789 +# Classification
1791 +CONFIG_NET_CLS=y
1792 +CONFIG_NET_CLS_BASIC=m
1793 +CONFIG_NET_CLS_TCINDEX=m
1794 +CONFIG_NET_CLS_ROUTE4=m
1795 +CONFIG_NET_CLS_FW=m
1796 +CONFIG_NET_CLS_U32=m
1797 +# CONFIG_CLS_U32_PERF is not set
1798 +CONFIG_CLS_U32_MARK=y
1799 +CONFIG_NET_CLS_RSVP=m
1800 +CONFIG_NET_CLS_RSVP6=m
1801 +CONFIG_NET_CLS_FLOW=m
1802 +CONFIG_NET_CLS_CGROUP=m
1803 +CONFIG_NET_CLS_BPF=m
1804 +CONFIG_NET_CLS_FLOWER=m
1805 +CONFIG_NET_CLS_MATCHALL=m
1806 +CONFIG_NET_EMATCH=y
1807 +CONFIG_NET_EMATCH_STACK=32
1808 +CONFIG_NET_EMATCH_CMP=m
1809 +CONFIG_NET_EMATCH_NBYTE=m
1810 +CONFIG_NET_EMATCH_U32=m
1811 +CONFIG_NET_EMATCH_META=m
1812 +CONFIG_NET_EMATCH_TEXT=m
1813 +CONFIG_NET_EMATCH_CANID=m
1814 +CONFIG_NET_EMATCH_IPSET=m
1815 +CONFIG_NET_EMATCH_IPT=m
1816 +CONFIG_NET_CLS_ACT=y
1817 +CONFIG_NET_ACT_POLICE=m
1818 +CONFIG_NET_ACT_GACT=m
1819 +CONFIG_GACT_PROB=y
1820 +CONFIG_NET_ACT_MIRRED=m
1821 +CONFIG_NET_ACT_SAMPLE=m
1822 +CONFIG_NET_ACT_IPT=m
1823 +CONFIG_NET_ACT_NAT=m
1824 +CONFIG_NET_ACT_PEDIT=m
1825 +CONFIG_NET_ACT_SIMP=m
1826 +CONFIG_NET_ACT_SKBEDIT=m
1827 +CONFIG_NET_ACT_CSUM=m
1828 +CONFIG_NET_ACT_MPLS=m
1829 +CONFIG_NET_ACT_VLAN=m
1830 +CONFIG_NET_ACT_BPF=m
1831 +CONFIG_NET_ACT_CONNMARK=m
1832 +CONFIG_NET_ACT_CTINFO=m
1833 +CONFIG_NET_ACT_SKBMOD=m
1834 +# CONFIG_NET_ACT_IFE is not set
1835 +CONFIG_NET_ACT_TUNNEL_KEY=m
1836 +CONFIG_NET_ACT_CT=m
1837 +CONFIG_NET_ACT_GATE=m
1838 +CONFIG_NET_TC_SKB_EXT=y
1839 +CONFIG_NET_SCH_FIFO=y
1840 +CONFIG_DCB=y
1841 +CONFIG_DNS_RESOLVER=y
1842 +CONFIG_BATMAN_ADV=m
1843 +# CONFIG_BATMAN_ADV_BATMAN_V is not set
1844 +CONFIG_BATMAN_ADV_BLA=y
1845 +CONFIG_BATMAN_ADV_DAT=y
1846 +CONFIG_BATMAN_ADV_NC=y
1847 +CONFIG_BATMAN_ADV_MCAST=y
1848 +# CONFIG_BATMAN_ADV_DEBUG is not set
1849 +CONFIG_OPENVSWITCH=m
1850 +CONFIG_OPENVSWITCH_GRE=m
1851 +CONFIG_OPENVSWITCH_VXLAN=m
1852 +CONFIG_OPENVSWITCH_GENEVE=m
1853 +CONFIG_VSOCKETS=m
1854 +CONFIG_VSOCKETS_DIAG=m
1855 +CONFIG_VSOCKETS_LOOPBACK=m
1856 +CONFIG_VMWARE_VMCI_VSOCKETS=m
1857 +CONFIG_VIRTIO_VSOCKETS=m
1858 +CONFIG_VIRTIO_VSOCKETS_COMMON=m
1859 +CONFIG_HYPERV_VSOCKETS=m
1860 +CONFIG_NETLINK_DIAG=m
1861 +CONFIG_MPLS=y
1862 +CONFIG_NET_MPLS_GSO=m
1863 +CONFIG_MPLS_ROUTING=m
1864 +CONFIG_MPLS_IPTUNNEL=m
1865 +CONFIG_NET_NSH=m
1866 +CONFIG_HSR=m
1867 +CONFIG_NET_SWITCHDEV=y
1868 +CONFIG_NET_L3_MASTER_DEV=y
1869 +CONFIG_QRTR=m
1870 +CONFIG_QRTR_SMD=m
1871 +CONFIG_QRTR_TUN=m
1872 +CONFIG_QRTR_MHI=m
1873 +CONFIG_NET_NCSI=y
1874 +CONFIG_NCSI_OEM_CMD_GET_MAC=y
1875 +CONFIG_RPS=y
1876 +CONFIG_RFS_ACCEL=y
1877 +CONFIG_SOCK_RX_QUEUE_MAPPING=y
1878 +CONFIG_XPS=y
1879 +CONFIG_CGROUP_NET_PRIO=y
1880 +CONFIG_CGROUP_NET_CLASSID=y
1881 +CONFIG_NET_RX_BUSY_POLL=y
1882 +CONFIG_BQL=y
1883 +CONFIG_BPF_JIT=y
1884 +CONFIG_BPF_STREAM_PARSER=y
1885 +CONFIG_NET_FLOW_LIMIT=y
1888 +# Network testing
1890 +CONFIG_NET_PKTGEN=m
1891 +# end of Network testing
1892 +# end of Networking options
1894 +CONFIG_HAMRADIO=y
1897 +# Packet Radio protocols
1899 +CONFIG_AX25=m
1900 +CONFIG_AX25_DAMA_SLAVE=y
1901 +CONFIG_NETROM=m
1902 +CONFIG_ROSE=m
1905 +# AX.25 network device drivers
1907 +CONFIG_MKISS=m
1908 +CONFIG_6PACK=m
1909 +CONFIG_BPQETHER=m
1910 +CONFIG_BAYCOM_SER_FDX=m
1911 +CONFIG_BAYCOM_SER_HDX=m
1912 +CONFIG_BAYCOM_PAR=m
1913 +CONFIG_YAM=m
1914 +# end of AX.25 network device drivers
1916 +CONFIG_CAN=m
1917 +CONFIG_CAN_RAW=m
1918 +CONFIG_CAN_BCM=m
1919 +CONFIG_CAN_GW=m
1920 +CONFIG_CAN_J1939=m
1921 +CONFIG_CAN_ISOTP=m
1924 +# CAN Device Drivers
1926 +CONFIG_CAN_VCAN=m
1927 +CONFIG_CAN_VXCAN=m
1928 +CONFIG_CAN_SLCAN=m
1929 +CONFIG_CAN_DEV=m
1930 +CONFIG_CAN_CALC_BITTIMING=y
1931 +CONFIG_CAN_JANZ_ICAN3=m
1932 +CONFIG_CAN_KVASER_PCIEFD=m
1933 +CONFIG_CAN_C_CAN=m
1934 +CONFIG_CAN_C_CAN_PLATFORM=m
1935 +CONFIG_CAN_C_CAN_PCI=m
1936 +CONFIG_CAN_CC770=m
1937 +CONFIG_CAN_CC770_ISA=m
1938 +CONFIG_CAN_CC770_PLATFORM=m
1939 +CONFIG_CAN_IFI_CANFD=m
1940 +CONFIG_CAN_M_CAN=m
1941 +CONFIG_CAN_M_CAN_PCI=m
1942 +CONFIG_CAN_M_CAN_PLATFORM=m
1943 +CONFIG_CAN_M_CAN_TCAN4X5X=m
1944 +CONFIG_CAN_PEAK_PCIEFD=m
1945 +CONFIG_CAN_SJA1000=m
1946 +CONFIG_CAN_EMS_PCI=m
1947 +CONFIG_CAN_EMS_PCMCIA=m
1948 +CONFIG_CAN_F81601=m
1949 +CONFIG_CAN_KVASER_PCI=m
1950 +CONFIG_CAN_PEAK_PCI=m
1951 +CONFIG_CAN_PEAK_PCIEC=y
1952 +CONFIG_CAN_PEAK_PCMCIA=m
1953 +CONFIG_CAN_PLX_PCI=m
1954 +CONFIG_CAN_SJA1000_ISA=m
1955 +CONFIG_CAN_SJA1000_PLATFORM=m
1956 +CONFIG_CAN_SOFTING=m
1957 +CONFIG_CAN_SOFTING_CS=m
1960 +# CAN SPI interfaces
1962 +CONFIG_CAN_HI311X=m
1963 +CONFIG_CAN_MCP251X=m
1964 +CONFIG_CAN_MCP251XFD=m
1965 +# CONFIG_CAN_MCP251XFD_SANITY is not set
1966 +# end of CAN SPI interfaces
1969 +# CAN USB interfaces
1971 +CONFIG_CAN_8DEV_USB=m
1972 +CONFIG_CAN_EMS_USB=m
1973 +CONFIG_CAN_ESD_USB2=m
1974 +CONFIG_CAN_GS_USB=m
1975 +CONFIG_CAN_KVASER_USB=m
1976 +CONFIG_CAN_MCBA_USB=m
1977 +CONFIG_CAN_PEAK_USB=m
1978 +CONFIG_CAN_UCAN=m
1979 +# end of CAN USB interfaces
1981 +# CONFIG_CAN_DEBUG_DEVICES is not set
1982 +# end of CAN Device Drivers
1984 +CONFIG_BT=m
1985 +CONFIG_BT_BREDR=y
1986 +CONFIG_BT_RFCOMM=m
1987 +CONFIG_BT_RFCOMM_TTY=y
1988 +CONFIG_BT_BNEP=m
1989 +CONFIG_BT_BNEP_MC_FILTER=y
1990 +CONFIG_BT_BNEP_PROTO_FILTER=y
1991 +CONFIG_BT_CMTP=m
1992 +CONFIG_BT_HIDP=m
1993 +CONFIG_BT_HS=y
1994 +CONFIG_BT_LE=y
1995 +CONFIG_BT_6LOWPAN=m
1996 +CONFIG_BT_LEDS=y
1997 +CONFIG_BT_MSFTEXT=y
1998 +CONFIG_BT_DEBUGFS=y
1999 +# CONFIG_BT_SELFTEST is not set
2002 +# Bluetooth device drivers
2004 +CONFIG_BT_INTEL=m
2005 +CONFIG_BT_BCM=m
2006 +CONFIG_BT_RTL=m
2007 +CONFIG_BT_QCA=m
2008 +CONFIG_BT_HCIBTUSB=m
2009 +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y
2010 +CONFIG_BT_HCIBTUSB_BCM=y
2011 +CONFIG_BT_HCIBTUSB_MTK=y
2012 +CONFIG_BT_HCIBTUSB_RTL=y
2013 +CONFIG_BT_HCIBTSDIO=m
2014 +CONFIG_BT_HCIUART=m
2015 +CONFIG_BT_HCIUART_SERDEV=y
2016 +CONFIG_BT_HCIUART_H4=y
2017 +CONFIG_BT_HCIUART_NOKIA=m
2018 +CONFIG_BT_HCIUART_BCSP=y
2019 +CONFIG_BT_HCIUART_ATH3K=y
2020 +CONFIG_BT_HCIUART_LL=y
2021 +CONFIG_BT_HCIUART_3WIRE=y
2022 +CONFIG_BT_HCIUART_INTEL=y
2023 +CONFIG_BT_HCIUART_BCM=y
2024 +CONFIG_BT_HCIUART_RTL=y
2025 +CONFIG_BT_HCIUART_QCA=y
2026 +CONFIG_BT_HCIUART_AG6XX=y
2027 +CONFIG_BT_HCIUART_MRVL=y
2028 +CONFIG_BT_HCIBCM203X=m
2029 +CONFIG_BT_HCIBPA10X=m
2030 +CONFIG_BT_HCIBFUSB=m
2031 +CONFIG_BT_HCIDTL1=m
2032 +CONFIG_BT_HCIBT3C=m
2033 +CONFIG_BT_HCIBLUECARD=m
2034 +CONFIG_BT_HCIVHCI=m
2035 +CONFIG_BT_MRVL=m
2036 +CONFIG_BT_MRVL_SDIO=m
2037 +CONFIG_BT_ATH3K=m
2038 +CONFIG_BT_MTKSDIO=m
2039 +CONFIG_BT_MTKUART=m
2040 +CONFIG_BT_HCIRSI=m
2041 +# end of Bluetooth device drivers
2043 +CONFIG_AF_RXRPC=m
2044 +CONFIG_AF_RXRPC_IPV6=y
2045 +# CONFIG_AF_RXRPC_INJECT_LOSS is not set
2046 +# CONFIG_AF_RXRPC_DEBUG is not set
2047 +CONFIG_RXKAD=y
2048 +CONFIG_AF_KCM=m
2049 +CONFIG_STREAM_PARSER=y
2050 +CONFIG_FIB_RULES=y
2051 +CONFIG_WIRELESS=y
2052 +CONFIG_WIRELESS_EXT=y
2053 +CONFIG_WEXT_CORE=y
2054 +CONFIG_WEXT_PROC=y
2055 +CONFIG_WEXT_SPY=y
2056 +CONFIG_WEXT_PRIV=y
2057 +CONFIG_CFG80211=m
2058 +# CONFIG_NL80211_TESTMODE is not set
2059 +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
2060 +# CONFIG_CFG80211_CERTIFICATION_ONUS is not set
2061 +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y
2062 +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y
2063 +CONFIG_CFG80211_DEFAULT_PS=y
2064 +CONFIG_CFG80211_DEBUGFS=y
2065 +CONFIG_CFG80211_CRDA_SUPPORT=y
2066 +CONFIG_CFG80211_WEXT=y
2067 +CONFIG_CFG80211_WEXT_EXPORT=y
2068 +CONFIG_LIB80211=m
2069 +CONFIG_LIB80211_CRYPT_WEP=m
2070 +CONFIG_LIB80211_CRYPT_CCMP=m
2071 +CONFIG_LIB80211_CRYPT_TKIP=m
2072 +# CONFIG_LIB80211_DEBUG is not set
2073 +CONFIG_MAC80211=m
2074 +CONFIG_MAC80211_HAS_RC=y
2075 +CONFIG_MAC80211_RC_MINSTREL=y
2076 +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
2077 +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
2078 +CONFIG_MAC80211_MESH=y
2079 +CONFIG_MAC80211_LEDS=y
2080 +CONFIG_MAC80211_DEBUGFS=y
2081 +CONFIG_MAC80211_MESSAGE_TRACING=y
2082 +# CONFIG_MAC80211_DEBUG_MENU is not set
2083 +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0
2084 +CONFIG_RFKILL=y
2085 +CONFIG_RFKILL_LEDS=y
2086 +CONFIG_RFKILL_INPUT=y
2087 +CONFIG_RFKILL_GPIO=m
2088 +CONFIG_NET_9P=m
2089 +CONFIG_NET_9P_VIRTIO=m
2090 +CONFIG_NET_9P_XEN=m
2091 +CONFIG_NET_9P_RDMA=m
2092 +# CONFIG_NET_9P_DEBUG is not set
2093 +CONFIG_CAIF=m
2094 +# CONFIG_CAIF_DEBUG is not set
2095 +CONFIG_CAIF_NETDEV=m
2096 +CONFIG_CAIF_USB=m
2097 +CONFIG_CEPH_LIB=m
2098 +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set
2099 +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y
2100 +CONFIG_NFC=m
2101 +CONFIG_NFC_DIGITAL=m
2102 +CONFIG_NFC_NCI=m
2103 +CONFIG_NFC_NCI_SPI=m
2104 +CONFIG_NFC_NCI_UART=m
2105 +CONFIG_NFC_HCI=m
2106 +CONFIG_NFC_SHDLC=y
2109 +# Near Field Communication (NFC) devices
2111 +CONFIG_NFC_TRF7970A=m
2112 +CONFIG_NFC_MEI_PHY=m
2113 +CONFIG_NFC_SIM=m
2114 +CONFIG_NFC_PORT100=m
2115 +CONFIG_NFC_VIRTUAL_NCI=m
2116 +CONFIG_NFC_FDP=m
2117 +CONFIG_NFC_FDP_I2C=m
2118 +CONFIG_NFC_PN544=m
2119 +CONFIG_NFC_PN544_I2C=m
2120 +CONFIG_NFC_PN544_MEI=m
2121 +CONFIG_NFC_PN533=m
2122 +CONFIG_NFC_PN533_USB=m
2123 +CONFIG_NFC_PN533_I2C=m
2124 +CONFIG_NFC_PN532_UART=m
2125 +CONFIG_NFC_MICROREAD=m
2126 +CONFIG_NFC_MICROREAD_I2C=m
2127 +CONFIG_NFC_MICROREAD_MEI=m
2128 +CONFIG_NFC_MRVL=m
2129 +CONFIG_NFC_MRVL_USB=m
2130 +CONFIG_NFC_MRVL_UART=m
2131 +CONFIG_NFC_MRVL_I2C=m
2132 +CONFIG_NFC_MRVL_SPI=m
2133 +CONFIG_NFC_ST21NFCA=m
2134 +CONFIG_NFC_ST21NFCA_I2C=m
2135 +CONFIG_NFC_ST_NCI=m
2136 +CONFIG_NFC_ST_NCI_I2C=m
2137 +CONFIG_NFC_ST_NCI_SPI=m
2138 +CONFIG_NFC_NXP_NCI=m
2139 +CONFIG_NFC_NXP_NCI_I2C=m
2140 +CONFIG_NFC_S3FWRN5=m
2141 +CONFIG_NFC_S3FWRN5_I2C=m
2142 +CONFIG_NFC_S3FWRN82_UART=m
2143 +CONFIG_NFC_ST95HF=m
2144 +# end of Near Field Communication (NFC) devices
2146 +CONFIG_PSAMPLE=m
2147 +CONFIG_NET_IFE=m
2148 +CONFIG_LWTUNNEL=y
2149 +CONFIG_LWTUNNEL_BPF=y
2150 +CONFIG_DST_CACHE=y
2151 +CONFIG_GRO_CELLS=y
2152 +CONFIG_SOCK_VALIDATE_XMIT=y
2153 +CONFIG_NET_SOCK_MSG=y
2154 +CONFIG_NET_DEVLINK=y
2155 +CONFIG_PAGE_POOL=y
2156 +CONFIG_FAILOVER=m
2157 +CONFIG_ETHTOOL_NETLINK=y
2158 +CONFIG_HAVE_EBPF_JIT=y
2161 +# Device Drivers
2163 +CONFIG_HAVE_EISA=y
2164 +# CONFIG_EISA is not set
2165 +CONFIG_HAVE_PCI=y
2166 +CONFIG_PCI=y
2167 +CONFIG_PCI_DOMAINS=y
2168 +CONFIG_PCIEPORTBUS=y
2169 +CONFIG_HOTPLUG_PCI_PCIE=y
2170 +CONFIG_PCIEAER=y
2171 +# CONFIG_PCIEAER_INJECT is not set
2172 +# CONFIG_PCIE_ECRC is not set
2173 +CONFIG_PCIEASPM=y
2174 +CONFIG_PCIEASPM_DEFAULT=y
2175 +# CONFIG_PCIEASPM_POWERSAVE is not set
2176 +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set
2177 +# CONFIG_PCIEASPM_PERFORMANCE is not set
2178 +CONFIG_PCIE_PME=y
2179 +CONFIG_PCIE_DPC=y
2180 +CONFIG_PCIE_PTM=y
2181 +# CONFIG_PCIE_EDR is not set
2182 +CONFIG_PCI_MSI=y
2183 +CONFIG_PCI_MSI_IRQ_DOMAIN=y
2184 +CONFIG_PCI_QUIRKS=y
2185 +# CONFIG_PCI_DEBUG is not set
2186 +CONFIG_PCI_REALLOC_ENABLE_AUTO=y
2187 +CONFIG_PCI_STUB=m
2188 +CONFIG_PCI_PF_STUB=m
2189 +CONFIG_XEN_PCIDEV_FRONTEND=m
2190 +CONFIG_PCI_ATS=y
2191 +CONFIG_PCI_LOCKLESS_CONFIG=y
2192 +CONFIG_PCI_IOV=y
2193 +CONFIG_PCI_PRI=y
2194 +CONFIG_PCI_PASID=y
2195 +# CONFIG_PCI_P2PDMA is not set
2196 +CONFIG_PCI_LABEL=y
2197 +CONFIG_PCI_HYPERV=m
2198 +# CONFIG_PCIE_BUS_TUNE_OFF is not set
2199 +CONFIG_PCIE_BUS_DEFAULT=y
2200 +# CONFIG_PCIE_BUS_SAFE is not set
2201 +# CONFIG_PCIE_BUS_PERFORMANCE is not set
2202 +# CONFIG_PCIE_BUS_PEER2PEER is not set
2203 +CONFIG_HOTPLUG_PCI=y
2204 +CONFIG_HOTPLUG_PCI_ACPI=y
2205 +CONFIG_HOTPLUG_PCI_ACPI_IBM=m
2206 +CONFIG_HOTPLUG_PCI_CPCI=y
2207 +CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m
2208 +CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m
2209 +CONFIG_HOTPLUG_PCI_SHPC=y
2212 +# PCI controller drivers
2214 +CONFIG_VMD=m
2215 +CONFIG_PCI_HYPERV_INTERFACE=m
2218 +# DesignWare PCI Core Support
2220 +CONFIG_PCIE_DW=y
2221 +CONFIG_PCIE_DW_HOST=y
2222 +CONFIG_PCIE_DW_EP=y
2223 +CONFIG_PCIE_DW_PLAT=y
2224 +CONFIG_PCIE_DW_PLAT_HOST=y
2225 +CONFIG_PCIE_DW_PLAT_EP=y
2226 +# CONFIG_PCI_MESON is not set
2227 +# end of DesignWare PCI Core Support
2230 +# Mobiveil PCIe Core Support
2232 +# end of Mobiveil PCIe Core Support
2235 +# Cadence PCIe controllers support
2237 +# end of Cadence PCIe controllers support
2238 +# end of PCI controller drivers
2241 +# PCI Endpoint
2243 +CONFIG_PCI_ENDPOINT=y
2244 +CONFIG_PCI_ENDPOINT_CONFIGFS=y
2245 +# CONFIG_PCI_EPF_TEST is not set
2246 +CONFIG_PCI_EPF_NTB=m
2247 +# end of PCI Endpoint
2250 +# PCI switch controller drivers
2252 +CONFIG_PCI_SW_SWITCHTEC=m
2253 +# end of PCI switch controller drivers
2255 +CONFIG_CXL_BUS=m
2256 +CONFIG_CXL_MEM=m
2257 +# CONFIG_CXL_MEM_RAW_COMMANDS is not set
2258 +CONFIG_PCCARD=m
2259 +CONFIG_PCMCIA=m
2260 +CONFIG_PCMCIA_LOAD_CIS=y
2261 +CONFIG_CARDBUS=y
2264 +# PC-card bridges
2266 +CONFIG_YENTA=m
2267 +CONFIG_YENTA_O2=y
2268 +CONFIG_YENTA_RICOH=y
2269 +CONFIG_YENTA_TI=y
2270 +CONFIG_YENTA_ENE_TUNE=y
2271 +CONFIG_YENTA_TOSHIBA=y
2272 +CONFIG_PD6729=m
2273 +CONFIG_I82092=m
2274 +CONFIG_PCCARD_NONSTATIC=y
2275 +CONFIG_RAPIDIO=y
2276 +CONFIG_RAPIDIO_TSI721=m
2277 +CONFIG_RAPIDIO_DISC_TIMEOUT=30
2278 +# CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS is not set
2279 +CONFIG_RAPIDIO_DMA_ENGINE=y
2280 +# CONFIG_RAPIDIO_DEBUG is not set
2281 +CONFIG_RAPIDIO_ENUM_BASIC=m
2282 +CONFIG_RAPIDIO_CHMAN=m
2283 +CONFIG_RAPIDIO_MPORT_CDEV=m
2286 +# RapidIO Switch drivers
2288 +CONFIG_RAPIDIO_TSI57X=m
2289 +CONFIG_RAPIDIO_CPS_XX=m
2290 +CONFIG_RAPIDIO_TSI568=m
2291 +CONFIG_RAPIDIO_CPS_GEN2=m
2292 +CONFIG_RAPIDIO_RXS_GEN3=m
2293 +# end of RapidIO Switch drivers
2296 +# Generic Driver Options
2298 +CONFIG_AUXILIARY_BUS=y
2299 +CONFIG_UEVENT_HELPER=y
2300 +CONFIG_UEVENT_HELPER_PATH=""
2301 +CONFIG_DEVTMPFS=y
2302 +CONFIG_DEVTMPFS_MOUNT=y
2303 +# CONFIG_STANDALONE is not set
2304 +CONFIG_PREVENT_FIRMWARE_BUILD=y
2307 +# Firmware loader
2309 +CONFIG_FW_LOADER=y
2310 +CONFIG_FW_LOADER_PAGED_BUF=y
2311 +CONFIG_EXTRA_FIRMWARE=""
2312 +CONFIG_FW_LOADER_USER_HELPER=y
2313 +# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set
2314 +CONFIG_FW_LOADER_COMPRESS=y
2315 +CONFIG_FW_CACHE=y
2316 +# end of Firmware loader
2318 +CONFIG_WANT_DEV_COREDUMP=y
2319 +CONFIG_ALLOW_DEV_COREDUMP=y
2320 +CONFIG_DEV_COREDUMP=y
2321 +# CONFIG_DEBUG_DRIVER is not set
2322 +# CONFIG_DEBUG_DEVRES is not set
2323 +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set
2324 +CONFIG_HMEM_REPORTING=y
2325 +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set
2326 +CONFIG_SYS_HYPERVISOR=y
2327 +CONFIG_GENERIC_CPU_AUTOPROBE=y
2328 +CONFIG_GENERIC_CPU_VULNERABILITIES=y
2329 +CONFIG_REGMAP=y
2330 +CONFIG_REGMAP_I2C=y
2331 +CONFIG_REGMAP_SLIMBUS=m
2332 +CONFIG_REGMAP_SPI=y
2333 +CONFIG_REGMAP_SPMI=m
2334 +CONFIG_REGMAP_W1=m
2335 +CONFIG_REGMAP_MMIO=y
2336 +CONFIG_REGMAP_IRQ=y
2337 +CONFIG_REGMAP_SOUNDWIRE=m
2338 +CONFIG_REGMAP_SCCB=m
2339 +CONFIG_REGMAP_I3C=m
2340 +CONFIG_REGMAP_SPI_AVMM=m
2341 +CONFIG_DMA_SHARED_BUFFER=y
2342 +# CONFIG_DMA_FENCE_TRACE is not set
2343 +# end of Generic Driver Options
2346 +# Bus devices
2348 +CONFIG_MHI_BUS=m
2349 +# CONFIG_MHI_BUS_DEBUG is not set
2350 +CONFIG_MHI_BUS_PCI_GENERIC=m
2351 +# end of Bus devices
2353 +CONFIG_CONNECTOR=y
2354 +CONFIG_PROC_EVENTS=y
2355 +CONFIG_GNSS=m
2356 +CONFIG_GNSS_SERIAL=m
2357 +CONFIG_GNSS_MTK_SERIAL=m
2358 +CONFIG_GNSS_SIRF_SERIAL=m
2359 +CONFIG_GNSS_UBX_SERIAL=m
2360 +CONFIG_MTD=m
2361 +# CONFIG_MTD_TESTS is not set
2364 +# Partition parsers
2366 +CONFIG_MTD_AR7_PARTS=m
2367 +CONFIG_MTD_CMDLINE_PARTS=m
2368 +CONFIG_MTD_REDBOOT_PARTS=m
2369 +CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
2370 +# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
2371 +# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
2372 +# end of Partition parsers
2375 +# User Modules And Translation Layers
2377 +CONFIG_MTD_BLKDEVS=m
2378 +CONFIG_MTD_BLOCK=m
2379 +CONFIG_MTD_BLOCK_RO=m
2380 +CONFIG_FTL=m
2381 +CONFIG_NFTL=m
2382 +CONFIG_NFTL_RW=y
2383 +CONFIG_INFTL=m
2384 +CONFIG_RFD_FTL=m
2385 +CONFIG_SSFDC=m
2386 +CONFIG_SM_FTL=m
2387 +CONFIG_MTD_OOPS=m
2388 +CONFIG_MTD_PSTORE=m
2389 +CONFIG_MTD_SWAP=m
2390 +# CONFIG_MTD_PARTITIONED_MASTER is not set
2393 +# RAM/ROM/Flash chip drivers
2395 +CONFIG_MTD_CFI=m
2396 +CONFIG_MTD_JEDECPROBE=m
2397 +CONFIG_MTD_GEN_PROBE=m
2398 +# CONFIG_MTD_CFI_ADV_OPTIONS is not set
2399 +CONFIG_MTD_MAP_BANK_WIDTH_1=y
2400 +CONFIG_MTD_MAP_BANK_WIDTH_2=y
2401 +CONFIG_MTD_MAP_BANK_WIDTH_4=y
2402 +CONFIG_MTD_CFI_I1=y
2403 +CONFIG_MTD_CFI_I2=y
2404 +CONFIG_MTD_CFI_INTELEXT=m
2405 +CONFIG_MTD_CFI_AMDSTD=m
2406 +CONFIG_MTD_CFI_STAA=m
2407 +CONFIG_MTD_CFI_UTIL=m
2408 +CONFIG_MTD_RAM=m
2409 +CONFIG_MTD_ROM=m
2410 +CONFIG_MTD_ABSENT=m
2411 +# end of RAM/ROM/Flash chip drivers
2414 +# Mapping drivers for chip access
2416 +CONFIG_MTD_COMPLEX_MAPPINGS=y
2417 +CONFIG_MTD_PHYSMAP=m
2418 +# CONFIG_MTD_PHYSMAP_COMPAT is not set
2419 +CONFIG_MTD_PHYSMAP_GPIO_ADDR=y
2420 +CONFIG_MTD_SBC_GXX=m
2421 +CONFIG_MTD_AMD76XROM=m
2422 +CONFIG_MTD_ICHXROM=m
2423 +CONFIG_MTD_ESB2ROM=m
2424 +CONFIG_MTD_CK804XROM=m
2425 +CONFIG_MTD_SCB2_FLASH=m
2426 +CONFIG_MTD_NETtel=m
2427 +CONFIG_MTD_L440GX=m
2428 +CONFIG_MTD_PCI=m
2429 +CONFIG_MTD_PCMCIA=m
2430 +# CONFIG_MTD_PCMCIA_ANONYMOUS is not set
2431 +CONFIG_MTD_INTEL_VR_NOR=m
2432 +CONFIG_MTD_PLATRAM=m
2433 +# end of Mapping drivers for chip access
2436 +# Self-contained MTD device drivers
2438 +CONFIG_MTD_PMC551=m
2439 +# CONFIG_MTD_PMC551_BUGFIX is not set
2440 +# CONFIG_MTD_PMC551_DEBUG is not set
2441 +CONFIG_MTD_DATAFLASH=m
2442 +# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
2443 +CONFIG_MTD_DATAFLASH_OTP=y
2444 +CONFIG_MTD_MCHP23K256=m
2445 +CONFIG_MTD_SST25L=m
2446 +CONFIG_MTD_SLRAM=m
2447 +CONFIG_MTD_PHRAM=m
2448 +CONFIG_MTD_MTDRAM=m
2449 +CONFIG_MTDRAM_TOTAL_SIZE=4096
2450 +CONFIG_MTDRAM_ERASE_SIZE=128
2451 +CONFIG_MTD_BLOCK2MTD=m
2454 +# Disk-On-Chip Device Drivers
2456 +# CONFIG_MTD_DOCG3 is not set
2457 +# end of Self-contained MTD device drivers
2460 +# NAND
2462 +CONFIG_MTD_NAND_CORE=m
2463 +CONFIG_MTD_ONENAND=m
2464 +CONFIG_MTD_ONENAND_VERIFY_WRITE=y
2465 +CONFIG_MTD_ONENAND_GENERIC=m
2466 +# CONFIG_MTD_ONENAND_OTP is not set
2467 +CONFIG_MTD_ONENAND_2X_PROGRAM=y
2468 +CONFIG_MTD_RAW_NAND=m
2471 +# Raw/parallel NAND flash controllers
2473 +CONFIG_MTD_NAND_DENALI=m
2474 +CONFIG_MTD_NAND_DENALI_PCI=m
2475 +CONFIG_MTD_NAND_CAFE=m
2476 +CONFIG_MTD_NAND_MXIC=m
2477 +CONFIG_MTD_NAND_GPIO=m
2478 +CONFIG_MTD_NAND_PLATFORM=m
2479 +CONFIG_MTD_NAND_ARASAN=m
2482 +# Misc
2484 +CONFIG_MTD_SM_COMMON=m
2485 +CONFIG_MTD_NAND_NANDSIM=m
2486 +CONFIG_MTD_NAND_RICOH=m
2487 +CONFIG_MTD_NAND_DISKONCHIP=m
2488 +# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
2489 +CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
2490 +# CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE is not set
2491 +CONFIG_MTD_SPI_NAND=m
2494 +# ECC engine support
2496 +CONFIG_MTD_NAND_ECC=y
2497 +CONFIG_MTD_NAND_ECC_SW_HAMMING=y
2498 +# CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC is not set
2499 +CONFIG_MTD_NAND_ECC_SW_BCH=y
2500 +# end of ECC engine support
2501 +# end of NAND
2504 +# LPDDR & LPDDR2 PCM memory drivers
2506 +CONFIG_MTD_LPDDR=m
2507 +CONFIG_MTD_QINFO_PROBE=m
2508 +# end of LPDDR & LPDDR2 PCM memory drivers
2510 +CONFIG_MTD_SPI_NOR=m
2511 +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y
2512 +# CONFIG_MTD_SPI_NOR_SWP_DISABLE is not set
2513 +CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE=y
2514 +# CONFIG_MTD_SPI_NOR_SWP_KEEP is not set
2515 +# CONFIG_SPI_INTEL_SPI_PCI is not set
2516 +# CONFIG_SPI_INTEL_SPI_PLATFORM is not set
2517 +CONFIG_MTD_UBI=m
2518 +CONFIG_MTD_UBI_WL_THRESHOLD=4096
2519 +CONFIG_MTD_UBI_BEB_LIMIT=20
2520 +CONFIG_MTD_UBI_FASTMAP=y
2521 +CONFIG_MTD_UBI_GLUEBI=m
2522 +CONFIG_MTD_UBI_BLOCK=y
2523 +CONFIG_MTD_HYPERBUS=m
2524 +# CONFIG_OF is not set
2525 +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
2526 +CONFIG_PARPORT=m
2527 +CONFIG_PARPORT_PC=m
2528 +CONFIG_PARPORT_SERIAL=m
2529 +CONFIG_PARPORT_PC_FIFO=y
2530 +# CONFIG_PARPORT_PC_SUPERIO is not set
2531 +CONFIG_PARPORT_PC_PCMCIA=m
2532 +CONFIG_PARPORT_AX88796=m
2533 +CONFIG_PARPORT_1284=y
2534 +CONFIG_PARPORT_NOT_PC=y
2535 +CONFIG_PNP=y
2536 +# CONFIG_PNP_DEBUG_MESSAGES is not set
2539 +# Protocols
2541 +CONFIG_PNPACPI=y
2542 +CONFIG_BLK_DEV=y
2543 +CONFIG_BLK_DEV_NULL_BLK=m
2544 +CONFIG_BLK_DEV_FD=m
2545 +CONFIG_CDROM=y
2546 +CONFIG_PARIDE=m
2549 +# Parallel IDE high-level drivers
2551 +CONFIG_PARIDE_PD=m
2552 +CONFIG_PARIDE_PCD=m
2553 +CONFIG_PARIDE_PF=m
2554 +CONFIG_PARIDE_PT=m
2555 +CONFIG_PARIDE_PG=m
2558 +# Parallel IDE protocol modules
2560 +CONFIG_PARIDE_ATEN=m
2561 +CONFIG_PARIDE_BPCK=m
2562 +CONFIG_PARIDE_COMM=m
2563 +CONFIG_PARIDE_DSTR=m
2564 +CONFIG_PARIDE_FIT2=m
2565 +CONFIG_PARIDE_FIT3=m
2566 +CONFIG_PARIDE_EPAT=m
2567 +CONFIG_PARIDE_EPATC8=y
2568 +CONFIG_PARIDE_EPIA=m
2569 +CONFIG_PARIDE_FRIQ=m
2570 +CONFIG_PARIDE_FRPW=m
2571 +CONFIG_PARIDE_KBIC=m
2572 +CONFIG_PARIDE_KTTI=m
2573 +CONFIG_PARIDE_ON20=m
2574 +CONFIG_PARIDE_ON26=m
2575 +CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m
2576 +CONFIG_ZRAM=m
2577 +CONFIG_ZRAM_DEF_COMP_LZORLE=y
2578 +# CONFIG_ZRAM_DEF_COMP_ZSTD is not set
2579 +# CONFIG_ZRAM_DEF_COMP_LZ4 is not set
2580 +# CONFIG_ZRAM_DEF_COMP_LZO is not set
2581 +# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set
2582 +# CONFIG_ZRAM_DEF_COMP_842 is not set
2583 +CONFIG_ZRAM_DEF_COMP="lzo-rle"
2584 +CONFIG_ZRAM_WRITEBACK=y
2585 +CONFIG_ZRAM_MEMORY_TRACKING=y
2586 +CONFIG_BLK_DEV_UMEM=m
2587 +CONFIG_BLK_DEV_LOOP=y
2588 +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
2589 +CONFIG_BLK_DEV_CRYPTOLOOP=m
2590 +CONFIG_BLK_DEV_DRBD=m
2591 +# CONFIG_DRBD_FAULT_INJECTION is not set
2592 +CONFIG_BLK_DEV_NBD=m
2593 +CONFIG_BLK_DEV_SX8=m
2594 +CONFIG_BLK_DEV_RAM=m
2595 +CONFIG_BLK_DEV_RAM_COUNT=16
2596 +CONFIG_BLK_DEV_RAM_SIZE=65536
2597 +CONFIG_CDROM_PKTCDVD=m
2598 +CONFIG_CDROM_PKTCDVD_BUFFERS=8
2599 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set
2600 +CONFIG_ATA_OVER_ETH=m
2601 +CONFIG_XEN_BLKDEV_FRONTEND=y
2602 +CONFIG_XEN_BLKDEV_BACKEND=m
2603 +CONFIG_VIRTIO_BLK=m
2604 +CONFIG_BLK_DEV_RBD=m
2605 +CONFIG_BLK_DEV_RSXX=m
2606 +CONFIG_BLK_DEV_RNBD=y
2607 +CONFIG_BLK_DEV_RNBD_CLIENT=m
2608 +CONFIG_BLK_DEV_RNBD_SERVER=m
2611 +# NVME Support
2613 +CONFIG_NVME_CORE=m
2614 +CONFIG_BLK_DEV_NVME=m
2615 +CONFIG_NVME_MULTIPATH=y
2616 +CONFIG_NVME_HWMON=y
2617 +CONFIG_NVME_FABRICS=m
2618 +CONFIG_NVME_RDMA=m
2619 +CONFIG_NVME_FC=m
2620 +CONFIG_NVME_TCP=m
2621 +CONFIG_NVME_TARGET=m
2622 +CONFIG_NVME_TARGET_PASSTHRU=y
2623 +CONFIG_NVME_TARGET_LOOP=m
2624 +CONFIG_NVME_TARGET_RDMA=m
2625 +CONFIG_NVME_TARGET_FC=m
2626 +# CONFIG_NVME_TARGET_FCLOOP is not set
2627 +CONFIG_NVME_TARGET_TCP=m
2628 +# end of NVME Support
2631 +# Misc devices
2633 +CONFIG_SENSORS_LIS3LV02D=m
2634 +CONFIG_AD525X_DPOT=m
2635 +CONFIG_AD525X_DPOT_I2C=m
2636 +CONFIG_AD525X_DPOT_SPI=m
2637 +CONFIG_DUMMY_IRQ=m
2638 +CONFIG_IBM_ASM=m
2639 +CONFIG_PHANTOM=m
2640 +CONFIG_TIFM_CORE=m
2641 +CONFIG_TIFM_7XX1=m
2642 +CONFIG_ICS932S401=m
2643 +CONFIG_ENCLOSURE_SERVICES=m
2644 +CONFIG_SGI_XP=m
2645 +CONFIG_HP_ILO=m
2646 +CONFIG_SGI_GRU=m
2647 +# CONFIG_SGI_GRU_DEBUG is not set
2648 +CONFIG_APDS9802ALS=m
2649 +CONFIG_ISL29003=m
2650 +CONFIG_ISL29020=m
2651 +CONFIG_SENSORS_TSL2550=m
2652 +CONFIG_SENSORS_BH1770=m
2653 +CONFIG_SENSORS_APDS990X=m
2654 +CONFIG_HMC6352=m
2655 +CONFIG_DS1682=m
2656 +CONFIG_VMWARE_BALLOON=m
2657 +CONFIG_LATTICE_ECP3_CONFIG=m
2658 +CONFIG_SRAM=y
2659 +# CONFIG_PCI_ENDPOINT_TEST is not set
2660 +CONFIG_XILINX_SDFEC=m
2661 +CONFIG_MISC_RTSX=m
2662 +CONFIG_PVPANIC=m
2663 +CONFIG_C2PORT=m
2664 +CONFIG_C2PORT_DURAMAR_2150=m
2667 +# EEPROM support
2669 +CONFIG_EEPROM_AT24=m
2670 +CONFIG_EEPROM_AT25=m
2671 +CONFIG_EEPROM_LEGACY=m
2672 +CONFIG_EEPROM_MAX6875=m
2673 +CONFIG_EEPROM_93CX6=m
2674 +CONFIG_EEPROM_93XX46=m
2675 +CONFIG_EEPROM_IDT_89HPESX=m
2676 +CONFIG_EEPROM_EE1004=m
2677 +# end of EEPROM support
2679 +CONFIG_CB710_CORE=m
2680 +# CONFIG_CB710_DEBUG is not set
2681 +CONFIG_CB710_DEBUG_ASSUMPTIONS=y
2684 +# Texas Instruments shared transport line discipline
2686 +CONFIG_TI_ST=m
2687 +# end of Texas Instruments shared transport line discipline
2689 +CONFIG_SENSORS_LIS3_I2C=m
2690 +CONFIG_ALTERA_STAPL=m
2691 +CONFIG_INTEL_MEI=m
2692 +CONFIG_INTEL_MEI_ME=m
2693 +CONFIG_INTEL_MEI_TXE=m
2694 +CONFIG_INTEL_MEI_HDCP=m
2695 +CONFIG_VMWARE_VMCI=m
2696 +CONFIG_GENWQE=m
2697 +CONFIG_GENWQE_PLATFORM_ERROR_RECOVERY=0
2698 +CONFIG_ECHO=m
2699 +CONFIG_BCM_VK=m
2700 +CONFIG_BCM_VK_TTY=y
2701 +CONFIG_MISC_ALCOR_PCI=m
2702 +CONFIG_MISC_RTSX_PCI=m
2703 +CONFIG_MISC_RTSX_USB=m
2704 +CONFIG_HABANA_AI=m
2705 +CONFIG_UACCE=m
2706 +# end of Misc devices
2708 +CONFIG_HAVE_IDE=y
2709 +# CONFIG_IDE is not set
2712 +# SCSI device support
2714 +CONFIG_SCSI_MOD=y
2715 +CONFIG_RAID_ATTRS=m
2716 +CONFIG_SCSI=y
2717 +CONFIG_SCSI_DMA=y
2718 +CONFIG_SCSI_NETLINK=y
2719 +CONFIG_SCSI_PROC_FS=y
2722 +# SCSI support type (disk, tape, CD-ROM)
2724 +CONFIG_BLK_DEV_SD=y
2725 +CONFIG_CHR_DEV_ST=m
2726 +CONFIG_BLK_DEV_SR=y
2727 +CONFIG_CHR_DEV_SG=y
2728 +CONFIG_CHR_DEV_SCH=m
2729 +CONFIG_SCSI_ENCLOSURE=m
2730 +CONFIG_SCSI_CONSTANTS=y
2731 +CONFIG_SCSI_LOGGING=y
2732 +CONFIG_SCSI_SCAN_ASYNC=y
2735 +# SCSI Transports
2737 +CONFIG_SCSI_SPI_ATTRS=m
2738 +CONFIG_SCSI_FC_ATTRS=m
2739 +CONFIG_SCSI_ISCSI_ATTRS=m
2740 +CONFIG_SCSI_SAS_ATTRS=m
2741 +CONFIG_SCSI_SAS_LIBSAS=m
2742 +CONFIG_SCSI_SAS_ATA=y
2743 +CONFIG_SCSI_SAS_HOST_SMP=y
2744 +CONFIG_SCSI_SRP_ATTRS=m
2745 +# end of SCSI Transports
2747 +CONFIG_SCSI_LOWLEVEL=y
2748 +CONFIG_ISCSI_TCP=m
2749 +CONFIG_ISCSI_BOOT_SYSFS=m
2750 +CONFIG_SCSI_CXGB3_ISCSI=m
2751 +CONFIG_SCSI_CXGB4_ISCSI=m
2752 +CONFIG_SCSI_BNX2_ISCSI=m
2753 +CONFIG_SCSI_BNX2X_FCOE=m
2754 +CONFIG_BE2ISCSI=m
2755 +CONFIG_BLK_DEV_3W_XXXX_RAID=m
2756 +CONFIG_SCSI_HPSA=m
2757 +CONFIG_SCSI_3W_9XXX=m
2758 +CONFIG_SCSI_3W_SAS=m
2759 +CONFIG_SCSI_ACARD=m
2760 +CONFIG_SCSI_AACRAID=m
2761 +CONFIG_SCSI_AIC7XXX=m
2762 +CONFIG_AIC7XXX_CMDS_PER_DEVICE=8
2763 +CONFIG_AIC7XXX_RESET_DELAY_MS=5000
2764 +# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
2765 +CONFIG_AIC7XXX_DEBUG_MASK=0
2766 +CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
2767 +CONFIG_SCSI_AIC79XX=m
2768 +CONFIG_AIC79XX_CMDS_PER_DEVICE=32
2769 +CONFIG_AIC79XX_RESET_DELAY_MS=5000
2770 +# CONFIG_AIC79XX_DEBUG_ENABLE is not set
2771 +CONFIG_AIC79XX_DEBUG_MASK=0
2772 +CONFIG_AIC79XX_REG_PRETTY_PRINT=y
2773 +CONFIG_SCSI_AIC94XX=m
2774 +# CONFIG_AIC94XX_DEBUG is not set
2775 +CONFIG_SCSI_MVSAS=m
2776 +# CONFIG_SCSI_MVSAS_DEBUG is not set
2777 +# CONFIG_SCSI_MVSAS_TASKLET is not set
2778 +CONFIG_SCSI_MVUMI=m
2779 +CONFIG_SCSI_DPT_I2O=m
2780 +CONFIG_SCSI_ADVANSYS=m
2781 +CONFIG_SCSI_ARCMSR=m
2782 +CONFIG_SCSI_ESAS2R=m
2783 +CONFIG_MEGARAID_NEWGEN=y
2784 +CONFIG_MEGARAID_MM=m
2785 +CONFIG_MEGARAID_MAILBOX=m
2786 +CONFIG_MEGARAID_LEGACY=m
2787 +CONFIG_MEGARAID_SAS=m
2788 +CONFIG_SCSI_MPT3SAS=m
2789 +CONFIG_SCSI_MPT2SAS_MAX_SGE=128
2790 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128
2791 +CONFIG_SCSI_MPT2SAS=m
2792 +CONFIG_SCSI_SMARTPQI=m
2793 +CONFIG_SCSI_UFSHCD=m
2794 +CONFIG_SCSI_UFSHCD_PCI=m
2795 +CONFIG_SCSI_UFS_DWC_TC_PCI=m
2796 +CONFIG_SCSI_UFSHCD_PLATFORM=m
2797 +CONFIG_SCSI_UFS_CDNS_PLATFORM=m
2798 +CONFIG_SCSI_UFS_DWC_TC_PLATFORM=m
2799 +CONFIG_SCSI_UFS_BSG=y
2800 +CONFIG_SCSI_UFS_CRYPTO=y
2801 +CONFIG_SCSI_HPTIOP=m
2802 +CONFIG_SCSI_BUSLOGIC=m
2803 +CONFIG_SCSI_FLASHPOINT=y
2804 +CONFIG_SCSI_MYRB=m
2805 +CONFIG_SCSI_MYRS=m
2806 +CONFIG_VMWARE_PVSCSI=m
2807 +CONFIG_XEN_SCSI_FRONTEND=m
2808 +CONFIG_HYPERV_STORAGE=m
2809 +CONFIG_LIBFC=m
2810 +CONFIG_LIBFCOE=m
2811 +CONFIG_FCOE=m
2812 +CONFIG_FCOE_FNIC=m
2813 +CONFIG_SCSI_SNIC=m
2814 +# CONFIG_SCSI_SNIC_DEBUG_FS is not set
2815 +CONFIG_SCSI_DMX3191D=m
2816 +CONFIG_SCSI_FDOMAIN=m
2817 +CONFIG_SCSI_FDOMAIN_PCI=m
2818 +CONFIG_SCSI_ISCI=m
2819 +CONFIG_SCSI_IPS=m
2820 +CONFIG_SCSI_INITIO=m
2821 +CONFIG_SCSI_INIA100=m
2822 +CONFIG_SCSI_PPA=m
2823 +CONFIG_SCSI_IMM=m
2824 +# CONFIG_SCSI_IZIP_EPP16 is not set
2825 +# CONFIG_SCSI_IZIP_SLOW_CTR is not set
2826 +CONFIG_SCSI_STEX=m
2827 +CONFIG_SCSI_SYM53C8XX_2=m
2828 +CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
2829 +CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
2830 +CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
2831 +CONFIG_SCSI_SYM53C8XX_MMIO=y
2832 +CONFIG_SCSI_IPR=m
2833 +CONFIG_SCSI_IPR_TRACE=y
2834 +CONFIG_SCSI_IPR_DUMP=y
2835 +CONFIG_SCSI_QLOGIC_1280=m
2836 +CONFIG_SCSI_QLA_FC=m
2837 +CONFIG_TCM_QLA2XXX=m
2838 +# CONFIG_TCM_QLA2XXX_DEBUG is not set
2839 +CONFIG_SCSI_QLA_ISCSI=m
2840 +CONFIG_QEDI=m
2841 +CONFIG_QEDF=m
2842 +CONFIG_SCSI_LPFC=m
2843 +# CONFIG_SCSI_LPFC_DEBUG_FS is not set
2844 +CONFIG_SCSI_DC395x=m
2845 +CONFIG_SCSI_AM53C974=m
2846 +CONFIG_SCSI_WD719X=m
2847 +CONFIG_SCSI_DEBUG=m
2848 +CONFIG_SCSI_PMCRAID=m
2849 +CONFIG_SCSI_PM8001=m
2850 +CONFIG_SCSI_BFA_FC=m
2851 +CONFIG_SCSI_VIRTIO=m
2852 +CONFIG_SCSI_CHELSIO_FCOE=m
2853 +CONFIG_SCSI_LOWLEVEL_PCMCIA=y
2854 +CONFIG_PCMCIA_AHA152X=m
2855 +CONFIG_PCMCIA_FDOMAIN=m
2856 +CONFIG_PCMCIA_QLOGIC=m
2857 +CONFIG_PCMCIA_SYM53C500=m
2858 +CONFIG_SCSI_DH=y
2859 +CONFIG_SCSI_DH_RDAC=m
2860 +CONFIG_SCSI_DH_HP_SW=m
2861 +CONFIG_SCSI_DH_EMC=m
2862 +CONFIG_SCSI_DH_ALUA=m
2863 +# end of SCSI device support
2865 +CONFIG_ATA=y
2866 +CONFIG_SATA_HOST=y
2867 +CONFIG_PATA_TIMINGS=y
2868 +CONFIG_ATA_VERBOSE_ERROR=y
2869 +CONFIG_ATA_FORCE=y
2870 +CONFIG_ATA_ACPI=y
2871 +CONFIG_SATA_ZPODD=y
2872 +CONFIG_SATA_PMP=y
2875 +# Controllers with non-SFF native interface
2877 +CONFIG_SATA_AHCI=m
2878 +CONFIG_SATA_MOBILE_LPM_POLICY=3
2879 +CONFIG_SATA_AHCI_PLATFORM=m
2880 +CONFIG_SATA_INIC162X=m
2881 +CONFIG_SATA_ACARD_AHCI=m
2882 +CONFIG_SATA_SIL24=m
2883 +CONFIG_ATA_SFF=y
2886 +# SFF controllers with custom DMA interface
2888 +CONFIG_PDC_ADMA=m
2889 +CONFIG_SATA_QSTOR=m
2890 +CONFIG_SATA_SX4=m
2891 +CONFIG_ATA_BMDMA=y
2894 +# SATA SFF controllers with BMDMA
2896 +CONFIG_ATA_PIIX=y
2897 +CONFIG_SATA_DWC=m
2898 +CONFIG_SATA_DWC_OLD_DMA=y
2899 +# CONFIG_SATA_DWC_DEBUG is not set
2900 +CONFIG_SATA_MV=m
2901 +CONFIG_SATA_NV=m
2902 +CONFIG_SATA_PROMISE=m
2903 +CONFIG_SATA_SIL=m
2904 +CONFIG_SATA_SIS=m
2905 +CONFIG_SATA_SVW=m
2906 +CONFIG_SATA_ULI=m
2907 +CONFIG_SATA_VIA=m
2908 +CONFIG_SATA_VITESSE=m
2911 +# PATA SFF controllers with BMDMA
2913 +CONFIG_PATA_ALI=m
2914 +CONFIG_PATA_AMD=m
2915 +CONFIG_PATA_ARTOP=m
2916 +CONFIG_PATA_ATIIXP=m
2917 +CONFIG_PATA_ATP867X=m
2918 +CONFIG_PATA_CMD64X=m
2919 +CONFIG_PATA_CYPRESS=m
2920 +CONFIG_PATA_EFAR=m
2921 +CONFIG_PATA_HPT366=m
2922 +CONFIG_PATA_HPT37X=m
2923 +CONFIG_PATA_HPT3X2N=m
2924 +CONFIG_PATA_HPT3X3=m
2925 +# CONFIG_PATA_HPT3X3_DMA is not set
2926 +CONFIG_PATA_IT8213=m
2927 +CONFIG_PATA_IT821X=m
2928 +CONFIG_PATA_JMICRON=m
2929 +CONFIG_PATA_MARVELL=m
2930 +CONFIG_PATA_NETCELL=m
2931 +CONFIG_PATA_NINJA32=m
2932 +CONFIG_PATA_NS87415=m
2933 +CONFIG_PATA_OLDPIIX=m
2934 +CONFIG_PATA_OPTIDMA=m
2935 +CONFIG_PATA_PDC2027X=m
2936 +CONFIG_PATA_PDC_OLD=m
2937 +CONFIG_PATA_RADISYS=m
2938 +CONFIG_PATA_RDC=m
2939 +CONFIG_PATA_SCH=m
2940 +CONFIG_PATA_SERVERWORKS=m
2941 +CONFIG_PATA_SIL680=m
2942 +CONFIG_PATA_SIS=y
2943 +CONFIG_PATA_TOSHIBA=m
2944 +CONFIG_PATA_TRIFLEX=m
2945 +CONFIG_PATA_VIA=m
2946 +CONFIG_PATA_WINBOND=m
2949 +# PIO-only SFF controllers
2951 +CONFIG_PATA_CMD640_PCI=m
2952 +CONFIG_PATA_MPIIX=m
2953 +CONFIG_PATA_NS87410=m
2954 +CONFIG_PATA_OPTI=m
2955 +CONFIG_PATA_PCMCIA=m
2956 +CONFIG_PATA_PLATFORM=m
2957 +CONFIG_PATA_RZ1000=m
2960 +# Generic fallback / legacy drivers
2962 +CONFIG_PATA_ACPI=m
2963 +CONFIG_ATA_GENERIC=y
2964 +CONFIG_PATA_LEGACY=m
2965 +CONFIG_MD=y
2966 +CONFIG_BLK_DEV_MD=y
2967 +CONFIG_MD_AUTODETECT=y
2968 +CONFIG_MD_LINEAR=m
2969 +CONFIG_MD_RAID0=m
2970 +CONFIG_MD_RAID1=m
2971 +CONFIG_MD_RAID10=m
2972 +CONFIG_MD_RAID456=m
2973 +CONFIG_MD_MULTIPATH=m
2974 +CONFIG_MD_FAULTY=m
2975 +CONFIG_MD_CLUSTER=m
2976 +CONFIG_BCACHE=m
2977 +# CONFIG_BCACHE_DEBUG is not set
2978 +# CONFIG_BCACHE_CLOSURES_DEBUG is not set
2979 +CONFIG_BCACHE_ASYNC_REGISTRATION=y
2980 +CONFIG_BLK_DEV_DM_BUILTIN=y
2981 +CONFIG_BLK_DEV_DM=y
2982 +# CONFIG_DM_DEBUG is not set
2983 +CONFIG_DM_BUFIO=m
2984 +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set
2985 +CONFIG_DM_BIO_PRISON=m
2986 +CONFIG_DM_PERSISTENT_DATA=m
2987 +CONFIG_DM_UNSTRIPED=m
2988 +CONFIG_DM_CRYPT=m
2989 +CONFIG_DM_SNAPSHOT=m
2990 +CONFIG_DM_THIN_PROVISIONING=m
2991 +CONFIG_DM_CACHE=m
2992 +CONFIG_DM_CACHE_SMQ=m
2993 +CONFIG_DM_WRITECACHE=m
2994 +CONFIG_DM_EBS=m
2995 +CONFIG_DM_ERA=m
2996 +CONFIG_DM_CLONE=m
2997 +CONFIG_DM_MIRROR=m
2998 +CONFIG_DM_LOG_USERSPACE=m
2999 +CONFIG_DM_RAID=m
3000 +CONFIG_DM_ZERO=m
3001 +CONFIG_DM_MULTIPATH=m
3002 +CONFIG_DM_MULTIPATH_QL=m
3003 +CONFIG_DM_MULTIPATH_ST=m
3004 +CONFIG_DM_MULTIPATH_HST=m
3005 +CONFIG_DM_MULTIPATH_IOA=m
3006 +CONFIG_DM_DELAY=m
3007 +# CONFIG_DM_DUST is not set
3008 +CONFIG_DM_INIT=y
3009 +CONFIG_DM_UEVENT=y
3010 +CONFIG_DM_FLAKEY=m
3011 +CONFIG_DM_VERITY=m
3012 +CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
3013 +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG_SECONDARY_KEYRING is not set
3014 +# CONFIG_DM_VERITY_FEC is not set
3015 +CONFIG_DM_SWITCH=m
3016 +CONFIG_DM_LOG_WRITES=m
3017 +CONFIG_DM_INTEGRITY=m
3018 +CONFIG_DM_ZONED=m
3019 +CONFIG_TARGET_CORE=m
3020 +CONFIG_TCM_IBLOCK=m
3021 +CONFIG_TCM_FILEIO=m
3022 +CONFIG_TCM_PSCSI=m
3023 +CONFIG_TCM_USER2=m
3024 +CONFIG_LOOPBACK_TARGET=m
3025 +CONFIG_TCM_FC=m
3026 +CONFIG_ISCSI_TARGET=m
3027 +CONFIG_ISCSI_TARGET_CXGB4=m
3028 +CONFIG_SBP_TARGET=m
3029 +CONFIG_FUSION=y
3030 +CONFIG_FUSION_SPI=m
3031 +CONFIG_FUSION_FC=m
3032 +CONFIG_FUSION_SAS=m
3033 +CONFIG_FUSION_MAX_SGE=128
3034 +CONFIG_FUSION_CTL=m
3035 +CONFIG_FUSION_LAN=m
3036 +CONFIG_FUSION_LOGGING=y
3039 +# IEEE 1394 (FireWire) support
3041 +CONFIG_FIREWIRE=m
3042 +CONFIG_FIREWIRE_OHCI=m
3043 +CONFIG_FIREWIRE_SBP2=m
3044 +CONFIG_FIREWIRE_NET=m
3045 +CONFIG_FIREWIRE_NOSY=m
3046 +# end of IEEE 1394 (FireWire) support
3048 +CONFIG_MACINTOSH_DRIVERS=y
3049 +CONFIG_MAC_EMUMOUSEBTN=m
3050 +CONFIG_NETDEVICES=y
3051 +CONFIG_MII=m
3052 +CONFIG_NET_CORE=y
3053 +CONFIG_BONDING=m
3054 +CONFIG_DUMMY=m
3055 +CONFIG_WIREGUARD=m
3056 +# CONFIG_WIREGUARD_DEBUG is not set
3057 +CONFIG_EQUALIZER=m
3058 +CONFIG_NET_FC=y
3059 +CONFIG_IFB=m
3060 +CONFIG_NET_TEAM=m
3061 +CONFIG_NET_TEAM_MODE_BROADCAST=m
3062 +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
3063 +CONFIG_NET_TEAM_MODE_RANDOM=m
3064 +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
3065 +CONFIG_NET_TEAM_MODE_LOADBALANCE=m
3066 +CONFIG_MACVLAN=m
3067 +CONFIG_MACVTAP=m
3068 +CONFIG_IPVLAN_L3S=y
3069 +CONFIG_IPVLAN=m
3070 +CONFIG_IPVTAP=m
3071 +CONFIG_VXLAN=m
3072 +CONFIG_GENEVE=m
3073 +CONFIG_BAREUDP=m
3074 +CONFIG_GTP=m
3075 +CONFIG_MACSEC=m
3076 +CONFIG_NETCONSOLE=m
3077 +CONFIG_NETCONSOLE_DYNAMIC=y
3078 +CONFIG_NETPOLL=y
3079 +CONFIG_NET_POLL_CONTROLLER=y
3080 +CONFIG_NTB_NETDEV=m
3081 +CONFIG_RIONET=m
3082 +CONFIG_RIONET_TX_SIZE=128
3083 +CONFIG_RIONET_RX_SIZE=128
3084 +CONFIG_TUN=y
3085 +CONFIG_TAP=m
3086 +# CONFIG_TUN_VNET_CROSS_LE is not set
3087 +CONFIG_VETH=m
3088 +CONFIG_VIRTIO_NET=m
3089 +CONFIG_NLMON=m
3090 +CONFIG_NET_VRF=m
3091 +CONFIG_VSOCKMON=m
3092 +CONFIG_MHI_NET=m
3093 +CONFIG_SUNGEM_PHY=m
3094 +CONFIG_ARCNET=m
3095 +CONFIG_ARCNET_1201=m
3096 +CONFIG_ARCNET_1051=m
3097 +CONFIG_ARCNET_RAW=m
3098 +CONFIG_ARCNET_CAP=m
3099 +CONFIG_ARCNET_COM90xx=m
3100 +CONFIG_ARCNET_COM90xxIO=m
3101 +CONFIG_ARCNET_RIM_I=m
3102 +CONFIG_ARCNET_COM20020=m
3103 +CONFIG_ARCNET_COM20020_PCI=m
3104 +CONFIG_ARCNET_COM20020_CS=m
3105 +CONFIG_ATM_DRIVERS=y
3106 +CONFIG_ATM_DUMMY=m
3107 +CONFIG_ATM_TCP=m
3108 +CONFIG_ATM_LANAI=m
3109 +CONFIG_ATM_ENI=m
3110 +# CONFIG_ATM_ENI_DEBUG is not set
3111 +# CONFIG_ATM_ENI_TUNE_BURST is not set
3112 +CONFIG_ATM_FIRESTREAM=m
3113 +CONFIG_ATM_ZATM=m
3114 +# CONFIG_ATM_ZATM_DEBUG is not set
3115 +CONFIG_ATM_NICSTAR=m
3116 +# CONFIG_ATM_NICSTAR_USE_SUNI is not set
3117 +# CONFIG_ATM_NICSTAR_USE_IDT77105 is not set
3118 +CONFIG_ATM_IDT77252=m
3119 +# CONFIG_ATM_IDT77252_DEBUG is not set
3120 +# CONFIG_ATM_IDT77252_RCV_ALL is not set
3121 +CONFIG_ATM_IDT77252_USE_SUNI=y
3122 +CONFIG_ATM_AMBASSADOR=m
3123 +# CONFIG_ATM_AMBASSADOR_DEBUG is not set
3124 +CONFIG_ATM_HORIZON=m
3125 +# CONFIG_ATM_HORIZON_DEBUG is not set
3126 +CONFIG_ATM_IA=m
3127 +# CONFIG_ATM_IA_DEBUG is not set
3128 +CONFIG_ATM_FORE200E=m
3129 +# CONFIG_ATM_FORE200E_USE_TASKLET is not set
3130 +CONFIG_ATM_FORE200E_TX_RETRY=16
3131 +CONFIG_ATM_FORE200E_DEBUG=0
3132 +CONFIG_ATM_HE=m
3133 +CONFIG_ATM_HE_USE_SUNI=y
3134 +CONFIG_ATM_SOLOS=m
3135 +CONFIG_CAIF_DRIVERS=y
3136 +CONFIG_CAIF_TTY=m
3137 +CONFIG_CAIF_HSI=m
3138 +CONFIG_CAIF_VIRTIO=m
3141 +# Distributed Switch Architecture drivers
3143 +CONFIG_B53=m
3144 +CONFIG_B53_SPI_DRIVER=m
3145 +CONFIG_B53_MDIO_DRIVER=m
3146 +CONFIG_B53_MMAP_DRIVER=m
3147 +CONFIG_B53_SRAB_DRIVER=m
3148 +CONFIG_B53_SERDES=m
3149 +CONFIG_NET_DSA_BCM_SF2=m
3150 +# CONFIG_NET_DSA_LOOP is not set
3151 +CONFIG_NET_DSA_HIRSCHMANN_HELLCREEK=m
3152 +CONFIG_NET_DSA_LANTIQ_GSWIP=m
3153 +CONFIG_NET_DSA_MT7530=m
3154 +CONFIG_NET_DSA_MV88E6060=m
3155 +CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON=m
3156 +CONFIG_NET_DSA_MICROCHIP_KSZ9477=m
3157 +CONFIG_NET_DSA_MICROCHIP_KSZ9477_I2C=m
3158 +CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI=m
3159 +CONFIG_NET_DSA_MICROCHIP_KSZ8795=m
3160 +CONFIG_NET_DSA_MICROCHIP_KSZ8795_SPI=m
3161 +CONFIG_NET_DSA_MV88E6XXX=m
3162 +CONFIG_NET_DSA_MV88E6XXX_PTP=y
3163 +CONFIG_NET_DSA_MSCC_SEVILLE=m
3164 +CONFIG_NET_DSA_AR9331=m
3165 +CONFIG_NET_DSA_SJA1105=m
3166 +CONFIG_NET_DSA_SJA1105_PTP=y
3167 +CONFIG_NET_DSA_SJA1105_TAS=y
3168 +CONFIG_NET_DSA_SJA1105_VL=y
3169 +CONFIG_NET_DSA_XRS700X=m
3170 +CONFIG_NET_DSA_XRS700X_I2C=m
3171 +CONFIG_NET_DSA_XRS700X_MDIO=m
3172 +CONFIG_NET_DSA_QCA8K=m
3173 +CONFIG_NET_DSA_REALTEK_SMI=m
3174 +CONFIG_NET_DSA_SMSC_LAN9303=m
3175 +CONFIG_NET_DSA_SMSC_LAN9303_I2C=m
3176 +CONFIG_NET_DSA_SMSC_LAN9303_MDIO=m
3177 +CONFIG_NET_DSA_VITESSE_VSC73XX=m
3178 +CONFIG_NET_DSA_VITESSE_VSC73XX_SPI=m
3179 +CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM=m
3180 +# end of Distributed Switch Architecture drivers
3182 +CONFIG_ETHERNET=y
3183 +CONFIG_MDIO=m
3184 +CONFIG_NET_VENDOR_3COM=y
3185 +CONFIG_PCMCIA_3C574=m
3186 +CONFIG_PCMCIA_3C589=m
3187 +CONFIG_VORTEX=m
3188 +CONFIG_TYPHOON=m
3189 +CONFIG_NET_VENDOR_ADAPTEC=y
3190 +CONFIG_ADAPTEC_STARFIRE=m
3191 +CONFIG_NET_VENDOR_AGERE=y
3192 +CONFIG_ET131X=m
3193 +CONFIG_NET_VENDOR_ALACRITECH=y
3194 +CONFIG_SLICOSS=m
3195 +CONFIG_NET_VENDOR_ALTEON=y
3196 +CONFIG_ACENIC=m
3197 +# CONFIG_ACENIC_OMIT_TIGON_I is not set
3198 +CONFIG_ALTERA_TSE=m
3199 +CONFIG_NET_VENDOR_AMAZON=y
3200 +CONFIG_ENA_ETHERNET=m
3201 +CONFIG_NET_VENDOR_AMD=y
3202 +CONFIG_AMD8111_ETH=m
3203 +CONFIG_PCNET32=m
3204 +CONFIG_PCMCIA_NMCLAN=m
3205 +CONFIG_AMD_XGBE=m
3206 +CONFIG_AMD_XGBE_DCB=y
3207 +CONFIG_AMD_XGBE_HAVE_ECC=y
3208 +CONFIG_NET_VENDOR_AQUANTIA=y
3209 +CONFIG_AQTION=m
3210 +CONFIG_NET_VENDOR_ARC=y
3211 +CONFIG_NET_VENDOR_ATHEROS=y
3212 +CONFIG_ATL2=m
3213 +CONFIG_ATL1=m
3214 +CONFIG_ATL1E=m
3215 +CONFIG_ATL1C=m
3216 +CONFIG_ALX=m
3217 +CONFIG_NET_VENDOR_BROADCOM=y
3218 +CONFIG_B44=m
3219 +CONFIG_B44_PCI_AUTOSELECT=y
3220 +CONFIG_B44_PCICORE_AUTOSELECT=y
3221 +CONFIG_B44_PCI=y
3222 +CONFIG_BCMGENET=m
3223 +CONFIG_BNX2=m
3224 +CONFIG_CNIC=m
3225 +CONFIG_TIGON3=m
3226 +CONFIG_TIGON3_HWMON=y
3227 +CONFIG_BNX2X=m
3228 +CONFIG_BNX2X_SRIOV=y
3229 +CONFIG_SYSTEMPORT=m
3230 +CONFIG_BNXT=m
3231 +CONFIG_BNXT_SRIOV=y
3232 +CONFIG_BNXT_FLOWER_OFFLOAD=y
3233 +CONFIG_BNXT_DCB=y
3234 +CONFIG_BNXT_HWMON=y
3235 +CONFIG_NET_VENDOR_BROCADE=y
3236 +CONFIG_BNA=m
3237 +CONFIG_NET_VENDOR_CADENCE=y
3238 +CONFIG_MACB=m
3239 +CONFIG_MACB_USE_HWSTAMP=y
3240 +CONFIG_MACB_PCI=m
3241 +CONFIG_NET_VENDOR_CAVIUM=y
3242 +CONFIG_THUNDER_NIC_PF=m
3243 +CONFIG_THUNDER_NIC_VF=m
3244 +CONFIG_THUNDER_NIC_BGX=m
3245 +CONFIG_THUNDER_NIC_RGX=m
3246 +CONFIG_CAVIUM_PTP=m
3247 +CONFIG_LIQUIDIO=m
3248 +CONFIG_LIQUIDIO_VF=m
3249 +CONFIG_NET_VENDOR_CHELSIO=y
3250 +CONFIG_CHELSIO_T1=m
3251 +CONFIG_CHELSIO_T1_1G=y
3252 +CONFIG_CHELSIO_T3=m
3253 +CONFIG_CHELSIO_T4=m
3254 +CONFIG_CHELSIO_T4_DCB=y
3255 +CONFIG_CHELSIO_T4_FCOE=y
3256 +CONFIG_CHELSIO_T4VF=m
3257 +CONFIG_CHELSIO_LIB=m
3258 +CONFIG_CHELSIO_INLINE_CRYPTO=y
3259 +CONFIG_CHELSIO_IPSEC_INLINE=m
3260 +CONFIG_CHELSIO_TLS_DEVICE=m
3261 +CONFIG_NET_VENDOR_CISCO=y
3262 +CONFIG_ENIC=m
3263 +CONFIG_NET_VENDOR_CORTINA=y
3264 +CONFIG_CX_ECAT=m
3265 +CONFIG_DNET=m
3266 +CONFIG_NET_VENDOR_DEC=y
3267 +CONFIG_NET_TULIP=y
3268 +CONFIG_DE2104X=m
3269 +CONFIG_DE2104X_DSL=0
3270 +CONFIG_TULIP=m
3271 +# CONFIG_TULIP_MWI is not set
3272 +# CONFIG_TULIP_MMIO is not set
3273 +# CONFIG_TULIP_NAPI is not set
3274 +CONFIG_DE4X5=m
3275 +CONFIG_WINBOND_840=m
3276 +CONFIG_DM9102=m
3277 +CONFIG_ULI526X=m
3278 +CONFIG_PCMCIA_XIRCOM=m
3279 +CONFIG_NET_VENDOR_DLINK=y
3280 +CONFIG_DL2K=m
3281 +CONFIG_SUNDANCE=m
3282 +# CONFIG_SUNDANCE_MMIO is not set
3283 +CONFIG_NET_VENDOR_EMULEX=y
3284 +CONFIG_BE2NET=m
3285 +CONFIG_BE2NET_HWMON=y
3286 +CONFIG_BE2NET_BE2=y
3287 +CONFIG_BE2NET_BE3=y
3288 +CONFIG_BE2NET_LANCER=y
3289 +CONFIG_BE2NET_SKYHAWK=y
3290 +CONFIG_NET_VENDOR_EZCHIP=y
3291 +CONFIG_NET_VENDOR_FUJITSU=y
3292 +CONFIG_PCMCIA_FMVJ18X=m
3293 +CONFIG_NET_VENDOR_GOOGLE=y
3294 +CONFIG_GVE=m
3295 +CONFIG_NET_VENDOR_HUAWEI=y
3296 +CONFIG_HINIC=m
3297 +CONFIG_NET_VENDOR_I825XX=y
3298 +CONFIG_NET_VENDOR_INTEL=y
3299 +CONFIG_E100=m
3300 +CONFIG_E1000=m
3301 +CONFIG_E1000E=m
3302 +CONFIG_E1000E_HWTS=y
3303 +CONFIG_IGB=m
3304 +CONFIG_IGB_HWMON=y
3305 +CONFIG_IGB_DCA=y
3306 +CONFIG_IGBVF=m
3307 +CONFIG_IXGB=m
3308 +CONFIG_IXGBE=m
3309 +CONFIG_IXGBE_HWMON=y
3310 +CONFIG_IXGBE_DCA=y
3311 +CONFIG_IXGBE_DCB=y
3312 +CONFIG_IXGBE_IPSEC=y
3313 +CONFIG_IXGBEVF=m
3314 +CONFIG_IXGBEVF_IPSEC=y
3315 +CONFIG_I40E=m
3316 +CONFIG_I40E_DCB=y
3317 +CONFIG_IAVF=m
3318 +CONFIG_I40EVF=m
3319 +CONFIG_ICE=m
3320 +CONFIG_FM10K=m
3321 +CONFIG_IGC=m
3322 +CONFIG_JME=m
3323 +CONFIG_NET_VENDOR_MARVELL=y
3324 +CONFIG_MVMDIO=m
3325 +CONFIG_SKGE=m
3326 +# CONFIG_SKGE_DEBUG is not set
3327 +CONFIG_SKGE_GENESIS=y
3328 +CONFIG_SKY2=m
3329 +# CONFIG_SKY2_DEBUG is not set
3330 +CONFIG_PRESTERA=m
3331 +CONFIG_PRESTERA_PCI=m
3332 +CONFIG_NET_VENDOR_MELLANOX=y
3333 +CONFIG_MLX4_EN=m
3334 +CONFIG_MLX4_EN_DCB=y
3335 +CONFIG_MLX4_CORE=m
3336 +CONFIG_MLX4_DEBUG=y
3337 +CONFIG_MLX4_CORE_GEN2=y
3338 +CONFIG_MLX5_CORE=m
3339 +CONFIG_MLX5_ACCEL=y
3340 +CONFIG_MLX5_FPGA=y
3341 +CONFIG_MLX5_CORE_EN=y
3342 +CONFIG_MLX5_EN_ARFS=y
3343 +CONFIG_MLX5_EN_RXNFC=y
3344 +CONFIG_MLX5_MPFS=y
3345 +CONFIG_MLX5_ESWITCH=y
3346 +CONFIG_MLX5_CLS_ACT=y
3347 +CONFIG_MLX5_TC_CT=y
3348 +CONFIG_MLX5_CORE_EN_DCB=y
3349 +CONFIG_MLX5_CORE_IPOIB=y
3350 +CONFIG_MLX5_FPGA_IPSEC=y
3351 +CONFIG_MLX5_IPSEC=y
3352 +CONFIG_MLX5_EN_IPSEC=y
3353 +CONFIG_MLX5_FPGA_TLS=y
3354 +CONFIG_MLX5_TLS=y
3355 +CONFIG_MLX5_EN_TLS=y
3356 +CONFIG_MLX5_SW_STEERING=y
3357 +CONFIG_MLX5_SF=y
3358 +CONFIG_MLX5_SF_MANAGER=y
3359 +CONFIG_MLXSW_CORE=m
3360 +CONFIG_MLXSW_CORE_HWMON=y
3361 +CONFIG_MLXSW_CORE_THERMAL=y
3362 +CONFIG_MLXSW_PCI=m
3363 +CONFIG_MLXSW_I2C=m
3364 +CONFIG_MLXSW_SWITCHIB=m
3365 +CONFIG_MLXSW_SWITCHX2=m
3366 +CONFIG_MLXSW_SPECTRUM=m
3367 +CONFIG_MLXSW_SPECTRUM_DCB=y
3368 +CONFIG_MLXSW_MINIMAL=m
3369 +CONFIG_MLXFW=m
3370 +CONFIG_NET_VENDOR_MICREL=y
3371 +CONFIG_KS8842=m
3372 +CONFIG_KS8851=m
3373 +CONFIG_KS8851_MLL=m
3374 +CONFIG_KSZ884X_PCI=m
3375 +CONFIG_NET_VENDOR_MICROCHIP=y
3376 +CONFIG_ENC28J60=m
3377 +# CONFIG_ENC28J60_WRITEVERIFY is not set
3378 +CONFIG_ENCX24J600=m
3379 +CONFIG_LAN743X=m
3380 +CONFIG_NET_VENDOR_MICROSEMI=y
3381 +CONFIG_MSCC_OCELOT_SWITCH_LIB=m
3382 +CONFIG_NET_VENDOR_MYRI=y
3383 +CONFIG_MYRI10GE=m
3384 +CONFIG_MYRI10GE_DCA=y
3385 +CONFIG_FEALNX=m
3386 +CONFIG_NET_VENDOR_NATSEMI=y
3387 +CONFIG_NATSEMI=m
3388 +CONFIG_NS83820=m
3389 +CONFIG_NET_VENDOR_NETERION=y
3390 +CONFIG_S2IO=m
3391 +CONFIG_VXGE=m
3392 +# CONFIG_VXGE_DEBUG_TRACE_ALL is not set
3393 +CONFIG_NET_VENDOR_NETRONOME=y
3394 +CONFIG_NFP=m
3395 +CONFIG_NFP_APP_FLOWER=y
3396 +CONFIG_NFP_APP_ABM_NIC=y
3397 +# CONFIG_NFP_DEBUG is not set
3398 +CONFIG_NET_VENDOR_NI=y
3399 +CONFIG_NI_XGE_MANAGEMENT_ENET=m
3400 +CONFIG_NET_VENDOR_8390=y
3401 +CONFIG_PCMCIA_AXNET=m
3402 +CONFIG_NE2K_PCI=m
3403 +CONFIG_PCMCIA_PCNET=m
3404 +CONFIG_NET_VENDOR_NVIDIA=y
3405 +CONFIG_FORCEDETH=m
3406 +CONFIG_NET_VENDOR_OKI=y
3407 +CONFIG_ETHOC=m
3408 +CONFIG_NET_VENDOR_PACKET_ENGINES=y
3409 +CONFIG_HAMACHI=m
3410 +CONFIG_YELLOWFIN=m
3411 +CONFIG_NET_VENDOR_PENSANDO=y
3412 +CONFIG_IONIC=m
3413 +CONFIG_NET_VENDOR_QLOGIC=y
3414 +CONFIG_QLA3XXX=m
3415 +CONFIG_QLCNIC=m
3416 +CONFIG_QLCNIC_SRIOV=y
3417 +CONFIG_QLCNIC_DCB=y
3418 +CONFIG_QLCNIC_HWMON=y
3419 +CONFIG_NETXEN_NIC=m
3420 +CONFIG_QED=m
3421 +CONFIG_QED_LL2=y
3422 +CONFIG_QED_SRIOV=y
3423 +CONFIG_QEDE=m
3424 +CONFIG_QED_RDMA=y
3425 +CONFIG_QED_ISCSI=y
3426 +CONFIG_QED_FCOE=y
3427 +CONFIG_QED_OOO=y
3428 +CONFIG_NET_VENDOR_QUALCOMM=y
3429 +CONFIG_QCOM_EMAC=m
3430 +CONFIG_RMNET=m
3431 +CONFIG_NET_VENDOR_RDC=y
3432 +CONFIG_R6040=m
3433 +CONFIG_NET_VENDOR_REALTEK=y
3434 +CONFIG_ATP=m
3435 +CONFIG_8139CP=m
3436 +CONFIG_8139TOO=m
3437 +CONFIG_8139TOO_PIO=y
3438 +# CONFIG_8139TOO_TUNE_TWISTER is not set
3439 +CONFIG_8139TOO_8129=y
3440 +# CONFIG_8139_OLD_RX_RESET is not set
3441 +CONFIG_R8169=m
3442 +CONFIG_NET_VENDOR_RENESAS=y
3443 +CONFIG_NET_VENDOR_ROCKER=y
3444 +CONFIG_ROCKER=m
3445 +CONFIG_NET_VENDOR_SAMSUNG=y
3446 +CONFIG_SXGBE_ETH=m
3447 +CONFIG_NET_VENDOR_SEEQ=y
3448 +CONFIG_NET_VENDOR_SOLARFLARE=y
3449 +CONFIG_SFC=m
3450 +CONFIG_SFC_MTD=y
3451 +CONFIG_SFC_MCDI_MON=y
3452 +CONFIG_SFC_SRIOV=y
3453 +CONFIG_SFC_MCDI_LOGGING=y
3454 +CONFIG_SFC_FALCON=m
3455 +CONFIG_SFC_FALCON_MTD=y
3456 +CONFIG_NET_VENDOR_SILAN=y
3457 +CONFIG_SC92031=m
3458 +CONFIG_NET_VENDOR_SIS=y
3459 +CONFIG_SIS900=m
3460 +CONFIG_SIS190=m
3461 +CONFIG_NET_VENDOR_SMSC=y
3462 +CONFIG_PCMCIA_SMC91C92=m
3463 +CONFIG_EPIC100=m
3464 +CONFIG_SMSC911X=m
3465 +CONFIG_SMSC9420=m
3466 +CONFIG_NET_VENDOR_SOCIONEXT=y
3467 +CONFIG_NET_VENDOR_STMICRO=y
3468 +CONFIG_STMMAC_ETH=m
3469 +# CONFIG_STMMAC_SELFTESTS is not set
3470 +CONFIG_STMMAC_PLATFORM=m
3471 +CONFIG_DWMAC_GENERIC=m
3472 +CONFIG_DWMAC_INTEL=m
3473 +CONFIG_STMMAC_PCI=m
3474 +CONFIG_NET_VENDOR_SUN=y
3475 +CONFIG_HAPPYMEAL=m
3476 +CONFIG_SUNGEM=m
3477 +CONFIG_CASSINI=m
3478 +CONFIG_NIU=m
3479 +CONFIG_NET_VENDOR_SYNOPSYS=y
3480 +CONFIG_DWC_XLGMAC=m
3481 +CONFIG_DWC_XLGMAC_PCI=m
3482 +CONFIG_NET_VENDOR_TEHUTI=y
3483 +CONFIG_TEHUTI=m
3484 +CONFIG_NET_VENDOR_TI=y
3485 +# CONFIG_TI_CPSW_PHY_SEL is not set
3486 +CONFIG_TLAN=m
3487 +CONFIG_NET_VENDOR_VIA=y
3488 +CONFIG_VIA_RHINE=m
3489 +CONFIG_VIA_RHINE_MMIO=y
3490 +CONFIG_VIA_VELOCITY=m
3491 +CONFIG_NET_VENDOR_WIZNET=y
3492 +CONFIG_WIZNET_W5100=m
3493 +CONFIG_WIZNET_W5300=m
3494 +# CONFIG_WIZNET_BUS_DIRECT is not set
3495 +# CONFIG_WIZNET_BUS_INDIRECT is not set
3496 +CONFIG_WIZNET_BUS_ANY=y
3497 +CONFIG_WIZNET_W5100_SPI=m
3498 +CONFIG_NET_VENDOR_XILINX=y
3499 +CONFIG_XILINX_EMACLITE=m
3500 +CONFIG_XILINX_AXI_EMAC=m
3501 +CONFIG_XILINX_LL_TEMAC=m
3502 +CONFIG_NET_VENDOR_XIRCOM=y
3503 +CONFIG_PCMCIA_XIRC2PS=m
3504 +CONFIG_FDDI=y
3505 +CONFIG_DEFXX=m
3506 +# CONFIG_DEFXX_MMIO is not set
3507 +CONFIG_SKFP=m
3508 +# CONFIG_HIPPI is not set
3509 +CONFIG_NET_SB1000=m
3510 +CONFIG_PHYLINK=m
3511 +CONFIG_PHYLIB=m
3512 +CONFIG_SWPHY=y
3513 +CONFIG_LED_TRIGGER_PHY=y
3514 +CONFIG_FIXED_PHY=m
3515 +CONFIG_SFP=m
3518 +# MII PHY device drivers
3520 +CONFIG_AMD_PHY=m
3521 +CONFIG_ADIN_PHY=m
3522 +CONFIG_AQUANTIA_PHY=m
3523 +CONFIG_AX88796B_PHY=m
3524 +CONFIG_BROADCOM_PHY=m
3525 +CONFIG_BCM54140_PHY=m
3526 +CONFIG_BCM7XXX_PHY=m
3527 +CONFIG_BCM84881_PHY=m
3528 +CONFIG_BCM87XX_PHY=m
3529 +CONFIG_BCM_NET_PHYLIB=m
3530 +CONFIG_CICADA_PHY=m
3531 +CONFIG_CORTINA_PHY=m
3532 +CONFIG_DAVICOM_PHY=m
3533 +CONFIG_ICPLUS_PHY=m
3534 +CONFIG_LXT_PHY=m
3535 +CONFIG_INTEL_XWAY_PHY=m
3536 +CONFIG_LSI_ET1011C_PHY=m
3537 +CONFIG_MARVELL_PHY=m
3538 +CONFIG_MARVELL_10G_PHY=m
3539 +CONFIG_MICREL_PHY=m
3540 +CONFIG_MICROCHIP_PHY=m
3541 +CONFIG_MICROCHIP_T1_PHY=m
3542 +CONFIG_MICROSEMI_PHY=m
3543 +CONFIG_NATIONAL_PHY=m
3544 +CONFIG_NXP_TJA11XX_PHY=m
3545 +CONFIG_AT803X_PHY=m
3546 +CONFIG_QSEMI_PHY=m
3547 +CONFIG_REALTEK_PHY=m
3548 +CONFIG_RENESAS_PHY=m
3549 +CONFIG_ROCKCHIP_PHY=m
3550 +CONFIG_SMSC_PHY=m
3551 +CONFIG_STE10XP=m
3552 +CONFIG_TERANETICS_PHY=m
3553 +CONFIG_DP83822_PHY=m
3554 +CONFIG_DP83TC811_PHY=m
3555 +CONFIG_DP83848_PHY=m
3556 +CONFIG_DP83867_PHY=m
3557 +CONFIG_DP83869_PHY=m
3558 +CONFIG_VITESSE_PHY=m
3559 +CONFIG_XILINX_GMII2RGMII=m
3560 +CONFIG_MICREL_KS8995MA=m
3561 +CONFIG_MDIO_DEVICE=m
3562 +CONFIG_MDIO_BUS=m
3563 +CONFIG_MDIO_DEVRES=m
3564 +CONFIG_MDIO_BITBANG=m
3565 +CONFIG_MDIO_BCM_UNIMAC=m
3566 +CONFIG_MDIO_CAVIUM=m
3567 +CONFIG_MDIO_GPIO=m
3568 +CONFIG_MDIO_I2C=m
3569 +CONFIG_MDIO_MVUSB=m
3570 +CONFIG_MDIO_MSCC_MIIM=m
3571 +CONFIG_MDIO_THUNDER=m
3574 +# MDIO Multiplexers
3578 +# PCS device drivers
3580 +CONFIG_PCS_XPCS=m
3581 +CONFIG_PCS_LYNX=m
3582 +# end of PCS device drivers
3584 +CONFIG_PLIP=m
3585 +CONFIG_PPP=y
3586 +CONFIG_PPP_BSDCOMP=m
3587 +CONFIG_PPP_DEFLATE=m
3588 +CONFIG_PPP_FILTER=y
3589 +CONFIG_PPP_MPPE=m
3590 +CONFIG_PPP_MULTILINK=y
3591 +CONFIG_PPPOATM=m
3592 +CONFIG_PPPOE=m
3593 +CONFIG_PPTP=m
3594 +CONFIG_PPPOL2TP=m
3595 +CONFIG_PPP_ASYNC=m
3596 +CONFIG_PPP_SYNC_TTY=m
3597 +CONFIG_SLIP=m
3598 +CONFIG_SLHC=y
3599 +CONFIG_SLIP_COMPRESSED=y
3600 +CONFIG_SLIP_SMART=y
3601 +CONFIG_SLIP_MODE_SLIP6=y
3602 +CONFIG_USB_NET_DRIVERS=m
3603 +CONFIG_USB_CATC=m
3604 +CONFIG_USB_KAWETH=m
3605 +CONFIG_USB_PEGASUS=m
3606 +CONFIG_USB_RTL8150=m
3607 +CONFIG_USB_RTL8152=m
3608 +CONFIG_USB_LAN78XX=m
3609 +CONFIG_USB_USBNET=m
3610 +CONFIG_USB_NET_AX8817X=m
3611 +CONFIG_USB_NET_AX88179_178A=m
3612 +CONFIG_USB_NET_CDCETHER=m
3613 +CONFIG_USB_NET_CDC_EEM=m
3614 +CONFIG_USB_NET_CDC_NCM=m
3615 +CONFIG_USB_NET_HUAWEI_CDC_NCM=m
3616 +CONFIG_USB_NET_CDC_MBIM=m
3617 +CONFIG_USB_NET_DM9601=m
3618 +CONFIG_USB_NET_SR9700=m
3619 +CONFIG_USB_NET_SR9800=m
3620 +CONFIG_USB_NET_SMSC75XX=m
3621 +CONFIG_USB_NET_SMSC95XX=m
3622 +CONFIG_USB_NET_GL620A=m
3623 +CONFIG_USB_NET_NET1080=m
3624 +CONFIG_USB_NET_PLUSB=m
3625 +CONFIG_USB_NET_MCS7830=m
3626 +CONFIG_USB_NET_RNDIS_HOST=m
3627 +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m
3628 +CONFIG_USB_NET_CDC_SUBSET=m
3629 +CONFIG_USB_ALI_M5632=y
3630 +CONFIG_USB_AN2720=y
3631 +CONFIG_USB_BELKIN=y
3632 +CONFIG_USB_ARMLINUX=y
3633 +CONFIG_USB_EPSON2888=y
3634 +CONFIG_USB_KC2190=y
3635 +CONFIG_USB_NET_ZAURUS=m
3636 +CONFIG_USB_NET_CX82310_ETH=m
3637 +CONFIG_USB_NET_KALMIA=m
3638 +CONFIG_USB_NET_QMI_WWAN=m
3639 +CONFIG_USB_HSO=m
3640 +CONFIG_USB_NET_INT51X1=m
3641 +CONFIG_USB_CDC_PHONET=m
3642 +CONFIG_USB_IPHETH=m
3643 +CONFIG_USB_SIERRA_NET=m
3644 +CONFIG_USB_VL600=m
3645 +CONFIG_USB_NET_CH9200=m
3646 +CONFIG_USB_NET_AQC111=m
3647 +CONFIG_USB_RTL8153_ECM=m
3648 +CONFIG_WLAN=y
3649 +CONFIG_WLAN_VENDOR_ADMTEK=y
3650 +CONFIG_ADM8211=m
3651 +CONFIG_ATH_COMMON=m
3652 +CONFIG_WLAN_VENDOR_ATH=y
3653 +# CONFIG_ATH_DEBUG is not set
3654 +CONFIG_ATH5K=m
3655 +# CONFIG_ATH5K_DEBUG is not set
3656 +CONFIG_ATH5K_PCI=y
3657 +CONFIG_ATH9K_HW=m
3658 +CONFIG_ATH9K_COMMON=m
3659 +CONFIG_ATH9K_COMMON_DEBUG=y
3660 +CONFIG_ATH9K_BTCOEX_SUPPORT=y
3661 +CONFIG_ATH9K=m
3662 +CONFIG_ATH9K_PCI=y
3663 +CONFIG_ATH9K_AHB=y
3664 +CONFIG_ATH9K_DEBUGFS=y
3665 +CONFIG_ATH9K_STATION_STATISTICS=y
3666 +# CONFIG_ATH9K_DYNACK is not set
3667 +CONFIG_ATH9K_WOW=y
3668 +CONFIG_ATH9K_RFKILL=y
3669 +CONFIG_ATH9K_CHANNEL_CONTEXT=y
3670 +CONFIG_ATH9K_PCOEM=y
3671 +CONFIG_ATH9K_PCI_NO_EEPROM=m
3672 +CONFIG_ATH9K_HTC=m
3673 +CONFIG_ATH9K_HTC_DEBUGFS=y
3674 +CONFIG_ATH9K_HWRNG=y
3675 +CONFIG_ATH9K_COMMON_SPECTRAL=y
3676 +CONFIG_CARL9170=m
3677 +CONFIG_CARL9170_LEDS=y
3678 +# CONFIG_CARL9170_DEBUGFS is not set
3679 +CONFIG_CARL9170_WPC=y
3680 +CONFIG_CARL9170_HWRNG=y
3681 +CONFIG_ATH6KL=m
3682 +CONFIG_ATH6KL_SDIO=m
3683 +CONFIG_ATH6KL_USB=m
3684 +# CONFIG_ATH6KL_DEBUG is not set
3685 +CONFIG_AR5523=m
3686 +CONFIG_WIL6210=m
3687 +CONFIG_WIL6210_ISR_COR=y
3688 +CONFIG_WIL6210_DEBUGFS=y
3689 +CONFIG_ATH10K=m
3690 +CONFIG_ATH10K_CE=y
3691 +CONFIG_ATH10K_PCI=m
3692 +CONFIG_ATH10K_SDIO=m
3693 +CONFIG_ATH10K_USB=m
3694 +# CONFIG_ATH10K_DEBUG is not set
3695 +CONFIG_ATH10K_DEBUGFS=y
3696 +CONFIG_ATH10K_SPECTRAL=y
3697 +CONFIG_WCN36XX=m
3698 +# CONFIG_WCN36XX_DEBUGFS is not set
3699 +CONFIG_ATH11K=m
3700 +CONFIG_ATH11K_AHB=m
3701 +CONFIG_ATH11K_PCI=m
3702 +# CONFIG_ATH11K_DEBUG is not set
3703 +CONFIG_ATH11K_DEBUGFS=y
3704 +CONFIG_ATH11K_SPECTRAL=y
3705 +CONFIG_WLAN_VENDOR_ATMEL=y
3706 +CONFIG_ATMEL=m
3707 +CONFIG_PCI_ATMEL=m
3708 +CONFIG_PCMCIA_ATMEL=m
3709 +CONFIG_AT76C50X_USB=m
3710 +CONFIG_WLAN_VENDOR_BROADCOM=y
3711 +CONFIG_B43=m
3712 +CONFIG_B43_BCMA=y
3713 +CONFIG_B43_SSB=y
3714 +CONFIG_B43_BUSES_BCMA_AND_SSB=y
3715 +# CONFIG_B43_BUSES_BCMA is not set
3716 +# CONFIG_B43_BUSES_SSB is not set
3717 +CONFIG_B43_PCI_AUTOSELECT=y
3718 +CONFIG_B43_PCICORE_AUTOSELECT=y
3719 +# CONFIG_B43_SDIO is not set
3720 +CONFIG_B43_BCMA_PIO=y
3721 +CONFIG_B43_PIO=y
3722 +CONFIG_B43_PHY_G=y
3723 +CONFIG_B43_PHY_N=y
3724 +CONFIG_B43_PHY_LP=y
3725 +CONFIG_B43_PHY_HT=y
3726 +CONFIG_B43_LEDS=y
3727 +CONFIG_B43_HWRNG=y
3728 +# CONFIG_B43_DEBUG is not set
3729 +CONFIG_B43LEGACY=m
3730 +CONFIG_B43LEGACY_PCI_AUTOSELECT=y
3731 +CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y
3732 +CONFIG_B43LEGACY_LEDS=y
3733 +CONFIG_B43LEGACY_HWRNG=y
3734 +# CONFIG_B43LEGACY_DEBUG is not set
3735 +CONFIG_B43LEGACY_DMA=y
3736 +CONFIG_B43LEGACY_PIO=y
3737 +CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
3738 +# CONFIG_B43LEGACY_DMA_MODE is not set
3739 +# CONFIG_B43LEGACY_PIO_MODE is not set
3740 +CONFIG_BRCMUTIL=m
3741 +CONFIG_BRCMSMAC=m
3742 +CONFIG_BRCMFMAC=m
3743 +CONFIG_BRCMFMAC_PROTO_BCDC=y
3744 +CONFIG_BRCMFMAC_PROTO_MSGBUF=y
3745 +CONFIG_BRCMFMAC_SDIO=y
3746 +CONFIG_BRCMFMAC_USB=y
3747 +CONFIG_BRCMFMAC_PCIE=y
3748 +CONFIG_BRCM_TRACING=y
3749 +# CONFIG_BRCMDBG is not set
3750 +CONFIG_WLAN_VENDOR_CISCO=y
3751 +CONFIG_AIRO=m
3752 +CONFIG_AIRO_CS=m
3753 +CONFIG_WLAN_VENDOR_INTEL=y
3754 +CONFIG_IPW2100=m
3755 +CONFIG_IPW2100_MONITOR=y
3756 +# CONFIG_IPW2100_DEBUG is not set
3757 +CONFIG_IPW2200=m
3758 +CONFIG_IPW2200_MONITOR=y
3759 +CONFIG_IPW2200_RADIOTAP=y
3760 +CONFIG_IPW2200_PROMISCUOUS=y
3761 +CONFIG_IPW2200_QOS=y
3762 +# CONFIG_IPW2200_DEBUG is not set
3763 +CONFIG_LIBIPW=m
3764 +# CONFIG_LIBIPW_DEBUG is not set
3765 +CONFIG_IWLEGACY=m
3766 +CONFIG_IWL4965=m
3767 +CONFIG_IWL3945=m
3770 +# iwl3945 / iwl4965 Debugging Options
3772 +# CONFIG_IWLEGACY_DEBUG is not set
3773 +CONFIG_IWLEGACY_DEBUGFS=y
3774 +# end of iwl3945 / iwl4965 Debugging Options
3776 +CONFIG_IWLWIFI=m
3777 +CONFIG_IWLWIFI_LEDS=y
3778 +CONFIG_IWLDVM=m
3779 +CONFIG_IWLMVM=m
3780 +CONFIG_IWLWIFI_OPMODE_MODULAR=y
3781 +# CONFIG_IWLWIFI_BCAST_FILTERING is not set
3784 +# Debugging Options
3786 +# CONFIG_IWLWIFI_DEBUG is not set
3787 +CONFIG_IWLWIFI_DEBUGFS=y
3788 +# end of Debugging Options
3790 +CONFIG_WLAN_VENDOR_INTERSIL=y
3791 +CONFIG_HOSTAP=m
3792 +CONFIG_HOSTAP_FIRMWARE=y
3793 +CONFIG_HOSTAP_FIRMWARE_NVRAM=y
3794 +CONFIG_HOSTAP_PLX=m
3795 +CONFIG_HOSTAP_PCI=m
3796 +CONFIG_HOSTAP_CS=m
3797 +CONFIG_HERMES=m
3798 +# CONFIG_HERMES_PRISM is not set
3799 +CONFIG_HERMES_CACHE_FW_ON_INIT=y
3800 +CONFIG_PLX_HERMES=m
3801 +CONFIG_TMD_HERMES=m
3802 +CONFIG_NORTEL_HERMES=m
3803 +CONFIG_PCMCIA_HERMES=m
3804 +CONFIG_PCMCIA_SPECTRUM=m
3805 +CONFIG_ORINOCO_USB=m
3806 +CONFIG_P54_COMMON=m
3807 +CONFIG_P54_USB=m
3808 +CONFIG_P54_PCI=m
3809 +CONFIG_P54_SPI=m
3810 +# CONFIG_P54_SPI_DEFAULT_EEPROM is not set
3811 +CONFIG_P54_LEDS=y
3812 +# CONFIG_PRISM54 is not set
3813 +CONFIG_WLAN_VENDOR_MARVELL=y
3814 +CONFIG_LIBERTAS=m
3815 +CONFIG_LIBERTAS_USB=m
3816 +CONFIG_LIBERTAS_CS=m
3817 +CONFIG_LIBERTAS_SDIO=m
3818 +CONFIG_LIBERTAS_SPI=m
3819 +# CONFIG_LIBERTAS_DEBUG is not set
3820 +CONFIG_LIBERTAS_MESH=y
3821 +CONFIG_LIBERTAS_THINFIRM=m
3822 +# CONFIG_LIBERTAS_THINFIRM_DEBUG is not set
3823 +CONFIG_LIBERTAS_THINFIRM_USB=m
3824 +CONFIG_MWIFIEX=m
3825 +CONFIG_MWIFIEX_SDIO=m
3826 +CONFIG_MWIFIEX_PCIE=m
3827 +CONFIG_MWIFIEX_USB=m
3828 +CONFIG_MWL8K=m
3829 +CONFIG_WLAN_VENDOR_MEDIATEK=y
3830 +CONFIG_MT7601U=m
3831 +CONFIG_MT76_CORE=m
3832 +CONFIG_MT76_LEDS=y
3833 +CONFIG_MT76_USB=m
3834 +CONFIG_MT76_SDIO=m
3835 +CONFIG_MT76x02_LIB=m
3836 +CONFIG_MT76x02_USB=m
3837 +CONFIG_MT76_CONNAC_LIB=m
3838 +CONFIG_MT76x0_COMMON=m
3839 +CONFIG_MT76x0U=m
3840 +CONFIG_MT76x0E=m
3841 +CONFIG_MT76x2_COMMON=m
3842 +CONFIG_MT76x2E=m
3843 +CONFIG_MT76x2U=m
3844 +CONFIG_MT7603E=m
3845 +CONFIG_MT7615_COMMON=m
3846 +CONFIG_MT7615E=m
3847 +CONFIG_MT7663_USB_SDIO_COMMON=m
3848 +CONFIG_MT7663U=m
3849 +CONFIG_MT7663S=m
3850 +CONFIG_MT7915E=m
3851 +CONFIG_MT7921E=m
3852 +CONFIG_WLAN_VENDOR_MICROCHIP=y
3853 +CONFIG_WILC1000=m
3854 +CONFIG_WILC1000_SDIO=m
3855 +CONFIG_WILC1000_SPI=m
3856 +CONFIG_WILC1000_HW_OOB_INTR=y
3857 +CONFIG_WLAN_VENDOR_RALINK=y
3858 +CONFIG_RT2X00=m
3859 +CONFIG_RT2400PCI=m
3860 +CONFIG_RT2500PCI=m
3861 +CONFIG_RT61PCI=m
3862 +CONFIG_RT2800PCI=m
3863 +CONFIG_RT2800PCI_RT33XX=y
3864 +CONFIG_RT2800PCI_RT35XX=y
3865 +CONFIG_RT2800PCI_RT53XX=y
3866 +CONFIG_RT2800PCI_RT3290=y
3867 +CONFIG_RT2500USB=m
3868 +CONFIG_RT73USB=m
3869 +CONFIG_RT2800USB=m
3870 +CONFIG_RT2800USB_RT33XX=y
3871 +CONFIG_RT2800USB_RT35XX=y
3872 +CONFIG_RT2800USB_RT3573=y
3873 +CONFIG_RT2800USB_RT53XX=y
3874 +CONFIG_RT2800USB_RT55XX=y
3875 +CONFIG_RT2800USB_UNKNOWN=y
3876 +CONFIG_RT2800_LIB=m
3877 +CONFIG_RT2800_LIB_MMIO=m
3878 +CONFIG_RT2X00_LIB_MMIO=m
3879 +CONFIG_RT2X00_LIB_PCI=m
3880 +CONFIG_RT2X00_LIB_USB=m
3881 +CONFIG_RT2X00_LIB=m
3882 +CONFIG_RT2X00_LIB_FIRMWARE=y
3883 +CONFIG_RT2X00_LIB_CRYPTO=y
3884 +CONFIG_RT2X00_LIB_LEDS=y
3885 +# CONFIG_RT2X00_LIB_DEBUGFS is not set
3886 +# CONFIG_RT2X00_DEBUG is not set
3887 +CONFIG_WLAN_VENDOR_REALTEK=y
3888 +CONFIG_RTL8180=m
3889 +CONFIG_RTL8187=m
3890 +CONFIG_RTL8187_LEDS=y
3891 +CONFIG_RTL_CARDS=m
3892 +CONFIG_RTL8192CE=m
3893 +CONFIG_RTL8192SE=m
3894 +CONFIG_RTL8192DE=m
3895 +CONFIG_RTL8723AE=m
3896 +CONFIG_RTL8723BE=m
3897 +CONFIG_RTL8188EE=m
3898 +CONFIG_RTL8192EE=m
3899 +CONFIG_RTL8821AE=m
3900 +CONFIG_RTL8192CU=m
3901 +CONFIG_RTLWIFI=m
3902 +CONFIG_RTLWIFI_PCI=m
3903 +CONFIG_RTLWIFI_USB=m
3904 +# CONFIG_RTLWIFI_DEBUG is not set
3905 +CONFIG_RTL8192C_COMMON=m
3906 +CONFIG_RTL8723_COMMON=m
3907 +CONFIG_RTLBTCOEXIST=m
3908 +CONFIG_RTL8XXXU=m
3909 +CONFIG_RTL8XXXU_UNTESTED=y
3910 +CONFIG_RTW88=m
3911 +CONFIG_RTW88_CORE=m
3912 +CONFIG_RTW88_PCI=m
3913 +CONFIG_RTW88_8822B=m
3914 +CONFIG_RTW88_8822C=m
3915 +CONFIG_RTW88_8723D=m
3916 +CONFIG_RTW88_8821C=m
3917 +CONFIG_RTW88_8822BE=m
3918 +CONFIG_RTW88_8822CE=m
3919 +CONFIG_RTW88_8723DE=m
3920 +CONFIG_RTW88_8821CE=m
3921 +CONFIG_RTW88_DEBUG=y
3922 +CONFIG_RTW88_DEBUGFS=y
3923 +CONFIG_WLAN_VENDOR_RSI=y
3924 +CONFIG_RSI_91X=m
3925 +# CONFIG_RSI_DEBUGFS is not set
3926 +CONFIG_RSI_SDIO=m
3927 +CONFIG_RSI_USB=m
3928 +CONFIG_RSI_COEX=y
3929 +CONFIG_WLAN_VENDOR_ST=y
3930 +CONFIG_CW1200=m
3931 +CONFIG_CW1200_WLAN_SDIO=m
3932 +CONFIG_CW1200_WLAN_SPI=m
3933 +CONFIG_WLAN_VENDOR_TI=y
3934 +CONFIG_WL1251=m
3935 +CONFIG_WL1251_SPI=m
3936 +CONFIG_WL1251_SDIO=m
3937 +CONFIG_WL12XX=m
3938 +CONFIG_WL18XX=m
3939 +CONFIG_WLCORE=m
3940 +CONFIG_WLCORE_SDIO=m
3941 +CONFIG_WILINK_PLATFORM_DATA=y
3942 +CONFIG_WLAN_VENDOR_ZYDAS=y
3943 +CONFIG_USB_ZD1201=m
3944 +CONFIG_ZD1211RW=m
3945 +# CONFIG_ZD1211RW_DEBUG is not set
3946 +CONFIG_WLAN_VENDOR_QUANTENNA=y
3947 +CONFIG_QTNFMAC=m
3948 +CONFIG_QTNFMAC_PCIE=m
3949 +CONFIG_PCMCIA_RAYCS=m
3950 +CONFIG_PCMCIA_WL3501=m
3951 +CONFIG_MAC80211_HWSIM=m
3952 +CONFIG_USB_NET_RNDIS_WLAN=m
3953 +CONFIG_VIRT_WIFI=m
3954 +CONFIG_WAN=y
3955 +CONFIG_LANMEDIA=m
3956 +CONFIG_HDLC=m
3957 +CONFIG_HDLC_RAW=m
3958 +CONFIG_HDLC_RAW_ETH=m
3959 +CONFIG_HDLC_CISCO=m
3960 +CONFIG_HDLC_FR=m
3961 +CONFIG_HDLC_PPP=m
3962 +CONFIG_HDLC_X25=m
3963 +CONFIG_PCI200SYN=m
3964 +CONFIG_WANXL=m
3965 +CONFIG_PC300TOO=m
3966 +CONFIG_FARSYNC=m
3967 +CONFIG_LAPBETHER=m
3968 +CONFIG_SBNI=m
3969 +# CONFIG_SBNI_MULTILINE is not set
3970 +CONFIG_IEEE802154_DRIVERS=m
3971 +CONFIG_IEEE802154_FAKELB=m
3972 +CONFIG_IEEE802154_AT86RF230=m
3973 +CONFIG_IEEE802154_AT86RF230_DEBUGFS=y
3974 +CONFIG_IEEE802154_MRF24J40=m
3975 +CONFIG_IEEE802154_CC2520=m
3976 +CONFIG_IEEE802154_ATUSB=m
3977 +CONFIG_IEEE802154_ADF7242=m
3978 +CONFIG_IEEE802154_CA8210=m
3979 +CONFIG_IEEE802154_CA8210_DEBUGFS=y
3980 +CONFIG_IEEE802154_MCR20A=m
3981 +CONFIG_IEEE802154_HWSIM=m
3982 +CONFIG_XEN_NETDEV_FRONTEND=y
3983 +CONFIG_XEN_NETDEV_BACKEND=m
3984 +CONFIG_VMXNET3=m
3985 +CONFIG_FUJITSU_ES=m
3986 +CONFIG_USB4_NET=m
3987 +CONFIG_HYPERV_NET=m
3988 +CONFIG_NETDEVSIM=m
3989 +CONFIG_NET_FAILOVER=m
3990 +CONFIG_ISDN=y
3991 +CONFIG_ISDN_CAPI=y
3992 +CONFIG_CAPI_TRACE=y
3993 +CONFIG_ISDN_CAPI_MIDDLEWARE=y
3994 +CONFIG_MISDN=m
3995 +CONFIG_MISDN_DSP=m
3996 +CONFIG_MISDN_L1OIP=m
3999 +# mISDN hardware drivers
4001 +CONFIG_MISDN_HFCPCI=m
4002 +CONFIG_MISDN_HFCMULTI=m
4003 +CONFIG_MISDN_HFCUSB=m
4004 +CONFIG_MISDN_AVMFRITZ=m
4005 +CONFIG_MISDN_SPEEDFAX=m
4006 +CONFIG_MISDN_INFINEON=m
4007 +CONFIG_MISDN_W6692=m
4008 +CONFIG_MISDN_NETJET=m
4009 +CONFIG_MISDN_HDLC=m
4010 +CONFIG_MISDN_IPAC=m
4011 +CONFIG_MISDN_ISAR=m
4012 +CONFIG_NVM=y
4013 +CONFIG_NVM_PBLK=m
4014 +# CONFIG_NVM_PBLK_DEBUG is not set
4017 +# Input device support
4019 +CONFIG_INPUT=y
4020 +CONFIG_INPUT_LEDS=m
4021 +CONFIG_INPUT_FF_MEMLESS=m
4022 +CONFIG_INPUT_SPARSEKMAP=m
4023 +CONFIG_INPUT_MATRIXKMAP=m
4026 +# Userland interfaces
4028 +CONFIG_INPUT_MOUSEDEV=y
4029 +CONFIG_INPUT_MOUSEDEV_PSAUX=y
4030 +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
4031 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
4032 +CONFIG_INPUT_JOYDEV=m
4033 +CONFIG_INPUT_EVDEV=y
4034 +# CONFIG_INPUT_EVBUG is not set
4037 +# Input Device Drivers
4039 +CONFIG_INPUT_KEYBOARD=y
4040 +CONFIG_KEYBOARD_ADC=m
4041 +CONFIG_KEYBOARD_ADP5520=m
4042 +CONFIG_KEYBOARD_ADP5588=m
4043 +CONFIG_KEYBOARD_ADP5589=m
4044 +CONFIG_KEYBOARD_APPLESPI=m
4045 +CONFIG_KEYBOARD_ATKBD=y
4046 +CONFIG_KEYBOARD_QT1050=m
4047 +CONFIG_KEYBOARD_QT1070=m
4048 +CONFIG_KEYBOARD_QT2160=m
4049 +CONFIG_KEYBOARD_DLINK_DIR685=m
4050 +CONFIG_KEYBOARD_LKKBD=m
4051 +CONFIG_KEYBOARD_GPIO=m
4052 +CONFIG_KEYBOARD_GPIO_POLLED=m
4053 +CONFIG_KEYBOARD_TCA6416=m
4054 +CONFIG_KEYBOARD_TCA8418=m
4055 +CONFIG_KEYBOARD_MATRIX=m
4056 +CONFIG_KEYBOARD_LM8323=m
4057 +CONFIG_KEYBOARD_LM8333=m
4058 +CONFIG_KEYBOARD_MAX7359=m
4059 +CONFIG_KEYBOARD_MCS=m
4060 +CONFIG_KEYBOARD_MPR121=m
4061 +CONFIG_KEYBOARD_NEWTON=m
4062 +CONFIG_KEYBOARD_OPENCORES=m
4063 +CONFIG_KEYBOARD_SAMSUNG=m
4064 +CONFIG_KEYBOARD_STOWAWAY=m
4065 +CONFIG_KEYBOARD_SUNKBD=m
4066 +CONFIG_KEYBOARD_IQS62X=m
4067 +CONFIG_KEYBOARD_TM2_TOUCHKEY=m
4068 +CONFIG_KEYBOARD_TWL4030=m
4069 +CONFIG_KEYBOARD_XTKBD=m
4070 +CONFIG_KEYBOARD_CROS_EC=m
4071 +CONFIG_KEYBOARD_MTK_PMIC=m
4072 +CONFIG_INPUT_MOUSE=y
4073 +CONFIG_MOUSE_PS2=m
4074 +CONFIG_MOUSE_PS2_ALPS=y
4075 +CONFIG_MOUSE_PS2_BYD=y
4076 +CONFIG_MOUSE_PS2_LOGIPS2PP=y
4077 +CONFIG_MOUSE_PS2_SYNAPTICS=y
4078 +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y
4079 +CONFIG_MOUSE_PS2_CYPRESS=y
4080 +CONFIG_MOUSE_PS2_LIFEBOOK=y
4081 +CONFIG_MOUSE_PS2_TRACKPOINT=y
4082 +CONFIG_MOUSE_PS2_ELANTECH=y
4083 +CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y
4084 +CONFIG_MOUSE_PS2_SENTELIC=y
4085 +CONFIG_MOUSE_PS2_TOUCHKIT=y
4086 +CONFIG_MOUSE_PS2_FOCALTECH=y
4087 +CONFIG_MOUSE_PS2_VMMOUSE=y
4088 +CONFIG_MOUSE_PS2_SMBUS=y
4089 +CONFIG_MOUSE_SERIAL=m
4090 +CONFIG_MOUSE_APPLETOUCH=m
4091 +CONFIG_MOUSE_BCM5974=m
4092 +CONFIG_MOUSE_CYAPA=m
4093 +CONFIG_MOUSE_ELAN_I2C=m
4094 +CONFIG_MOUSE_ELAN_I2C_I2C=y
4095 +CONFIG_MOUSE_ELAN_I2C_SMBUS=y
4096 +CONFIG_MOUSE_VSXXXAA=m
4097 +CONFIG_MOUSE_GPIO=m
4098 +CONFIG_MOUSE_SYNAPTICS_I2C=m
4099 +CONFIG_MOUSE_SYNAPTICS_USB=m
4100 +CONFIG_INPUT_JOYSTICK=y
4101 +CONFIG_JOYSTICK_ANALOG=m
4102 +CONFIG_JOYSTICK_A3D=m
4103 +CONFIG_JOYSTICK_ADC=m
4104 +CONFIG_JOYSTICK_ADI=m
4105 +CONFIG_JOYSTICK_COBRA=m
4106 +CONFIG_JOYSTICK_GF2K=m
4107 +CONFIG_JOYSTICK_GRIP=m
4108 +CONFIG_JOYSTICK_GRIP_MP=m
4109 +CONFIG_JOYSTICK_GUILLEMOT=m
4110 +CONFIG_JOYSTICK_INTERACT=m
4111 +CONFIG_JOYSTICK_SIDEWINDER=m
4112 +CONFIG_JOYSTICK_TMDC=m
4113 +CONFIG_JOYSTICK_IFORCE=m
4114 +CONFIG_JOYSTICK_IFORCE_USB=m
4115 +CONFIG_JOYSTICK_IFORCE_232=m
4116 +CONFIG_JOYSTICK_WARRIOR=m
4117 +CONFIG_JOYSTICK_MAGELLAN=m
4118 +CONFIG_JOYSTICK_SPACEORB=m
4119 +CONFIG_JOYSTICK_SPACEBALL=m
4120 +CONFIG_JOYSTICK_STINGER=m
4121 +CONFIG_JOYSTICK_TWIDJOY=m
4122 +CONFIG_JOYSTICK_ZHENHUA=m
4123 +CONFIG_JOYSTICK_DB9=m
4124 +CONFIG_JOYSTICK_GAMECON=m
4125 +CONFIG_JOYSTICK_TURBOGRAFX=m
4126 +CONFIG_JOYSTICK_AS5011=m
4127 +CONFIG_JOYSTICK_JOYDUMP=m
4128 +CONFIG_JOYSTICK_XPAD=m
4129 +CONFIG_JOYSTICK_XPAD_FF=y
4130 +CONFIG_JOYSTICK_XPAD_LEDS=y
4131 +CONFIG_JOYSTICK_WALKERA0701=m
4132 +CONFIG_JOYSTICK_PSXPAD_SPI=m
4133 +CONFIG_JOYSTICK_PSXPAD_SPI_FF=y
4134 +CONFIG_JOYSTICK_PXRC=m
4135 +CONFIG_JOYSTICK_FSIA6B=m
4136 +CONFIG_INPUT_TABLET=y
4137 +CONFIG_TABLET_USB_ACECAD=m
4138 +CONFIG_TABLET_USB_AIPTEK=m
4139 +CONFIG_TABLET_USB_HANWANG=m
4140 +CONFIG_TABLET_USB_KBTAB=m
4141 +CONFIG_TABLET_USB_PEGASUS=m
4142 +CONFIG_TABLET_SERIAL_WACOM4=m
4143 +CONFIG_INPUT_TOUCHSCREEN=y
4144 +CONFIG_TOUCHSCREEN_PROPERTIES=y
4145 +CONFIG_TOUCHSCREEN_88PM860X=m
4146 +CONFIG_TOUCHSCREEN_ADS7846=m
4147 +CONFIG_TOUCHSCREEN_AD7877=m
4148 +CONFIG_TOUCHSCREEN_AD7879=m
4149 +CONFIG_TOUCHSCREEN_AD7879_I2C=m
4150 +CONFIG_TOUCHSCREEN_AD7879_SPI=m
4151 +CONFIG_TOUCHSCREEN_ADC=m
4152 +CONFIG_TOUCHSCREEN_ATMEL_MXT=m
4153 +CONFIG_TOUCHSCREEN_ATMEL_MXT_T37=y
4154 +CONFIG_TOUCHSCREEN_AUO_PIXCIR=m
4155 +CONFIG_TOUCHSCREEN_BU21013=m
4156 +CONFIG_TOUCHSCREEN_BU21029=m
4157 +CONFIG_TOUCHSCREEN_CHIPONE_ICN8505=m
4158 +CONFIG_TOUCHSCREEN_CY8CTMA140=m
4159 +CONFIG_TOUCHSCREEN_CY8CTMG110=m
4160 +CONFIG_TOUCHSCREEN_CYTTSP_CORE=m
4161 +CONFIG_TOUCHSCREEN_CYTTSP_I2C=m
4162 +CONFIG_TOUCHSCREEN_CYTTSP_SPI=m
4163 +CONFIG_TOUCHSCREEN_CYTTSP4_CORE=m
4164 +CONFIG_TOUCHSCREEN_CYTTSP4_I2C=m
4165 +CONFIG_TOUCHSCREEN_CYTTSP4_SPI=m
4166 +CONFIG_TOUCHSCREEN_DA9034=m
4167 +CONFIG_TOUCHSCREEN_DA9052=m
4168 +CONFIG_TOUCHSCREEN_DYNAPRO=m
4169 +CONFIG_TOUCHSCREEN_HAMPSHIRE=m
4170 +CONFIG_TOUCHSCREEN_EETI=m
4171 +CONFIG_TOUCHSCREEN_EGALAX_SERIAL=m
4172 +CONFIG_TOUCHSCREEN_EXC3000=m
4173 +CONFIG_TOUCHSCREEN_FUJITSU=m
4174 +CONFIG_TOUCHSCREEN_GOODIX=m
4175 +CONFIG_TOUCHSCREEN_HIDEEP=m
4176 +CONFIG_TOUCHSCREEN_ILI210X=m
4177 +CONFIG_TOUCHSCREEN_S6SY761=m
4178 +CONFIG_TOUCHSCREEN_GUNZE=m
4179 +CONFIG_TOUCHSCREEN_EKTF2127=m
4180 +CONFIG_TOUCHSCREEN_ELAN=y
4181 +CONFIG_TOUCHSCREEN_ELO=m
4182 +CONFIG_TOUCHSCREEN_WACOM_W8001=m
4183 +CONFIG_TOUCHSCREEN_WACOM_I2C=m
4184 +CONFIG_TOUCHSCREEN_MAX11801=m
4185 +CONFIG_TOUCHSCREEN_MCS5000=m
4186 +CONFIG_TOUCHSCREEN_MMS114=m
4187 +CONFIG_TOUCHSCREEN_MELFAS_MIP4=m
4188 +CONFIG_TOUCHSCREEN_MTOUCH=m
4189 +CONFIG_TOUCHSCREEN_INEXIO=m
4190 +CONFIG_TOUCHSCREEN_MK712=m
4191 +CONFIG_TOUCHSCREEN_PENMOUNT=m
4192 +CONFIG_TOUCHSCREEN_EDT_FT5X06=m
4193 +CONFIG_TOUCHSCREEN_TOUCHRIGHT=m
4194 +CONFIG_TOUCHSCREEN_TOUCHWIN=m
4195 +CONFIG_TOUCHSCREEN_TI_AM335X_TSC=m
4196 +CONFIG_TOUCHSCREEN_UCB1400=m
4197 +CONFIG_TOUCHSCREEN_PIXCIR=m
4198 +CONFIG_TOUCHSCREEN_WDT87XX_I2C=m
4199 +CONFIG_TOUCHSCREEN_WM831X=m
4200 +CONFIG_TOUCHSCREEN_WM97XX=m
4201 +CONFIG_TOUCHSCREEN_WM9705=y
4202 +CONFIG_TOUCHSCREEN_WM9712=y
4203 +CONFIG_TOUCHSCREEN_WM9713=y
4204 +CONFIG_TOUCHSCREEN_USB_COMPOSITE=m
4205 +CONFIG_TOUCHSCREEN_MC13783=m
4206 +CONFIG_TOUCHSCREEN_USB_EGALAX=y
4207 +CONFIG_TOUCHSCREEN_USB_PANJIT=y
4208 +CONFIG_TOUCHSCREEN_USB_3M=y
4209 +CONFIG_TOUCHSCREEN_USB_ITM=y
4210 +CONFIG_TOUCHSCREEN_USB_ETURBO=y
4211 +CONFIG_TOUCHSCREEN_USB_GUNZE=y
4212 +CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y
4213 +CONFIG_TOUCHSCREEN_USB_IRTOUCH=y
4214 +CONFIG_TOUCHSCREEN_USB_IDEALTEK=y
4215 +CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y
4216 +CONFIG_TOUCHSCREEN_USB_GOTOP=y
4217 +CONFIG_TOUCHSCREEN_USB_JASTEC=y
4218 +CONFIG_TOUCHSCREEN_USB_ELO=y
4219 +CONFIG_TOUCHSCREEN_USB_E2I=y
4220 +CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y
4221 +CONFIG_TOUCHSCREEN_USB_ETT_TC45USB=y
4222 +CONFIG_TOUCHSCREEN_USB_NEXIO=y
4223 +CONFIG_TOUCHSCREEN_USB_EASYTOUCH=y
4224 +CONFIG_TOUCHSCREEN_TOUCHIT213=m
4225 +CONFIG_TOUCHSCREEN_TSC_SERIO=m
4226 +CONFIG_TOUCHSCREEN_TSC200X_CORE=m
4227 +CONFIG_TOUCHSCREEN_TSC2004=m
4228 +CONFIG_TOUCHSCREEN_TSC2005=m
4229 +CONFIG_TOUCHSCREEN_TSC2007=m
4230 +CONFIG_TOUCHSCREEN_TSC2007_IIO=y
4231 +CONFIG_TOUCHSCREEN_PCAP=m
4232 +CONFIG_TOUCHSCREEN_RM_TS=m
4233 +CONFIG_TOUCHSCREEN_SILEAD=m
4234 +CONFIG_TOUCHSCREEN_SIS_I2C=m
4235 +CONFIG_TOUCHSCREEN_ST1232=m
4236 +CONFIG_TOUCHSCREEN_STMFTS=m
4237 +CONFIG_TOUCHSCREEN_SUR40=m
4238 +CONFIG_TOUCHSCREEN_SURFACE3_SPI=m
4239 +CONFIG_TOUCHSCREEN_SX8654=m
4240 +CONFIG_TOUCHSCREEN_TPS6507X=m
4241 +CONFIG_TOUCHSCREEN_ZET6223=m
4242 +CONFIG_TOUCHSCREEN_ZFORCE=m
4243 +CONFIG_TOUCHSCREEN_ROHM_BU21023=m
4244 +CONFIG_TOUCHSCREEN_IQS5XX=m
4245 +CONFIG_TOUCHSCREEN_ZINITIX=m
4246 +CONFIG_INPUT_MISC=y
4247 +CONFIG_INPUT_88PM860X_ONKEY=m
4248 +CONFIG_INPUT_88PM80X_ONKEY=m
4249 +CONFIG_INPUT_AD714X=m
4250 +CONFIG_INPUT_AD714X_I2C=m
4251 +CONFIG_INPUT_AD714X_SPI=m
4252 +CONFIG_INPUT_ARIZONA_HAPTICS=m
4253 +CONFIG_INPUT_BMA150=m
4254 +CONFIG_INPUT_E3X0_BUTTON=m
4255 +CONFIG_INPUT_PCSPKR=m
4256 +CONFIG_INPUT_MAX77693_HAPTIC=m
4257 +CONFIG_INPUT_MAX8925_ONKEY=m
4258 +CONFIG_INPUT_MAX8997_HAPTIC=m
4259 +CONFIG_INPUT_MC13783_PWRBUTTON=m
4260 +CONFIG_INPUT_MMA8450=m
4261 +CONFIG_INPUT_APANEL=m
4262 +CONFIG_INPUT_GPIO_BEEPER=m
4263 +CONFIG_INPUT_GPIO_DECODER=m
4264 +CONFIG_INPUT_GPIO_VIBRA=m
4265 +CONFIG_INPUT_ATLAS_BTNS=m
4266 +CONFIG_INPUT_ATI_REMOTE2=m
4267 +CONFIG_INPUT_KEYSPAN_REMOTE=m
4268 +CONFIG_INPUT_KXTJ9=m
4269 +CONFIG_INPUT_POWERMATE=m
4270 +CONFIG_INPUT_YEALINK=m
4271 +CONFIG_INPUT_CM109=m
4272 +CONFIG_INPUT_REGULATOR_HAPTIC=m
4273 +CONFIG_INPUT_RETU_PWRBUTTON=m
4274 +CONFIG_INPUT_AXP20X_PEK=m
4275 +CONFIG_INPUT_TWL4030_PWRBUTTON=m
4276 +CONFIG_INPUT_TWL4030_VIBRA=m
4277 +CONFIG_INPUT_TWL6040_VIBRA=m
4278 +CONFIG_INPUT_UINPUT=y
4279 +CONFIG_INPUT_PALMAS_PWRBUTTON=m
4280 +CONFIG_INPUT_PCF50633_PMU=m
4281 +CONFIG_INPUT_PCF8574=m
4282 +CONFIG_INPUT_PWM_BEEPER=m
4283 +CONFIG_INPUT_PWM_VIBRA=m
4284 +CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
4285 +CONFIG_INPUT_DA7280_HAPTICS=m
4286 +CONFIG_INPUT_DA9052_ONKEY=m
4287 +CONFIG_INPUT_DA9055_ONKEY=m
4288 +CONFIG_INPUT_DA9063_ONKEY=m
4289 +CONFIG_INPUT_WM831X_ON=m
4290 +CONFIG_INPUT_PCAP=m
4291 +CONFIG_INPUT_ADXL34X=m
4292 +CONFIG_INPUT_ADXL34X_I2C=m
4293 +CONFIG_INPUT_ADXL34X_SPI=m
4294 +CONFIG_INPUT_IMS_PCU=m
4295 +CONFIG_INPUT_IQS269A=m
4296 +CONFIG_INPUT_CMA3000=m
4297 +CONFIG_INPUT_CMA3000_I2C=m
4298 +CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m
4299 +CONFIG_INPUT_IDEAPAD_SLIDEBAR=m
4300 +CONFIG_INPUT_SOC_BUTTON_ARRAY=m
4301 +CONFIG_INPUT_DRV260X_HAPTICS=m
4302 +CONFIG_INPUT_DRV2665_HAPTICS=m
4303 +CONFIG_INPUT_DRV2667_HAPTICS=m
4304 +CONFIG_INPUT_RAVE_SP_PWRBUTTON=m
4305 +CONFIG_RMI4_CORE=m
4306 +CONFIG_RMI4_I2C=m
4307 +CONFIG_RMI4_SPI=m
4308 +CONFIG_RMI4_SMB=m
4309 +CONFIG_RMI4_F03=y
4310 +CONFIG_RMI4_F03_SERIO=m
4311 +CONFIG_RMI4_2D_SENSOR=y
4312 +CONFIG_RMI4_F11=y
4313 +CONFIG_RMI4_F12=y
4314 +CONFIG_RMI4_F30=y
4315 +CONFIG_RMI4_F34=y
4316 +CONFIG_RMI4_F3A=y
4317 +CONFIG_RMI4_F54=y
4318 +CONFIG_RMI4_F55=y
4319 +
4320 +#
4321 +# Hardware I/O ports
4322 +#
4323 +CONFIG_SERIO=y
4324 +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y
4325 +CONFIG_SERIO_I8042=y
4326 +CONFIG_SERIO_SERPORT=m
4327 +CONFIG_SERIO_CT82C710=m
4328 +CONFIG_SERIO_PARKBD=m
4329 +CONFIG_SERIO_PCIPS2=m
4330 +CONFIG_SERIO_LIBPS2=y
4331 +CONFIG_SERIO_RAW=m
4332 +CONFIG_SERIO_ALTERA_PS2=m
4333 +CONFIG_SERIO_PS2MULT=m
4334 +CONFIG_SERIO_ARC_PS2=m
4335 +CONFIG_HYPERV_KEYBOARD=m
4336 +CONFIG_SERIO_GPIO_PS2=m
4337 +CONFIG_USERIO=m
4338 +CONFIG_GAMEPORT=m
4339 +CONFIG_GAMEPORT_NS558=m
4340 +CONFIG_GAMEPORT_L4=m
4341 +CONFIG_GAMEPORT_EMU10K1=m
4342 +CONFIG_GAMEPORT_FM801=m
4343 +# end of Hardware I/O ports
4344 +# end of Input device support
4345 +
4346 +#
4347 +# Character devices
4348 +#
4349 +CONFIG_TTY=y
4350 +CONFIG_VT=y
4351 +CONFIG_CONSOLE_TRANSLATIONS=y
4352 +CONFIG_VT_CONSOLE=y
4353 +CONFIG_VT_CONSOLE_SLEEP=y
4354 +CONFIG_HW_CONSOLE=y
4355 +CONFIG_VT_HW_CONSOLE_BINDING=y
4356 +CONFIG_UNIX98_PTYS=y
4357 +CONFIG_LEGACY_PTYS=y
4358 +CONFIG_LEGACY_PTY_COUNT=0
4359 +CONFIG_LDISC_AUTOLOAD=y
4360 +
4361 +#
4362 +# Serial drivers
4363 +#
4364 +CONFIG_SERIAL_EARLYCON=y
4365 +CONFIG_SERIAL_8250=y
4366 +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
4367 +CONFIG_SERIAL_8250_PNP=y
4368 +CONFIG_SERIAL_8250_16550A_VARIANTS=y
4369 +CONFIG_SERIAL_8250_FINTEK=y
4370 +CONFIG_SERIAL_8250_CONSOLE=y
4371 +CONFIG_SERIAL_8250_DMA=y
4372 +CONFIG_SERIAL_8250_PCI=y
4373 +CONFIG_SERIAL_8250_EXAR=m
4374 +CONFIG_SERIAL_8250_CS=m
4375 +CONFIG_SERIAL_8250_MEN_MCB=m
4376 +CONFIG_SERIAL_8250_NR_UARTS=48
4377 +CONFIG_SERIAL_8250_RUNTIME_UARTS=32
4378 +CONFIG_SERIAL_8250_EXTENDED=y
4379 +CONFIG_SERIAL_8250_MANY_PORTS=y
4380 +CONFIG_SERIAL_8250_SHARE_IRQ=y
4381 +# CONFIG_SERIAL_8250_DETECT_IRQ is not set
4382 +CONFIG_SERIAL_8250_RSA=y
4383 +CONFIG_SERIAL_8250_DWLIB=y
4384 +CONFIG_SERIAL_8250_DW=m
4385 +CONFIG_SERIAL_8250_RT288X=y
4386 +CONFIG_SERIAL_8250_LPSS=m
4387 +CONFIG_SERIAL_8250_MID=m
4388 +
4389 +#
4390 +# Non-8250 serial port support
4391 +#
4392 +CONFIG_SERIAL_KGDB_NMI=y
4393 +CONFIG_SERIAL_MAX3100=m
4394 +CONFIG_SERIAL_MAX310X=y
4395 +CONFIG_SERIAL_UARTLITE=m
4396 +CONFIG_SERIAL_UARTLITE_NR_UARTS=1
4397 +CONFIG_SERIAL_CORE=y
4398 +CONFIG_SERIAL_CORE_CONSOLE=y
4399 +CONFIG_CONSOLE_POLL=y
4400 +CONFIG_SERIAL_JSM=m
4401 +CONFIG_SERIAL_LANTIQ=m
4402 +CONFIG_SERIAL_SCCNXP=y
4403 +CONFIG_SERIAL_SCCNXP_CONSOLE=y
4404 +CONFIG_SERIAL_SC16IS7XX_CORE=m
4405 +CONFIG_SERIAL_SC16IS7XX=m
4406 +CONFIG_SERIAL_SC16IS7XX_I2C=y
4407 +CONFIG_SERIAL_SC16IS7XX_SPI=y
4408 +CONFIG_SERIAL_BCM63XX=m
4409 +CONFIG_SERIAL_ALTERA_JTAGUART=m
4410 +CONFIG_SERIAL_ALTERA_UART=m
4411 +CONFIG_SERIAL_ALTERA_UART_MAXPORTS=4
4412 +CONFIG_SERIAL_ALTERA_UART_BAUDRATE=115200
4413 +CONFIG_SERIAL_ARC=m
4414 +CONFIG_SERIAL_ARC_NR_PORTS=1
4415 +CONFIG_SERIAL_RP2=m
4416 +CONFIG_SERIAL_RP2_NR_UARTS=32
4417 +CONFIG_SERIAL_FSL_LPUART=m
4418 +CONFIG_SERIAL_FSL_LINFLEXUART=m
4419 +CONFIG_SERIAL_MEN_Z135=m
4420 +CONFIG_SERIAL_SPRD=m
4421 +# end of Serial drivers
4422 +
4423 +CONFIG_SERIAL_MCTRL_GPIO=y
4424 +CONFIG_SERIAL_NONSTANDARD=y
4425 +CONFIG_ROCKETPORT=m
4426 +CONFIG_CYCLADES=m
4427 +# CONFIG_CYZ_INTR is not set
4428 +CONFIG_MOXA_INTELLIO=m
4429 +CONFIG_MOXA_SMARTIO=m
4430 +CONFIG_SYNCLINK_GT=m
4431 +CONFIG_ISI=m
4432 +CONFIG_N_HDLC=m
4433 +CONFIG_N_GSM=m
4434 +CONFIG_NOZOMI=m
4435 +CONFIG_NULL_TTY=m
4436 +CONFIG_TRACE_ROUTER=m
4437 +CONFIG_TRACE_SINK=m
4438 +CONFIG_HVC_DRIVER=y
4439 +CONFIG_HVC_IRQ=y
4440 +CONFIG_HVC_XEN=y
4441 +CONFIG_HVC_XEN_FRONTEND=y
4442 +CONFIG_SERIAL_DEV_BUS=y
4443 +CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
4444 +CONFIG_TTY_PRINTK=y
4445 +CONFIG_TTY_PRINTK_LEVEL=6
4446 +CONFIG_PRINTER=m
4447 +# CONFIG_LP_CONSOLE is not set
4448 +CONFIG_PPDEV=m
4449 +CONFIG_VIRTIO_CONSOLE=y
4450 +CONFIG_IPMI_HANDLER=m
4451 +CONFIG_IPMI_DMI_DECODE=y
4452 +CONFIG_IPMI_PLAT_DATA=y
4453 +# CONFIG_IPMI_PANIC_EVENT is not set
4454 +CONFIG_IPMI_DEVICE_INTERFACE=m
4455 +CONFIG_IPMI_SI=m
4456 +CONFIG_IPMI_SSIF=m
4457 +CONFIG_IPMI_WATCHDOG=m
4458 +CONFIG_IPMI_POWEROFF=m
4459 +CONFIG_HW_RANDOM=y
4460 +CONFIG_HW_RANDOM_TIMERIOMEM=m
4461 +CONFIG_HW_RANDOM_INTEL=m
4462 +CONFIG_HW_RANDOM_AMD=m
4463 +CONFIG_HW_RANDOM_BA431=m
4464 +CONFIG_HW_RANDOM_VIA=m
4465 +CONFIG_HW_RANDOM_VIRTIO=m
4466 +CONFIG_HW_RANDOM_XIPHERA=m
4467 +CONFIG_APPLICOM=m
4468 +
4469 +#
4470 +# PCMCIA character devices
4471 +#
4472 +CONFIG_SYNCLINK_CS=m
4473 +CONFIG_CARDMAN_4000=m
4474 +CONFIG_CARDMAN_4040=m
4475 +CONFIG_SCR24X=m
4476 +CONFIG_IPWIRELESS=m
4477 +# end of PCMCIA character devices
4478 +
4479 +CONFIG_MWAVE=m
4480 +CONFIG_DEVMEM=y
4481 +# CONFIG_DEVKMEM is not set
4482 +CONFIG_NVRAM=m
4483 +CONFIG_RAW_DRIVER=m
4484 +CONFIG_MAX_RAW_DEVS=256
4485 +CONFIG_DEVPORT=y
4486 +CONFIG_HPET=y
4487 +CONFIG_HPET_MMAP=y
4488 +CONFIG_HPET_MMAP_DEFAULT=y
4489 +CONFIG_HANGCHECK_TIMER=m
4490 +CONFIG_UV_MMTIMER=m
4491 +CONFIG_TCG_TPM=y
4492 +CONFIG_HW_RANDOM_TPM=y
4493 +CONFIG_TCG_TIS_CORE=y
4494 +CONFIG_TCG_TIS=y
4495 +CONFIG_TCG_TIS_SPI=m
4496 +CONFIG_TCG_TIS_SPI_CR50=y
4497 +CONFIG_TCG_TIS_I2C_CR50=m
4498 +CONFIG_TCG_TIS_I2C_ATMEL=m
4499 +CONFIG_TCG_TIS_I2C_INFINEON=m
4500 +CONFIG_TCG_TIS_I2C_NUVOTON=m
4501 +CONFIG_TCG_NSC=m
4502 +CONFIG_TCG_ATMEL=m
4503 +CONFIG_TCG_INFINEON=m
4504 +CONFIG_TCG_XEN=m
4505 +CONFIG_TCG_CRB=y
4506 +CONFIG_TCG_VTPM_PROXY=m
4507 +CONFIG_TCG_TIS_ST33ZP24=m
4508 +CONFIG_TCG_TIS_ST33ZP24_I2C=m
4509 +CONFIG_TCG_TIS_ST33ZP24_SPI=m
4510 +CONFIG_TELCLOCK=m
4511 +CONFIG_XILLYBUS=m
4512 +CONFIG_XILLYBUS_PCIE=m
4513 +# end of Character devices
4514 +
4515 +CONFIG_RANDOM_TRUST_CPU=y
4516 +CONFIG_RANDOM_TRUST_BOOTLOADER=y
4517 +
4518 +#
4519 +# I2C support
4520 +#
4521 +CONFIG_I2C=y
4522 +CONFIG_ACPI_I2C_OPREGION=y
4523 +CONFIG_I2C_BOARDINFO=y
4524 +CONFIG_I2C_COMPAT=y
4525 +CONFIG_I2C_CHARDEV=y
4526 +CONFIG_I2C_MUX=m
4527 +
4528 +#
4529 +# Multiplexer I2C Chip support
4530 +#
4531 +CONFIG_I2C_MUX_GPIO=m
4532 +CONFIG_I2C_MUX_LTC4306=m
4533 +CONFIG_I2C_MUX_PCA9541=m
4534 +CONFIG_I2C_MUX_PCA954x=m
4535 +CONFIG_I2C_MUX_REG=m
4536 +CONFIG_I2C_MUX_MLXCPLD=m
4537 +# end of Multiplexer I2C Chip support
4538 +
4539 +CONFIG_I2C_HELPER_AUTO=y
4540 +CONFIG_I2C_SMBUS=m
4541 +CONFIG_I2C_ALGOBIT=m
4542 +CONFIG_I2C_ALGOPCA=m
4543 +
4544 +#
4545 +# I2C Hardware Bus support
4546 +#
4547 +
4548 +#
4549 +# PC SMBus host controller drivers
4550 +#
4551 +CONFIG_I2C_ALI1535=m
4552 +CONFIG_I2C_ALI1563=m
4553 +CONFIG_I2C_ALI15X3=m
4554 +CONFIG_I2C_AMD756=m
4555 +CONFIG_I2C_AMD756_S4882=m
4556 +CONFIG_I2C_AMD8111=m
4557 +CONFIG_I2C_AMD_MP2=m
4558 +CONFIG_I2C_I801=m
4559 +CONFIG_I2C_ISCH=m
4560 +CONFIG_I2C_ISMT=m
4561 +CONFIG_I2C_PIIX4=m
4562 +CONFIG_I2C_CHT_WC=m
4563 +CONFIG_I2C_NFORCE2=m
4564 +CONFIG_I2C_NFORCE2_S4985=m
4565 +CONFIG_I2C_NVIDIA_GPU=m
4566 +CONFIG_I2C_SIS5595=m
4567 +CONFIG_I2C_SIS630=m
4568 +CONFIG_I2C_SIS96X=m
4569 +CONFIG_I2C_VIA=m
4570 +CONFIG_I2C_VIAPRO=m
4571 +
4572 +#
4573 +# ACPI drivers
4574 +#
4575 +CONFIG_I2C_SCMI=m
4576 +
4577 +#
4578 +# I2C system bus drivers (mostly embedded / system-on-chip)
4579 +#
4580 +CONFIG_I2C_CBUS_GPIO=m
4581 +CONFIG_I2C_DESIGNWARE_CORE=y
4582 +# CONFIG_I2C_DESIGNWARE_SLAVE is not set
4583 +CONFIG_I2C_DESIGNWARE_PLATFORM=y
4584 +CONFIG_I2C_DESIGNWARE_BAYTRAIL=y
4585 +CONFIG_I2C_DESIGNWARE_PCI=m
4586 +# CONFIG_I2C_EMEV2 is not set
4587 +CONFIG_I2C_GPIO=m
4588 +# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set
4589 +CONFIG_I2C_KEMPLD=m
4590 +CONFIG_I2C_OCORES=m
4591 +CONFIG_I2C_PCA_PLATFORM=m
4592 +CONFIG_I2C_SIMTEC=m
4593 +CONFIG_I2C_XILINX=m
4594 +
4595 +#
4596 +# External I2C/SMBus adapter drivers
4597 +#
4598 +CONFIG_I2C_DIOLAN_U2C=m
4599 +CONFIG_I2C_DLN2=m
4600 +CONFIG_I2C_PARPORT=m
4601 +CONFIG_I2C_ROBOTFUZZ_OSIF=m
4602 +CONFIG_I2C_TAOS_EVM=m
4603 +CONFIG_I2C_TINY_USB=m
4604 +CONFIG_I2C_VIPERBOARD=m
4605 +
4606 +#
4607 +# Other I2C/SMBus bus drivers
4608 +#
4609 +CONFIG_I2C_MLXCPLD=m
4610 +CONFIG_I2C_CROS_EC_TUNNEL=m
4611 +# end of I2C Hardware Bus support
4612 +
4613 +CONFIG_I2C_STUB=m
4614 +# CONFIG_I2C_SLAVE is not set
4615 +# CONFIG_I2C_DEBUG_CORE is not set
4616 +# CONFIG_I2C_DEBUG_ALGO is not set
4617 +# CONFIG_I2C_DEBUG_BUS is not set
4618 +# end of I2C support
4619 +
4620 +CONFIG_I3C=m
4621 +CONFIG_CDNS_I3C_MASTER=m
4622 +CONFIG_DW_I3C_MASTER=m
4623 +CONFIG_SVC_I3C_MASTER=m
4624 +CONFIG_MIPI_I3C_HCI=m
4625 +CONFIG_SPI=y
4626 +# CONFIG_SPI_DEBUG is not set
4627 +CONFIG_SPI_MASTER=y
4628 +CONFIG_SPI_MEM=y
4629 +
4630 +#
4631 +# SPI Master Controller Drivers
4632 +#
4633 +CONFIG_SPI_ALTERA=m
4634 +CONFIG_SPI_AXI_SPI_ENGINE=m
4635 +CONFIG_SPI_BITBANG=m
4636 +CONFIG_SPI_BUTTERFLY=m
4637 +CONFIG_SPI_CADENCE=m
4638 +CONFIG_SPI_DESIGNWARE=m
4639 +CONFIG_SPI_DW_DMA=y
4640 +CONFIG_SPI_DW_PCI=m
4641 +CONFIG_SPI_DW_MMIO=m
4642 +CONFIG_SPI_DLN2=m
4643 +CONFIG_SPI_NXP_FLEXSPI=m
4644 +CONFIG_SPI_GPIO=m
4645 +CONFIG_SPI_LM70_LLP=m
4646 +CONFIG_SPI_LANTIQ_SSC=m
4647 +CONFIG_SPI_OC_TINY=m
4648 +CONFIG_SPI_PXA2XX=m
4649 +CONFIG_SPI_PXA2XX_PCI=m
4650 +# CONFIG_SPI_ROCKCHIP is not set
4651 +CONFIG_SPI_SC18IS602=m
4652 +CONFIG_SPI_SIFIVE=m
4653 +CONFIG_SPI_MXIC=m
4654 +CONFIG_SPI_XCOMM=m
4655 +# CONFIG_SPI_XILINX is not set
4656 +CONFIG_SPI_ZYNQMP_GQSPI=m
4657 +CONFIG_SPI_AMD=m
4658 +
4659 +#
4660 +# SPI Multiplexer support
4661 +#
4662 +CONFIG_SPI_MUX=m
4663 +
4664 +#
4665 +# SPI Protocol Masters
4666 +#
4667 +CONFIG_SPI_SPIDEV=m
4668 +CONFIG_SPI_LOOPBACK_TEST=m
4669 +CONFIG_SPI_TLE62X0=m
4670 +CONFIG_SPI_SLAVE=y
4671 +CONFIG_SPI_SLAVE_TIME=m
4672 +CONFIG_SPI_SLAVE_SYSTEM_CONTROL=m
4673 +CONFIG_SPI_DYNAMIC=y
4674 +CONFIG_SPMI=m
4675 +CONFIG_HSI=m
4676 +CONFIG_HSI_BOARDINFO=y
4677 +
4678 +#
4679 +# HSI controllers
4680 +#
4681 +
4682 +#
4683 +# HSI clients
4684 +#
4685 +CONFIG_HSI_CHAR=m
4686 +CONFIG_PPS=y
4687 +# CONFIG_PPS_DEBUG is not set
4688 +
4689 +#
4690 +# PPS clients support
4691 +#
4692 +# CONFIG_PPS_CLIENT_KTIMER is not set
4693 +CONFIG_PPS_CLIENT_LDISC=m
4694 +CONFIG_PPS_CLIENT_PARPORT=m
4695 +CONFIG_PPS_CLIENT_GPIO=m
4696 +
4697 +#
4698 +# PPS generators support
4699 +#
4700 +
4701 +#
4702 +# PTP clock support
4703 +#
4704 +CONFIG_PTP_1588_CLOCK=y
4705 +CONFIG_DP83640_PHY=m
4706 +CONFIG_PTP_1588_CLOCK_INES=m
4707 +CONFIG_PTP_1588_CLOCK_KVM=m
4708 +CONFIG_PTP_1588_CLOCK_IDT82P33=m
4709 +CONFIG_PTP_1588_CLOCK_IDTCM=m
4710 +CONFIG_PTP_1588_CLOCK_VMW=m
4711 +CONFIG_PTP_1588_CLOCK_OCP=m
4712 +# end of PTP clock support
4713 +
4714 +CONFIG_PINCTRL=y
4715 +CONFIG_PINMUX=y
4716 +CONFIG_PINCONF=y
4717 +CONFIG_GENERIC_PINCONF=y
4718 +# CONFIG_DEBUG_PINCTRL is not set
4719 +CONFIG_PINCTRL_AMD=y
4720 +CONFIG_PINCTRL_DA9062=m
4721 +CONFIG_PINCTRL_MCP23S08_I2C=m
4722 +CONFIG_PINCTRL_MCP23S08_SPI=m
4723 +CONFIG_PINCTRL_MCP23S08=m
4724 +CONFIG_PINCTRL_SX150X=y
4725 +CONFIG_PINCTRL_BAYTRAIL=y
4726 +CONFIG_PINCTRL_CHERRYVIEW=y
4727 +CONFIG_PINCTRL_LYNXPOINT=m
4728 +CONFIG_PINCTRL_INTEL=y
4729 +CONFIG_PINCTRL_ALDERLAKE=m
4730 +CONFIG_PINCTRL_BROXTON=m
4731 +CONFIG_PINCTRL_CANNONLAKE=m
4732 +CONFIG_PINCTRL_CEDARFORK=m
4733 +CONFIG_PINCTRL_DENVERTON=m
4734 +CONFIG_PINCTRL_ELKHARTLAKE=m
4735 +CONFIG_PINCTRL_EMMITSBURG=m
4736 +CONFIG_PINCTRL_GEMINILAKE=m
4737 +CONFIG_PINCTRL_ICELAKE=m
4738 +CONFIG_PINCTRL_JASPERLAKE=m
4739 +CONFIG_PINCTRL_LAKEFIELD=m
4740 +CONFIG_PINCTRL_LEWISBURG=m
4741 +CONFIG_PINCTRL_SUNRISEPOINT=m
4742 +CONFIG_PINCTRL_TIGERLAKE=m
4743 +
4744 +#
4745 +# Renesas pinctrl drivers
4746 +#
4747 +# end of Renesas pinctrl drivers
4748 +
4749 +CONFIG_PINCTRL_MADERA=m
4750 +CONFIG_PINCTRL_CS47L15=y
4751 +CONFIG_PINCTRL_CS47L35=y
4752 +CONFIG_PINCTRL_CS47L85=y
4753 +CONFIG_PINCTRL_CS47L90=y
4754 +CONFIG_PINCTRL_CS47L92=y
4755 +CONFIG_GPIOLIB=y
4756 +CONFIG_GPIOLIB_FASTPATH_LIMIT=512
4757 +CONFIG_GPIO_ACPI=y
4758 +CONFIG_GPIOLIB_IRQCHIP=y
4759 +# CONFIG_DEBUG_GPIO is not set
4760 +CONFIG_GPIO_SYSFS=y
4761 +CONFIG_GPIO_CDEV=y
4762 +# CONFIG_GPIO_CDEV_V1 is not set
4763 +CONFIG_GPIO_GENERIC=m
4764 +CONFIG_GPIO_MAX730X=m
4765 +
4766 +#
4767 +# Memory mapped GPIO drivers
4768 +#
4769 +CONFIG_GPIO_AMDPT=m
4770 +CONFIG_GPIO_DWAPB=m
4771 +CONFIG_GPIO_EXAR=m
4772 +CONFIG_GPIO_GENERIC_PLATFORM=m
4773 +CONFIG_GPIO_ICH=m
4774 +CONFIG_GPIO_MB86S7X=m
4775 +CONFIG_GPIO_MENZ127=m
4776 +CONFIG_GPIO_SIOX=m
4777 +CONFIG_GPIO_VX855=m
4778 +CONFIG_GPIO_AMD_FCH=m
4779 +# end of Memory mapped GPIO drivers
4780 +
4781 +#
4782 +# Port-mapped I/O GPIO drivers
4783 +#
4784 +CONFIG_GPIO_104_DIO_48E=m
4785 +CONFIG_GPIO_104_IDIO_16=m
4786 +CONFIG_GPIO_104_IDI_48=m
4787 +CONFIG_GPIO_F7188X=m
4788 +CONFIG_GPIO_GPIO_MM=m
4789 +CONFIG_GPIO_IT87=m
4790 +CONFIG_GPIO_SCH=m
4791 +CONFIG_GPIO_SCH311X=m
4792 +CONFIG_GPIO_WINBOND=m
4793 +CONFIG_GPIO_WS16C48=m
4794 +# end of Port-mapped I/O GPIO drivers
4795 +
4796 +#
4797 +# I2C GPIO expanders
4798 +#
4799 +CONFIG_GPIO_ADP5588=m
4800 +CONFIG_GPIO_MAX7300=m
4801 +CONFIG_GPIO_MAX732X=m
4802 +CONFIG_GPIO_PCA953X=m
4803 +CONFIG_GPIO_PCA953X_IRQ=y
4804 +CONFIG_GPIO_PCA9570=m
4805 +CONFIG_GPIO_PCF857X=m
4806 +CONFIG_GPIO_TPIC2810=m
4807 +# end of I2C GPIO expanders
4808 +
4809 +#
4810 +# MFD GPIO expanders
4811 +#
4812 +CONFIG_GPIO_ADP5520=m
4813 +CONFIG_GPIO_ARIZONA=m
4814 +CONFIG_GPIO_BD9571MWV=m
4815 +CONFIG_GPIO_CRYSTAL_COVE=y
4816 +CONFIG_GPIO_DA9052=m
4817 +CONFIG_GPIO_DA9055=m
4818 +CONFIG_GPIO_DLN2=m
4819 +CONFIG_GPIO_JANZ_TTL=m
4820 +CONFIG_GPIO_KEMPLD=m
4821 +CONFIG_GPIO_LP3943=m
4822 +CONFIG_GPIO_LP873X=m
4823 +CONFIG_GPIO_MADERA=m
4824 +CONFIG_GPIO_PALMAS=y
4825 +CONFIG_GPIO_RC5T583=y
4826 +CONFIG_GPIO_TPS65086=m
4827 +CONFIG_GPIO_TPS6586X=y
4828 +CONFIG_GPIO_TPS65910=y
4829 +CONFIG_GPIO_TPS65912=m
4830 +CONFIG_GPIO_TPS68470=y
4831 +CONFIG_GPIO_TQMX86=m
4832 +CONFIG_GPIO_TWL4030=m
4833 +CONFIG_GPIO_TWL6040=m
4834 +CONFIG_GPIO_UCB1400=m
4835 +CONFIG_GPIO_WHISKEY_COVE=m
4836 +CONFIG_GPIO_WM831X=m
4837 +CONFIG_GPIO_WM8350=m
4838 +CONFIG_GPIO_WM8994=m
4839 +# end of MFD GPIO expanders
4840 +
4841 +#
4842 +# PCI GPIO expanders
4843 +#
4844 +CONFIG_GPIO_AMD8111=m
4845 +CONFIG_GPIO_ML_IOH=m
4846 +CONFIG_GPIO_PCI_IDIO_16=m
4847 +CONFIG_GPIO_PCIE_IDIO_24=m
4848 +CONFIG_GPIO_RDC321X=m
4849 +# end of PCI GPIO expanders
4850 +
4851 +#
4852 +# SPI GPIO expanders
4853 +#
4854 +CONFIG_GPIO_MAX3191X=m
4855 +CONFIG_GPIO_MAX7301=m
4856 +CONFIG_GPIO_MC33880=m
4857 +CONFIG_GPIO_PISOSR=m
4858 +CONFIG_GPIO_XRA1403=m
4859 +# end of SPI GPIO expanders
4860 +
4861 +#
4862 +# USB GPIO expanders
4863 +#
4864 +CONFIG_GPIO_VIPERBOARD=m
4865 +# end of USB GPIO expanders
4866 +
4867 +#
4868 +# Virtual GPIO drivers
4869 +#
4870 +CONFIG_GPIO_AGGREGATOR=m
4871 +# CONFIG_GPIO_MOCKUP is not set
4872 +# end of Virtual GPIO drivers
4873 +
4874 +CONFIG_W1=m
4875 +CONFIG_W1_CON=y
4876 +
4877 +#
4878 +# 1-wire Bus Masters
4879 +#
4880 +CONFIG_W1_MASTER_MATROX=m
4881 +CONFIG_W1_MASTER_DS2490=m
4882 +CONFIG_W1_MASTER_DS2482=m
4883 +CONFIG_W1_MASTER_DS1WM=m
4884 +CONFIG_W1_MASTER_GPIO=m
4885 +CONFIG_W1_MASTER_SGI=m
4886 +# end of 1-wire Bus Masters
4887 +
4888 +#
4889 +# 1-wire Slaves
4890 +#
4891 +CONFIG_W1_SLAVE_THERM=m
4892 +CONFIG_W1_SLAVE_SMEM=m
4893 +CONFIG_W1_SLAVE_DS2405=m
4894 +CONFIG_W1_SLAVE_DS2408=m
4895 +CONFIG_W1_SLAVE_DS2408_READBACK=y
4896 +CONFIG_W1_SLAVE_DS2413=m
4897 +CONFIG_W1_SLAVE_DS2406=m
4898 +CONFIG_W1_SLAVE_DS2423=m
4899 +CONFIG_W1_SLAVE_DS2805=m
4900 +CONFIG_W1_SLAVE_DS2430=m
4901 +CONFIG_W1_SLAVE_DS2431=m
4902 +CONFIG_W1_SLAVE_DS2433=m
4903 +# CONFIG_W1_SLAVE_DS2433_CRC is not set
4904 +CONFIG_W1_SLAVE_DS2438=m
4905 +CONFIG_W1_SLAVE_DS250X=m
4906 +CONFIG_W1_SLAVE_DS2780=m
4907 +CONFIG_W1_SLAVE_DS2781=m
4908 +CONFIG_W1_SLAVE_DS28E04=m
4909 +CONFIG_W1_SLAVE_DS28E17=m
4910 +# end of 1-wire Slaves
4911 +
4912 +CONFIG_POWER_RESET=y
4913 +CONFIG_POWER_RESET_MT6323=y
4914 +CONFIG_POWER_RESET_RESTART=y
4915 +CONFIG_POWER_SUPPLY=y
4916 +# CONFIG_POWER_SUPPLY_DEBUG is not set
4917 +CONFIG_POWER_SUPPLY_HWMON=y
4918 +CONFIG_PDA_POWER=m
4919 +CONFIG_GENERIC_ADC_BATTERY=m
4920 +CONFIG_MAX8925_POWER=m
4921 +CONFIG_WM831X_BACKUP=m
4922 +CONFIG_WM831X_POWER=m
4923 +CONFIG_WM8350_POWER=m
4924 +CONFIG_TEST_POWER=m
4925 +CONFIG_BATTERY_88PM860X=m
4926 +CONFIG_CHARGER_ADP5061=m
4927 +CONFIG_BATTERY_CW2015=m
4928 +CONFIG_BATTERY_DS2760=m
4929 +CONFIG_BATTERY_DS2780=m
4930 +CONFIG_BATTERY_DS2781=m
4931 +CONFIG_BATTERY_DS2782=m
4932 +CONFIG_BATTERY_SBS=m
4933 +CONFIG_CHARGER_SBS=m
4934 +CONFIG_MANAGER_SBS=m
4935 +CONFIG_BATTERY_BQ27XXX=m
4936 +CONFIG_BATTERY_BQ27XXX_I2C=m
4937 +CONFIG_BATTERY_BQ27XXX_HDQ=m
4938 +# CONFIG_BATTERY_BQ27XXX_DT_UPDATES_NVM is not set
4939 +CONFIG_BATTERY_DA9030=m
4940 +CONFIG_BATTERY_DA9052=m
4941 +CONFIG_CHARGER_DA9150=m
4942 +CONFIG_BATTERY_DA9150=m
4943 +CONFIG_CHARGER_AXP20X=m
4944 +CONFIG_BATTERY_AXP20X=m
4945 +CONFIG_AXP20X_POWER=m
4946 +CONFIG_AXP288_CHARGER=m
4947 +CONFIG_AXP288_FUEL_GAUGE=m
4948 +CONFIG_BATTERY_MAX17040=m
4949 +CONFIG_BATTERY_MAX17042=m
4950 +CONFIG_BATTERY_MAX1721X=m
4951 +CONFIG_BATTERY_TWL4030_MADC=m
4952 +CONFIG_CHARGER_88PM860X=m
4953 +CONFIG_CHARGER_PCF50633=m
4954 +CONFIG_BATTERY_RX51=m
4955 +CONFIG_CHARGER_ISP1704=m
4956 +CONFIG_CHARGER_MAX8903=m
4957 +CONFIG_CHARGER_TWL4030=m
4958 +CONFIG_CHARGER_LP8727=m
4959 +CONFIG_CHARGER_LP8788=m
4960 +CONFIG_CHARGER_GPIO=m
4961 +CONFIG_CHARGER_MANAGER=y
4962 +CONFIG_CHARGER_LT3651=m
4963 +CONFIG_CHARGER_LTC4162L=m
4964 +CONFIG_CHARGER_MAX14577=m
4965 +CONFIG_CHARGER_MAX77693=m
4966 +CONFIG_CHARGER_MAX8997=m
4967 +CONFIG_CHARGER_MAX8998=m
4968 +CONFIG_CHARGER_MP2629=m
4969 +CONFIG_CHARGER_BQ2415X=m
4970 +CONFIG_CHARGER_BQ24190=m
4971 +CONFIG_CHARGER_BQ24257=m
4972 +CONFIG_CHARGER_BQ24735=m
4973 +CONFIG_CHARGER_BQ2515X=m
4974 +CONFIG_CHARGER_BQ25890=m
4975 +CONFIG_CHARGER_BQ25980=m
4976 +CONFIG_CHARGER_BQ256XX=m
4977 +CONFIG_CHARGER_SMB347=m
4978 +CONFIG_CHARGER_TPS65090=m
4979 +CONFIG_BATTERY_GAUGE_LTC2941=m
4980 +CONFIG_BATTERY_RT5033=m
4981 +CONFIG_CHARGER_RT9455=m
4982 +CONFIG_CHARGER_CROS_USBPD=m
4983 +CONFIG_CHARGER_BD99954=m
4984 +CONFIG_CHARGER_WILCO=m
4985 +CONFIG_HWMON=y
4986 +CONFIG_HWMON_VID=m
4987 +# CONFIG_HWMON_DEBUG_CHIP is not set
4988 +
4989 +#
4990 +# Native drivers
4991 +#
4992 +CONFIG_SENSORS_ABITUGURU=m
4993 +CONFIG_SENSORS_ABITUGURU3=m
4994 +CONFIG_SENSORS_AD7314=m
4995 +CONFIG_SENSORS_AD7414=m
4996 +CONFIG_SENSORS_AD7418=m
4997 +CONFIG_SENSORS_ADM1021=m
4998 +CONFIG_SENSORS_ADM1025=m
4999 +CONFIG_SENSORS_ADM1026=m
5000 +CONFIG_SENSORS_ADM1029=m
5001 +CONFIG_SENSORS_ADM1031=m
5002 +CONFIG_SENSORS_ADM1177=m
5003 +CONFIG_SENSORS_ADM9240=m
5004 +CONFIG_SENSORS_ADT7X10=m
5005 +CONFIG_SENSORS_ADT7310=m
5006 +CONFIG_SENSORS_ADT7410=m
5007 +CONFIG_SENSORS_ADT7411=m
5008 +CONFIG_SENSORS_ADT7462=m
5009 +CONFIG_SENSORS_ADT7470=m
5010 +CONFIG_SENSORS_ADT7475=m
5011 +CONFIG_SENSORS_AHT10=m
5012 +CONFIG_SENSORS_AS370=m
5013 +CONFIG_SENSORS_ASC7621=m
5014 +CONFIG_SENSORS_AXI_FAN_CONTROL=m
5015 +CONFIG_SENSORS_K8TEMP=m
5016 +CONFIG_SENSORS_K10TEMP=m
5017 +CONFIG_SENSORS_FAM15H_POWER=m
5018 +CONFIG_SENSORS_AMD_ENERGY=m
5019 +CONFIG_SENSORS_APPLESMC=m
5020 +CONFIG_SENSORS_ASB100=m
5021 +CONFIG_SENSORS_ASPEED=m
5022 +CONFIG_SENSORS_ATXP1=m
5023 +CONFIG_SENSORS_CORSAIR_CPRO=m
5024 +CONFIG_SENSORS_CORSAIR_PSU=m
5025 +CONFIG_SENSORS_DRIVETEMP=m
5026 +CONFIG_SENSORS_DS620=m
5027 +CONFIG_SENSORS_DS1621=m
5028 +CONFIG_SENSORS_DELL_SMM=m
5029 +CONFIG_SENSORS_DA9052_ADC=m
5030 +CONFIG_SENSORS_DA9055=m
5031 +CONFIG_SENSORS_I5K_AMB=m
5032 +CONFIG_SENSORS_F71805F=m
5033 +CONFIG_SENSORS_F71882FG=m
5034 +CONFIG_SENSORS_F75375S=m
5035 +CONFIG_SENSORS_MC13783_ADC=m
5036 +CONFIG_SENSORS_FSCHMD=m
5037 +CONFIG_SENSORS_FTSTEUTATES=m
5038 +CONFIG_SENSORS_GL518SM=m
5039 +CONFIG_SENSORS_GL520SM=m
5040 +CONFIG_SENSORS_G760A=m
5041 +CONFIG_SENSORS_G762=m
5042 +CONFIG_SENSORS_HIH6130=m
5043 +CONFIG_SENSORS_IBMAEM=m
5044 +CONFIG_SENSORS_IBMPEX=m
5045 +CONFIG_SENSORS_IIO_HWMON=m
5046 +CONFIG_SENSORS_I5500=m
5047 +CONFIG_SENSORS_CORETEMP=m
5048 +CONFIG_SENSORS_IT87=m
5049 +CONFIG_SENSORS_JC42=m
5050 +CONFIG_SENSORS_POWR1220=m
5051 +CONFIG_SENSORS_LINEAGE=m
5052 +CONFIG_SENSORS_LTC2945=m
5053 +CONFIG_SENSORS_LTC2947=m
5054 +CONFIG_SENSORS_LTC2947_I2C=m
5055 +CONFIG_SENSORS_LTC2947_SPI=m
5056 +CONFIG_SENSORS_LTC2990=m
5057 +CONFIG_SENSORS_LTC2992=m
5058 +CONFIG_SENSORS_LTC4151=m
5059 +CONFIG_SENSORS_LTC4215=m
5060 +CONFIG_SENSORS_LTC4222=m
5061 +CONFIG_SENSORS_LTC4245=m
5062 +CONFIG_SENSORS_LTC4260=m
5063 +CONFIG_SENSORS_LTC4261=m
5064 +CONFIG_SENSORS_MAX1111=m
5065 +CONFIG_SENSORS_MAX127=m
5066 +CONFIG_SENSORS_MAX16065=m
5067 +CONFIG_SENSORS_MAX1619=m
5068 +CONFIG_SENSORS_MAX1668=m
5069 +CONFIG_SENSORS_MAX197=m
5070 +CONFIG_SENSORS_MAX31722=m
5071 +CONFIG_SENSORS_MAX31730=m
5072 +CONFIG_SENSORS_MAX6621=m
5073 +CONFIG_SENSORS_MAX6639=m
5074 +CONFIG_SENSORS_MAX6642=m
5075 +CONFIG_SENSORS_MAX6650=m
5076 +CONFIG_SENSORS_MAX6697=m
5077 +CONFIG_SENSORS_MAX31790=m
5078 +CONFIG_SENSORS_MCP3021=m
5079 +CONFIG_SENSORS_MLXREG_FAN=m
5080 +CONFIG_SENSORS_TC654=m
5081 +CONFIG_SENSORS_TPS23861=m
5082 +CONFIG_SENSORS_MENF21BMC_HWMON=m
5083 +CONFIG_SENSORS_MR75203=m
5084 +CONFIG_SENSORS_ADCXX=m
5085 +CONFIG_SENSORS_LM63=m
5086 +CONFIG_SENSORS_LM70=m
5087 +CONFIG_SENSORS_LM73=m
5088 +CONFIG_SENSORS_LM75=m
5089 +CONFIG_SENSORS_LM77=m
5090 +CONFIG_SENSORS_LM78=m
5091 +CONFIG_SENSORS_LM80=m
5092 +CONFIG_SENSORS_LM83=m
5093 +CONFIG_SENSORS_LM85=m
5094 +CONFIG_SENSORS_LM87=m
5095 +CONFIG_SENSORS_LM90=m
5096 +CONFIG_SENSORS_LM92=m
5097 +CONFIG_SENSORS_LM93=m
5098 +CONFIG_SENSORS_LM95234=m
5099 +CONFIG_SENSORS_LM95241=m
5100 +CONFIG_SENSORS_LM95245=m
5101 +CONFIG_SENSORS_PC87360=m
5102 +CONFIG_SENSORS_PC87427=m
5103 +CONFIG_SENSORS_NTC_THERMISTOR=m
5104 +CONFIG_SENSORS_NCT6683=m
5105 +CONFIG_SENSORS_NCT6775=m
5106 +CONFIG_SENSORS_NCT7802=m
5107 +CONFIG_SENSORS_NCT7904=m
5108 +CONFIG_SENSORS_NPCM7XX=m
5109 +CONFIG_SENSORS_PCF8591=m
5110 +CONFIG_PMBUS=m
5111 +CONFIG_SENSORS_PMBUS=m
5112 +CONFIG_SENSORS_ADM1266=m
5113 +CONFIG_SENSORS_ADM1275=m
5114 +CONFIG_SENSORS_BEL_PFE=m
5115 +CONFIG_SENSORS_IBM_CFFPS=m
5116 +CONFIG_SENSORS_INSPUR_IPSPS=m
5117 +CONFIG_SENSORS_IR35221=m
5118 +CONFIG_SENSORS_IR38064=m
5119 +CONFIG_SENSORS_IRPS5401=m
5120 +CONFIG_SENSORS_ISL68137=m
5121 +CONFIG_SENSORS_LM25066=m
5122 +CONFIG_SENSORS_LTC2978=m
5123 +CONFIG_SENSORS_LTC2978_REGULATOR=y
5124 +CONFIG_SENSORS_LTC3815=m
5125 +CONFIG_SENSORS_MAX16064=m
5126 +CONFIG_SENSORS_MAX16601=m
5127 +CONFIG_SENSORS_MAX20730=m
5128 +CONFIG_SENSORS_MAX20751=m
5129 +CONFIG_SENSORS_MAX31785=m
5130 +CONFIG_SENSORS_MAX34440=m
5131 +CONFIG_SENSORS_MAX8688=m
5132 +CONFIG_SENSORS_MP2975=m
5133 +CONFIG_SENSORS_PM6764TR=m
5134 +CONFIG_SENSORS_PXE1610=m
5135 +CONFIG_SENSORS_Q54SJ108A2=m
5136 +CONFIG_SENSORS_TPS40422=m
5137 +CONFIG_SENSORS_TPS53679=m
5138 +CONFIG_SENSORS_UCD9000=m
5139 +CONFIG_SENSORS_UCD9200=m
5140 +CONFIG_SENSORS_XDPE122=m
5141 +CONFIG_SENSORS_ZL6100=m
5142 +CONFIG_SENSORS_SBTSI=m
5143 +CONFIG_SENSORS_SHT15=m
5144 +CONFIG_SENSORS_SHT21=m
5145 +CONFIG_SENSORS_SHT3x=m
5146 +CONFIG_SENSORS_SHTC1=m
5147 +CONFIG_SENSORS_SIS5595=m
5148 +CONFIG_SENSORS_DME1737=m
5149 +CONFIG_SENSORS_EMC1403=m
5150 +CONFIG_SENSORS_EMC2103=m
5151 +CONFIG_SENSORS_EMC6W201=m
5152 +CONFIG_SENSORS_SMSC47M1=m
5153 +CONFIG_SENSORS_SMSC47M192=m
5154 +CONFIG_SENSORS_SMSC47B397=m
5155 +CONFIG_SENSORS_SCH56XX_COMMON=m
5156 +CONFIG_SENSORS_SCH5627=m
5157 +CONFIG_SENSORS_SCH5636=m
5158 +CONFIG_SENSORS_STTS751=m
5159 +CONFIG_SENSORS_SMM665=m
5160 +CONFIG_SENSORS_ADC128D818=m
5161 +CONFIG_SENSORS_ADS7828=m
5162 +CONFIG_SENSORS_ADS7871=m
5163 +CONFIG_SENSORS_AMC6821=m
5164 +CONFIG_SENSORS_INA209=m
5165 +CONFIG_SENSORS_INA2XX=m
5166 +CONFIG_SENSORS_INA3221=m
5167 +CONFIG_SENSORS_TC74=m
5168 +CONFIG_SENSORS_THMC50=m
5169 +CONFIG_SENSORS_TMP102=m
5170 +CONFIG_SENSORS_TMP103=m
5171 +CONFIG_SENSORS_TMP108=m
5172 +CONFIG_SENSORS_TMP401=m
5173 +CONFIG_SENSORS_TMP421=m
5174 +CONFIG_SENSORS_TMP513=m
5175 +CONFIG_SENSORS_VIA_CPUTEMP=m
5176 +CONFIG_SENSORS_VIA686A=m
5177 +CONFIG_SENSORS_VT1211=m
5178 +CONFIG_SENSORS_VT8231=m
5179 +CONFIG_SENSORS_W83773G=m
5180 +CONFIG_SENSORS_W83781D=m
5181 +CONFIG_SENSORS_W83791D=m
5182 +CONFIG_SENSORS_W83792D=m
5183 +CONFIG_SENSORS_W83793=m
5184 +CONFIG_SENSORS_W83795=m
5185 +# CONFIG_SENSORS_W83795_FANCTRL is not set
5186 +CONFIG_SENSORS_W83L785TS=m
5187 +CONFIG_SENSORS_W83L786NG=m
5188 +CONFIG_SENSORS_W83627HF=m
5189 +CONFIG_SENSORS_W83627EHF=m
5190 +CONFIG_SENSORS_WM831X=m
5191 +CONFIG_SENSORS_WM8350=m
5192 +CONFIG_SENSORS_XGENE=m
5193 +CONFIG_SENSORS_INTEL_M10_BMC_HWMON=m
5194 +
5195 +#
5196 +# ACPI drivers
5197 +#
5198 +CONFIG_SENSORS_ACPI_POWER=m
5199 +CONFIG_SENSORS_ATK0110=m
5200 +CONFIG_THERMAL=y
5201 +CONFIG_THERMAL_NETLINK=y
5202 +CONFIG_THERMAL_STATISTICS=y
5203 +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
5204 +CONFIG_THERMAL_HWMON=y
5205 +CONFIG_THERMAL_WRITABLE_TRIPS=y
5206 +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
5207 +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
5208 +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
5209 +# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set
5210 +CONFIG_THERMAL_GOV_FAIR_SHARE=y
5211 +CONFIG_THERMAL_GOV_STEP_WISE=y
5212 +CONFIG_THERMAL_GOV_BANG_BANG=y
5213 +CONFIG_THERMAL_GOV_USER_SPACE=y
5214 +CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
5215 +CONFIG_DEVFREQ_THERMAL=y
5216 +CONFIG_THERMAL_EMULATION=y
5217 +
5218 +#
5219 +# Intel thermal drivers
5220 +#
5221 +CONFIG_INTEL_POWERCLAMP=m
5222 +CONFIG_X86_THERMAL_VECTOR=y
5223 +CONFIG_X86_PKG_TEMP_THERMAL=m
5224 +CONFIG_INTEL_SOC_DTS_IOSF_CORE=m
5225 +CONFIG_INTEL_SOC_DTS_THERMAL=m
5226 +
5227 +#
5228 +# ACPI INT340X thermal drivers
5229 +#
5230 +CONFIG_INT340X_THERMAL=m
5231 +CONFIG_ACPI_THERMAL_REL=m
5232 +CONFIG_INT3406_THERMAL=m
5233 +CONFIG_PROC_THERMAL_MMIO_RAPL=m
5234 +# end of ACPI INT340X thermal drivers
5235 +
5236 +CONFIG_INTEL_BXT_PMIC_THERMAL=m
5237 +CONFIG_INTEL_PCH_THERMAL=m
5238 +# end of Intel thermal drivers
5239 +
5240 +CONFIG_GENERIC_ADC_THERMAL=m
5241 +CONFIG_WATCHDOG=y
5242 +CONFIG_WATCHDOG_CORE=y
5243 +# CONFIG_WATCHDOG_NOWAYOUT is not set
5244 +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y
5245 +CONFIG_WATCHDOG_OPEN_TIMEOUT=0
5246 +CONFIG_WATCHDOG_SYSFS=y
5247 +
5248 +#
5249 +# Watchdog Pretimeout Governors
5250 +#
5251 +CONFIG_WATCHDOG_PRETIMEOUT_GOV=y
5252 +CONFIG_WATCHDOG_PRETIMEOUT_GOV_SEL=m
5253 +CONFIG_WATCHDOG_PRETIMEOUT_GOV_NOOP=y
5254 +CONFIG_WATCHDOG_PRETIMEOUT_GOV_PANIC=m
5255 +CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP=y
5256 +# CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC is not set
5257 +
5258 +#
5259 +# Watchdog Device Drivers
5260 +#
5261 +CONFIG_SOFT_WATCHDOG=m
5262 +CONFIG_SOFT_WATCHDOG_PRETIMEOUT=y
5263 +CONFIG_DA9052_WATCHDOG=m
5264 +CONFIG_DA9055_WATCHDOG=m
5265 +CONFIG_DA9063_WATCHDOG=m
5266 +CONFIG_DA9062_WATCHDOG=m
5267 +CONFIG_MENF21BMC_WATCHDOG=m
5268 +CONFIG_MENZ069_WATCHDOG=m
5269 +CONFIG_WDAT_WDT=m
5270 +CONFIG_WM831X_WATCHDOG=m
5271 +CONFIG_WM8350_WATCHDOG=m
5272 +CONFIG_XILINX_WATCHDOG=m
5273 +CONFIG_ZIIRAVE_WATCHDOG=m
5274 +CONFIG_RAVE_SP_WATCHDOG=m
5275 +CONFIG_MLX_WDT=m
5276 +CONFIG_CADENCE_WATCHDOG=m
5277 +CONFIG_DW_WATCHDOG=m
5278 +CONFIG_TWL4030_WATCHDOG=m
5279 +CONFIG_MAX63XX_WATCHDOG=m
5280 +CONFIG_RETU_WATCHDOG=m
5281 +CONFIG_ACQUIRE_WDT=m
5282 +CONFIG_ADVANTECH_WDT=m
5283 +CONFIG_ALIM1535_WDT=m
5284 +CONFIG_ALIM7101_WDT=m
5285 +CONFIG_EBC_C384_WDT=m
5286 +CONFIG_F71808E_WDT=m
5287 +CONFIG_SP5100_TCO=m
5288 +CONFIG_SBC_FITPC2_WATCHDOG=m
5289 +CONFIG_EUROTECH_WDT=m
5290 +CONFIG_IB700_WDT=m
5291 +CONFIG_IBMASR=m
5292 +CONFIG_WAFER_WDT=m
5293 +CONFIG_I6300ESB_WDT=m
5294 +CONFIG_IE6XX_WDT=m
5295 +CONFIG_ITCO_WDT=m
5296 +CONFIG_ITCO_VENDOR_SUPPORT=y
5297 +CONFIG_IT8712F_WDT=m
5298 +CONFIG_IT87_WDT=m
5299 +CONFIG_HP_WATCHDOG=m
5300 +CONFIG_HPWDT_NMI_DECODING=y
5301 +CONFIG_KEMPLD_WDT=m
5302 +CONFIG_SC1200_WDT=m
5303 +CONFIG_PC87413_WDT=m
5304 +CONFIG_NV_TCO=m
5305 +CONFIG_60XX_WDT=m
5306 +CONFIG_CPU5_WDT=m
5307 +CONFIG_SMSC_SCH311X_WDT=m
5308 +CONFIG_SMSC37B787_WDT=m
5309 +CONFIG_TQMX86_WDT=m
5310 +CONFIG_VIA_WDT=m
5311 +CONFIG_W83627HF_WDT=m
5312 +CONFIG_W83877F_WDT=m
5313 +CONFIG_W83977F_WDT=m
5314 +CONFIG_MACHZ_WDT=m
5315 +CONFIG_SBC_EPX_C3_WATCHDOG=m
5316 +CONFIG_INTEL_MEI_WDT=m
5317 +CONFIG_NI903X_WDT=m
5318 +CONFIG_NIC7018_WDT=m
5319 +CONFIG_MEN_A21_WDT=m
5320 +CONFIG_XEN_WDT=m
5321 +
5322 +#
5323 +# PCI-based Watchdog Cards
5324 +#
5325 +CONFIG_PCIPCWATCHDOG=m
5326 +CONFIG_WDTPCI=m
5327 +
5328 +#
5329 +# USB-based Watchdog Cards
5330 +#
5331 +CONFIG_USBPCWATCHDOG=m
5332 +CONFIG_SSB_POSSIBLE=y
5333 +CONFIG_SSB=m
5334 +CONFIG_SSB_SPROM=y
5335 +CONFIG_SSB_BLOCKIO=y
5336 +CONFIG_SSB_PCIHOST_POSSIBLE=y
5337 +CONFIG_SSB_PCIHOST=y
5338 +CONFIG_SSB_B43_PCI_BRIDGE=y
5339 +CONFIG_SSB_PCMCIAHOST_POSSIBLE=y
5340 +# CONFIG_SSB_PCMCIAHOST is not set
5341 +CONFIG_SSB_SDIOHOST_POSSIBLE=y
5342 +CONFIG_SSB_SDIOHOST=y
5343 +CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
5344 +CONFIG_SSB_DRIVER_PCICORE=y
5345 +CONFIG_SSB_DRIVER_GPIO=y
5346 +CONFIG_BCMA_POSSIBLE=y
5347 +CONFIG_BCMA=m
5348 +CONFIG_BCMA_BLOCKIO=y
5349 +CONFIG_BCMA_HOST_PCI_POSSIBLE=y
5350 +CONFIG_BCMA_HOST_PCI=y
5351 +CONFIG_BCMA_HOST_SOC=y
5352 +CONFIG_BCMA_DRIVER_PCI=y
5353 +CONFIG_BCMA_SFLASH=y
5354 +CONFIG_BCMA_DRIVER_GMAC_CMN=y
5355 +CONFIG_BCMA_DRIVER_GPIO=y
5356 +# CONFIG_BCMA_DEBUG is not set
5357 +
5358 +#
5359 +# Multifunction device drivers
5360 +#
5361 +CONFIG_MFD_CORE=y
5362 +CONFIG_MFD_AS3711=y
5363 +CONFIG_PMIC_ADP5520=y
5364 +CONFIG_MFD_AAT2870_CORE=y
5365 +CONFIG_MFD_BCM590XX=m
5366 +CONFIG_MFD_BD9571MWV=m
5367 +CONFIG_MFD_AXP20X=m
5368 +CONFIG_MFD_AXP20X_I2C=m
5369 +CONFIG_MFD_CROS_EC_DEV=m
5370 +CONFIG_MFD_MADERA=m
5371 +CONFIG_MFD_MADERA_I2C=m
5372 +CONFIG_MFD_MADERA_SPI=m
5373 +CONFIG_MFD_CS47L15=y
5374 +CONFIG_MFD_CS47L35=y
5375 +CONFIG_MFD_CS47L85=y
5376 +CONFIG_MFD_CS47L90=y
5377 +CONFIG_MFD_CS47L92=y
5378 +CONFIG_PMIC_DA903X=y
5379 +CONFIG_PMIC_DA9052=y
5380 +CONFIG_MFD_DA9052_SPI=y
5381 +CONFIG_MFD_DA9052_I2C=y
5382 +CONFIG_MFD_DA9055=y
5383 +CONFIG_MFD_DA9062=m
5384 +CONFIG_MFD_DA9063=y
5385 +CONFIG_MFD_DA9150=m
5386 +CONFIG_MFD_DLN2=m
5387 +CONFIG_MFD_MC13XXX=m
5388 +CONFIG_MFD_MC13XXX_SPI=m
5389 +CONFIG_MFD_MC13XXX_I2C=m
5390 +CONFIG_MFD_MP2629=m
5391 +CONFIG_HTC_PASIC3=m
5392 +CONFIG_HTC_I2CPLD=y
5393 +CONFIG_MFD_INTEL_QUARK_I2C_GPIO=m
5394 +CONFIG_LPC_ICH=m
5395 +CONFIG_LPC_SCH=m
5396 +CONFIG_INTEL_SOC_PMIC=y
5397 +CONFIG_INTEL_SOC_PMIC_BXTWC=m
5398 +CONFIG_INTEL_SOC_PMIC_CHTWC=y
5399 +CONFIG_INTEL_SOC_PMIC_CHTDC_TI=m
5400 +CONFIG_INTEL_SOC_PMIC_MRFLD=m
5401 +CONFIG_MFD_INTEL_LPSS=m
5402 +CONFIG_MFD_INTEL_LPSS_ACPI=m
5403 +CONFIG_MFD_INTEL_LPSS_PCI=m
5404 +CONFIG_MFD_INTEL_PMC_BXT=m
5405 +CONFIG_MFD_INTEL_PMT=m
5406 +CONFIG_MFD_IQS62X=m
5407 +CONFIG_MFD_JANZ_CMODIO=m
5408 +CONFIG_MFD_KEMPLD=m
5409 +CONFIG_MFD_88PM800=m
5410 +CONFIG_MFD_88PM805=m
5411 +CONFIG_MFD_88PM860X=y
5412 +CONFIG_MFD_MAX14577=y
5413 +CONFIG_MFD_MAX77693=y
5414 +CONFIG_MFD_MAX77843=y
5415 +CONFIG_MFD_MAX8907=m
5416 +CONFIG_MFD_MAX8925=y
5417 +CONFIG_MFD_MAX8997=y
5418 +CONFIG_MFD_MAX8998=y
5419 +CONFIG_MFD_MT6360=m
5420 +CONFIG_MFD_MT6397=m
5421 +CONFIG_MFD_MENF21BMC=m
5422 +CONFIG_EZX_PCAP=y
5423 +CONFIG_MFD_VIPERBOARD=m
5424 +CONFIG_MFD_RETU=m
5425 +CONFIG_MFD_PCF50633=m
5426 +CONFIG_PCF50633_ADC=m
5427 +CONFIG_PCF50633_GPIO=m
5428 +CONFIG_UCB1400_CORE=m
5429 +CONFIG_MFD_RDC321X=m
5430 +CONFIG_MFD_RT5033=m
5431 +CONFIG_MFD_RC5T583=y
5432 +CONFIG_MFD_SEC_CORE=y
5433 +CONFIG_MFD_SI476X_CORE=m
5434 +CONFIG_MFD_SM501=m
5435 +CONFIG_MFD_SM501_GPIO=y
5436 +CONFIG_MFD_SKY81452=m
5437 +CONFIG_ABX500_CORE=y
5438 +CONFIG_AB3100_CORE=y
5439 +CONFIG_AB3100_OTP=m
5440 +CONFIG_MFD_SYSCON=y
5441 +CONFIG_MFD_TI_AM335X_TSCADC=m
5442 +CONFIG_MFD_LP3943=m
5443 +CONFIG_MFD_LP8788=y
5444 +CONFIG_MFD_TI_LMU=m
5445 +CONFIG_MFD_PALMAS=y
5446 +CONFIG_TPS6105X=m
5447 +CONFIG_TPS65010=m
5448 +CONFIG_TPS6507X=m
5449 +CONFIG_MFD_TPS65086=m
5450 +CONFIG_MFD_TPS65090=y
5451 +CONFIG_MFD_TPS68470=y
5452 +CONFIG_MFD_TI_LP873X=m
5453 +CONFIG_MFD_TPS6586X=y
5454 +CONFIG_MFD_TPS65910=y
5455 +CONFIG_MFD_TPS65912=y
5456 +CONFIG_MFD_TPS65912_I2C=y
5457 +CONFIG_MFD_TPS65912_SPI=y
5458 +CONFIG_MFD_TPS80031=y
5459 +CONFIG_TWL4030_CORE=y
5460 +CONFIG_MFD_TWL4030_AUDIO=y
5461 +CONFIG_TWL6040_CORE=y
5462 +CONFIG_MFD_WL1273_CORE=m
5463 +CONFIG_MFD_LM3533=m
5464 +CONFIG_MFD_TQMX86=m
5465 +CONFIG_MFD_VX855=m
5466 +CONFIG_MFD_ARIZONA=y
5467 +CONFIG_MFD_ARIZONA_I2C=m
5468 +CONFIG_MFD_ARIZONA_SPI=m
5469 +CONFIG_MFD_CS47L24=y
5470 +CONFIG_MFD_WM5102=y
5471 +CONFIG_MFD_WM5110=y
5472 +CONFIG_MFD_WM8997=y
5473 +CONFIG_MFD_WM8998=y
5474 +CONFIG_MFD_WM8400=y
5475 +CONFIG_MFD_WM831X=y
5476 +CONFIG_MFD_WM831X_I2C=y
5477 +CONFIG_MFD_WM831X_SPI=y
5478 +CONFIG_MFD_WM8350=y
5479 +CONFIG_MFD_WM8350_I2C=y
5480 +CONFIG_MFD_WM8994=m
5481 +CONFIG_MFD_WCD934X=m
5482 +CONFIG_RAVE_SP_CORE=m
5483 +CONFIG_MFD_INTEL_M10_BMC=m
5484 +# end of Multifunction device drivers
5485 +
5486 +CONFIG_REGULATOR=y
5487 +# CONFIG_REGULATOR_DEBUG is not set
5488 +CONFIG_REGULATOR_FIXED_VOLTAGE=m
5489 +CONFIG_REGULATOR_VIRTUAL_CONSUMER=m
5490 +CONFIG_REGULATOR_USERSPACE_CONSUMER=m
5491 +CONFIG_REGULATOR_88PG86X=m
5492 +CONFIG_REGULATOR_88PM800=m
5493 +CONFIG_REGULATOR_88PM8607=m
5494 +CONFIG_REGULATOR_ACT8865=m
5495 +CONFIG_REGULATOR_AD5398=m
5496 +CONFIG_REGULATOR_AAT2870=m
5497 +CONFIG_REGULATOR_ARIZONA_LDO1=m
5498 +CONFIG_REGULATOR_ARIZONA_MICSUPP=m
5499 +CONFIG_REGULATOR_AS3711=m
5500 +CONFIG_REGULATOR_AXP20X=m
5501 +CONFIG_REGULATOR_BCM590XX=m
5502 +CONFIG_REGULATOR_BD9571MWV=m
5503 +CONFIG_REGULATOR_DA903X=m
5504 +CONFIG_REGULATOR_DA9052=m
5505 +CONFIG_REGULATOR_DA9055=m
5506 +CONFIG_REGULATOR_DA9062=m
5507 +CONFIG_REGULATOR_DA9210=m
5508 +CONFIG_REGULATOR_DA9211=m
5509 +CONFIG_REGULATOR_FAN53555=m
5510 +CONFIG_REGULATOR_GPIO=m
5511 +CONFIG_REGULATOR_ISL9305=m
5512 +CONFIG_REGULATOR_ISL6271A=m
5513 +CONFIG_REGULATOR_LM363X=m
5514 +CONFIG_REGULATOR_LP3971=m
5515 +CONFIG_REGULATOR_LP3972=m
5516 +CONFIG_REGULATOR_LP872X=m
5517 +CONFIG_REGULATOR_LP8755=m
5518 +CONFIG_REGULATOR_LP8788=m
5519 +CONFIG_REGULATOR_LTC3589=m
5520 +CONFIG_REGULATOR_LTC3676=m
5521 +CONFIG_REGULATOR_MAX14577=m
5522 +CONFIG_REGULATOR_MAX1586=m
5523 +CONFIG_REGULATOR_MAX8649=m
5524 +CONFIG_REGULATOR_MAX8660=m
5525 +CONFIG_REGULATOR_MAX8907=m
5526 +CONFIG_REGULATOR_MAX8925=m
5527 +CONFIG_REGULATOR_MAX8952=m
5528 +CONFIG_REGULATOR_MAX8997=m
5529 +CONFIG_REGULATOR_MAX8998=m
5530 +CONFIG_REGULATOR_MAX77693=m
5531 +CONFIG_REGULATOR_MAX77826=m
5532 +CONFIG_REGULATOR_MC13XXX_CORE=m
5533 +CONFIG_REGULATOR_MC13783=m
5534 +CONFIG_REGULATOR_MC13892=m
5535 +CONFIG_REGULATOR_MP8859=m
5536 +CONFIG_REGULATOR_MT6311=m
5537 +CONFIG_REGULATOR_MT6315=m
5538 +CONFIG_REGULATOR_MT6323=m
5539 +CONFIG_REGULATOR_MT6358=m
5540 +CONFIG_REGULATOR_MT6360=m
5541 +CONFIG_REGULATOR_MT6397=m
5542 +CONFIG_REGULATOR_PALMAS=m
5543 +CONFIG_REGULATOR_PCA9450=m
5544 +CONFIG_REGULATOR_PCAP=m
5545 +CONFIG_REGULATOR_PCF50633=m
5546 +CONFIG_REGULATOR_PV88060=m
5547 +CONFIG_REGULATOR_PV88080=m
5548 +CONFIG_REGULATOR_PV88090=m
5549 +CONFIG_REGULATOR_PWM=m
5550 +CONFIG_REGULATOR_QCOM_SPMI=m
5551 +CONFIG_REGULATOR_QCOM_USB_VBUS=m
5552 +CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY=m
5553 +CONFIG_REGULATOR_RC5T583=m
5554 +CONFIG_REGULATOR_RT4801=m
5555 +CONFIG_REGULATOR_RT5033=m
5556 +CONFIG_REGULATOR_RTMV20=m
5557 +CONFIG_REGULATOR_S2MPA01=m
5558 +CONFIG_REGULATOR_S2MPS11=m
5559 +CONFIG_REGULATOR_S5M8767=m
5560 +CONFIG_REGULATOR_SKY81452=m
5561 +CONFIG_REGULATOR_SLG51000=m
5562 +CONFIG_REGULATOR_TPS51632=m
5563 +CONFIG_REGULATOR_TPS6105X=m
5564 +CONFIG_REGULATOR_TPS62360=m
5565 +CONFIG_REGULATOR_TPS65023=m
5566 +CONFIG_REGULATOR_TPS6507X=m
5567 +CONFIG_REGULATOR_TPS65086=m
5568 +CONFIG_REGULATOR_TPS65090=m
5569 +CONFIG_REGULATOR_TPS65132=m
5570 +CONFIG_REGULATOR_TPS6524X=m
5571 +CONFIG_REGULATOR_TPS6586X=m
5572 +CONFIG_REGULATOR_TPS65910=m
5573 +CONFIG_REGULATOR_TPS65912=m
5574 +CONFIG_REGULATOR_TPS80031=m
5575 +CONFIG_REGULATOR_TWL4030=m
5576 +CONFIG_REGULATOR_WM831X=m
5577 +CONFIG_REGULATOR_WM8350=m
5578 +CONFIG_REGULATOR_WM8400=m
5579 +CONFIG_REGULATOR_WM8994=m
5580 +CONFIG_REGULATOR_QCOM_LABIBB=m
5581 +CONFIG_RC_CORE=m
5582 +CONFIG_RC_MAP=m
5583 +CONFIG_LIRC=y
5584 +CONFIG_RC_DECODERS=y
5585 +CONFIG_IR_NEC_DECODER=m
5586 +CONFIG_IR_RC5_DECODER=m
5587 +CONFIG_IR_RC6_DECODER=m
5588 +CONFIG_IR_JVC_DECODER=m
5589 +CONFIG_IR_SONY_DECODER=m
5590 +CONFIG_IR_SANYO_DECODER=m
5591 +CONFIG_IR_SHARP_DECODER=m
5592 +CONFIG_IR_MCE_KBD_DECODER=m
5593 +CONFIG_IR_XMP_DECODER=m
5594 +CONFIG_IR_IMON_DECODER=m
5595 +CONFIG_IR_RCMM_DECODER=m
5596 +CONFIG_RC_DEVICES=y
5597 +CONFIG_RC_ATI_REMOTE=m
5598 +CONFIG_IR_ENE=m
5599 +CONFIG_IR_IMON=m
5600 +CONFIG_IR_IMON_RAW=m
5601 +CONFIG_IR_MCEUSB=m
5602 +CONFIG_IR_ITE_CIR=m
5603 +CONFIG_IR_FINTEK=m
5604 +CONFIG_IR_NUVOTON=m
5605 +CONFIG_IR_REDRAT3=m
5606 +CONFIG_IR_STREAMZAP=m
5607 +CONFIG_IR_WINBOND_CIR=m
5608 +CONFIG_IR_IGORPLUGUSB=m
5609 +CONFIG_IR_IGUANA=m
5610 +CONFIG_IR_TTUSBIR=m
5611 +CONFIG_RC_LOOPBACK=m
5612 +CONFIG_IR_SERIAL=m
5613 +CONFIG_IR_SERIAL_TRANSMITTER=y
5614 +CONFIG_IR_SIR=m
5615 +CONFIG_RC_XBOX_DVD=m
5616 +CONFIG_IR_TOY=m
5617 +CONFIG_CEC_CORE=m
5618 +CONFIG_CEC_NOTIFIER=y
5619 +CONFIG_CEC_PIN=y
5620 +CONFIG_MEDIA_CEC_RC=y
5621 +# CONFIG_CEC_PIN_ERROR_INJ is not set
5622 +CONFIG_MEDIA_CEC_SUPPORT=y
5623 +CONFIG_CEC_CH7322=m
5624 +CONFIG_CEC_CROS_EC=m
5625 +CONFIG_CEC_GPIO=m
5626 +CONFIG_CEC_SECO=m
5627 +CONFIG_CEC_SECO_RC=y
5628 +CONFIG_USB_PULSE8_CEC=m
5629 +CONFIG_USB_RAINSHADOW_CEC=m
5630 +CONFIG_MEDIA_SUPPORT=m
5631 +CONFIG_MEDIA_SUPPORT_FILTER=y
5632 +CONFIG_MEDIA_SUBDRV_AUTOSELECT=y
5633 +
5634 +#
5635 +# Media device types
5636 +#
5637 +CONFIG_MEDIA_CAMERA_SUPPORT=y
5638 +CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
5639 +CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
5640 +CONFIG_MEDIA_RADIO_SUPPORT=y
5641 +CONFIG_MEDIA_SDR_SUPPORT=y
5642 +CONFIG_MEDIA_PLATFORM_SUPPORT=y
5643 +CONFIG_MEDIA_TEST_SUPPORT=y
5644 +# end of Media device types
5645 +
5646 +CONFIG_VIDEO_DEV=m
5647 +CONFIG_MEDIA_CONTROLLER=y
5648 +CONFIG_DVB_CORE=m
5649 +
5650 +#
5651 +# Video4Linux options
5652 +#
5653 +CONFIG_VIDEO_V4L2=m
5654 +CONFIG_VIDEO_V4L2_I2C=y
5655 +CONFIG_VIDEO_V4L2_SUBDEV_API=y
5656 +# CONFIG_VIDEO_ADV_DEBUG is not set
5657 +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
5658 +CONFIG_VIDEO_TUNER=m
5659 +CONFIG_V4L2_MEM2MEM_DEV=m
5660 +CONFIG_V4L2_FLASH_LED_CLASS=m
5661 +CONFIG_V4L2_FWNODE=m
5662 +CONFIG_VIDEOBUF_GEN=m
5663 +CONFIG_VIDEOBUF_DMA_SG=m
5664 +CONFIG_VIDEOBUF_VMALLOC=m
5665 +# end of Video4Linux options
5666 +
5667 +#
5668 +# Media controller options
5669 +#
5670 +CONFIG_MEDIA_CONTROLLER_DVB=y
5671 +CONFIG_MEDIA_CONTROLLER_REQUEST_API=y
5672 +
5673 +#
5674 +# Please notice that the enabled Media controller Request API is EXPERIMENTAL
5675 +#
5676 +# end of Media controller options
5677 +
5678 +#
5679 +# Digital TV options
5680 +#
5681 +# CONFIG_DVB_MMAP is not set
5682 +CONFIG_DVB_NET=y
5683 +CONFIG_DVB_MAX_ADAPTERS=8
5684 +CONFIG_DVB_DYNAMIC_MINORS=y
5685 +# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set
5686 +# CONFIG_DVB_ULE_DEBUG is not set
5687 +# end of Digital TV options
5688 +
5689 +#
5690 +# Media drivers
5691 +#
5692 +
5693 +#
5694 +# Drivers filtered as selected at 'Filter media drivers'
5695 +#
5696 +CONFIG_TTPCI_EEPROM=m
5697 +CONFIG_MEDIA_USB_SUPPORT=y
5698 +
5699 +#
5700 +# Webcam devices
5701 +#
5702 +CONFIG_USB_VIDEO_CLASS=m
5703 +CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
5704 +CONFIG_USB_GSPCA=m
5705 +CONFIG_USB_M5602=m
5706 +CONFIG_USB_STV06XX=m
5707 +CONFIG_USB_GL860=m
5708 +CONFIG_USB_GSPCA_BENQ=m
5709 +CONFIG_USB_GSPCA_CONEX=m
5710 +CONFIG_USB_GSPCA_CPIA1=m
5711 +CONFIG_USB_GSPCA_DTCS033=m
5712 +CONFIG_USB_GSPCA_ETOMS=m
5713 +CONFIG_USB_GSPCA_FINEPIX=m
5714 +CONFIG_USB_GSPCA_JEILINJ=m
5715 +CONFIG_USB_GSPCA_JL2005BCD=m
5716 +CONFIG_USB_GSPCA_KINECT=m
5717 +CONFIG_USB_GSPCA_KONICA=m
5718 +CONFIG_USB_GSPCA_MARS=m
5719 +CONFIG_USB_GSPCA_MR97310A=m
5720 +CONFIG_USB_GSPCA_NW80X=m
5721 +CONFIG_USB_GSPCA_OV519=m
5722 +CONFIG_USB_GSPCA_OV534=m
5723 +CONFIG_USB_GSPCA_OV534_9=m
5724 +CONFIG_USB_GSPCA_PAC207=m
5725 +CONFIG_USB_GSPCA_PAC7302=m
5726 +CONFIG_USB_GSPCA_PAC7311=m
5727 +CONFIG_USB_GSPCA_SE401=m
5728 +CONFIG_USB_GSPCA_SN9C2028=m
5729 +CONFIG_USB_GSPCA_SN9C20X=m
5730 +CONFIG_USB_GSPCA_SONIXB=m
5731 +CONFIG_USB_GSPCA_SONIXJ=m
5732 +CONFIG_USB_GSPCA_SPCA500=m
5733 +CONFIG_USB_GSPCA_SPCA501=m
5734 +CONFIG_USB_GSPCA_SPCA505=m
5735 +CONFIG_USB_GSPCA_SPCA506=m
5736 +CONFIG_USB_GSPCA_SPCA508=m
5737 +CONFIG_USB_GSPCA_SPCA561=m
5738 +CONFIG_USB_GSPCA_SPCA1528=m
5739 +CONFIG_USB_GSPCA_SQ905=m
5740 +CONFIG_USB_GSPCA_SQ905C=m
5741 +CONFIG_USB_GSPCA_SQ930X=m
5742 +CONFIG_USB_GSPCA_STK014=m
5743 +CONFIG_USB_GSPCA_STK1135=m
5744 +CONFIG_USB_GSPCA_STV0680=m
5745 +CONFIG_USB_GSPCA_SUNPLUS=m
5746 +CONFIG_USB_GSPCA_T613=m
5747 +CONFIG_USB_GSPCA_TOPRO=m
5748 +CONFIG_USB_GSPCA_TOUPTEK=m
5749 +CONFIG_USB_GSPCA_TV8532=m
5750 +CONFIG_USB_GSPCA_VC032X=m
5751 +CONFIG_USB_GSPCA_VICAM=m
5752 +CONFIG_USB_GSPCA_XIRLINK_CIT=m
5753 +CONFIG_USB_GSPCA_ZC3XX=m
5754 +CONFIG_USB_PWC=m
5755 +# CONFIG_USB_PWC_DEBUG is not set
5756 +CONFIG_USB_PWC_INPUT_EVDEV=y
5757 +CONFIG_VIDEO_CPIA2=m
5758 +CONFIG_USB_ZR364XX=m
5759 +CONFIG_USB_STKWEBCAM=m
5760 +CONFIG_USB_S2255=m
5761 +CONFIG_VIDEO_USBTV=m
5762 +
5763 +#
5764 +# Analog TV USB devices
5765 +#
5766 +CONFIG_VIDEO_PVRUSB2=m
5767 +CONFIG_VIDEO_PVRUSB2_SYSFS=y
5768 +CONFIG_VIDEO_PVRUSB2_DVB=y
5769 +# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
5770 +CONFIG_VIDEO_HDPVR=m
5771 +CONFIG_VIDEO_STK1160_COMMON=m
5772 +CONFIG_VIDEO_STK1160=m
5773 +CONFIG_VIDEO_GO7007=m
5774 +CONFIG_VIDEO_GO7007_USB=m
5775 +CONFIG_VIDEO_GO7007_LOADER=m
5776 +CONFIG_VIDEO_GO7007_USB_S2250_BOARD=m
5777 +
5778 +#
5779 +# Analog/digital TV USB devices
5780 +#
5781 +CONFIG_VIDEO_AU0828=m
5782 +CONFIG_VIDEO_AU0828_V4L2=y
5783 +CONFIG_VIDEO_AU0828_RC=y
5784 +CONFIG_VIDEO_CX231XX=m
5785 +CONFIG_VIDEO_CX231XX_RC=y
5786 +CONFIG_VIDEO_CX231XX_ALSA=m
5787 +CONFIG_VIDEO_CX231XX_DVB=m
5788 +CONFIG_VIDEO_TM6000=m
5789 +CONFIG_VIDEO_TM6000_ALSA=m
5790 +CONFIG_VIDEO_TM6000_DVB=m
5791 +
5792 +#
5793 +# Digital TV USB devices
5794 +#
5795 +CONFIG_DVB_USB=m
5796 +# CONFIG_DVB_USB_DEBUG is not set
5797 +CONFIG_DVB_USB_DIB3000MC=m
5798 +CONFIG_DVB_USB_A800=m
5799 +CONFIG_DVB_USB_DIBUSB_MB=m
5800 +# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set
5801 +CONFIG_DVB_USB_DIBUSB_MC=m
5802 +CONFIG_DVB_USB_DIB0700=m
5803 +CONFIG_DVB_USB_UMT_010=m
5804 +CONFIG_DVB_USB_CXUSB=m
5805 +CONFIG_DVB_USB_CXUSB_ANALOG=y
5806 +CONFIG_DVB_USB_M920X=m
5807 +CONFIG_DVB_USB_DIGITV=m
5808 +CONFIG_DVB_USB_VP7045=m
5809 +CONFIG_DVB_USB_VP702X=m
5810 +CONFIG_DVB_USB_GP8PSK=m
5811 +CONFIG_DVB_USB_NOVA_T_USB2=m
5812 +CONFIG_DVB_USB_TTUSB2=m
5813 +CONFIG_DVB_USB_DTT200U=m
5814 +CONFIG_DVB_USB_OPERA1=m
5815 +CONFIG_DVB_USB_AF9005=m
5816 +CONFIG_DVB_USB_AF9005_REMOTE=m
5817 +CONFIG_DVB_USB_PCTV452E=m
5818 +CONFIG_DVB_USB_DW2102=m
5819 +CONFIG_DVB_USB_CINERGY_T2=m
5820 +CONFIG_DVB_USB_DTV5100=m
5821 +CONFIG_DVB_USB_AZ6027=m
5822 +CONFIG_DVB_USB_TECHNISAT_USB2=m
5823 +CONFIG_DVB_USB_V2=m
5824 +CONFIG_DVB_USB_AF9015=m
5825 +CONFIG_DVB_USB_AF9035=m
5826 +CONFIG_DVB_USB_ANYSEE=m
5827 +CONFIG_DVB_USB_AU6610=m
5828 +CONFIG_DVB_USB_AZ6007=m
5829 +CONFIG_DVB_USB_CE6230=m
5830 +CONFIG_DVB_USB_EC168=m
5831 +CONFIG_DVB_USB_GL861=m
5832 +CONFIG_DVB_USB_LME2510=m
5833 +CONFIG_DVB_USB_MXL111SF=m
5834 +CONFIG_DVB_USB_RTL28XXU=m
5835 +CONFIG_DVB_USB_DVBSKY=m
5836 +CONFIG_DVB_USB_ZD1301=m
5837 +CONFIG_DVB_TTUSB_BUDGET=m
5838 +CONFIG_DVB_TTUSB_DEC=m
5839 +CONFIG_SMS_USB_DRV=m
5840 +CONFIG_DVB_B2C2_FLEXCOP_USB=m
5841 +# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set
5842 +CONFIG_DVB_AS102=m
5843 +
5844 +#
5845 +# Webcam, TV (analog/digital) USB devices
5846 +#
5847 +CONFIG_VIDEO_EM28XX=m
5848 +CONFIG_VIDEO_EM28XX_V4L2=m
5849 +CONFIG_VIDEO_EM28XX_ALSA=m
5850 +CONFIG_VIDEO_EM28XX_DVB=m
5851 +CONFIG_VIDEO_EM28XX_RC=m
5852 +
5853 +#
5854 +# Software defined radio USB devices
5855 +#
5856 +CONFIG_USB_AIRSPY=m
5857 +CONFIG_USB_HACKRF=m
5858 +CONFIG_USB_MSI2500=m
5859 +CONFIG_MEDIA_PCI_SUPPORT=y
5860 +
5861 +#
5862 +# Media capture support
5863 +#
5864 +CONFIG_VIDEO_MEYE=m
5865 +CONFIG_VIDEO_SOLO6X10=m
5866 +CONFIG_VIDEO_TW5864=m
5867 +CONFIG_VIDEO_TW68=m
5868 +CONFIG_VIDEO_TW686X=m
5869 +
5870 +#
5871 +# Media capture/analog TV support
5872 +#
5873 +CONFIG_VIDEO_IVTV=m
5874 +# CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS is not set
5875 +CONFIG_VIDEO_IVTV_ALSA=m
5876 +CONFIG_VIDEO_FB_IVTV=m
5877 +CONFIG_VIDEO_FB_IVTV_FORCE_PAT=y
5878 +CONFIG_VIDEO_HEXIUM_GEMINI=m
5879 +CONFIG_VIDEO_HEXIUM_ORION=m
5880 +CONFIG_VIDEO_MXB=m
5881 +CONFIG_VIDEO_DT3155=m
5882 +
5883 +#
5884 +# Media capture/analog/hybrid TV support
5885 +#
5886 +CONFIG_VIDEO_CX18=m
5887 +CONFIG_VIDEO_CX18_ALSA=m
5888 +CONFIG_VIDEO_CX23885=m
5889 +CONFIG_MEDIA_ALTERA_CI=m
5890 +CONFIG_VIDEO_CX25821=m
5891 +CONFIG_VIDEO_CX25821_ALSA=m
5892 +CONFIG_VIDEO_CX88=m
5893 +CONFIG_VIDEO_CX88_ALSA=m
5894 +CONFIG_VIDEO_CX88_BLACKBIRD=m
5895 +CONFIG_VIDEO_CX88_DVB=m
5896 +CONFIG_VIDEO_CX88_ENABLE_VP3054=y
5897 +CONFIG_VIDEO_CX88_VP3054=m
5898 +CONFIG_VIDEO_CX88_MPEG=m
5899 +CONFIG_VIDEO_BT848=m
5900 +CONFIG_DVB_BT8XX=m
5901 +CONFIG_VIDEO_SAA7134=m
5902 +CONFIG_VIDEO_SAA7134_ALSA=m
5903 +CONFIG_VIDEO_SAA7134_RC=y
5904 +CONFIG_VIDEO_SAA7134_DVB=m
5905 +CONFIG_VIDEO_SAA7134_GO7007=m
5906 +CONFIG_VIDEO_SAA7164=m
5907 +CONFIG_VIDEO_COBALT=m
5908 +
5909 +#
5910 +# Media digital TV PCI Adapters
5911 +#
5912 +CONFIG_DVB_AV7110_IR=y
5913 +CONFIG_DVB_AV7110=m
5914 +CONFIG_DVB_AV7110_OSD=y
5915 +CONFIG_DVB_BUDGET_CORE=m
5916 +CONFIG_DVB_BUDGET=m
5917 +CONFIG_DVB_BUDGET_CI=m
5918 +CONFIG_DVB_BUDGET_AV=m
5919 +CONFIG_DVB_BUDGET_PATCH=m
5920 +CONFIG_DVB_B2C2_FLEXCOP_PCI=m
5921 +# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set
5922 +CONFIG_DVB_PLUTO2=m
5923 +CONFIG_DVB_DM1105=m
5924 +CONFIG_DVB_PT1=m
5925 +CONFIG_DVB_PT3=m
5926 +CONFIG_MANTIS_CORE=m
5927 +CONFIG_DVB_MANTIS=m
5928 +CONFIG_DVB_HOPPER=m
5929 +CONFIG_DVB_NGENE=m
5930 +CONFIG_DVB_DDBRIDGE=m
5931 +# CONFIG_DVB_DDBRIDGE_MSIENABLE is not set
5932 +CONFIG_DVB_SMIPCIE=m
5933 +CONFIG_DVB_NETUP_UNIDVB=m
5934 +CONFIG_VIDEO_IPU3_CIO2=m
5935 +CONFIG_CIO2_BRIDGE=y
5936 +# CONFIG_VIDEO_PCI_SKELETON is not set
5937 +CONFIG_RADIO_ADAPTERS=y
5938 +CONFIG_RADIO_TEA575X=m
5939 +CONFIG_RADIO_SI470X=m
5940 +CONFIG_USB_SI470X=m
5941 +CONFIG_I2C_SI470X=m
5942 +CONFIG_RADIO_SI4713=m
5943 +CONFIG_USB_SI4713=m
5944 +CONFIG_PLATFORM_SI4713=m
5945 +CONFIG_I2C_SI4713=m
5946 +CONFIG_RADIO_SI476X=m
5947 +CONFIG_USB_MR800=m
5948 +CONFIG_USB_DSBR=m
5949 +CONFIG_RADIO_MAXIRADIO=m
5950 +CONFIG_RADIO_SHARK=m
5951 +CONFIG_RADIO_SHARK2=m
5952 +CONFIG_USB_KEENE=m
5953 +CONFIG_USB_RAREMONO=m
5954 +CONFIG_USB_MA901=m
5955 +CONFIG_RADIO_TEA5764=m
5956 +CONFIG_RADIO_SAA7706H=m
5957 +CONFIG_RADIO_TEF6862=m
5958 +CONFIG_RADIO_WL1273=m
5959 +CONFIG_RADIO_WL128X=m
5960 +CONFIG_MEDIA_COMMON_OPTIONS=y
5961 +
5962 +#
5963 +# common driver options
5964 +#
5965 +CONFIG_VIDEO_CX2341X=m
5966 +CONFIG_VIDEO_TVEEPROM=m
5967 +CONFIG_CYPRESS_FIRMWARE=m
5968 +CONFIG_VIDEOBUF2_CORE=m
5969 +CONFIG_VIDEOBUF2_V4L2=m
5970 +CONFIG_VIDEOBUF2_MEMOPS=m
5971 +CONFIG_VIDEOBUF2_DMA_CONTIG=m
5972 +CONFIG_VIDEOBUF2_VMALLOC=m
5973 +CONFIG_VIDEOBUF2_DMA_SG=m
5974 +CONFIG_VIDEOBUF2_DVB=m
5975 +CONFIG_DVB_B2C2_FLEXCOP=m
5976 +CONFIG_VIDEO_SAA7146=m
5977 +CONFIG_VIDEO_SAA7146_VV=m
5978 +CONFIG_SMS_SIANO_MDTV=m
5979 +CONFIG_SMS_SIANO_RC=y
5980 +CONFIG_SMS_SIANO_DEBUGFS=y
5981 +CONFIG_VIDEO_V4L2_TPG=m
5982 +CONFIG_V4L_PLATFORM_DRIVERS=y
5983 +CONFIG_VIDEO_CAFE_CCIC=m
5984 +CONFIG_VIDEO_VIA_CAMERA=m
5985 +CONFIG_VIDEO_CADENCE=y
5986 +CONFIG_VIDEO_CADENCE_CSI2RX=m
5987 +CONFIG_VIDEO_CADENCE_CSI2TX=m
5988 +CONFIG_VIDEO_ASPEED=m
5989 +CONFIG_V4L_MEM2MEM_DRIVERS=y
5990 +CONFIG_VIDEO_MEM2MEM_DEINTERLACE=m
5991 +CONFIG_DVB_PLATFORM_DRIVERS=y
5992 +CONFIG_SDR_PLATFORM_DRIVERS=y
5993 +
5994 +#
5995 +# MMC/SDIO DVB adapters
5996 +#
5997 +CONFIG_SMS_SDIO_DRV=m
5998 +CONFIG_V4L_TEST_DRIVERS=y
5999 +CONFIG_VIDEO_VIMC=m
6000 +CONFIG_VIDEO_VIVID=m
6001 +CONFIG_VIDEO_VIVID_CEC=y
6002 +CONFIG_VIDEO_VIVID_MAX_DEVS=64
6003 +CONFIG_VIDEO_VIM2M=m
6004 +CONFIG_VIDEO_VICODEC=m
6005 +# CONFIG_DVB_TEST_DRIVERS is not set
6006 +
6007 +#
6008 +# FireWire (IEEE 1394) Adapters
6009 +#
6010 +CONFIG_DVB_FIREDTV=m
6011 +CONFIG_DVB_FIREDTV_INPUT=y
6012 +# end of Media drivers
6013 +
6014 +#
6015 +# Media ancillary drivers
6016 +#
6017 +CONFIG_MEDIA_ATTACH=y
6018 +
6019 +#
6020 +# IR I2C driver auto-selected by 'Autoselect ancillary drivers'
6021 +#
6022 +CONFIG_VIDEO_IR_I2C=m
6023 +
6024 +#
6025 +# Audio decoders, processors and mixers
6026 +#
6027 +CONFIG_VIDEO_TVAUDIO=m
6028 +CONFIG_VIDEO_TDA7432=m
6029 +CONFIG_VIDEO_TDA9840=m
6030 +CONFIG_VIDEO_TDA1997X=m
6031 +CONFIG_VIDEO_TEA6415C=m
6032 +CONFIG_VIDEO_TEA6420=m
6033 +CONFIG_VIDEO_MSP3400=m
6034 +CONFIG_VIDEO_CS3308=m
6035 +CONFIG_VIDEO_CS5345=m
6036 +CONFIG_VIDEO_CS53L32A=m
6037 +CONFIG_VIDEO_TLV320AIC23B=m
6038 +CONFIG_VIDEO_UDA1342=m
6039 +CONFIG_VIDEO_WM8775=m
6040 +CONFIG_VIDEO_WM8739=m
6041 +CONFIG_VIDEO_VP27SMPX=m
6042 +CONFIG_VIDEO_SONY_BTF_MPX=m
6043 +# end of Audio decoders, processors and mixers
6044 +
6045 +#
6046 +# RDS decoders
6047 +#
6048 +CONFIG_VIDEO_SAA6588=m
6049 +# end of RDS decoders
6050 +
6051 +#
6052 +# Video decoders
6053 +#
6054 +CONFIG_VIDEO_ADV7180=m
6055 +CONFIG_VIDEO_ADV7183=m
6056 +CONFIG_VIDEO_ADV7604=m
6057 +CONFIG_VIDEO_ADV7604_CEC=y
6058 +CONFIG_VIDEO_ADV7842=m
6059 +CONFIG_VIDEO_ADV7842_CEC=y
6060 +CONFIG_VIDEO_BT819=m
6061 +CONFIG_VIDEO_BT856=m
6062 +CONFIG_VIDEO_BT866=m
6063 +CONFIG_VIDEO_KS0127=m
6064 +CONFIG_VIDEO_ML86V7667=m
6065 +CONFIG_VIDEO_SAA7110=m
6066 +CONFIG_VIDEO_SAA711X=m
6067 +CONFIG_VIDEO_TC358743=m
6068 +CONFIG_VIDEO_TC358743_CEC=y
6069 +CONFIG_VIDEO_TVP514X=m
6070 +CONFIG_VIDEO_TVP5150=m
6071 +CONFIG_VIDEO_TVP7002=m
6072 +CONFIG_VIDEO_TW2804=m
6073 +CONFIG_VIDEO_TW9903=m
6074 +CONFIG_VIDEO_TW9906=m
6075 +CONFIG_VIDEO_TW9910=m
6076 +CONFIG_VIDEO_VPX3220=m
6079 +# Video and audio decoders
6081 +CONFIG_VIDEO_SAA717X=m
6082 +CONFIG_VIDEO_CX25840=m
6083 +# end of Video decoders
6086 +# Video encoders
6088 +CONFIG_VIDEO_SAA7127=m
6089 +CONFIG_VIDEO_SAA7185=m
6090 +CONFIG_VIDEO_ADV7170=m
6091 +CONFIG_VIDEO_ADV7175=m
6092 +CONFIG_VIDEO_ADV7343=m
6093 +CONFIG_VIDEO_ADV7393=m
6094 +CONFIG_VIDEO_ADV7511=m
6095 +CONFIG_VIDEO_ADV7511_CEC=y
6096 +CONFIG_VIDEO_AD9389B=m
6097 +CONFIG_VIDEO_AK881X=m
6098 +CONFIG_VIDEO_THS8200=m
6099 +# end of Video encoders
6102 +# Video improvement chips
6104 +CONFIG_VIDEO_UPD64031A=m
6105 +CONFIG_VIDEO_UPD64083=m
6106 +# end of Video improvement chips
6109 +# Audio/Video compression chips
6111 +CONFIG_VIDEO_SAA6752HS=m
6112 +# end of Audio/Video compression chips
6115 +# SDR tuner chips
6117 +CONFIG_SDR_MAX2175=m
6118 +# end of SDR tuner chips
6121 +# Miscellaneous helper chips
6123 +CONFIG_VIDEO_THS7303=m
6124 +CONFIG_VIDEO_M52790=m
6125 +CONFIG_VIDEO_I2C=m
6126 +CONFIG_VIDEO_ST_MIPID02=m
6127 +# end of Miscellaneous helper chips
6130 +# Camera sensor devices
6132 +CONFIG_VIDEO_APTINA_PLL=m
6133 +CONFIG_VIDEO_CCS_PLL=m
6134 +CONFIG_VIDEO_HI556=m
6135 +CONFIG_VIDEO_IMX214=m
6136 +CONFIG_VIDEO_IMX219=m
6137 +CONFIG_VIDEO_IMX258=m
6138 +CONFIG_VIDEO_IMX274=m
6139 +CONFIG_VIDEO_IMX290=m
6140 +CONFIG_VIDEO_IMX319=m
6141 +CONFIG_VIDEO_IMX355=m
6142 +CONFIG_VIDEO_OV02A10=m
6143 +CONFIG_VIDEO_OV2640=m
6144 +CONFIG_VIDEO_OV2659=m
6145 +CONFIG_VIDEO_OV2680=m
6146 +CONFIG_VIDEO_OV2685=m
6147 +CONFIG_VIDEO_OV2740=m
6148 +CONFIG_VIDEO_OV5647=m
6149 +CONFIG_VIDEO_OV5648=m
6150 +CONFIG_VIDEO_OV6650=m
6151 +CONFIG_VIDEO_OV5670=m
6152 +CONFIG_VIDEO_OV5675=m
6153 +CONFIG_VIDEO_OV5695=m
6154 +CONFIG_VIDEO_OV7251=m
6155 +CONFIG_VIDEO_OV772X=m
6156 +CONFIG_VIDEO_OV7640=m
6157 +CONFIG_VIDEO_OV7670=m
6158 +CONFIG_VIDEO_OV7740=m
6159 +CONFIG_VIDEO_OV8856=m
6160 +CONFIG_VIDEO_OV8865=m
6161 +CONFIG_VIDEO_OV9640=m
6162 +CONFIG_VIDEO_OV9650=m
6163 +CONFIG_VIDEO_OV9734=m
6164 +CONFIG_VIDEO_OV13858=m
6165 +CONFIG_VIDEO_VS6624=m
6166 +CONFIG_VIDEO_MT9M001=m
6167 +CONFIG_VIDEO_MT9M032=m
6168 +CONFIG_VIDEO_MT9M111=m
6169 +CONFIG_VIDEO_MT9P031=m
6170 +CONFIG_VIDEO_MT9T001=m
6171 +CONFIG_VIDEO_MT9T112=m
6172 +CONFIG_VIDEO_MT9V011=m
6173 +CONFIG_VIDEO_MT9V032=m
6174 +CONFIG_VIDEO_MT9V111=m
6175 +CONFIG_VIDEO_SR030PC30=m
6176 +CONFIG_VIDEO_NOON010PC30=m
6177 +CONFIG_VIDEO_M5MOLS=m
6178 +CONFIG_VIDEO_MAX9271_LIB=m
6179 +CONFIG_VIDEO_RDACM20=m
6180 +CONFIG_VIDEO_RDACM21=m
6181 +CONFIG_VIDEO_RJ54N1=m
6182 +CONFIG_VIDEO_S5K6AA=m
6183 +CONFIG_VIDEO_S5K6A3=m
6184 +CONFIG_VIDEO_S5K4ECGX=m
6185 +CONFIG_VIDEO_S5K5BAF=m
6186 +CONFIG_VIDEO_CCS=m
6187 +CONFIG_VIDEO_ET8EK8=m
6188 +CONFIG_VIDEO_S5C73M3=m
6189 +# end of Camera sensor devices
6192 +# Lens drivers
6194 +CONFIG_VIDEO_AD5820=m
6195 +CONFIG_VIDEO_AK7375=m
6196 +CONFIG_VIDEO_DW9714=m
6197 +CONFIG_VIDEO_DW9768=m
6198 +CONFIG_VIDEO_DW9807_VCM=m
6199 +# end of Lens drivers
6202 +# Flash devices
6204 +CONFIG_VIDEO_ADP1653=m
6205 +CONFIG_VIDEO_LM3560=m
6206 +CONFIG_VIDEO_LM3646=m
6207 +# end of Flash devices
6210 +# SPI helper chips
6212 +CONFIG_VIDEO_GS1662=m
6213 +# end of SPI helper chips
6216 +# Media SPI Adapters
6218 +CONFIG_CXD2880_SPI_DRV=m
6219 +# end of Media SPI Adapters
6221 +CONFIG_MEDIA_TUNER=m
6224 +# Customize TV tuners
6226 +CONFIG_MEDIA_TUNER_SIMPLE=m
6227 +CONFIG_MEDIA_TUNER_TDA18250=m
6228 +CONFIG_MEDIA_TUNER_TDA8290=m
6229 +CONFIG_MEDIA_TUNER_TDA827X=m
6230 +CONFIG_MEDIA_TUNER_TDA18271=m
6231 +CONFIG_MEDIA_TUNER_TDA9887=m
6232 +CONFIG_MEDIA_TUNER_TEA5761=m
6233 +CONFIG_MEDIA_TUNER_TEA5767=m
6234 +CONFIG_MEDIA_TUNER_MSI001=m
6235 +CONFIG_MEDIA_TUNER_MT20XX=m
6236 +CONFIG_MEDIA_TUNER_MT2060=m
6237 +CONFIG_MEDIA_TUNER_MT2063=m
6238 +CONFIG_MEDIA_TUNER_MT2266=m
6239 +CONFIG_MEDIA_TUNER_MT2131=m
6240 +CONFIG_MEDIA_TUNER_QT1010=m
6241 +CONFIG_MEDIA_TUNER_XC2028=m
6242 +CONFIG_MEDIA_TUNER_XC5000=m
6243 +CONFIG_MEDIA_TUNER_XC4000=m
6244 +CONFIG_MEDIA_TUNER_MXL5005S=m
6245 +CONFIG_MEDIA_TUNER_MXL5007T=m
6246 +CONFIG_MEDIA_TUNER_MC44S803=m
6247 +CONFIG_MEDIA_TUNER_MAX2165=m
6248 +CONFIG_MEDIA_TUNER_TDA18218=m
6249 +CONFIG_MEDIA_TUNER_FC0011=m
6250 +CONFIG_MEDIA_TUNER_FC0012=m
6251 +CONFIG_MEDIA_TUNER_FC0013=m
6252 +CONFIG_MEDIA_TUNER_TDA18212=m
6253 +CONFIG_MEDIA_TUNER_E4000=m
6254 +CONFIG_MEDIA_TUNER_FC2580=m
6255 +CONFIG_MEDIA_TUNER_M88RS6000T=m
6256 +CONFIG_MEDIA_TUNER_TUA9001=m
6257 +CONFIG_MEDIA_TUNER_SI2157=m
6258 +CONFIG_MEDIA_TUNER_IT913X=m
6259 +CONFIG_MEDIA_TUNER_R820T=m
6260 +CONFIG_MEDIA_TUNER_MXL301RF=m
6261 +CONFIG_MEDIA_TUNER_QM1D1C0042=m
6262 +CONFIG_MEDIA_TUNER_QM1D1B0004=m
6263 +# end of Customize TV tuners
6266 +# Customise DVB Frontends
6270 +# Multistandard (satellite) frontends
6272 +CONFIG_DVB_STB0899=m
6273 +CONFIG_DVB_STB6100=m
6274 +CONFIG_DVB_STV090x=m
6275 +CONFIG_DVB_STV0910=m
6276 +CONFIG_DVB_STV6110x=m
6277 +CONFIG_DVB_STV6111=m
6278 +CONFIG_DVB_MXL5XX=m
6279 +CONFIG_DVB_M88DS3103=m
6282 +# Multistandard (cable + terrestrial) frontends
6284 +CONFIG_DVB_DRXK=m
6285 +CONFIG_DVB_TDA18271C2DD=m
6286 +CONFIG_DVB_SI2165=m
6287 +CONFIG_DVB_MN88472=m
6288 +CONFIG_DVB_MN88473=m
6291 +# DVB-S (satellite) frontends
6293 +CONFIG_DVB_CX24110=m
6294 +CONFIG_DVB_CX24123=m
6295 +CONFIG_DVB_MT312=m
6296 +CONFIG_DVB_ZL10036=m
6297 +CONFIG_DVB_ZL10039=m
6298 +CONFIG_DVB_S5H1420=m
6299 +CONFIG_DVB_STV0288=m
6300 +CONFIG_DVB_STB6000=m
6301 +CONFIG_DVB_STV0299=m
6302 +CONFIG_DVB_STV6110=m
6303 +CONFIG_DVB_STV0900=m
6304 +CONFIG_DVB_TDA8083=m
6305 +CONFIG_DVB_TDA10086=m
6306 +CONFIG_DVB_TDA8261=m
6307 +CONFIG_DVB_VES1X93=m
6308 +CONFIG_DVB_TUNER_ITD1000=m
6309 +CONFIG_DVB_TUNER_CX24113=m
6310 +CONFIG_DVB_TDA826X=m
6311 +CONFIG_DVB_TUA6100=m
6312 +CONFIG_DVB_CX24116=m
6313 +CONFIG_DVB_CX24117=m
6314 +CONFIG_DVB_CX24120=m
6315 +CONFIG_DVB_SI21XX=m
6316 +CONFIG_DVB_TS2020=m
6317 +CONFIG_DVB_DS3000=m
6318 +CONFIG_DVB_MB86A16=m
6319 +CONFIG_DVB_TDA10071=m
6322 +# DVB-T (terrestrial) frontends
6324 +CONFIG_DVB_SP8870=m
6325 +CONFIG_DVB_SP887X=m
6326 +CONFIG_DVB_CX22700=m
6327 +CONFIG_DVB_CX22702=m
6328 +CONFIG_DVB_S5H1432=m
6329 +CONFIG_DVB_DRXD=m
6330 +CONFIG_DVB_L64781=m
6331 +CONFIG_DVB_TDA1004X=m
6332 +CONFIG_DVB_NXT6000=m
6333 +CONFIG_DVB_MT352=m
6334 +CONFIG_DVB_ZL10353=m
6335 +CONFIG_DVB_DIB3000MB=m
6336 +CONFIG_DVB_DIB3000MC=m
6337 +CONFIG_DVB_DIB7000M=m
6338 +CONFIG_DVB_DIB7000P=m
6339 +CONFIG_DVB_DIB9000=m
6340 +CONFIG_DVB_TDA10048=m
6341 +CONFIG_DVB_AF9013=m
6342 +CONFIG_DVB_EC100=m
6343 +CONFIG_DVB_STV0367=m
6344 +CONFIG_DVB_CXD2820R=m
6345 +CONFIG_DVB_CXD2841ER=m
6346 +CONFIG_DVB_RTL2830=m
6347 +CONFIG_DVB_RTL2832=m
6348 +CONFIG_DVB_RTL2832_SDR=m
6349 +CONFIG_DVB_SI2168=m
6350 +CONFIG_DVB_AS102_FE=m
6351 +CONFIG_DVB_ZD1301_DEMOD=m
6352 +CONFIG_DVB_GP8PSK_FE=m
6353 +CONFIG_DVB_CXD2880=m
6356 +# DVB-C (cable) frontends
6358 +CONFIG_DVB_VES1820=m
6359 +CONFIG_DVB_TDA10021=m
6360 +CONFIG_DVB_TDA10023=m
6361 +CONFIG_DVB_STV0297=m
6364 +# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
6366 +CONFIG_DVB_NXT200X=m
6367 +CONFIG_DVB_OR51211=m
6368 +CONFIG_DVB_OR51132=m
6369 +CONFIG_DVB_BCM3510=m
6370 +CONFIG_DVB_LGDT330X=m
6371 +CONFIG_DVB_LGDT3305=m
6372 +CONFIG_DVB_LGDT3306A=m
6373 +CONFIG_DVB_LG2160=m
6374 +CONFIG_DVB_S5H1409=m
6375 +CONFIG_DVB_AU8522=m
6376 +CONFIG_DVB_AU8522_DTV=m
6377 +CONFIG_DVB_AU8522_V4L=m
6378 +CONFIG_DVB_S5H1411=m
6379 +CONFIG_DVB_MXL692=m
6382 +# ISDB-T (terrestrial) frontends
6384 +CONFIG_DVB_S921=m
6385 +CONFIG_DVB_DIB8000=m
6386 +CONFIG_DVB_MB86A20S=m
6389 +# ISDB-S (satellite) & ISDB-T (terrestrial) frontends
6391 +CONFIG_DVB_TC90522=m
6392 +CONFIG_DVB_MN88443X=m
6395 +# Digital terrestrial only tuners/PLL
6397 +CONFIG_DVB_PLL=m
6398 +CONFIG_DVB_TUNER_DIB0070=m
6399 +CONFIG_DVB_TUNER_DIB0090=m
6402 +# SEC control devices for DVB-S
6404 +CONFIG_DVB_DRX39XYJ=m
6405 +CONFIG_DVB_LNBH25=m
6406 +CONFIG_DVB_LNBH29=m
6407 +CONFIG_DVB_LNBP21=m
6408 +CONFIG_DVB_LNBP22=m
6409 +CONFIG_DVB_ISL6405=m
6410 +CONFIG_DVB_ISL6421=m
6411 +CONFIG_DVB_ISL6423=m
6412 +CONFIG_DVB_A8293=m
6413 +CONFIG_DVB_LGS8GL5=m
6414 +CONFIG_DVB_LGS8GXX=m
6415 +CONFIG_DVB_ATBM8830=m
6416 +CONFIG_DVB_TDA665x=m
6417 +CONFIG_DVB_IX2505V=m
6418 +CONFIG_DVB_M88RS2000=m
6419 +CONFIG_DVB_AF9033=m
6420 +CONFIG_DVB_HORUS3A=m
6421 +CONFIG_DVB_ASCOT2E=m
6422 +CONFIG_DVB_HELENE=m
6425 +# Common Interface (EN50221) controller drivers
6427 +CONFIG_DVB_CXD2099=m
6428 +CONFIG_DVB_SP2=m
6429 +# end of Customise DVB Frontends
6432 +# Tools to develop new frontends
6434 +CONFIG_DVB_DUMMY_FE=m
6435 +# end of Media ancillary drivers
6438 +# Graphics support
6440 +CONFIG_AGP=y
6441 +CONFIG_AGP_AMD64=y
6442 +CONFIG_AGP_INTEL=y
6443 +CONFIG_AGP_SIS=m
6444 +CONFIG_AGP_VIA=y
6445 +CONFIG_INTEL_GTT=y
6446 +CONFIG_VGA_ARB=y
6447 +CONFIG_VGA_ARB_MAX_GPUS=16
6448 +CONFIG_VGA_SWITCHEROO=y
6449 +CONFIG_DRM=m
6450 +CONFIG_DRM_MIPI_DBI=m
6451 +CONFIG_DRM_MIPI_DSI=y
6452 +CONFIG_DRM_DP_AUX_CHARDEV=y
6453 +# CONFIG_DRM_DEBUG_SELFTEST is not set
6454 +CONFIG_DRM_KMS_HELPER=m
6455 +CONFIG_DRM_KMS_FB_HELPER=y
6456 +# CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS is not set
6457 +CONFIG_DRM_FBDEV_EMULATION=y
6458 +CONFIG_DRM_FBDEV_OVERALLOC=100
6459 +# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set
6460 +CONFIG_DRM_LOAD_EDID_FIRMWARE=y
6461 +CONFIG_DRM_DP_CEC=y
6462 +CONFIG_DRM_TTM=m
6463 +CONFIG_DRM_VRAM_HELPER=m
6464 +CONFIG_DRM_TTM_HELPER=m
6465 +CONFIG_DRM_GEM_CMA_HELPER=y
6466 +CONFIG_DRM_KMS_CMA_HELPER=y
6467 +CONFIG_DRM_GEM_SHMEM_HELPER=y
6468 +CONFIG_DRM_SCHED=m
6471 +# I2C encoder or helper chips
6473 +CONFIG_DRM_I2C_CH7006=m
6474 +CONFIG_DRM_I2C_SIL164=m
6475 +CONFIG_DRM_I2C_NXP_TDA998X=m
6476 +CONFIG_DRM_I2C_NXP_TDA9950=m
6477 +# end of I2C encoder or helper chips
6480 +# ARM devices
6482 +# end of ARM devices
6484 +CONFIG_DRM_RADEON=m
6485 +# CONFIG_DRM_RADEON_USERPTR is not set
6486 +CONFIG_DRM_AMDGPU=m
6487 +CONFIG_DRM_AMDGPU_SI=y
6488 +CONFIG_DRM_AMDGPU_CIK=y
6489 +CONFIG_DRM_AMDGPU_USERPTR=y
6490 +# CONFIG_DRM_AMDGPU_GART_DEBUGFS is not set
6493 +# ACP (Audio CoProcessor) Configuration
6495 +CONFIG_DRM_AMD_ACP=y
6496 +# end of ACP (Audio CoProcessor) Configuration
6499 +# Display Engine Configuration
6501 +CONFIG_DRM_AMD_DC=y
6502 +CONFIG_DRM_AMD_DC_DCN=y
6503 +CONFIG_DRM_AMD_DC_HDCP=y
6504 +CONFIG_DRM_AMD_DC_SI=y
6505 +# CONFIG_DEBUG_KERNEL_DC is not set
6506 +# end of Display Engine Configuration
6508 +CONFIG_HSA_AMD=y
6509 +CONFIG_DRM_NOUVEAU=m
6510 +# CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT is not set
6511 +CONFIG_NOUVEAU_DEBUG=5
6512 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3
6513 +# CONFIG_NOUVEAU_DEBUG_MMU is not set
6514 +# CONFIG_NOUVEAU_DEBUG_PUSH is not set
6515 +CONFIG_DRM_NOUVEAU_BACKLIGHT=y
6516 +# CONFIG_DRM_NOUVEAU_SVM is not set
6517 +CONFIG_DRM_I915=m
6518 +CONFIG_DRM_I915_FORCE_PROBE=""
6519 +CONFIG_DRM_I915_CAPTURE_ERROR=y
6520 +CONFIG_DRM_I915_COMPRESS_ERROR=y
6521 +CONFIG_DRM_I915_USERPTR=y
6522 +CONFIG_DRM_I915_GVT=y
6523 +CONFIG_DRM_I915_GVT_KVMGT=m
6526 +# drm/i915 Debugging
6528 +# CONFIG_DRM_I915_WERROR is not set
6529 +# CONFIG_DRM_I915_DEBUG is not set
6530 +# CONFIG_DRM_I915_DEBUG_MMIO is not set
6531 +# CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS is not set
6532 +# CONFIG_DRM_I915_SW_FENCE_CHECK_DAG is not set
6533 +# CONFIG_DRM_I915_DEBUG_GUC is not set
6534 +# CONFIG_DRM_I915_SELFTEST is not set
6535 +# CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS is not set
6536 +# CONFIG_DRM_I915_DEBUG_VBLANK_EVADE is not set
6537 +# CONFIG_DRM_I915_DEBUG_RUNTIME_PM is not set
6538 +# end of drm/i915 Debugging
6541 +# drm/i915 Profile Guided Optimisation
6543 +CONFIG_DRM_I915_FENCE_TIMEOUT=10000
6544 +CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND=250
6545 +CONFIG_DRM_I915_HEARTBEAT_INTERVAL=2500
6546 +CONFIG_DRM_I915_PREEMPT_TIMEOUT=640
6547 +CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT=8000
6548 +CONFIG_DRM_I915_STOP_TIMEOUT=100
6549 +CONFIG_DRM_I915_TIMESLICE_DURATION=1
6550 +# end of drm/i915 Profile Guided Optimisation
6552 +CONFIG_DRM_VGEM=m
6553 +CONFIG_DRM_VKMS=m
6554 +CONFIG_DRM_VMWGFX=m
6555 +CONFIG_DRM_VMWGFX_FBCON=y
6556 +CONFIG_DRM_GMA500=m
6557 +CONFIG_DRM_GMA600=y
6558 +CONFIG_DRM_UDL=m
6559 +CONFIG_DRM_AST=m
6560 +CONFIG_DRM_MGAG200=m
6561 +CONFIG_DRM_QXL=m
6562 +CONFIG_DRM_BOCHS=m
6563 +CONFIG_DRM_VIRTIO_GPU=m
6564 +CONFIG_DRM_PANEL=y
6567 +# Display Panels
6569 +CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN=m
6570 +# end of Display Panels
6572 +CONFIG_DRM_BRIDGE=y
6573 +CONFIG_DRM_PANEL_BRIDGE=y
6576 +# Display Interface Bridges
6578 +CONFIG_DRM_ANALOGIX_ANX78XX=m
6579 +CONFIG_DRM_ANALOGIX_DP=m
6580 +# end of Display Interface Bridges
6582 +# CONFIG_DRM_ETNAVIV is not set
6583 +CONFIG_DRM_CIRRUS_QEMU=m
6584 +CONFIG_DRM_GM12U320=m
6585 +CONFIG_TINYDRM_HX8357D=m
6586 +CONFIG_TINYDRM_ILI9225=m
6587 +CONFIG_TINYDRM_ILI9341=m
6588 +CONFIG_TINYDRM_ILI9486=m
6589 +CONFIG_TINYDRM_MI0283QT=m
6590 +CONFIG_TINYDRM_REPAPER=m
6591 +CONFIG_TINYDRM_ST7586=m
6592 +CONFIG_TINYDRM_ST7735R=m
6593 +CONFIG_DRM_XEN=y
6594 +CONFIG_DRM_XEN_FRONTEND=m
6595 +CONFIG_DRM_VBOXVIDEO=m
6596 +# CONFIG_DRM_LEGACY is not set
6597 +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
6600 +# Frame buffer Devices
6602 +CONFIG_FB_CMDLINE=y
6603 +CONFIG_FB_NOTIFY=y
6604 +CONFIG_FB=y
6605 +CONFIG_FIRMWARE_EDID=y
6606 +CONFIG_FB_DDC=m
6607 +CONFIG_FB_BOOT_VESA_SUPPORT=y
6608 +CONFIG_FB_CFB_FILLRECT=y
6609 +CONFIG_FB_CFB_COPYAREA=y
6610 +CONFIG_FB_CFB_IMAGEBLIT=y
6611 +CONFIG_FB_SYS_FILLRECT=m
6612 +CONFIG_FB_SYS_COPYAREA=m
6613 +CONFIG_FB_SYS_IMAGEBLIT=m
6614 +# CONFIG_FB_FOREIGN_ENDIAN is not set
6615 +CONFIG_FB_SYS_FOPS=m
6616 +CONFIG_FB_DEFERRED_IO=y
6617 +CONFIG_FB_HECUBA=m
6618 +CONFIG_FB_SVGALIB=m
6619 +CONFIG_FB_BACKLIGHT=m
6620 +CONFIG_FB_MODE_HELPERS=y
6621 +CONFIG_FB_TILEBLITTING=y
6624 +# Frame buffer hardware drivers
6626 +CONFIG_FB_CIRRUS=m
6627 +CONFIG_FB_PM2=m
6628 +CONFIG_FB_PM2_FIFO_DISCONNECT=y
6629 +CONFIG_FB_CYBER2000=m
6630 +CONFIG_FB_CYBER2000_DDC=y
6631 +CONFIG_FB_ARC=m
6632 +CONFIG_FB_ASILIANT=y
6633 +CONFIG_FB_IMSTT=y
6634 +CONFIG_FB_VGA16=m
6635 +CONFIG_FB_UVESA=m
6636 +CONFIG_FB_VESA=y
6637 +CONFIG_FB_EFI=y
6638 +CONFIG_FB_N411=m
6639 +CONFIG_FB_HGA=m
6640 +CONFIG_FB_OPENCORES=m
6641 +CONFIG_FB_S1D13XXX=m
6642 +CONFIG_FB_NVIDIA=m
6643 +CONFIG_FB_NVIDIA_I2C=y
6644 +# CONFIG_FB_NVIDIA_DEBUG is not set
6645 +CONFIG_FB_NVIDIA_BACKLIGHT=y
6646 +CONFIG_FB_RIVA=m
6647 +CONFIG_FB_RIVA_I2C=y
6648 +# CONFIG_FB_RIVA_DEBUG is not set
6649 +CONFIG_FB_RIVA_BACKLIGHT=y
6650 +CONFIG_FB_I740=m
6651 +CONFIG_FB_LE80578=m
6652 +CONFIG_FB_CARILLO_RANCH=m
6653 +CONFIG_FB_INTEL=m
6654 +# CONFIG_FB_INTEL_DEBUG is not set
6655 +CONFIG_FB_INTEL_I2C=y
6656 +CONFIG_FB_MATROX=m
6657 +CONFIG_FB_MATROX_MILLENIUM=y
6658 +CONFIG_FB_MATROX_MYSTIQUE=y
6659 +CONFIG_FB_MATROX_G=y
6660 +CONFIG_FB_MATROX_I2C=m
6661 +CONFIG_FB_MATROX_MAVEN=m
6662 +CONFIG_FB_RADEON=m
6663 +CONFIG_FB_RADEON_I2C=y
6664 +CONFIG_FB_RADEON_BACKLIGHT=y
6665 +# CONFIG_FB_RADEON_DEBUG is not set
6666 +CONFIG_FB_ATY128=m
6667 +CONFIG_FB_ATY128_BACKLIGHT=y
6668 +CONFIG_FB_ATY=m
6669 +CONFIG_FB_ATY_CT=y
6670 +# CONFIG_FB_ATY_GENERIC_LCD is not set
6671 +CONFIG_FB_ATY_GX=y
6672 +CONFIG_FB_ATY_BACKLIGHT=y
6673 +CONFIG_FB_S3=m
6674 +CONFIG_FB_S3_DDC=y
6675 +CONFIG_FB_SAVAGE=m
6676 +CONFIG_FB_SAVAGE_I2C=y
6677 +# CONFIG_FB_SAVAGE_ACCEL is not set
6678 +CONFIG_FB_SIS=m
6679 +CONFIG_FB_SIS_300=y
6680 +CONFIG_FB_SIS_315=y
6681 +CONFIG_FB_VIA=m
6682 +# CONFIG_FB_VIA_DIRECT_PROCFS is not set
6683 +CONFIG_FB_VIA_X_COMPATIBILITY=y
6684 +CONFIG_FB_NEOMAGIC=m
6685 +CONFIG_FB_KYRO=m
6686 +CONFIG_FB_3DFX=m
6687 +# CONFIG_FB_3DFX_ACCEL is not set
6688 +# CONFIG_FB_3DFX_I2C is not set
6689 +CONFIG_FB_VOODOO1=m
6690 +CONFIG_FB_VT8623=m
6691 +CONFIG_FB_TRIDENT=m
6692 +CONFIG_FB_ARK=m
6693 +CONFIG_FB_PM3=m
6694 +CONFIG_FB_CARMINE=m
6695 +CONFIG_FB_CARMINE_DRAM_EVAL=y
6696 +# CONFIG_CARMINE_DRAM_CUSTOM is not set
6697 +CONFIG_FB_SM501=m
6698 +CONFIG_FB_SMSCUFX=m
6699 +CONFIG_FB_UDL=m
6700 +# CONFIG_FB_IBM_GXT4500 is not set
6701 +# CONFIG_FB_VIRTUAL is not set
6702 +CONFIG_XEN_FBDEV_FRONTEND=m
6703 +CONFIG_FB_METRONOME=m
6704 +CONFIG_FB_MB862XX=m
6705 +CONFIG_FB_MB862XX_PCI_GDC=y
6706 +CONFIG_FB_MB862XX_I2C=y
6707 +CONFIG_FB_HYPERV=m
6708 +CONFIG_FB_SIMPLE=y
6709 +CONFIG_FB_SM712=m
6710 +# end of Frame buffer Devices
6713 +# Backlight & LCD device support
6715 +CONFIG_LCD_CLASS_DEVICE=m
6716 +CONFIG_LCD_L4F00242T03=m
6717 +CONFIG_LCD_LMS283GF05=m
6718 +CONFIG_LCD_LTV350QV=m
6719 +CONFIG_LCD_ILI922X=m
6720 +CONFIG_LCD_ILI9320=m
6721 +CONFIG_LCD_TDO24M=m
6722 +CONFIG_LCD_VGG2432A4=m
6723 +CONFIG_LCD_PLATFORM=m
6724 +CONFIG_LCD_AMS369FG06=m
6725 +CONFIG_LCD_LMS501KF03=m
6726 +CONFIG_LCD_HX8357=m
6727 +CONFIG_LCD_OTM3225A=m
6728 +CONFIG_BACKLIGHT_CLASS_DEVICE=y
6729 +CONFIG_BACKLIGHT_KTD253=m
6730 +CONFIG_BACKLIGHT_LM3533=m
6731 +CONFIG_BACKLIGHT_CARILLO_RANCH=m
6732 +CONFIG_BACKLIGHT_PWM=m
6733 +CONFIG_BACKLIGHT_DA903X=m
6734 +CONFIG_BACKLIGHT_DA9052=m
6735 +CONFIG_BACKLIGHT_MAX8925=m
6736 +CONFIG_BACKLIGHT_APPLE=m
6737 +CONFIG_BACKLIGHT_QCOM_WLED=m
6738 +CONFIG_BACKLIGHT_SAHARA=m
6739 +CONFIG_BACKLIGHT_WM831X=m
6740 +CONFIG_BACKLIGHT_ADP5520=m
6741 +CONFIG_BACKLIGHT_ADP8860=m
6742 +CONFIG_BACKLIGHT_ADP8870=m
6743 +CONFIG_BACKLIGHT_88PM860X=m
6744 +CONFIG_BACKLIGHT_PCF50633=m
6745 +CONFIG_BACKLIGHT_AAT2870=m
6746 +CONFIG_BACKLIGHT_LM3630A=m
6747 +CONFIG_BACKLIGHT_LM3639=m
6748 +CONFIG_BACKLIGHT_LP855X=m
6749 +CONFIG_BACKLIGHT_LP8788=m
6750 +CONFIG_BACKLIGHT_PANDORA=m
6751 +CONFIG_BACKLIGHT_SKY81452=m
6752 +CONFIG_BACKLIGHT_AS3711=m
6753 +CONFIG_BACKLIGHT_GPIO=m
6754 +CONFIG_BACKLIGHT_LV5207LP=m
6755 +CONFIG_BACKLIGHT_BD6107=m
6756 +CONFIG_BACKLIGHT_ARCXCNN=m
6757 +CONFIG_BACKLIGHT_RAVE_SP=m
6758 +# end of Backlight & LCD device support
6760 +CONFIG_VGASTATE=m
6761 +CONFIG_VIDEOMODE_HELPERS=y
6762 +CONFIG_HDMI=y
6765 +# Console display driver support
6767 +CONFIG_VGA_CONSOLE=y
6768 +CONFIG_DUMMY_CONSOLE=y
6769 +CONFIG_DUMMY_CONSOLE_COLUMNS=80
6770 +CONFIG_DUMMY_CONSOLE_ROWS=25
6771 +CONFIG_FRAMEBUFFER_CONSOLE=y
6772 +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
6773 +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
6774 +CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER=y
6775 +# end of Console display driver support
6777 +# CONFIG_LOGO is not set
6778 +# end of Graphics support
6780 +CONFIG_SOUND=m
6781 +CONFIG_SOUND_OSS_CORE=y
6782 +# CONFIG_SOUND_OSS_CORE_PRECLAIM is not set
6783 +CONFIG_SND=m
6784 +CONFIG_SND_TIMER=m
6785 +CONFIG_SND_PCM=m
6786 +CONFIG_SND_PCM_ELD=y
6787 +CONFIG_SND_PCM_IEC958=y
6788 +CONFIG_SND_DMAENGINE_PCM=m
6789 +CONFIG_SND_HWDEP=m
6790 +CONFIG_SND_SEQ_DEVICE=m
6791 +CONFIG_SND_RAWMIDI=m
6792 +CONFIG_SND_COMPRESS_OFFLOAD=m
6793 +CONFIG_SND_JACK=y
6794 +CONFIG_SND_JACK_INPUT_DEV=y
6795 +CONFIG_SND_OSSEMUL=y
6796 +CONFIG_SND_MIXER_OSS=m
6797 +# CONFIG_SND_PCM_OSS is not set
6798 +CONFIG_SND_PCM_TIMER=y
6799 +CONFIG_SND_HRTIMER=m
6800 +CONFIG_SND_DYNAMIC_MINORS=y
6801 +CONFIG_SND_MAX_CARDS=32
6802 +CONFIG_SND_SUPPORT_OLD_API=y
6803 +CONFIG_SND_PROC_FS=y
6804 +CONFIG_SND_VERBOSE_PROCFS=y
6805 +# CONFIG_SND_VERBOSE_PRINTK is not set
6806 +# CONFIG_SND_DEBUG is not set
6807 +CONFIG_SND_VMASTER=y
6808 +CONFIG_SND_DMA_SGBUF=y
6809 +CONFIG_SND_SEQUENCER=m
6810 +CONFIG_SND_SEQ_DUMMY=m
6811 +# CONFIG_SND_SEQUENCER_OSS is not set
6812 +CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
6813 +CONFIG_SND_SEQ_MIDI_EVENT=m
6814 +CONFIG_SND_SEQ_MIDI=m
6815 +CONFIG_SND_SEQ_MIDI_EMUL=m
6816 +CONFIG_SND_SEQ_VIRMIDI=m
6817 +CONFIG_SND_MPU401_UART=m
6818 +CONFIG_SND_OPL3_LIB=m
6819 +CONFIG_SND_OPL3_LIB_SEQ=m
6820 +CONFIG_SND_VX_LIB=m
6821 +CONFIG_SND_AC97_CODEC=m
6822 +CONFIG_SND_DRIVERS=y
6823 +CONFIG_SND_PCSP=m
6824 +CONFIG_SND_DUMMY=m
6825 +CONFIG_SND_ALOOP=m
6826 +CONFIG_SND_VIRMIDI=m
6827 +CONFIG_SND_MTPAV=m
6828 +CONFIG_SND_MTS64=m
6829 +CONFIG_SND_SERIAL_U16550=m
6830 +CONFIG_SND_MPU401=m
6831 +CONFIG_SND_PORTMAN2X4=m
6832 +CONFIG_SND_AC97_POWER_SAVE=y
6833 +CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0
6834 +CONFIG_SND_SB_COMMON=m
6835 +CONFIG_SND_PCI=y
6836 +CONFIG_SND_AD1889=m
6837 +CONFIG_SND_ALS300=m
6838 +CONFIG_SND_ALS4000=m
6839 +CONFIG_SND_ALI5451=m
6840 +CONFIG_SND_ASIHPI=m
6841 +CONFIG_SND_ATIIXP=m
6842 +CONFIG_SND_ATIIXP_MODEM=m
6843 +CONFIG_SND_AU8810=m
6844 +CONFIG_SND_AU8820=m
6845 +CONFIG_SND_AU8830=m
6846 +CONFIG_SND_AW2=m
6847 +CONFIG_SND_AZT3328=m
6848 +CONFIG_SND_BT87X=m
6849 +# CONFIG_SND_BT87X_OVERCLOCK is not set
6850 +CONFIG_SND_CA0106=m
6851 +CONFIG_SND_CMIPCI=m
6852 +CONFIG_SND_OXYGEN_LIB=m
6853 +CONFIG_SND_OXYGEN=m
6854 +CONFIG_SND_CS4281=m
6855 +CONFIG_SND_CS46XX=m
6856 +CONFIG_SND_CS46XX_NEW_DSP=y
6857 +CONFIG_SND_CTXFI=m
6858 +CONFIG_SND_DARLA20=m
6859 +CONFIG_SND_GINA20=m
6860 +CONFIG_SND_LAYLA20=m
6861 +CONFIG_SND_DARLA24=m
6862 +CONFIG_SND_GINA24=m
6863 +CONFIG_SND_LAYLA24=m
6864 +CONFIG_SND_MONA=m
6865 +CONFIG_SND_MIA=m
6866 +CONFIG_SND_ECHO3G=m
6867 +CONFIG_SND_INDIGO=m
6868 +CONFIG_SND_INDIGOIO=m
6869 +CONFIG_SND_INDIGODJ=m
6870 +CONFIG_SND_INDIGOIOX=m
6871 +CONFIG_SND_INDIGODJX=m
6872 +CONFIG_SND_EMU10K1=m
6873 +CONFIG_SND_EMU10K1_SEQ=m
6874 +CONFIG_SND_EMU10K1X=m
6875 +CONFIG_SND_ENS1370=m
6876 +CONFIG_SND_ENS1371=m
6877 +CONFIG_SND_ES1938=m
6878 +CONFIG_SND_ES1968=m
6879 +CONFIG_SND_ES1968_INPUT=y
6880 +CONFIG_SND_ES1968_RADIO=y
6881 +CONFIG_SND_FM801=m
6882 +CONFIG_SND_FM801_TEA575X_BOOL=y
6883 +CONFIG_SND_HDSP=m
6884 +CONFIG_SND_HDSPM=m
6885 +CONFIG_SND_ICE1712=m
6886 +CONFIG_SND_ICE1724=m
6887 +CONFIG_SND_INTEL8X0=m
6888 +CONFIG_SND_INTEL8X0M=m
6889 +CONFIG_SND_KORG1212=m
6890 +CONFIG_SND_LOLA=m
6891 +CONFIG_SND_LX6464ES=m
6892 +CONFIG_SND_MAESTRO3=m
6893 +CONFIG_SND_MAESTRO3_INPUT=y
6894 +CONFIG_SND_MIXART=m
6895 +CONFIG_SND_NM256=m
6896 +CONFIG_SND_PCXHR=m
6897 +CONFIG_SND_RIPTIDE=m
6898 +CONFIG_SND_RME32=m
6899 +CONFIG_SND_RME96=m
6900 +CONFIG_SND_RME9652=m
6901 +CONFIG_SND_SONICVIBES=m
6902 +CONFIG_SND_TRIDENT=m
6903 +CONFIG_SND_VIA82XX=m
6904 +CONFIG_SND_VIA82XX_MODEM=m
6905 +CONFIG_SND_VIRTUOSO=m
6906 +CONFIG_SND_VX222=m
6907 +CONFIG_SND_YMFPCI=m
6910 +# HD-Audio
6912 +CONFIG_SND_HDA=m
6913 +CONFIG_SND_HDA_GENERIC_LEDS=y
6914 +CONFIG_SND_HDA_INTEL=m
6915 +CONFIG_SND_HDA_HWDEP=y
6916 +CONFIG_SND_HDA_RECONFIG=y
6917 +CONFIG_SND_HDA_INPUT_BEEP=y
6918 +CONFIG_SND_HDA_INPUT_BEEP_MODE=0
6919 +CONFIG_SND_HDA_PATCH_LOADER=y
6920 +CONFIG_SND_HDA_CODEC_REALTEK=m
6921 +CONFIG_SND_HDA_CODEC_ANALOG=m
6922 +CONFIG_SND_HDA_CODEC_SIGMATEL=m
6923 +CONFIG_SND_HDA_CODEC_VIA=m
6924 +CONFIG_SND_HDA_CODEC_HDMI=m
6925 +CONFIG_SND_HDA_CODEC_CIRRUS=m
6926 +CONFIG_SND_HDA_CODEC_CONEXANT=m
6927 +CONFIG_SND_HDA_CODEC_CA0110=m
6928 +CONFIG_SND_HDA_CODEC_CA0132=m
6929 +CONFIG_SND_HDA_CODEC_CA0132_DSP=y
6930 +CONFIG_SND_HDA_CODEC_CMEDIA=m
6931 +CONFIG_SND_HDA_CODEC_SI3054=m
6932 +CONFIG_SND_HDA_GENERIC=m
6933 +CONFIG_SND_HDA_POWER_SAVE_DEFAULT=1
6934 +# CONFIG_SND_HDA_INTEL_HDMI_SILENT_STREAM is not set
6935 +# end of HD-Audio
6937 +CONFIG_SND_HDA_CORE=m
6938 +CONFIG_SND_HDA_DSP_LOADER=y
6939 +CONFIG_SND_HDA_COMPONENT=y
6940 +CONFIG_SND_HDA_I915=y
6941 +CONFIG_SND_HDA_EXT_CORE=m
6942 +CONFIG_SND_HDA_PREALLOC_SIZE=0
6943 +CONFIG_SND_INTEL_NHLT=y
6944 +CONFIG_SND_INTEL_DSP_CONFIG=m
6945 +CONFIG_SND_INTEL_SOUNDWIRE_ACPI=m
6946 +CONFIG_SND_INTEL_BYT_PREFER_SOF=y
6947 +CONFIG_SND_SPI=y
6948 +CONFIG_SND_USB=y
6949 +CONFIG_SND_USB_AUDIO=m
6950 +CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER=y
6951 +CONFIG_SND_USB_UA101=m
6952 +CONFIG_SND_USB_USX2Y=m
6953 +CONFIG_SND_USB_CAIAQ=m
6954 +CONFIG_SND_USB_CAIAQ_INPUT=y
6955 +CONFIG_SND_USB_US122L=m
6956 +CONFIG_SND_USB_6FIRE=m
6957 +CONFIG_SND_USB_HIFACE=m
6958 +CONFIG_SND_BCD2000=m
6959 +CONFIG_SND_USB_LINE6=m
6960 +CONFIG_SND_USB_POD=m
6961 +CONFIG_SND_USB_PODHD=m
6962 +CONFIG_SND_USB_TONEPORT=m
6963 +CONFIG_SND_USB_VARIAX=m
6964 +CONFIG_SND_FIREWIRE=y
6965 +CONFIG_SND_FIREWIRE_LIB=m
6966 +CONFIG_SND_DICE=m
6967 +CONFIG_SND_OXFW=m
6968 +CONFIG_SND_ISIGHT=m
6969 +CONFIG_SND_FIREWORKS=m
6970 +CONFIG_SND_BEBOB=m
6971 +CONFIG_SND_FIREWIRE_DIGI00X=m
6972 +CONFIG_SND_FIREWIRE_TASCAM=m
6973 +CONFIG_SND_FIREWIRE_MOTU=m
6974 +CONFIG_SND_FIREFACE=m
6975 +CONFIG_SND_PCMCIA=y
6976 +CONFIG_SND_VXPOCKET=m
6977 +CONFIG_SND_PDAUDIOCF=m
6978 +CONFIG_SND_SOC=m
6979 +CONFIG_SND_SOC_AC97_BUS=y
6980 +CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM=y
6981 +CONFIG_SND_SOC_COMPRESS=y
6982 +CONFIG_SND_SOC_TOPOLOGY=y
6983 +CONFIG_SND_SOC_ACPI=m
6984 +CONFIG_SND_SOC_ADI=m
6985 +CONFIG_SND_SOC_ADI_AXI_I2S=m
6986 +CONFIG_SND_SOC_ADI_AXI_SPDIF=m
6987 +CONFIG_SND_SOC_AMD_ACP=m
6988 +CONFIG_SND_SOC_AMD_CZ_DA7219MX98357_MACH=m
6989 +CONFIG_SND_SOC_AMD_CZ_RT5645_MACH=m
6990 +CONFIG_SND_SOC_AMD_ACP3x=m
6991 +CONFIG_SND_SOC_AMD_RV_RT5682_MACH=m
6992 +CONFIG_SND_SOC_AMD_RENOIR=m
6993 +CONFIG_SND_SOC_AMD_RENOIR_MACH=m
6994 +CONFIG_SND_ATMEL_SOC=m
6995 +CONFIG_SND_BCM63XX_I2S_WHISTLER=m
6996 +CONFIG_SND_DESIGNWARE_I2S=m
6997 +CONFIG_SND_DESIGNWARE_PCM=y
7000 +# SoC Audio for Freescale CPUs
7004 +# Common SoC Audio options for Freescale CPUs:
7006 +CONFIG_SND_SOC_FSL_ASRC=m
7007 +CONFIG_SND_SOC_FSL_SAI=m
7008 +CONFIG_SND_SOC_FSL_MQS=m
7009 +CONFIG_SND_SOC_FSL_AUDMIX=m
7010 +CONFIG_SND_SOC_FSL_SSI=m
7011 +CONFIG_SND_SOC_FSL_SPDIF=m
7012 +CONFIG_SND_SOC_FSL_ESAI=m
7013 +CONFIG_SND_SOC_FSL_MICFIL=m
7014 +CONFIG_SND_SOC_FSL_EASRC=m
7015 +CONFIG_SND_SOC_FSL_XCVR=m
7016 +CONFIG_SND_SOC_IMX_AUDMUX=m
7017 +# end of SoC Audio for Freescale CPUs
7019 +CONFIG_SND_I2S_HI6210_I2S=m
7020 +CONFIG_SND_SOC_IMG=y
7021 +CONFIG_SND_SOC_IMG_I2S_IN=m
7022 +CONFIG_SND_SOC_IMG_I2S_OUT=m
7023 +CONFIG_SND_SOC_IMG_PARALLEL_OUT=m
7024 +CONFIG_SND_SOC_IMG_SPDIF_IN=m
7025 +CONFIG_SND_SOC_IMG_SPDIF_OUT=m
7026 +CONFIG_SND_SOC_IMG_PISTACHIO_INTERNAL_DAC=m
7027 +CONFIG_SND_SOC_INTEL_SST_TOPLEVEL=y
7028 +CONFIG_SND_SOC_INTEL_SST=m
7029 +CONFIG_SND_SOC_INTEL_CATPT=m
7030 +CONFIG_SND_SST_ATOM_HIFI2_PLATFORM=m
7031 +CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_PCI=m
7032 +CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_ACPI=m
7033 +# CONFIG_SND_SOC_INTEL_SKYLAKE is not set
7034 +CONFIG_SND_SOC_INTEL_SKL=m
7035 +CONFIG_SND_SOC_INTEL_APL=m
7036 +CONFIG_SND_SOC_INTEL_KBL=m
7037 +CONFIG_SND_SOC_INTEL_GLK=m
7038 +# CONFIG_SND_SOC_INTEL_CNL is not set
7039 +# CONFIG_SND_SOC_INTEL_CFL is not set
7040 +# CONFIG_SND_SOC_INTEL_CML_H is not set
7041 +# CONFIG_SND_SOC_INTEL_CML_LP is not set
7042 +CONFIG_SND_SOC_INTEL_SKYLAKE_FAMILY=m
7043 +CONFIG_SND_SOC_INTEL_SKYLAKE_SSP_CLK=m
7044 +# CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC is not set
7045 +CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON=m
7046 +CONFIG_SND_SOC_ACPI_INTEL_MATCH=m
7047 +CONFIG_SND_SOC_INTEL_MACH=y
7048 +# CONFIG_SND_SOC_INTEL_USER_FRIENDLY_LONG_NAMES is not set
7049 +CONFIG_SND_SOC_INTEL_HASWELL_MACH=m
7050 +CONFIG_SND_SOC_INTEL_BDW_RT5650_MACH=m
7051 +CONFIG_SND_SOC_INTEL_BDW_RT5677_MACH=m
7052 +CONFIG_SND_SOC_INTEL_BROADWELL_MACH=m
7053 +CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH=m
7054 +CONFIG_SND_SOC_INTEL_BYTCR_RT5651_MACH=m
7055 +CONFIG_SND_SOC_INTEL_BYTCR_WM5102_MACH=m
7056 +CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH=m
7057 +CONFIG_SND_SOC_INTEL_CHT_BSW_RT5645_MACH=m
7058 +CONFIG_SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH=m
7059 +CONFIG_SND_SOC_INTEL_CHT_BSW_NAU8824_MACH=m
7060 +CONFIG_SND_SOC_INTEL_BYT_CHT_CX2072X_MACH=m
7061 +CONFIG_SND_SOC_INTEL_BYT_CHT_DA7213_MACH=m
7062 +CONFIG_SND_SOC_INTEL_BYT_CHT_ES8316_MACH=m
7063 +# CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH is not set
7064 +CONFIG_SND_SOC_INTEL_SKL_RT286_MACH=m
7065 +CONFIG_SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH=m
7066 +CONFIG_SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH=m
7067 +CONFIG_SND_SOC_INTEL_DA7219_MAX98357A_GENERIC=m
7068 +CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON=m
7069 +CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH=m
7070 +CONFIG_SND_SOC_INTEL_BXT_RT298_MACH=m
7071 +CONFIG_SND_SOC_INTEL_SOF_WM8804_MACH=m
7072 +CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH=m
7073 +CONFIG_SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH=m
7074 +CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH=m
7075 +CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH=m
7076 +CONFIG_SND_SOC_INTEL_KBL_RT5660_MACH=m
7077 +CONFIG_SND_SOC_INTEL_GLK_DA7219_MAX98357A_MACH=m
7078 +CONFIG_SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH=m
7079 +CONFIG_SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH=m
7080 +CONFIG_SND_SOC_INTEL_SOF_RT5682_MACH=m
7081 +CONFIG_SND_SOC_INTEL_SOF_PCM512x_MACH=m
7082 +CONFIG_SND_SOC_INTEL_CML_LP_DA7219_MAX98357A_MACH=m
7083 +CONFIG_SND_SOC_INTEL_SOF_CML_RT1011_RT5682_MACH=m
7084 +CONFIG_SND_SOC_INTEL_SOF_DA7219_MAX98373_MACH=m
7085 +CONFIG_SND_SOC_INTEL_EHL_RT5660_MACH=m
7086 +CONFIG_SND_SOC_MTK_BTCVSD=m
7087 +CONFIG_SND_SOC_SOF_TOPLEVEL=y
7088 +CONFIG_SND_SOC_SOF_PCI_DEV=m
7089 +CONFIG_SND_SOC_SOF_PCI=m
7090 +CONFIG_SND_SOC_SOF_ACPI=m
7091 +CONFIG_SND_SOC_SOF_ACPI_DEV=m
7092 +# CONFIG_SND_SOC_SOF_DEBUG_PROBES is not set
7093 +# CONFIG_SND_SOC_SOF_DEVELOPER_SUPPORT is not set
7094 +CONFIG_SND_SOC_SOF=m
7095 +CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE=y
7096 +CONFIG_SND_SOC_SOF_INTEL_TOPLEVEL=y
7097 +CONFIG_SND_SOC_SOF_INTEL_HIFI_EP_IPC=m
7098 +CONFIG_SND_SOC_SOF_INTEL_ATOM_HIFI_EP=m
7099 +CONFIG_SND_SOC_SOF_INTEL_COMMON=m
7100 +CONFIG_SND_SOC_SOF_BAYTRAIL=m
7101 +CONFIG_SND_SOC_SOF_BROADWELL=m
7102 +CONFIG_SND_SOC_SOF_MERRIFIELD=m
7103 +CONFIG_SND_SOC_SOF_INTEL_APL=m
7104 +CONFIG_SND_SOC_SOF_APOLLOLAKE=m
7105 +CONFIG_SND_SOC_SOF_GEMINILAKE=m
7106 +CONFIG_SND_SOC_SOF_INTEL_CNL=m
7107 +CONFIG_SND_SOC_SOF_CANNONLAKE=m
7108 +CONFIG_SND_SOC_SOF_COFFEELAKE=m
7109 +CONFIG_SND_SOC_SOF_COMETLAKE=m
7110 +CONFIG_SND_SOC_SOF_INTEL_ICL=m
7111 +CONFIG_SND_SOC_SOF_ICELAKE=m
7112 +CONFIG_SND_SOC_SOF_JASPERLAKE=m
7113 +CONFIG_SND_SOC_SOF_INTEL_TGL=m
7114 +CONFIG_SND_SOC_SOF_TIGERLAKE=m
7115 +CONFIG_SND_SOC_SOF_ELKHARTLAKE=m
7116 +CONFIG_SND_SOC_SOF_ALDERLAKE=m
7117 +CONFIG_SND_SOC_SOF_HDA_COMMON=m
7118 +CONFIG_SND_SOC_SOF_HDA_LINK=y
7119 +CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC=y
7120 +# CONFIG_SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1 is not set
7121 +CONFIG_SND_SOC_SOF_HDA_LINK_BASELINE=m
7122 +CONFIG_SND_SOC_SOF_HDA=m
7123 +CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE=m
7124 +CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE=m
7125 +CONFIG_SND_SOC_SOF_XTENSA=m
7128 +# STMicroelectronics STM32 SOC audio support
7130 +# end of STMicroelectronics STM32 SOC audio support
7132 +CONFIG_SND_SOC_XILINX_I2S=m
7133 +CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER=m
7134 +CONFIG_SND_SOC_XILINX_SPDIF=m
7135 +CONFIG_SND_SOC_XTFPGA_I2S=m
7136 +CONFIG_SND_SOC_I2C_AND_SPI=m
7139 +# CODEC drivers
7141 +CONFIG_SND_SOC_ARIZONA=m
7142 +CONFIG_SND_SOC_WM_ADSP=m
7143 +CONFIG_SND_SOC_AC97_CODEC=m
7144 +CONFIG_SND_SOC_ADAU_UTILS=m
7145 +CONFIG_SND_SOC_ADAU1372=m
7146 +CONFIG_SND_SOC_ADAU1372_I2C=m
7147 +CONFIG_SND_SOC_ADAU1372_SPI=m
7148 +CONFIG_SND_SOC_ADAU1701=m
7149 +CONFIG_SND_SOC_ADAU17X1=m
7150 +CONFIG_SND_SOC_ADAU1761=m
7151 +CONFIG_SND_SOC_ADAU1761_I2C=m
7152 +CONFIG_SND_SOC_ADAU1761_SPI=m
7153 +CONFIG_SND_SOC_ADAU7002=m
7154 +CONFIG_SND_SOC_ADAU7118=m
7155 +CONFIG_SND_SOC_ADAU7118_HW=m
7156 +CONFIG_SND_SOC_ADAU7118_I2C=m
7157 +CONFIG_SND_SOC_AK4104=m
7158 +CONFIG_SND_SOC_AK4118=m
7159 +CONFIG_SND_SOC_AK4458=m
7160 +CONFIG_SND_SOC_AK4554=m
7161 +CONFIG_SND_SOC_AK4613=m
7162 +CONFIG_SND_SOC_AK4642=m
7163 +CONFIG_SND_SOC_AK5386=m
7164 +CONFIG_SND_SOC_AK5558=m
7165 +CONFIG_SND_SOC_ALC5623=m
7166 +CONFIG_SND_SOC_BD28623=m
7167 +CONFIG_SND_SOC_BT_SCO=m
7168 +CONFIG_SND_SOC_CROS_EC_CODEC=m
7169 +CONFIG_SND_SOC_CS35L32=m
7170 +CONFIG_SND_SOC_CS35L33=m
7171 +CONFIG_SND_SOC_CS35L34=m
7172 +CONFIG_SND_SOC_CS35L35=m
7173 +CONFIG_SND_SOC_CS35L36=m
7174 +CONFIG_SND_SOC_CS42L42=m
7175 +CONFIG_SND_SOC_CS42L51=m
7176 +CONFIG_SND_SOC_CS42L51_I2C=m
7177 +CONFIG_SND_SOC_CS42L52=m
7178 +CONFIG_SND_SOC_CS42L56=m
7179 +CONFIG_SND_SOC_CS42L73=m
7180 +CONFIG_SND_SOC_CS4234=m
7181 +CONFIG_SND_SOC_CS4265=m
7182 +CONFIG_SND_SOC_CS4270=m
7183 +CONFIG_SND_SOC_CS4271=m
7184 +CONFIG_SND_SOC_CS4271_I2C=m
7185 +CONFIG_SND_SOC_CS4271_SPI=m
7186 +CONFIG_SND_SOC_CS42XX8=m
7187 +CONFIG_SND_SOC_CS42XX8_I2C=m
7188 +CONFIG_SND_SOC_CS43130=m
7189 +CONFIG_SND_SOC_CS4341=m
7190 +CONFIG_SND_SOC_CS4349=m
7191 +CONFIG_SND_SOC_CS53L30=m
7192 +CONFIG_SND_SOC_CX2072X=m
7193 +CONFIG_SND_SOC_DA7213=m
7194 +CONFIG_SND_SOC_DA7219=m
7195 +CONFIG_SND_SOC_DMIC=m
7196 +CONFIG_SND_SOC_HDMI_CODEC=m
7197 +CONFIG_SND_SOC_ES7134=m
7198 +CONFIG_SND_SOC_ES7241=m
7199 +CONFIG_SND_SOC_ES8316=m
7200 +CONFIG_SND_SOC_ES8328=m
7201 +CONFIG_SND_SOC_ES8328_I2C=m
7202 +CONFIG_SND_SOC_ES8328_SPI=m
7203 +CONFIG_SND_SOC_GTM601=m
7204 +CONFIG_SND_SOC_HDAC_HDMI=m
7205 +CONFIG_SND_SOC_HDAC_HDA=m
7206 +CONFIG_SND_SOC_INNO_RK3036=m
7207 +CONFIG_SND_SOC_MAX98088=m
7208 +CONFIG_SND_SOC_MAX98090=m
7209 +CONFIG_SND_SOC_MAX98357A=m
7210 +CONFIG_SND_SOC_MAX98504=m
7211 +CONFIG_SND_SOC_MAX9867=m
7212 +CONFIG_SND_SOC_MAX98927=m
7213 +CONFIG_SND_SOC_MAX98373=m
7214 +CONFIG_SND_SOC_MAX98373_I2C=m
7215 +CONFIG_SND_SOC_MAX98373_SDW=m
7216 +CONFIG_SND_SOC_MAX98390=m
7217 +CONFIG_SND_SOC_MAX9860=m
7218 +CONFIG_SND_SOC_MSM8916_WCD_ANALOG=m
7219 +CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=m
7220 +CONFIG_SND_SOC_PCM1681=m
7221 +CONFIG_SND_SOC_PCM1789=m
7222 +CONFIG_SND_SOC_PCM1789_I2C=m
7223 +CONFIG_SND_SOC_PCM179X=m
7224 +CONFIG_SND_SOC_PCM179X_I2C=m
7225 +CONFIG_SND_SOC_PCM179X_SPI=m
7226 +CONFIG_SND_SOC_PCM186X=m
7227 +CONFIG_SND_SOC_PCM186X_I2C=m
7228 +CONFIG_SND_SOC_PCM186X_SPI=m
7229 +CONFIG_SND_SOC_PCM3060=m
7230 +CONFIG_SND_SOC_PCM3060_I2C=m
7231 +CONFIG_SND_SOC_PCM3060_SPI=m
7232 +CONFIG_SND_SOC_PCM3168A=m
7233 +CONFIG_SND_SOC_PCM3168A_I2C=m
7234 +CONFIG_SND_SOC_PCM3168A_SPI=m
7235 +CONFIG_SND_SOC_PCM5102A=m
7236 +CONFIG_SND_SOC_PCM512x=m
7237 +CONFIG_SND_SOC_PCM512x_I2C=m
7238 +CONFIG_SND_SOC_PCM512x_SPI=m
7239 +CONFIG_SND_SOC_RK3328=m
7240 +CONFIG_SND_SOC_RL6231=m
7241 +CONFIG_SND_SOC_RL6347A=m
7242 +CONFIG_SND_SOC_RT286=m
7243 +CONFIG_SND_SOC_RT298=m
7244 +CONFIG_SND_SOC_RT1011=m
7245 +CONFIG_SND_SOC_RT1015=m
7246 +CONFIG_SND_SOC_RT1308_SDW=m
7247 +CONFIG_SND_SOC_RT5514=m
7248 +CONFIG_SND_SOC_RT5514_SPI=m
7249 +CONFIG_SND_SOC_RT5616=m
7250 +CONFIG_SND_SOC_RT5631=m
7251 +CONFIG_SND_SOC_RT5640=m
7252 +CONFIG_SND_SOC_RT5645=m
7253 +CONFIG_SND_SOC_RT5651=m
7254 +CONFIG_SND_SOC_RT5659=m
7255 +CONFIG_SND_SOC_RT5660=m
7256 +CONFIG_SND_SOC_RT5663=m
7257 +CONFIG_SND_SOC_RT5670=m
7258 +CONFIG_SND_SOC_RT5677=m
7259 +CONFIG_SND_SOC_RT5677_SPI=m
7260 +CONFIG_SND_SOC_RT5682=m
7261 +CONFIG_SND_SOC_RT5682_I2C=m
7262 +CONFIG_SND_SOC_RT5682_SDW=m
7263 +CONFIG_SND_SOC_RT700=m
7264 +CONFIG_SND_SOC_RT700_SDW=m
7265 +CONFIG_SND_SOC_RT711=m
7266 +CONFIG_SND_SOC_RT711_SDW=m
7267 +CONFIG_SND_SOC_RT715=m
7268 +CONFIG_SND_SOC_RT715_SDW=m
7269 +CONFIG_SND_SOC_SGTL5000=m
7270 +CONFIG_SND_SOC_SI476X=m
7271 +CONFIG_SND_SOC_SIGMADSP=m
7272 +CONFIG_SND_SOC_SIGMADSP_I2C=m
7273 +CONFIG_SND_SOC_SIGMADSP_REGMAP=m
7274 +CONFIG_SND_SOC_SIMPLE_AMPLIFIER=m
7275 +CONFIG_SND_SOC_SIMPLE_MUX=m
7276 +CONFIG_SND_SOC_SPDIF=m
7277 +CONFIG_SND_SOC_SSM2305=m
7278 +CONFIG_SND_SOC_SSM2602=m
7279 +CONFIG_SND_SOC_SSM2602_SPI=m
7280 +CONFIG_SND_SOC_SSM2602_I2C=m
7281 +CONFIG_SND_SOC_SSM4567=m
7282 +CONFIG_SND_SOC_STA32X=m
7283 +CONFIG_SND_SOC_STA350=m
7284 +CONFIG_SND_SOC_STI_SAS=m
7285 +CONFIG_SND_SOC_TAS2552=m
7286 +CONFIG_SND_SOC_TAS2562=m
7287 +CONFIG_SND_SOC_TAS2764=m
7288 +CONFIG_SND_SOC_TAS2770=m
7289 +CONFIG_SND_SOC_TAS5086=m
7290 +CONFIG_SND_SOC_TAS571X=m
7291 +CONFIG_SND_SOC_TAS5720=m
7292 +CONFIG_SND_SOC_TAS6424=m
7293 +CONFIG_SND_SOC_TDA7419=m
7294 +CONFIG_SND_SOC_TFA9879=m
7295 +CONFIG_SND_SOC_TLV320AIC23=m
7296 +CONFIG_SND_SOC_TLV320AIC23_I2C=m
7297 +CONFIG_SND_SOC_TLV320AIC23_SPI=m
7298 +CONFIG_SND_SOC_TLV320AIC31XX=m
7299 +CONFIG_SND_SOC_TLV320AIC32X4=m
7300 +CONFIG_SND_SOC_TLV320AIC32X4_I2C=m
7301 +CONFIG_SND_SOC_TLV320AIC32X4_SPI=m
7302 +CONFIG_SND_SOC_TLV320AIC3X=m
7303 +CONFIG_SND_SOC_TLV320ADCX140=m
7304 +CONFIG_SND_SOC_TS3A227E=m
7305 +CONFIG_SND_SOC_TSCS42XX=m
7306 +CONFIG_SND_SOC_TSCS454=m
7307 +CONFIG_SND_SOC_UDA1334=m
7308 +CONFIG_SND_SOC_WCD9335=m
7309 +CONFIG_SND_SOC_WCD934X=m
7310 +CONFIG_SND_SOC_WM5102=m
7311 +CONFIG_SND_SOC_WM8510=m
7312 +CONFIG_SND_SOC_WM8523=m
7313 +CONFIG_SND_SOC_WM8524=m
7314 +CONFIG_SND_SOC_WM8580=m
7315 +CONFIG_SND_SOC_WM8711=m
7316 +CONFIG_SND_SOC_WM8728=m
7317 +CONFIG_SND_SOC_WM8731=m
7318 +CONFIG_SND_SOC_WM8737=m
7319 +CONFIG_SND_SOC_WM8741=m
7320 +CONFIG_SND_SOC_WM8750=m
7321 +CONFIG_SND_SOC_WM8753=m
7322 +CONFIG_SND_SOC_WM8770=m
7323 +CONFIG_SND_SOC_WM8776=m
7324 +CONFIG_SND_SOC_WM8782=m
7325 +CONFIG_SND_SOC_WM8804=m
7326 +CONFIG_SND_SOC_WM8804_I2C=m
7327 +CONFIG_SND_SOC_WM8804_SPI=m
7328 +CONFIG_SND_SOC_WM8903=m
7329 +CONFIG_SND_SOC_WM8904=m
7330 +CONFIG_SND_SOC_WM8960=m
7331 +CONFIG_SND_SOC_WM8962=m
7332 +CONFIG_SND_SOC_WM8974=m
7333 +CONFIG_SND_SOC_WM8978=m
7334 +CONFIG_SND_SOC_WM8985=m
7335 +CONFIG_SND_SOC_WSA881X=m
7336 +CONFIG_SND_SOC_ZL38060=m
7337 +CONFIG_SND_SOC_ZX_AUD96P22=m
7338 +CONFIG_SND_SOC_MAX9759=m
7339 +CONFIG_SND_SOC_MT6351=m
7340 +CONFIG_SND_SOC_MT6358=m
7341 +CONFIG_SND_SOC_MT6660=m
7342 +CONFIG_SND_SOC_NAU8315=m
7343 +CONFIG_SND_SOC_NAU8540=m
7344 +CONFIG_SND_SOC_NAU8810=m
7345 +CONFIG_SND_SOC_NAU8822=m
7346 +CONFIG_SND_SOC_NAU8824=m
7347 +CONFIG_SND_SOC_NAU8825=m
7348 +CONFIG_SND_SOC_TPA6130A2=m
7349 +CONFIG_SND_SOC_LPASS_WSA_MACRO=m
7350 +CONFIG_SND_SOC_LPASS_VA_MACRO=m
7351 +CONFIG_SND_SOC_LPASS_RX_MACRO=m
7352 +CONFIG_SND_SOC_LPASS_TX_MACRO=m
7353 +# end of CODEC drivers
7355 +CONFIG_SND_SIMPLE_CARD_UTILS=m
7356 +CONFIG_SND_SIMPLE_CARD=m
7357 +CONFIG_SND_X86=y
7358 +CONFIG_HDMI_LPE_AUDIO=m
7359 +CONFIG_SND_SYNTH_EMUX=m
7360 +CONFIG_SND_XEN_FRONTEND=m
7361 +CONFIG_AC97_BUS=m
7364 +# HID support
7366 +CONFIG_HID=m
7367 +CONFIG_HID_BATTERY_STRENGTH=y
7368 +CONFIG_HIDRAW=y
7369 +CONFIG_UHID=m
7370 +CONFIG_HID_GENERIC=m
7373 +# Special HID drivers
7375 +CONFIG_HID_A4TECH=m
7376 +CONFIG_HID_ACCUTOUCH=m
7377 +CONFIG_HID_ACRUX=m
7378 +CONFIG_HID_ACRUX_FF=y
7379 +CONFIG_HID_APPLE=m
7380 +CONFIG_HID_APPLEIR=m
7381 +CONFIG_HID_ASUS=m
7382 +CONFIG_HID_AUREAL=m
7383 +CONFIG_HID_BELKIN=m
7384 +CONFIG_HID_BETOP_FF=m
7385 +CONFIG_HID_BIGBEN_FF=m
7386 +CONFIG_HID_CHERRY=m
7387 +CONFIG_HID_CHICONY=m
7388 +CONFIG_HID_CORSAIR=m
7389 +CONFIG_HID_COUGAR=m
7390 +CONFIG_HID_MACALLY=m
7391 +CONFIG_HID_PRODIKEYS=m
7392 +CONFIG_HID_CMEDIA=m
7393 +CONFIG_HID_CP2112=m
7394 +CONFIG_HID_CREATIVE_SB0540=m
7395 +CONFIG_HID_CYPRESS=m
7396 +CONFIG_HID_DRAGONRISE=m
7397 +CONFIG_DRAGONRISE_FF=y
7398 +CONFIG_HID_EMS_FF=m
7399 +CONFIG_HID_ELAN=m
7400 +CONFIG_HID_ELECOM=m
7401 +CONFIG_HID_ELO=m
7402 +CONFIG_HID_EZKEY=m
7403 +CONFIG_HID_GEMBIRD=m
7404 +CONFIG_HID_GFRM=m
7405 +CONFIG_HID_GLORIOUS=m
7406 +CONFIG_HID_HOLTEK=m
7407 +CONFIG_HOLTEK_FF=y
7408 +CONFIG_HID_GOOGLE_HAMMER=m
7409 +CONFIG_HID_VIVALDI=m
7410 +CONFIG_HID_GT683R=m
7411 +CONFIG_HID_KEYTOUCH=m
7412 +CONFIG_HID_KYE=m
7413 +CONFIG_HID_UCLOGIC=m
7414 +CONFIG_HID_WALTOP=m
7415 +CONFIG_HID_VIEWSONIC=m
7416 +CONFIG_HID_GYRATION=m
7417 +CONFIG_HID_ICADE=m
7418 +CONFIG_HID_ITE=m
7419 +CONFIG_HID_JABRA=m
7420 +CONFIG_HID_TWINHAN=m
7421 +CONFIG_HID_KENSINGTON=m
7422 +CONFIG_HID_LCPOWER=m
7423 +CONFIG_HID_LED=m
7424 +CONFIG_HID_LENOVO=m
7425 +CONFIG_HID_LOGITECH=m
7426 +CONFIG_HID_LOGITECH_DJ=m
7427 +CONFIG_HID_LOGITECH_HIDPP=m
7428 +CONFIG_LOGITECH_FF=y
7429 +CONFIG_LOGIRUMBLEPAD2_FF=y
7430 +CONFIG_LOGIG940_FF=y
7431 +CONFIG_LOGIWHEELS_FF=y
7432 +CONFIG_HID_MAGICMOUSE=m
7433 +CONFIG_HID_MALTRON=m
7434 +CONFIG_HID_MAYFLASH=m
7435 +CONFIG_HID_REDRAGON=m
7436 +CONFIG_HID_MICROSOFT=m
7437 +CONFIG_HID_MONTEREY=m
7438 +CONFIG_HID_MULTITOUCH=m
7439 +CONFIG_HID_NTI=m
7440 +CONFIG_HID_NTRIG=m
7441 +CONFIG_HID_ORTEK=m
7442 +CONFIG_HID_PANTHERLORD=m
7443 +CONFIG_PANTHERLORD_FF=y
7444 +CONFIG_HID_PENMOUNT=m
7445 +CONFIG_HID_PETALYNX=m
7446 +CONFIG_HID_PICOLCD=m
7447 +CONFIG_HID_PICOLCD_FB=y
7448 +CONFIG_HID_PICOLCD_BACKLIGHT=y
7449 +CONFIG_HID_PICOLCD_LCD=y
7450 +CONFIG_HID_PICOLCD_LEDS=y
7451 +CONFIG_HID_PICOLCD_CIR=y
7452 +CONFIG_HID_PLANTRONICS=m
7453 +CONFIG_HID_PLAYSTATION=m
7454 +CONFIG_PLAYSTATION_FF=y
7455 +CONFIG_HID_PRIMAX=m
7456 +CONFIG_HID_RETRODE=m
7457 +CONFIG_HID_ROCCAT=m
7458 +CONFIG_HID_SAITEK=m
7459 +CONFIG_HID_SAMSUNG=m
7460 +CONFIG_HID_SONY=m
7461 +CONFIG_SONY_FF=y
7462 +CONFIG_HID_SPEEDLINK=m
7463 +CONFIG_HID_STEAM=m
7464 +CONFIG_HID_STEELSERIES=m
7465 +CONFIG_HID_SUNPLUS=m
7466 +CONFIG_HID_RMI=m
7467 +CONFIG_HID_GREENASIA=m
7468 +CONFIG_GREENASIA_FF=y
7469 +CONFIG_HID_HYPERV_MOUSE=m
7470 +CONFIG_HID_SMARTJOYPLUS=m
7471 +CONFIG_SMARTJOYPLUS_FF=y
7472 +CONFIG_HID_TIVO=m
7473 +CONFIG_HID_TOPSEED=m
7474 +CONFIG_HID_THINGM=m
7475 +CONFIG_HID_THRUSTMASTER=m
7476 +CONFIG_THRUSTMASTER_FF=y
7477 +CONFIG_HID_UDRAW_PS3=m
7478 +CONFIG_HID_U2FZERO=m
7479 +CONFIG_HID_WACOM=m
7480 +CONFIG_HID_WIIMOTE=m
7481 +CONFIG_HID_XINMO=m
7482 +CONFIG_HID_ZEROPLUS=m
7483 +CONFIG_ZEROPLUS_FF=y
7484 +CONFIG_HID_ZYDACRON=m
7485 +CONFIG_HID_SENSOR_HUB=m
7486 +CONFIG_HID_SENSOR_CUSTOM_SENSOR=m
7487 +CONFIG_HID_ALPS=m
7488 +CONFIG_HID_MCP2221=m
7489 +# end of Special HID drivers
7492 +# USB HID support
7494 +CONFIG_USB_HID=m
7495 +CONFIG_HID_PID=y
7496 +CONFIG_USB_HIDDEV=y
7499 +# USB HID Boot Protocol drivers
7501 +CONFIG_USB_KBD=m
7502 +CONFIG_USB_MOUSE=m
7503 +# end of USB HID Boot Protocol drivers
7504 +# end of USB HID support
7507 +# I2C HID support
7509 +CONFIG_I2C_HID_ACPI=m
7510 +# end of I2C HID support
7512 +CONFIG_I2C_HID_CORE=m
7515 +# Intel ISH HID support
7517 +CONFIG_INTEL_ISH_HID=m
7518 +CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER=m
7519 +# end of Intel ISH HID support
7522 +# AMD SFH HID Support
7524 +CONFIG_AMD_SFH_HID=m
7525 +# end of AMD SFH HID Support
7526 +# end of HID support
7528 +CONFIG_USB_OHCI_LITTLE_ENDIAN=y
7529 +CONFIG_USB_SUPPORT=y
7530 +CONFIG_USB_COMMON=y
7531 +CONFIG_USB_LED_TRIG=y
7532 +CONFIG_USB_ULPI_BUS=m
7533 +CONFIG_USB_CONN_GPIO=m
7534 +CONFIG_USB_ARCH_HAS_HCD=y
7535 +CONFIG_USB=y
7536 +CONFIG_USB_PCI=y
7537 +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
7540 +# Miscellaneous USB options
7542 +CONFIG_USB_DEFAULT_PERSIST=y
7543 +# CONFIG_USB_FEW_INIT_RETRIES is not set
7544 +CONFIG_USB_DYNAMIC_MINORS=y
7545 +# CONFIG_USB_OTG is not set
7546 +# CONFIG_USB_OTG_PRODUCTLIST is not set
7547 +# CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB is not set
7548 +CONFIG_USB_LEDS_TRIGGER_USBPORT=m
7549 +CONFIG_USB_AUTOSUSPEND_DELAY=2
7550 +CONFIG_USB_MON=m
7553 +# USB Host Controller Drivers
7555 +CONFIG_USB_C67X00_HCD=m
7556 +CONFIG_USB_XHCI_HCD=y
7557 +CONFIG_USB_XHCI_DBGCAP=y
7558 +CONFIG_USB_XHCI_PCI=m
7559 +CONFIG_USB_XHCI_PCI_RENESAS=m
7560 +CONFIG_USB_XHCI_PLATFORM=m
7561 +CONFIG_USB_EHCI_HCD=y
7562 +CONFIG_USB_EHCI_ROOT_HUB_TT=y
7563 +CONFIG_USB_EHCI_TT_NEWSCHED=y
7564 +CONFIG_USB_EHCI_PCI=y
7565 +CONFIG_USB_EHCI_FSL=m
7566 +CONFIG_USB_EHCI_HCD_PLATFORM=y
7567 +CONFIG_USB_OXU210HP_HCD=m
7568 +CONFIG_USB_ISP116X_HCD=m
7569 +CONFIG_USB_FOTG210_HCD=m
7570 +CONFIG_USB_MAX3421_HCD=m
7571 +CONFIG_USB_OHCI_HCD=y
7572 +CONFIG_USB_OHCI_HCD_PCI=y
7573 +CONFIG_USB_OHCI_HCD_PLATFORM=y
7574 +CONFIG_USB_UHCI_HCD=y
7575 +CONFIG_USB_U132_HCD=m
7576 +CONFIG_USB_SL811_HCD=m
7577 +CONFIG_USB_SL811_HCD_ISO=y
7578 +CONFIG_USB_SL811_CS=m
7579 +CONFIG_USB_R8A66597_HCD=m
7580 +CONFIG_USB_HCD_BCMA=m
7581 +CONFIG_USB_HCD_SSB=m
7582 +# CONFIG_USB_HCD_TEST_MODE is not set
7585 +# USB Device Class drivers
7587 +CONFIG_USB_ACM=m
7588 +CONFIG_USB_PRINTER=m
7589 +CONFIG_USB_WDM=m
7590 +CONFIG_USB_TMC=m
7593 +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
7597 +# also be needed; see USB_STORAGE Help for more info
7599 +CONFIG_USB_STORAGE=m
7600 +# CONFIG_USB_STORAGE_DEBUG is not set
7601 +CONFIG_USB_STORAGE_REALTEK=m
7602 +CONFIG_REALTEK_AUTOPM=y
7603 +CONFIG_USB_STORAGE_DATAFAB=m
7604 +CONFIG_USB_STORAGE_FREECOM=m
7605 +CONFIG_USB_STORAGE_ISD200=m
7606 +CONFIG_USB_STORAGE_USBAT=m
7607 +CONFIG_USB_STORAGE_SDDR09=m
7608 +CONFIG_USB_STORAGE_SDDR55=m
7609 +CONFIG_USB_STORAGE_JUMPSHOT=m
7610 +CONFIG_USB_STORAGE_ALAUDA=m
7611 +CONFIG_USB_STORAGE_ONETOUCH=m
7612 +CONFIG_USB_STORAGE_KARMA=m
7613 +CONFIG_USB_STORAGE_CYPRESS_ATACB=m
7614 +CONFIG_USB_STORAGE_ENE_UB6250=m
7615 +CONFIG_USB_UAS=m
7618 +# USB Imaging devices
7620 +CONFIG_USB_MDC800=m
7621 +CONFIG_USB_MICROTEK=m
7622 +CONFIG_USBIP_CORE=m
7623 +CONFIG_USBIP_VHCI_HCD=m
7624 +CONFIG_USBIP_VHCI_HC_PORTS=8
7625 +CONFIG_USBIP_VHCI_NR_HCS=1
7626 +CONFIG_USBIP_HOST=m
7627 +CONFIG_USBIP_VUDC=m
7628 +# CONFIG_USBIP_DEBUG is not set
7629 +CONFIG_USB_CDNS_SUPPORT=m
7630 +CONFIG_USB_CDNS_HOST=y
7631 +CONFIG_USB_CDNS3=m
7632 +CONFIG_USB_CDNS3_GADGET=y
7633 +CONFIG_USB_CDNS3_HOST=y
7634 +CONFIG_USB_CDNS3_PCI_WRAP=m
7635 +CONFIG_USB_CDNSP_PCI=m
7636 +CONFIG_USB_CDNSP_GADGET=y
7637 +CONFIG_USB_CDNSP_HOST=y
7638 +CONFIG_USB_MUSB_HDRC=m
7639 +# CONFIG_USB_MUSB_HOST is not set
7640 +# CONFIG_USB_MUSB_GADGET is not set
7641 +CONFIG_USB_MUSB_DUAL_ROLE=y
7644 +# Platform Glue Layer
7648 +# MUSB DMA mode
7650 +CONFIG_MUSB_PIO_ONLY=y
7651 +CONFIG_USB_DWC3=m
7652 +CONFIG_USB_DWC3_ULPI=y
7653 +# CONFIG_USB_DWC3_HOST is not set
7654 +# CONFIG_USB_DWC3_GADGET is not set
7655 +CONFIG_USB_DWC3_DUAL_ROLE=y
7658 +# Platform Glue Driver Support
7660 +CONFIG_USB_DWC3_PCI=m
7661 +CONFIG_USB_DWC3_HAPS=m
7662 +CONFIG_USB_DWC2=y
7663 +CONFIG_USB_DWC2_HOST=y
7666 +# Gadget/Dual-role mode requires USB Gadget support to be enabled
7668 +CONFIG_USB_DWC2_PCI=m
7669 +# CONFIG_USB_DWC2_DEBUG is not set
7670 +# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set
7671 +CONFIG_USB_CHIPIDEA=m
7672 +CONFIG_USB_CHIPIDEA_UDC=y
7673 +CONFIG_USB_CHIPIDEA_HOST=y
7674 +CONFIG_USB_CHIPIDEA_PCI=m
7675 +CONFIG_USB_CHIPIDEA_MSM=m
7676 +CONFIG_USB_CHIPIDEA_GENERIC=m
7677 +CONFIG_USB_ISP1760=m
7678 +CONFIG_USB_ISP1760_HCD=y
7679 +CONFIG_USB_ISP1761_UDC=y
7680 +# CONFIG_USB_ISP1760_HOST_ROLE is not set
7681 +# CONFIG_USB_ISP1760_GADGET_ROLE is not set
7682 +CONFIG_USB_ISP1760_DUAL_ROLE=y
7685 +# USB port drivers
7687 +CONFIG_USB_USS720=m
7688 +CONFIG_USB_SERIAL=m
7689 +CONFIG_USB_SERIAL_GENERIC=y
7690 +CONFIG_USB_SERIAL_SIMPLE=m
7691 +CONFIG_USB_SERIAL_AIRCABLE=m
7692 +CONFIG_USB_SERIAL_ARK3116=m
7693 +CONFIG_USB_SERIAL_BELKIN=m
7694 +CONFIG_USB_SERIAL_CH341=m
7695 +CONFIG_USB_SERIAL_WHITEHEAT=m
7696 +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
7697 +CONFIG_USB_SERIAL_CP210X=m
7698 +CONFIG_USB_SERIAL_CYPRESS_M8=m
7699 +CONFIG_USB_SERIAL_EMPEG=m
7700 +CONFIG_USB_SERIAL_FTDI_SIO=m
7701 +CONFIG_USB_SERIAL_VISOR=m
7702 +CONFIG_USB_SERIAL_IPAQ=m
7703 +CONFIG_USB_SERIAL_IR=m
7704 +CONFIG_USB_SERIAL_EDGEPORT=m
7705 +CONFIG_USB_SERIAL_EDGEPORT_TI=m
7706 +CONFIG_USB_SERIAL_F81232=m
7707 +CONFIG_USB_SERIAL_F8153X=m
7708 +CONFIG_USB_SERIAL_GARMIN=m
7709 +CONFIG_USB_SERIAL_IPW=m
7710 +CONFIG_USB_SERIAL_IUU=m
7711 +CONFIG_USB_SERIAL_KEYSPAN_PDA=m
7712 +CONFIG_USB_SERIAL_KEYSPAN=m
7713 +CONFIG_USB_SERIAL_KLSI=m
7714 +CONFIG_USB_SERIAL_KOBIL_SCT=m
7715 +CONFIG_USB_SERIAL_MCT_U232=m
7716 +CONFIG_USB_SERIAL_METRO=m
7717 +CONFIG_USB_SERIAL_MOS7720=m
7718 +CONFIG_USB_SERIAL_MOS7715_PARPORT=y
7719 +CONFIG_USB_SERIAL_MOS7840=m
7720 +CONFIG_USB_SERIAL_MXUPORT=m
7721 +CONFIG_USB_SERIAL_NAVMAN=m
7722 +CONFIG_USB_SERIAL_PL2303=m
7723 +CONFIG_USB_SERIAL_OTI6858=m
7724 +CONFIG_USB_SERIAL_QCAUX=m
7725 +CONFIG_USB_SERIAL_QUALCOMM=m
7726 +CONFIG_USB_SERIAL_SPCP8X5=m
7727 +CONFIG_USB_SERIAL_SAFE=m
7728 +# CONFIG_USB_SERIAL_SAFE_PADDED is not set
7729 +CONFIG_USB_SERIAL_SIERRAWIRELESS=m
7730 +CONFIG_USB_SERIAL_SYMBOL=m
7731 +CONFIG_USB_SERIAL_TI=m
7732 +CONFIG_USB_SERIAL_CYBERJACK=m
7733 +CONFIG_USB_SERIAL_WWAN=m
7734 +CONFIG_USB_SERIAL_OPTION=m
7735 +CONFIG_USB_SERIAL_OMNINET=m
7736 +CONFIG_USB_SERIAL_OPTICON=m
7737 +CONFIG_USB_SERIAL_XSENS_MT=m
7738 +CONFIG_USB_SERIAL_WISHBONE=m
7739 +CONFIG_USB_SERIAL_SSU100=m
7740 +CONFIG_USB_SERIAL_QT2=m
7741 +CONFIG_USB_SERIAL_UPD78F0730=m
7742 +CONFIG_USB_SERIAL_XR=m
7743 +CONFIG_USB_SERIAL_DEBUG=m
7746 +# USB Miscellaneous drivers
7748 +CONFIG_USB_EMI62=m
7749 +CONFIG_USB_EMI26=m
7750 +CONFIG_USB_ADUTUX=m
7751 +CONFIG_USB_SEVSEG=m
7752 +CONFIG_USB_LEGOTOWER=m
7753 +CONFIG_USB_LCD=m
7754 +CONFIG_USB_CYPRESS_CY7C63=m
7755 +CONFIG_USB_CYTHERM=m
7756 +CONFIG_USB_IDMOUSE=m
7757 +CONFIG_USB_FTDI_ELAN=m
7758 +CONFIG_USB_APPLEDISPLAY=m
7759 +CONFIG_APPLE_MFI_FASTCHARGE=m
7760 +CONFIG_USB_SISUSBVGA=m
7761 +CONFIG_USB_LD=m
7762 +CONFIG_USB_TRANCEVIBRATOR=m
7763 +CONFIG_USB_IOWARRIOR=m
7764 +CONFIG_USB_TEST=m
7765 +CONFIG_USB_EHSET_TEST_FIXTURE=m
7766 +CONFIG_USB_ISIGHTFW=m
7767 +CONFIG_USB_YUREX=m
7768 +CONFIG_USB_EZUSB_FX2=m
7769 +CONFIG_USB_HUB_USB251XB=m
7770 +CONFIG_USB_HSIC_USB3503=m
7771 +CONFIG_USB_HSIC_USB4604=m
7772 +CONFIG_USB_LINK_LAYER_TEST=m
7773 +CONFIG_USB_CHAOSKEY=m
7774 +CONFIG_USB_ATM=m
7775 +CONFIG_USB_SPEEDTOUCH=m
7776 +CONFIG_USB_CXACRU=m
7777 +CONFIG_USB_UEAGLEATM=m
7778 +CONFIG_USB_XUSBATM=m
7781 +# USB Physical Layer drivers
7783 +CONFIG_USB_PHY=y
7784 +CONFIG_NOP_USB_XCEIV=m
7785 +CONFIG_USB_GPIO_VBUS=m
7786 +CONFIG_TAHVO_USB=m
7787 +CONFIG_TAHVO_USB_HOST_BY_DEFAULT=y
7788 +CONFIG_USB_ISP1301=m
7789 +# end of USB Physical Layer drivers
7791 +CONFIG_USB_GADGET=m
7792 +# CONFIG_USB_GADGET_DEBUG is not set
7793 +# CONFIG_USB_GADGET_DEBUG_FILES is not set
7794 +# CONFIG_USB_GADGET_DEBUG_FS is not set
7795 +CONFIG_USB_GADGET_VBUS_DRAW=2
7796 +CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
7797 +CONFIG_U_SERIAL_CONSOLE=y
7800 +# USB Peripheral Controller
7802 +CONFIG_USB_FOTG210_UDC=m
7803 +CONFIG_USB_GR_UDC=m
7804 +CONFIG_USB_R8A66597=m
7805 +CONFIG_USB_PXA27X=m
7806 +CONFIG_USB_MV_UDC=m
7807 +CONFIG_USB_MV_U3D=m
7808 +CONFIG_USB_SNP_CORE=m
7809 +# CONFIG_USB_M66592 is not set
7810 +CONFIG_USB_BDC_UDC=m
7811 +CONFIG_USB_AMD5536UDC=m
7812 +CONFIG_USB_NET2272=m
7813 +CONFIG_USB_NET2272_DMA=y
7814 +CONFIG_USB_NET2280=m
7815 +CONFIG_USB_GOKU=m
7816 +CONFIG_USB_EG20T=m
7817 +CONFIG_USB_MAX3420_UDC=m
7818 +# CONFIG_USB_DUMMY_HCD is not set
7819 +# end of USB Peripheral Controller
7821 +CONFIG_USB_LIBCOMPOSITE=m
7822 +CONFIG_USB_F_ACM=m
7823 +CONFIG_USB_F_SS_LB=m
7824 +CONFIG_USB_U_SERIAL=m
7825 +CONFIG_USB_U_ETHER=m
7826 +CONFIG_USB_U_AUDIO=m
7827 +CONFIG_USB_F_SERIAL=m
7828 +CONFIG_USB_F_OBEX=m
7829 +CONFIG_USB_F_NCM=m
7830 +CONFIG_USB_F_ECM=m
7831 +CONFIG_USB_F_PHONET=m
7832 +CONFIG_USB_F_EEM=m
7833 +CONFIG_USB_F_SUBSET=m
7834 +CONFIG_USB_F_RNDIS=m
7835 +CONFIG_USB_F_MASS_STORAGE=m
7836 +CONFIG_USB_F_FS=m
7837 +CONFIG_USB_F_UAC1=m
7838 +CONFIG_USB_F_UAC1_LEGACY=m
7839 +CONFIG_USB_F_UAC2=m
7840 +CONFIG_USB_F_UVC=m
7841 +CONFIG_USB_F_MIDI=m
7842 +CONFIG_USB_F_HID=m
7843 +CONFIG_USB_F_PRINTER=m
7844 +CONFIG_USB_F_TCM=m
7845 +CONFIG_USB_CONFIGFS=m
7846 +CONFIG_USB_CONFIGFS_SERIAL=y
7847 +CONFIG_USB_CONFIGFS_ACM=y
7848 +CONFIG_USB_CONFIGFS_OBEX=y
7849 +CONFIG_USB_CONFIGFS_NCM=y
7850 +CONFIG_USB_CONFIGFS_ECM=y
7851 +CONFIG_USB_CONFIGFS_ECM_SUBSET=y
7852 +CONFIG_USB_CONFIGFS_RNDIS=y
7853 +CONFIG_USB_CONFIGFS_EEM=y
7854 +CONFIG_USB_CONFIGFS_PHONET=y
7855 +CONFIG_USB_CONFIGFS_MASS_STORAGE=y
7856 +CONFIG_USB_CONFIGFS_F_LB_SS=y
7857 +CONFIG_USB_CONFIGFS_F_FS=y
7858 +CONFIG_USB_CONFIGFS_F_UAC1=y
7859 +CONFIG_USB_CONFIGFS_F_UAC1_LEGACY=y
7860 +CONFIG_USB_CONFIGFS_F_UAC2=y
7861 +CONFIG_USB_CONFIGFS_F_MIDI=y
7862 +CONFIG_USB_CONFIGFS_F_HID=y
7863 +CONFIG_USB_CONFIGFS_F_UVC=y
7864 +CONFIG_USB_CONFIGFS_F_PRINTER=y
7865 +CONFIG_USB_CONFIGFS_F_TCM=y
7868 +# USB Gadget precomposed configurations
7870 +CONFIG_USB_ZERO=m
7871 +CONFIG_USB_AUDIO=m
7872 +CONFIG_GADGET_UAC1=y
7873 +# CONFIG_GADGET_UAC1_LEGACY is not set
7874 +CONFIG_USB_ETH=m
7875 +CONFIG_USB_ETH_RNDIS=y
7876 +CONFIG_USB_ETH_EEM=y
7877 +CONFIG_USB_G_NCM=m
7878 +CONFIG_USB_GADGETFS=m
7879 +CONFIG_USB_FUNCTIONFS=m
7880 +CONFIG_USB_FUNCTIONFS_ETH=y
7881 +CONFIG_USB_FUNCTIONFS_RNDIS=y
7882 +CONFIG_USB_FUNCTIONFS_GENERIC=y
7883 +CONFIG_USB_MASS_STORAGE=m
7884 +CONFIG_USB_GADGET_TARGET=m
7885 +CONFIG_USB_G_SERIAL=m
7886 +CONFIG_USB_MIDI_GADGET=m
7887 +CONFIG_USB_G_PRINTER=m
7888 +CONFIG_USB_CDC_COMPOSITE=m
7889 +CONFIG_USB_G_NOKIA=m
7890 +CONFIG_USB_G_ACM_MS=m
7891 +# CONFIG_USB_G_MULTI is not set
7892 +CONFIG_USB_G_HID=m
7893 +CONFIG_USB_G_DBGP=m
7894 +# CONFIG_USB_G_DBGP_PRINTK is not set
7895 +CONFIG_USB_G_DBGP_SERIAL=y
7896 +CONFIG_USB_G_WEBCAM=m
7897 +CONFIG_USB_RAW_GADGET=m
7898 +# end of USB Gadget precomposed configurations
7900 +CONFIG_TYPEC=m
7901 +CONFIG_TYPEC_TCPM=m
7902 +CONFIG_TYPEC_TCPCI=m
7903 +CONFIG_TYPEC_RT1711H=m
7904 +CONFIG_TYPEC_MT6360=m
7905 +CONFIG_TYPEC_TCPCI_MAXIM=m
7906 +CONFIG_TYPEC_FUSB302=m
7907 +# CONFIG_TYPEC_WCOVE is not set
7908 +CONFIG_TYPEC_UCSI=m
7909 +CONFIG_UCSI_CCG=m
7910 +CONFIG_UCSI_ACPI=m
7911 +CONFIG_TYPEC_HD3SS3220=m
7912 +CONFIG_TYPEC_TPS6598X=m
7913 +CONFIG_TYPEC_STUSB160X=m
7916 +# USB Type-C Multiplexer/DeMultiplexer Switch support
7918 +CONFIG_TYPEC_MUX_PI3USB30532=m
7919 +CONFIG_TYPEC_MUX_INTEL_PMC=m
7920 +# end of USB Type-C Multiplexer/DeMultiplexer Switch support
7923 +# USB Type-C Alternate Mode drivers
7925 +CONFIG_TYPEC_DP_ALTMODE=m
7926 +CONFIG_TYPEC_NVIDIA_ALTMODE=m
7927 +# end of USB Type-C Alternate Mode drivers
7929 +CONFIG_USB_ROLE_SWITCH=y
7930 +CONFIG_USB_ROLES_INTEL_XHCI=m
7931 +CONFIG_MMC=y
7932 +CONFIG_MMC_BLOCK=m
7933 +CONFIG_MMC_BLOCK_MINORS=8
7934 +CONFIG_SDIO_UART=m
7935 +# CONFIG_MMC_TEST is not set
7936 +CONFIG_MMC_CRYPTO=y
7939 +# MMC/SD/SDIO Host Controller Drivers
7941 +# CONFIG_MMC_DEBUG is not set
7942 +CONFIG_MMC_SDHCI=m
7943 +CONFIG_MMC_SDHCI_IO_ACCESSORS=y
7944 +CONFIG_MMC_SDHCI_PCI=m
7945 +CONFIG_MMC_RICOH_MMC=y
7946 +CONFIG_MMC_SDHCI_ACPI=m
7947 +CONFIG_MMC_SDHCI_PLTFM=m
7948 +CONFIG_MMC_SDHCI_F_SDH30=m
7949 +CONFIG_MMC_WBSD=m
7950 +CONFIG_MMC_ALCOR=m
7951 +CONFIG_MMC_TIFM_SD=m
7952 +CONFIG_MMC_SPI=m
7953 +CONFIG_MMC_SDRICOH_CS=m
7954 +CONFIG_MMC_CB710=m
7955 +CONFIG_MMC_VIA_SDMMC=m
7956 +CONFIG_MMC_VUB300=m
7957 +CONFIG_MMC_USHC=m
7958 +CONFIG_MMC_USDHI6ROL0=m
7959 +CONFIG_MMC_REALTEK_PCI=m
7960 +CONFIG_MMC_REALTEK_USB=m
7961 +CONFIG_MMC_CQHCI=m
7962 +# CONFIG_MMC_HSQ is not set
7963 +CONFIG_MMC_TOSHIBA_PCI=m
7964 +CONFIG_MMC_MTK=m
7965 +CONFIG_MMC_SDHCI_XENON=m
7966 +CONFIG_MEMSTICK=m
7967 +# CONFIG_MEMSTICK_DEBUG is not set
7970 +# MemoryStick drivers
7972 +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set
7973 +CONFIG_MSPRO_BLOCK=m
7974 +CONFIG_MS_BLOCK=m
7977 +# MemoryStick Host Controller Drivers
7979 +CONFIG_MEMSTICK_TIFM_MS=m
7980 +CONFIG_MEMSTICK_JMICRON_38X=m
7981 +CONFIG_MEMSTICK_R592=m
7982 +CONFIG_MEMSTICK_REALTEK_PCI=m
7983 +CONFIG_MEMSTICK_REALTEK_USB=m
7984 +CONFIG_NEW_LEDS=y
7985 +CONFIG_LEDS_CLASS=y
7986 +CONFIG_LEDS_CLASS_FLASH=m
7987 +CONFIG_LEDS_CLASS_MULTICOLOR=m
7988 +CONFIG_LEDS_BRIGHTNESS_HW_CHANGED=y
7991 +# LED drivers
7993 +CONFIG_LEDS_88PM860X=m
7994 +CONFIG_LEDS_APU=m
7995 +CONFIG_LEDS_AS3645A=m
7996 +CONFIG_LEDS_LM3530=m
7997 +CONFIG_LEDS_LM3532=m
7998 +CONFIG_LEDS_LM3533=m
7999 +CONFIG_LEDS_LM3642=m
8000 +CONFIG_LEDS_LM3601X=m
8001 +CONFIG_LEDS_MT6323=m
8002 +CONFIG_LEDS_PCA9532=m
8003 +CONFIG_LEDS_PCA9532_GPIO=y
8004 +CONFIG_LEDS_GPIO=m
8005 +CONFIG_LEDS_LP3944=m
8006 +CONFIG_LEDS_LP3952=m
8007 +CONFIG_LEDS_LP50XX=m
8008 +CONFIG_LEDS_LP8788=m
8009 +CONFIG_LEDS_CLEVO_MAIL=m
8010 +CONFIG_LEDS_PCA955X=m
8011 +CONFIG_LEDS_PCA955X_GPIO=y
8012 +CONFIG_LEDS_PCA963X=m
8013 +CONFIG_LEDS_WM831X_STATUS=m
8014 +CONFIG_LEDS_WM8350=m
8015 +CONFIG_LEDS_DA903X=m
8016 +CONFIG_LEDS_DA9052=m
8017 +CONFIG_LEDS_DAC124S085=m
8018 +CONFIG_LEDS_PWM=m
8019 +CONFIG_LEDS_REGULATOR=m
8020 +CONFIG_LEDS_BD2802=m
8021 +CONFIG_LEDS_INTEL_SS4200=m
8022 +CONFIG_LEDS_ADP5520=m
8023 +CONFIG_LEDS_MC13783=m
8024 +CONFIG_LEDS_TCA6507=m
8025 +CONFIG_LEDS_TLC591XX=m
8026 +CONFIG_LEDS_MAX8997=m
8027 +CONFIG_LEDS_LM355x=m
8028 +CONFIG_LEDS_MENF21BMC=m
8031 +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)
8033 +CONFIG_LEDS_BLINKM=m
8034 +CONFIG_LEDS_MLXCPLD=m
8035 +CONFIG_LEDS_MLXREG=m
8036 +CONFIG_LEDS_USER=m
8037 +CONFIG_LEDS_NIC78BX=m
8038 +CONFIG_LEDS_TI_LMU_COMMON=m
8039 +CONFIG_LEDS_LM36274=m
8040 +CONFIG_LEDS_TPS6105X=m
8041 +CONFIG_LEDS_SGM3140=m
8044 +# Flash and Torch LED drivers
8046 +CONFIG_LEDS_RT8515=m
8049 +# LED Triggers
8051 +CONFIG_LEDS_TRIGGERS=y
8052 +CONFIG_LEDS_TRIGGER_TIMER=m
8053 +CONFIG_LEDS_TRIGGER_ONESHOT=m
8054 +CONFIG_LEDS_TRIGGER_DISK=y
8055 +CONFIG_LEDS_TRIGGER_MTD=y
8056 +CONFIG_LEDS_TRIGGER_HEARTBEAT=m
8057 +CONFIG_LEDS_TRIGGER_BACKLIGHT=m
8058 +CONFIG_LEDS_TRIGGER_CPU=y
8059 +CONFIG_LEDS_TRIGGER_ACTIVITY=m
8060 +CONFIG_LEDS_TRIGGER_GPIO=m
8061 +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
8064 +# iptables trigger is under Netfilter config (LED target)
8066 +CONFIG_LEDS_TRIGGER_TRANSIENT=m
8067 +CONFIG_LEDS_TRIGGER_CAMERA=m
8068 +CONFIG_LEDS_TRIGGER_PANIC=y
8069 +CONFIG_LEDS_TRIGGER_NETDEV=m
8070 +CONFIG_LEDS_TRIGGER_PATTERN=m
8071 +CONFIG_LEDS_TRIGGER_AUDIO=m
8072 +CONFIG_LEDS_TRIGGER_TTY=m
8075 +# LED Blink
8077 +CONFIG_LEDS_BLINK=y
8078 +# CONFIG_ACCESSIBILITY is not set
8079 +CONFIG_INFINIBAND=m
8080 +CONFIG_INFINIBAND_USER_MAD=m
8081 +CONFIG_INFINIBAND_USER_ACCESS=m
8082 +CONFIG_INFINIBAND_USER_MEM=y
8083 +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y
8084 +CONFIG_INFINIBAND_ADDR_TRANS=y
8085 +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y
8086 +CONFIG_INFINIBAND_VIRT_DMA=y
8087 +CONFIG_INFINIBAND_MTHCA=m
8088 +# CONFIG_INFINIBAND_MTHCA_DEBUG is not set
8089 +CONFIG_INFINIBAND_QIB=m
8090 +CONFIG_INFINIBAND_QIB_DCA=y
8091 +CONFIG_INFINIBAND_CXGB4=m
8092 +CONFIG_INFINIBAND_EFA=m
8093 +CONFIG_INFINIBAND_I40IW=m
8094 +CONFIG_MLX4_INFINIBAND=m
8095 +CONFIG_MLX5_INFINIBAND=m
8096 +CONFIG_INFINIBAND_OCRDMA=m
8097 +CONFIG_INFINIBAND_VMWARE_PVRDMA=m
8098 +CONFIG_INFINIBAND_USNIC=m
8099 +CONFIG_INFINIBAND_BNXT_RE=m
8100 +CONFIG_INFINIBAND_HFI1=m
8101 +# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set
8102 +# CONFIG_SDMA_VERBOSITY is not set
8103 +CONFIG_INFINIBAND_QEDR=m
8104 +CONFIG_INFINIBAND_RDMAVT=m
8105 +CONFIG_RDMA_RXE=m
8106 +CONFIG_RDMA_SIW=m
8107 +CONFIG_INFINIBAND_IPOIB=m
8108 +CONFIG_INFINIBAND_IPOIB_CM=y
8109 +# CONFIG_INFINIBAND_IPOIB_DEBUG is not set
8110 +CONFIG_INFINIBAND_SRP=m
8111 +CONFIG_INFINIBAND_SRPT=m
8112 +CONFIG_INFINIBAND_ISER=m
8113 +CONFIG_INFINIBAND_ISERT=m
8114 +CONFIG_INFINIBAND_RTRS=m
8115 +CONFIG_INFINIBAND_RTRS_CLIENT=m
8116 +CONFIG_INFINIBAND_RTRS_SERVER=m
8117 +CONFIG_INFINIBAND_OPA_VNIC=m
8118 +CONFIG_EDAC_ATOMIC_SCRUB=y
8119 +CONFIG_EDAC_SUPPORT=y
8120 +CONFIG_EDAC=y
8121 +# CONFIG_EDAC_LEGACY_SYSFS is not set
8122 +# CONFIG_EDAC_DEBUG is not set
8123 +CONFIG_EDAC_DECODE_MCE=m
8124 +CONFIG_EDAC_GHES=y
8125 +CONFIG_EDAC_AMD64=m
8126 +CONFIG_EDAC_E752X=m
8127 +CONFIG_EDAC_I82975X=m
8128 +CONFIG_EDAC_I3000=m
8129 +CONFIG_EDAC_I3200=m
8130 +CONFIG_EDAC_IE31200=m
8131 +CONFIG_EDAC_X38=m
8132 +CONFIG_EDAC_I5400=m
8133 +CONFIG_EDAC_I7CORE=m
8134 +CONFIG_EDAC_I5000=m
8135 +CONFIG_EDAC_I5100=m
8136 +CONFIG_EDAC_I7300=m
8137 +CONFIG_EDAC_SBRIDGE=m
8138 +CONFIG_EDAC_SKX=m
8139 +CONFIG_EDAC_I10NM=m
8140 +CONFIG_EDAC_PND2=m
8141 +CONFIG_EDAC_IGEN6=m
8142 +CONFIG_RTC_LIB=y
8143 +CONFIG_RTC_MC146818_LIB=y
8144 +CONFIG_RTC_CLASS=y
8145 +CONFIG_RTC_HCTOSYS=y
8146 +CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
8147 +CONFIG_RTC_SYSTOHC=y
8148 +CONFIG_RTC_SYSTOHC_DEVICE="rtc0"
8149 +# CONFIG_RTC_DEBUG is not set
8150 +CONFIG_RTC_NVMEM=y
8153 +# RTC interfaces
8155 +CONFIG_RTC_INTF_SYSFS=y
8156 +CONFIG_RTC_INTF_PROC=y
8157 +CONFIG_RTC_INTF_DEV=y
8158 +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
8159 +# CONFIG_RTC_DRV_TEST is not set
8162 +# I2C RTC drivers
8164 +CONFIG_RTC_DRV_88PM860X=m
8165 +CONFIG_RTC_DRV_88PM80X=m
8166 +CONFIG_RTC_DRV_ABB5ZES3=m
8167 +CONFIG_RTC_DRV_ABEOZ9=m
8168 +CONFIG_RTC_DRV_ABX80X=m
8169 +CONFIG_RTC_DRV_DS1307=m
8170 +CONFIG_RTC_DRV_DS1307_CENTURY=y
8171 +CONFIG_RTC_DRV_DS1374=m
8172 +CONFIG_RTC_DRV_DS1374_WDT=y
8173 +CONFIG_RTC_DRV_DS1672=m
8174 +CONFIG_RTC_DRV_LP8788=m
8175 +CONFIG_RTC_DRV_MAX6900=m
8176 +CONFIG_RTC_DRV_MAX8907=m
8177 +CONFIG_RTC_DRV_MAX8925=m
8178 +CONFIG_RTC_DRV_MAX8998=m
8179 +CONFIG_RTC_DRV_MAX8997=m
8180 +CONFIG_RTC_DRV_RS5C372=m
8181 +CONFIG_RTC_DRV_ISL1208=m
8182 +CONFIG_RTC_DRV_ISL12022=m
8183 +CONFIG_RTC_DRV_X1205=m
8184 +CONFIG_RTC_DRV_PCF8523=m
8185 +CONFIG_RTC_DRV_PCF85063=m
8186 +CONFIG_RTC_DRV_PCF85363=m
8187 +CONFIG_RTC_DRV_PCF8563=m
8188 +CONFIG_RTC_DRV_PCF8583=m
8189 +CONFIG_RTC_DRV_M41T80=m
8190 +CONFIG_RTC_DRV_M41T80_WDT=y
8191 +CONFIG_RTC_DRV_BQ32K=m
8192 +CONFIG_RTC_DRV_PALMAS=m
8193 +CONFIG_RTC_DRV_TPS6586X=m
8194 +CONFIG_RTC_DRV_TPS65910=m
8195 +CONFIG_RTC_DRV_TPS80031=m
8196 +CONFIG_RTC_DRV_RC5T583=m
8197 +CONFIG_RTC_DRV_S35390A=m
8198 +CONFIG_RTC_DRV_FM3130=m
8199 +CONFIG_RTC_DRV_RX8010=m
8200 +CONFIG_RTC_DRV_RX8581=m
8201 +CONFIG_RTC_DRV_RX8025=m
8202 +CONFIG_RTC_DRV_EM3027=m
8203 +CONFIG_RTC_DRV_RV3028=m
8204 +CONFIG_RTC_DRV_RV3032=m
8205 +CONFIG_RTC_DRV_RV8803=m
8206 +CONFIG_RTC_DRV_S5M=m
8207 +CONFIG_RTC_DRV_SD3078=m
8210 +# SPI RTC drivers
8212 +CONFIG_RTC_DRV_M41T93=m
8213 +CONFIG_RTC_DRV_M41T94=m
8214 +CONFIG_RTC_DRV_DS1302=m
8215 +CONFIG_RTC_DRV_DS1305=m
8216 +CONFIG_RTC_DRV_DS1343=m
8217 +CONFIG_RTC_DRV_DS1347=m
8218 +CONFIG_RTC_DRV_DS1390=m
8219 +CONFIG_RTC_DRV_MAX6916=m
8220 +CONFIG_RTC_DRV_R9701=m
8221 +CONFIG_RTC_DRV_RX4581=m
8222 +CONFIG_RTC_DRV_RS5C348=m
8223 +CONFIG_RTC_DRV_MAX6902=m
8224 +CONFIG_RTC_DRV_PCF2123=m
8225 +CONFIG_RTC_DRV_MCP795=m
8226 +CONFIG_RTC_I2C_AND_SPI=y
8229 +# SPI and I2C RTC drivers
8231 +CONFIG_RTC_DRV_DS3232=m
8232 +CONFIG_RTC_DRV_DS3232_HWMON=y
8233 +CONFIG_RTC_DRV_PCF2127=m
8234 +CONFIG_RTC_DRV_RV3029C2=m
8235 +CONFIG_RTC_DRV_RV3029_HWMON=y
8236 +CONFIG_RTC_DRV_RX6110=m
8239 +# Platform RTC drivers
8241 +CONFIG_RTC_DRV_CMOS=y
8242 +CONFIG_RTC_DRV_DS1286=m
8243 +CONFIG_RTC_DRV_DS1511=m
8244 +CONFIG_RTC_DRV_DS1553=m
8245 +CONFIG_RTC_DRV_DS1685_FAMILY=m
8246 +CONFIG_RTC_DRV_DS1685=y
8247 +# CONFIG_RTC_DRV_DS1689 is not set
8248 +# CONFIG_RTC_DRV_DS17285 is not set
8249 +# CONFIG_RTC_DRV_DS17485 is not set
8250 +# CONFIG_RTC_DRV_DS17885 is not set
8251 +CONFIG_RTC_DRV_DS1742=m
8252 +CONFIG_RTC_DRV_DS2404=m
8253 +CONFIG_RTC_DRV_DA9052=m
8254 +CONFIG_RTC_DRV_DA9055=m
8255 +CONFIG_RTC_DRV_DA9063=m
8256 +CONFIG_RTC_DRV_STK17TA8=m
8257 +CONFIG_RTC_DRV_M48T86=m
8258 +CONFIG_RTC_DRV_M48T35=m
8259 +CONFIG_RTC_DRV_M48T59=m
8260 +CONFIG_RTC_DRV_MSM6242=m
8261 +CONFIG_RTC_DRV_BQ4802=m
8262 +CONFIG_RTC_DRV_RP5C01=m
8263 +CONFIG_RTC_DRV_V3020=m
8264 +CONFIG_RTC_DRV_WM831X=m
8265 +CONFIG_RTC_DRV_WM8350=m
8266 +CONFIG_RTC_DRV_PCF50633=m
8267 +CONFIG_RTC_DRV_CROS_EC=m
8270 +# on-CPU RTC drivers
8272 +CONFIG_RTC_DRV_FTRTC010=m
8273 +CONFIG_RTC_DRV_PCAP=m
8274 +CONFIG_RTC_DRV_MC13XXX=m
8275 +CONFIG_RTC_DRV_MT6397=m
8278 +# HID Sensor RTC drivers
8280 +CONFIG_RTC_DRV_HID_SENSOR_TIME=m
8281 +CONFIG_RTC_DRV_WILCO_EC=m
8282 +CONFIG_DMADEVICES=y
8283 +# CONFIG_DMADEVICES_DEBUG is not set
8286 +# DMA Devices
8288 +CONFIG_DMA_ENGINE=y
8289 +CONFIG_DMA_VIRTUAL_CHANNELS=y
8290 +CONFIG_DMA_ACPI=y
8291 +CONFIG_ALTERA_MSGDMA=m
8292 +CONFIG_INTEL_IDMA64=m
8293 +CONFIG_INTEL_IDXD=m
8294 +CONFIG_INTEL_IDXD_SVM=y
8295 +CONFIG_INTEL_IOATDMA=m
8296 +CONFIG_PLX_DMA=m
8297 +CONFIG_XILINX_ZYNQMP_DPDMA=m
8298 +CONFIG_QCOM_HIDMA_MGMT=m
8299 +CONFIG_QCOM_HIDMA=m
8300 +CONFIG_DW_DMAC_CORE=m
8301 +CONFIG_DW_DMAC=m
8302 +CONFIG_DW_DMAC_PCI=m
8303 +CONFIG_DW_EDMA=m
8304 +CONFIG_DW_EDMA_PCIE=m
8305 +CONFIG_HSU_DMA=m
8306 +CONFIG_SF_PDMA=m
8307 +CONFIG_INTEL_LDMA=y
8310 +# DMA Clients
8312 +CONFIG_ASYNC_TX_DMA=y
8313 +# CONFIG_DMATEST is not set
8314 +CONFIG_DMA_ENGINE_RAID=y
8317 +# DMABUF options
8319 +CONFIG_SYNC_FILE=y
8320 +CONFIG_SW_SYNC=y
8321 +CONFIG_UDMABUF=y
8322 +# CONFIG_DMABUF_MOVE_NOTIFY is not set
8323 +# CONFIG_DMABUF_DEBUG is not set
8324 +# CONFIG_DMABUF_SELFTESTS is not set
8325 +CONFIG_DMABUF_HEAPS=y
8326 +CONFIG_DMABUF_HEAPS_SYSTEM=y
8327 +# end of DMABUF options
8329 +CONFIG_DCA=m
8330 +CONFIG_AUXDISPLAY=y
8331 +CONFIG_CHARLCD=m
8332 +CONFIG_HD44780_COMMON=m
8333 +CONFIG_HD44780=m
8334 +CONFIG_KS0108=m
8335 +CONFIG_KS0108_PORT=0x378
8336 +CONFIG_KS0108_DELAY=2
8337 +CONFIG_CFAG12864B=m
8338 +CONFIG_CFAG12864B_RATE=20
8339 +CONFIG_IMG_ASCII_LCD=m
8340 +CONFIG_LCD2S=m
8341 +CONFIG_PARPORT_PANEL=m
8342 +CONFIG_PANEL_PARPORT=0
8343 +CONFIG_PANEL_PROFILE=5
8344 +# CONFIG_PANEL_CHANGE_MESSAGE is not set
8345 +# CONFIG_CHARLCD_BL_OFF is not set
8346 +# CONFIG_CHARLCD_BL_ON is not set
8347 +CONFIG_CHARLCD_BL_FLASH=y
8348 +CONFIG_PANEL=m
8349 +CONFIG_UIO=m
8350 +CONFIG_UIO_CIF=m
8351 +CONFIG_UIO_PDRV_GENIRQ=m
8352 +CONFIG_UIO_DMEM_GENIRQ=m
8353 +CONFIG_UIO_AEC=m
8354 +CONFIG_UIO_SERCOS3=m
8355 +CONFIG_UIO_PCI_GENERIC=m
8356 +CONFIG_UIO_NETX=m
8357 +CONFIG_UIO_PRUSS=m
8358 +CONFIG_UIO_MF624=m
8359 +CONFIG_UIO_HV_GENERIC=m
8360 +CONFIG_VFIO_IOMMU_TYPE1=y
8361 +CONFIG_VFIO_VIRQFD=y
8362 +CONFIG_VFIO=y
8363 +CONFIG_VFIO_NOIOMMU=y
8364 +CONFIG_VFIO_PCI=y
8365 +CONFIG_VFIO_PCI_VGA=y
8366 +CONFIG_VFIO_PCI_MMAP=y
8367 +CONFIG_VFIO_PCI_INTX=y
8368 +CONFIG_VFIO_PCI_IGD=y
8369 +CONFIG_VFIO_MDEV=m
8370 +CONFIG_VFIO_MDEV_DEVICE=m
8371 +CONFIG_IRQ_BYPASS_MANAGER=y
8372 +CONFIG_VIRT_DRIVERS=y
8373 +CONFIG_VBOXGUEST=m
8374 +CONFIG_NITRO_ENCLAVES=m
8375 +CONFIG_ACRN_HSM=m
8376 +CONFIG_VIRTIO=y
8377 +CONFIG_VIRTIO_PCI_LIB=y
8378 +CONFIG_VIRTIO_MENU=y
8379 +CONFIG_VIRTIO_PCI=y
8380 +CONFIG_VIRTIO_PCI_LEGACY=y
8381 +CONFIG_VIRTIO_VDPA=m
8382 +CONFIG_VIRTIO_PMEM=m
8383 +CONFIG_VIRTIO_BALLOON=y
8384 +CONFIG_VIRTIO_MEM=m
8385 +CONFIG_VIRTIO_INPUT=m
8386 +CONFIG_VIRTIO_MMIO=y
8387 +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
8388 +CONFIG_VIRTIO_DMA_SHARED_BUFFER=m
8389 +CONFIG_VDPA=m
8390 +CONFIG_VDPA_SIM=m
8391 +CONFIG_VDPA_SIM_NET=m
8392 +CONFIG_IFCVF=m
8393 +CONFIG_MLX5_VDPA=y
8394 +CONFIG_MLX5_VDPA_NET=m
8395 +CONFIG_VHOST_IOTLB=m
8396 +CONFIG_VHOST_RING=m
8397 +CONFIG_VHOST=m
8398 +CONFIG_VHOST_MENU=y
8399 +CONFIG_VHOST_NET=m
8400 +CONFIG_VHOST_SCSI=m
8401 +CONFIG_VHOST_VSOCK=m
8402 +CONFIG_VHOST_VDPA=m
8403 +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set
8406 +# Microsoft Hyper-V guest support
8408 +CONFIG_HYPERV=m
8409 +CONFIG_HYPERV_TIMER=y
8410 +CONFIG_HYPERV_UTILS=m
8411 +CONFIG_HYPERV_BALLOON=m
8412 +# end of Microsoft Hyper-V guest support
8415 +# Xen driver support
8417 +CONFIG_XEN_BALLOON=y
8418 +CONFIG_XEN_BALLOON_MEMORY_HOTPLUG=y
8419 +CONFIG_XEN_MEMORY_HOTPLUG_LIMIT=512
8420 +CONFIG_XEN_SCRUB_PAGES_DEFAULT=y
8421 +CONFIG_XEN_DEV_EVTCHN=m
8422 +CONFIG_XEN_BACKEND=y
8423 +CONFIG_XENFS=m
8424 +CONFIG_XEN_COMPAT_XENFS=y
8425 +CONFIG_XEN_SYS_HYPERVISOR=y
8426 +CONFIG_XEN_XENBUS_FRONTEND=y
8427 +CONFIG_XEN_GNTDEV=m
8428 +CONFIG_XEN_GNTDEV_DMABUF=y
8429 +CONFIG_XEN_GRANT_DEV_ALLOC=m
8430 +CONFIG_XEN_GRANT_DMA_ALLOC=y
8431 +CONFIG_SWIOTLB_XEN=y
8432 +CONFIG_XEN_PCIDEV_BACKEND=m
8433 +CONFIG_XEN_PVCALLS_FRONTEND=m
8434 +# CONFIG_XEN_PVCALLS_BACKEND is not set
8435 +CONFIG_XEN_SCSI_BACKEND=m
8436 +CONFIG_XEN_PRIVCMD=m
8437 +CONFIG_XEN_ACPI_PROCESSOR=y
8438 +CONFIG_XEN_MCE_LOG=y
8439 +CONFIG_XEN_HAVE_PVMMU=y
8440 +CONFIG_XEN_EFI=y
8441 +CONFIG_XEN_AUTO_XLATE=y
8442 +CONFIG_XEN_ACPI=y
8443 +CONFIG_XEN_SYMS=y
8444 +CONFIG_XEN_HAVE_VPMU=y
8445 +CONFIG_XEN_FRONT_PGDIR_SHBUF=m
8446 +CONFIG_XEN_UNPOPULATED_ALLOC=y
8447 +# end of Xen driver support
8449 +CONFIG_GREYBUS=m
8450 +CONFIG_GREYBUS_ES2=m
8451 +CONFIG_STAGING=y
8452 +CONFIG_PRISM2_USB=m
8453 +CONFIG_COMEDI=m
8454 +# CONFIG_COMEDI_DEBUG is not set
8455 +CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB=2048
8456 +CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB=20480
8457 +CONFIG_COMEDI_MISC_DRIVERS=y
8458 +CONFIG_COMEDI_BOND=m
8459 +CONFIG_COMEDI_TEST=m
8460 +CONFIG_COMEDI_PARPORT=m
8461 +CONFIG_COMEDI_ISA_DRIVERS=y
8462 +CONFIG_COMEDI_PCL711=m
8463 +CONFIG_COMEDI_PCL724=m
8464 +CONFIG_COMEDI_PCL726=m
8465 +CONFIG_COMEDI_PCL730=m
8466 +CONFIG_COMEDI_PCL812=m
8467 +CONFIG_COMEDI_PCL816=m
8468 +CONFIG_COMEDI_PCL818=m
8469 +CONFIG_COMEDI_PCM3724=m
8470 +CONFIG_COMEDI_AMPLC_DIO200_ISA=m
8471 +CONFIG_COMEDI_AMPLC_PC236_ISA=m
8472 +CONFIG_COMEDI_AMPLC_PC263_ISA=m
8473 +CONFIG_COMEDI_RTI800=m
8474 +CONFIG_COMEDI_RTI802=m
8475 +CONFIG_COMEDI_DAC02=m
8476 +CONFIG_COMEDI_DAS16M1=m
8477 +CONFIG_COMEDI_DAS08_ISA=m
8478 +CONFIG_COMEDI_DAS16=m
8479 +CONFIG_COMEDI_DAS800=m
8480 +CONFIG_COMEDI_DAS1800=m
8481 +CONFIG_COMEDI_DAS6402=m
8482 +CONFIG_COMEDI_DT2801=m
8483 +CONFIG_COMEDI_DT2811=m
8484 +CONFIG_COMEDI_DT2814=m
8485 +CONFIG_COMEDI_DT2815=m
8486 +CONFIG_COMEDI_DT2817=m
8487 +CONFIG_COMEDI_DT282X=m
8488 +CONFIG_COMEDI_DMM32AT=m
8489 +CONFIG_COMEDI_FL512=m
8490 +CONFIG_COMEDI_AIO_AIO12_8=m
8491 +CONFIG_COMEDI_AIO_IIRO_16=m
8492 +CONFIG_COMEDI_II_PCI20KC=m
8493 +CONFIG_COMEDI_C6XDIGIO=m
8494 +CONFIG_COMEDI_MPC624=m
8495 +CONFIG_COMEDI_ADQ12B=m
8496 +CONFIG_COMEDI_NI_AT_A2150=m
8497 +CONFIG_COMEDI_NI_AT_AO=m
8498 +CONFIG_COMEDI_NI_ATMIO=m
8499 +CONFIG_COMEDI_NI_ATMIO16D=m
8500 +CONFIG_COMEDI_NI_LABPC_ISA=m
8501 +CONFIG_COMEDI_PCMAD=m
8502 +CONFIG_COMEDI_PCMDA12=m
8503 +CONFIG_COMEDI_PCMMIO=m
8504 +CONFIG_COMEDI_PCMUIO=m
8505 +CONFIG_COMEDI_MULTIQ3=m
8506 +CONFIG_COMEDI_S526=m
8507 +CONFIG_COMEDI_PCI_DRIVERS=m
8508 +CONFIG_COMEDI_8255_PCI=m
8509 +CONFIG_COMEDI_ADDI_WATCHDOG=m
8510 +CONFIG_COMEDI_ADDI_APCI_1032=m
8511 +CONFIG_COMEDI_ADDI_APCI_1500=m
8512 +CONFIG_COMEDI_ADDI_APCI_1516=m
8513 +CONFIG_COMEDI_ADDI_APCI_1564=m
8514 +CONFIG_COMEDI_ADDI_APCI_16XX=m
8515 +CONFIG_COMEDI_ADDI_APCI_2032=m
8516 +CONFIG_COMEDI_ADDI_APCI_2200=m
8517 +CONFIG_COMEDI_ADDI_APCI_3120=m
8518 +CONFIG_COMEDI_ADDI_APCI_3501=m
8519 +CONFIG_COMEDI_ADDI_APCI_3XXX=m
8520 +CONFIG_COMEDI_ADL_PCI6208=m
8521 +CONFIG_COMEDI_ADL_PCI7X3X=m
8522 +CONFIG_COMEDI_ADL_PCI8164=m
8523 +CONFIG_COMEDI_ADL_PCI9111=m
8524 +CONFIG_COMEDI_ADL_PCI9118=m
8525 +CONFIG_COMEDI_ADV_PCI1710=m
8526 +CONFIG_COMEDI_ADV_PCI1720=m
8527 +CONFIG_COMEDI_ADV_PCI1723=m
8528 +CONFIG_COMEDI_ADV_PCI1724=m
8529 +CONFIG_COMEDI_ADV_PCI1760=m
8530 +CONFIG_COMEDI_ADV_PCI_DIO=m
8531 +CONFIG_COMEDI_AMPLC_DIO200_PCI=m
8532 +CONFIG_COMEDI_AMPLC_PC236_PCI=m
8533 +CONFIG_COMEDI_AMPLC_PC263_PCI=m
8534 +CONFIG_COMEDI_AMPLC_PCI224=m
8535 +CONFIG_COMEDI_AMPLC_PCI230=m
8536 +CONFIG_COMEDI_CONTEC_PCI_DIO=m
8537 +CONFIG_COMEDI_DAS08_PCI=m
8538 +CONFIG_COMEDI_DT3000=m
8539 +CONFIG_COMEDI_DYNA_PCI10XX=m
8540 +CONFIG_COMEDI_GSC_HPDI=m
8541 +CONFIG_COMEDI_MF6X4=m
8542 +CONFIG_COMEDI_ICP_MULTI=m
8543 +CONFIG_COMEDI_DAQBOARD2000=m
8544 +CONFIG_COMEDI_JR3_PCI=m
8545 +CONFIG_COMEDI_KE_COUNTER=m
8546 +CONFIG_COMEDI_CB_PCIDAS64=m
8547 +CONFIG_COMEDI_CB_PCIDAS=m
8548 +CONFIG_COMEDI_CB_PCIDDA=m
8549 +CONFIG_COMEDI_CB_PCIMDAS=m
8550 +CONFIG_COMEDI_CB_PCIMDDA=m
8551 +CONFIG_COMEDI_ME4000=m
8552 +CONFIG_COMEDI_ME_DAQ=m
8553 +CONFIG_COMEDI_NI_6527=m
8554 +CONFIG_COMEDI_NI_65XX=m
8555 +CONFIG_COMEDI_NI_660X=m
8556 +CONFIG_COMEDI_NI_670X=m
8557 +CONFIG_COMEDI_NI_LABPC_PCI=m
8558 +CONFIG_COMEDI_NI_PCIDIO=m
8559 +CONFIG_COMEDI_NI_PCIMIO=m
8560 +CONFIG_COMEDI_RTD520=m
8561 +CONFIG_COMEDI_S626=m
8562 +CONFIG_COMEDI_MITE=m
8563 +CONFIG_COMEDI_NI_TIOCMD=m
8564 +CONFIG_COMEDI_PCMCIA_DRIVERS=m
8565 +CONFIG_COMEDI_CB_DAS16_CS=m
8566 +CONFIG_COMEDI_DAS08_CS=m
8567 +CONFIG_COMEDI_NI_DAQ_700_CS=m
8568 +CONFIG_COMEDI_NI_DAQ_DIO24_CS=m
8569 +CONFIG_COMEDI_NI_LABPC_CS=m
8570 +CONFIG_COMEDI_NI_MIO_CS=m
8571 +CONFIG_COMEDI_QUATECH_DAQP_CS=m
8572 +CONFIG_COMEDI_USB_DRIVERS=m
8573 +CONFIG_COMEDI_DT9812=m
8574 +CONFIG_COMEDI_NI_USB6501=m
8575 +CONFIG_COMEDI_USBDUX=m
8576 +CONFIG_COMEDI_USBDUXFAST=m
8577 +CONFIG_COMEDI_USBDUXSIGMA=m
8578 +CONFIG_COMEDI_VMK80XX=m
8579 +CONFIG_COMEDI_8254=m
8580 +CONFIG_COMEDI_8255=m
8581 +CONFIG_COMEDI_8255_SA=m
8582 +CONFIG_COMEDI_KCOMEDILIB=m
8583 +CONFIG_COMEDI_AMPLC_DIO200=m
8584 +CONFIG_COMEDI_AMPLC_PC236=m
8585 +CONFIG_COMEDI_DAS08=m
8586 +CONFIG_COMEDI_ISADMA=m
8587 +CONFIG_COMEDI_NI_LABPC=m
8588 +CONFIG_COMEDI_NI_LABPC_ISADMA=m
8589 +CONFIG_COMEDI_NI_TIO=m
8590 +CONFIG_COMEDI_NI_ROUTING=m
8591 +CONFIG_RTL8192U=m
8592 +CONFIG_RTLLIB=m
8593 +CONFIG_RTLLIB_CRYPTO_CCMP=m
8594 +CONFIG_RTLLIB_CRYPTO_TKIP=m
8595 +CONFIG_RTLLIB_CRYPTO_WEP=m
8596 +CONFIG_RTL8192E=m
8597 +CONFIG_RTL8723BS=m
8598 +CONFIG_R8712U=m
8599 +CONFIG_R8188EU=m
8600 +CONFIG_88EU_AP_MODE=y
8601 +CONFIG_RTS5208=m
8602 +CONFIG_VT6655=m
8603 +CONFIG_VT6656=m
8606 +# IIO staging drivers
8610 +# Accelerometers
8612 +CONFIG_ADIS16203=m
8613 +CONFIG_ADIS16240=m
8614 +# end of Accelerometers
8617 +# Analog to digital converters
8619 +CONFIG_AD7816=m
8620 +CONFIG_AD7280=m
8621 +# end of Analog to digital converters
8624 +# Analog digital bi-direction converters
8626 +CONFIG_ADT7316=m
8627 +CONFIG_ADT7316_SPI=m
8628 +CONFIG_ADT7316_I2C=m
8629 +# end of Analog digital bi-direction converters
8632 +# Capacitance to digital converters
8634 +CONFIG_AD7150=m
8635 +CONFIG_AD7746=m
8636 +# end of Capacitance to digital converters
8639 +# Direct Digital Synthesis
8641 +CONFIG_AD9832=m
8642 +CONFIG_AD9834=m
8643 +# end of Direct Digital Synthesis
8646 +# Network Analyzer, Impedance Converters
8648 +CONFIG_AD5933=m
8649 +# end of Network Analyzer, Impedance Converters
8652 +# Active energy metering IC
8654 +CONFIG_ADE7854=m
8655 +CONFIG_ADE7854_I2C=m
8656 +CONFIG_ADE7854_SPI=m
8657 +# end of Active energy metering IC
8660 +# Resolver to digital converters
8662 +CONFIG_AD2S1210=m
8663 +# end of Resolver to digital converters
8664 +# end of IIO staging drivers
8666 +CONFIG_FB_SM750=m
8667 +CONFIG_STAGING_MEDIA=y
8668 +CONFIG_INTEL_ATOMISP=y
8669 +CONFIG_VIDEO_ATOMISP=m
8670 +# CONFIG_VIDEO_ATOMISP_ISP2401 is not set
8671 +CONFIG_VIDEO_ATOMISP_OV2722=m
8672 +CONFIG_VIDEO_ATOMISP_GC2235=m
8673 +CONFIG_VIDEO_ATOMISP_MSRLIST_HELPER=m
8674 +CONFIG_VIDEO_ATOMISP_MT9M114=m
8675 +CONFIG_VIDEO_ATOMISP_GC0310=m
8676 +CONFIG_VIDEO_ATOMISP_OV2680=m
8677 +CONFIG_VIDEO_ATOMISP_OV5693=m
8678 +CONFIG_VIDEO_ATOMISP_LM3554=m
8679 +CONFIG_VIDEO_ZORAN=m
8680 +CONFIG_VIDEO_ZORAN_DC30=m
8681 +CONFIG_VIDEO_ZORAN_ZR36060=m
8682 +CONFIG_VIDEO_ZORAN_BUZ=m
8683 +CONFIG_VIDEO_ZORAN_DC10=m
8684 +CONFIG_VIDEO_ZORAN_LML33=m
8685 +CONFIG_VIDEO_ZORAN_LML33R10=m
8686 +CONFIG_VIDEO_ZORAN_AVS6EYES=m
8687 +CONFIG_VIDEO_IPU3_IMGU=m
8690 +# Android
8692 +CONFIG_ASHMEM=m
8693 +# end of Android
8695 +CONFIG_LTE_GDM724X=m
8696 +CONFIG_FIREWIRE_SERIAL=m
8697 +CONFIG_FWTTY_MAX_TOTAL_PORTS=64
8698 +CONFIG_FWTTY_MAX_CARD_PORTS=32
8699 +CONFIG_GS_FPGABOOT=m
8700 +CONFIG_UNISYSSPAR=y
8701 +CONFIG_UNISYS_VISORNIC=m
8702 +CONFIG_UNISYS_VISORINPUT=m
8703 +CONFIG_UNISYS_VISORHBA=m
8704 +CONFIG_FB_TFT=m
8705 +CONFIG_FB_TFT_AGM1264K_FL=m
8706 +CONFIG_FB_TFT_BD663474=m
8707 +CONFIG_FB_TFT_HX8340BN=m
8708 +CONFIG_FB_TFT_HX8347D=m
8709 +CONFIG_FB_TFT_HX8353D=m
8710 +CONFIG_FB_TFT_HX8357D=m
8711 +CONFIG_FB_TFT_ILI9163=m
8712 +CONFIG_FB_TFT_ILI9320=m
8713 +CONFIG_FB_TFT_ILI9325=m
8714 +CONFIG_FB_TFT_ILI9340=m
8715 +CONFIG_FB_TFT_ILI9341=m
8716 +CONFIG_FB_TFT_ILI9481=m
8717 +CONFIG_FB_TFT_ILI9486=m
8718 +CONFIG_FB_TFT_PCD8544=m
8719 +CONFIG_FB_TFT_RA8875=m
8720 +CONFIG_FB_TFT_S6D02A1=m
8721 +CONFIG_FB_TFT_S6D1121=m
8722 +CONFIG_FB_TFT_SEPS525=m
8723 +CONFIG_FB_TFT_SH1106=m
8724 +CONFIG_FB_TFT_SSD1289=m
8725 +CONFIG_FB_TFT_SSD1305=m
8726 +CONFIG_FB_TFT_SSD1306=m
8727 +CONFIG_FB_TFT_SSD1331=m
8728 +CONFIG_FB_TFT_SSD1351=m
8729 +CONFIG_FB_TFT_ST7735R=m
8730 +CONFIG_FB_TFT_ST7789V=m
8731 +CONFIG_FB_TFT_TINYLCD=m
8732 +CONFIG_FB_TFT_TLS8204=m
8733 +CONFIG_FB_TFT_UC1611=m
8734 +CONFIG_FB_TFT_UC1701=m
8735 +CONFIG_FB_TFT_UPD161704=m
8736 +CONFIG_FB_TFT_WATTEROTT=m
8737 +CONFIG_MOST_COMPONENTS=m
8738 +CONFIG_MOST_NET=m
8739 +CONFIG_MOST_SOUND=m
8740 +CONFIG_MOST_VIDEO=m
8741 +CONFIG_MOST_I2C=m
8742 +CONFIG_KS7010=m
8743 +CONFIG_GREYBUS_AUDIO=m
8744 +CONFIG_GREYBUS_AUDIO_APB_CODEC=m
8745 +CONFIG_GREYBUS_BOOTROM=m
8746 +CONFIG_GREYBUS_FIRMWARE=m
8747 +CONFIG_GREYBUS_HID=m
8748 +CONFIG_GREYBUS_LIGHT=m
8749 +CONFIG_GREYBUS_LOG=m
8750 +CONFIG_GREYBUS_LOOPBACK=m
8751 +CONFIG_GREYBUS_POWER=m
8752 +CONFIG_GREYBUS_RAW=m
8753 +CONFIG_GREYBUS_VIBRATOR=m
8754 +CONFIG_GREYBUS_BRIDGED_PHY=m
8755 +CONFIG_GREYBUS_GPIO=m
8756 +CONFIG_GREYBUS_I2C=m
8757 +CONFIG_GREYBUS_PWM=m
8758 +CONFIG_GREYBUS_SDIO=m
8759 +CONFIG_GREYBUS_SPI=m
8760 +CONFIG_GREYBUS_UART=m
8761 +CONFIG_GREYBUS_USB=m
8762 +CONFIG_PI433=m
8765 +# Gasket devices
8767 +CONFIG_STAGING_GASKET_FRAMEWORK=m
8768 +CONFIG_STAGING_APEX_DRIVER=m
8769 +# end of Gasket devices
8771 +CONFIG_FIELDBUS_DEV=m
8772 +CONFIG_KPC2000=y
8773 +CONFIG_KPC2000_CORE=m
8774 +CONFIG_KPC2000_SPI=m
8775 +CONFIG_KPC2000_I2C=m
8776 +CONFIG_KPC2000_DMA=m
8777 +CONFIG_QLGE=m
8778 +CONFIG_WIMAX=m
8779 +CONFIG_WIMAX_DEBUG_LEVEL=8
8780 +CONFIG_WIMAX_I2400M=m
8781 +CONFIG_WIMAX_I2400M_USB=m
8782 +CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8
8783 +CONFIG_WFX=m
8784 +CONFIG_SPMI_HISI3670=m
8785 +CONFIG_X86_PLATFORM_DEVICES=y
8786 +CONFIG_ACPI_WMI=m
8787 +CONFIG_WMI_BMOF=m
8788 +CONFIG_HUAWEI_WMI=m
8789 +CONFIG_UV_SYSFS=m
8790 +CONFIG_INTEL_WMI_SBL_FW_UPDATE=m
8791 +CONFIG_INTEL_WMI_THUNDERBOLT=m
8792 +CONFIG_MXM_WMI=m
8793 +CONFIG_PEAQ_WMI=m
8794 +CONFIG_XIAOMI_WMI=m
8795 +CONFIG_ACERHDF=m
8796 +CONFIG_ACER_WIRELESS=m
8797 +CONFIG_ACER_WMI=m
8798 +CONFIG_AMD_PMC=m
8799 +CONFIG_APPLE_GMUX=m
8800 +CONFIG_ASUS_LAPTOP=m
8801 +CONFIG_ASUS_WIRELESS=m
8802 +CONFIG_ASUS_WMI=m
8803 +CONFIG_ASUS_NB_WMI=m
8804 +CONFIG_EEEPC_LAPTOP=m
8805 +CONFIG_EEEPC_WMI=m
8806 +CONFIG_X86_PLATFORM_DRIVERS_DELL=y
8807 +CONFIG_ALIENWARE_WMI=m
8808 +CONFIG_DCDBAS=m
8809 +CONFIG_DELL_LAPTOP=m
8810 +CONFIG_DELL_RBU=m
8811 +CONFIG_DELL_RBTN=m
8812 +CONFIG_DELL_SMBIOS=m
8813 +CONFIG_DELL_SMBIOS_WMI=y
8814 +CONFIG_DELL_SMBIOS_SMM=y
8815 +CONFIG_DELL_SMO8800=m
8816 +CONFIG_DELL_WMI=m
8817 +CONFIG_DELL_WMI_AIO=m
8818 +CONFIG_DELL_WMI_DESCRIPTOR=m
8819 +CONFIG_DELL_WMI_LED=m
8820 +CONFIG_DELL_WMI_SYSMAN=m
8821 +CONFIG_AMILO_RFKILL=m
8822 +CONFIG_FUJITSU_LAPTOP=m
8823 +CONFIG_FUJITSU_TABLET=m
8824 +CONFIG_GPD_POCKET_FAN=m
8825 +CONFIG_HP_ACCEL=m
8826 +CONFIG_HP_WIRELESS=m
8827 +CONFIG_HP_WMI=m
8828 +CONFIG_IBM_RTL=m
8829 +CONFIG_IDEAPAD_LAPTOP=m
8830 +CONFIG_SENSORS_HDAPS=m
8831 +CONFIG_THINKPAD_ACPI=m
8832 +CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y
8833 +CONFIG_THINKPAD_ACPI_DEBUGFACILITIES=y
8834 +# CONFIG_THINKPAD_ACPI_DEBUG is not set
8835 +# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set
8836 +CONFIG_THINKPAD_ACPI_VIDEO=y
8837 +CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y
8838 +CONFIG_INTEL_ATOMISP2_LED=m
8839 +CONFIG_INTEL_CHT_INT33FE=m
8840 +CONFIG_INTEL_HID_EVENT=m
8841 +CONFIG_INTEL_INT0002_VGPIO=m
8842 +CONFIG_INTEL_MENLOW=m
8843 +CONFIG_INTEL_OAKTRAIL=m
8844 +CONFIG_INTEL_VBTN=m
8845 +CONFIG_MSI_LAPTOP=m
8846 +CONFIG_MSI_WMI=m
8847 +CONFIG_PCENGINES_APU2=m
8848 +CONFIG_SAMSUNG_LAPTOP=m
8849 +CONFIG_SAMSUNG_Q10=m
8850 +CONFIG_ACPI_TOSHIBA=m
8851 +CONFIG_TOSHIBA_BT_RFKILL=m
8852 +CONFIG_TOSHIBA_HAPS=m
8853 +# CONFIG_TOSHIBA_WMI is not set
8854 +CONFIG_ACPI_CMPC=m
8855 +CONFIG_COMPAL_LAPTOP=m
8856 +CONFIG_LG_LAPTOP=m
8857 +CONFIG_PANASONIC_LAPTOP=m
8858 +CONFIG_SONY_LAPTOP=m
8859 +CONFIG_SONYPI_COMPAT=y
8860 +CONFIG_SYSTEM76_ACPI=m
8861 +CONFIG_TOPSTAR_LAPTOP=m
8862 +CONFIG_I2C_MULTI_INSTANTIATE=m
8863 +CONFIG_MLX_PLATFORM=m
8864 +CONFIG_TOUCHSCREEN_DMI=y
8865 +CONFIG_INTEL_IPS=m
8866 +CONFIG_INTEL_RST=m
8867 +CONFIG_INTEL_SMARTCONNECT=m
8870 +# Intel Speed Select Technology interface support
8872 +CONFIG_INTEL_SPEED_SELECT_INTERFACE=m
8873 +# end of Intel Speed Select Technology interface support
8875 +CONFIG_INTEL_TURBO_MAX_3=y
8876 +CONFIG_INTEL_UNCORE_FREQ_CONTROL=m
8877 +CONFIG_INTEL_BXTWC_PMIC_TMU=m
8878 +CONFIG_INTEL_CHTDC_TI_PWRBTN=m
8879 +CONFIG_INTEL_MRFLD_PWRBTN=m
8880 +CONFIG_INTEL_PMC_CORE=y
8881 +CONFIG_INTEL_PMT_CLASS=m
8882 +CONFIG_INTEL_PMT_TELEMETRY=m
8883 +CONFIG_INTEL_PMT_CRASHLOG=m
8884 +CONFIG_INTEL_PUNIT_IPC=m
8885 +CONFIG_INTEL_SCU_IPC=y
8886 +CONFIG_INTEL_SCU=y
8887 +CONFIG_INTEL_SCU_PCI=y
8888 +CONFIG_INTEL_SCU_PLATFORM=m
8889 +CONFIG_INTEL_SCU_IPC_UTIL=m
8890 +CONFIG_INTEL_TELEMETRY=m
8891 +CONFIG_PMC_ATOM=y
8892 +CONFIG_CHROME_PLATFORMS=y
8893 +CONFIG_CHROMEOS_LAPTOP=m
8894 +CONFIG_CHROMEOS_PSTORE=m
8895 +CONFIG_CHROMEOS_TBMC=m
8896 +CONFIG_CROS_EC=m
8897 +CONFIG_CROS_EC_I2C=m
8898 +CONFIG_CROS_EC_ISHTP=m
8899 +CONFIG_CROS_EC_SPI=m
8900 +CONFIG_CROS_EC_LPC=m
8901 +CONFIG_CROS_EC_PROTO=y
8902 +CONFIG_CROS_KBD_LED_BACKLIGHT=m
8903 +CONFIG_CROS_EC_CHARDEV=m
8904 +CONFIG_CROS_EC_LIGHTBAR=m
8905 +CONFIG_CROS_EC_DEBUGFS=m
8906 +CONFIG_CROS_EC_SENSORHUB=m
8907 +CONFIG_CROS_EC_SYSFS=m
8908 +CONFIG_CROS_EC_TYPEC=m
8909 +CONFIG_CROS_USBPD_LOGGER=m
8910 +CONFIG_CROS_USBPD_NOTIFY=m
8911 +CONFIG_WILCO_EC=m
8912 +CONFIG_WILCO_EC_DEBUGFS=m
8913 +CONFIG_WILCO_EC_EVENTS=m
8914 +CONFIG_WILCO_EC_TELEMETRY=m
8915 +CONFIG_MELLANOX_PLATFORM=y
8916 +CONFIG_MLXREG_HOTPLUG=m
8917 +CONFIG_MLXREG_IO=m
8918 +CONFIG_SURFACE_PLATFORMS=y
8919 +CONFIG_SURFACE3_WMI=m
8920 +CONFIG_SURFACE_3_BUTTON=m
8921 +CONFIG_SURFACE_3_POWER_OPREGION=m
8922 +CONFIG_SURFACE_ACPI_NOTIFY=m
8923 +CONFIG_SURFACE_AGGREGATOR_CDEV=m
8924 +CONFIG_SURFACE_GPE=m
8925 +CONFIG_SURFACE_HOTPLUG=m
8926 +CONFIG_SURFACE_PRO3_BUTTON=m
8927 +CONFIG_SURFACE_AGGREGATOR=m
8928 +CONFIG_SURFACE_AGGREGATOR_BUS=y
8929 +# CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION is not set
8930 +CONFIG_HAVE_CLK=y
8931 +CONFIG_CLKDEV_LOOKUP=y
8932 +CONFIG_HAVE_CLK_PREPARE=y
8933 +CONFIG_COMMON_CLK=y
8934 +CONFIG_COMMON_CLK_WM831X=m
8935 +CONFIG_COMMON_CLK_MAX9485=m
8936 +CONFIG_COMMON_CLK_SI5341=m
8937 +CONFIG_COMMON_CLK_SI5351=m
8938 +CONFIG_COMMON_CLK_SI544=m
8939 +CONFIG_COMMON_CLK_CDCE706=m
8940 +CONFIG_COMMON_CLK_CS2000_CP=m
8941 +CONFIG_COMMON_CLK_S2MPS11=m
8942 +CONFIG_CLK_TWL6040=m
8943 +CONFIG_COMMON_CLK_PALMAS=m
8944 +CONFIG_COMMON_CLK_PWM=m
8945 +CONFIG_XILINX_VCU=m
8946 +CONFIG_HWSPINLOCK=y
8949 +# Clock Source drivers
8951 +CONFIG_CLKEVT_I8253=y
8952 +CONFIG_I8253_LOCK=y
8953 +CONFIG_CLKBLD_I8253=y
8954 +# end of Clock Source drivers
8956 +CONFIG_MAILBOX=y
8957 +CONFIG_PCC=y
8958 +CONFIG_ALTERA_MBOX=m
8959 +CONFIG_IOMMU_IOVA=y
8960 +CONFIG_IOASID=y
8961 +CONFIG_IOMMU_API=y
8962 +CONFIG_IOMMU_SUPPORT=y
8965 +# Generic IOMMU Pagetable Support
8967 +CONFIG_IOMMU_IO_PGTABLE=y
8968 +# end of Generic IOMMU Pagetable Support
8970 +# CONFIG_IOMMU_DEBUGFS is not set
8971 +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set
8972 +CONFIG_IOMMU_DMA=y
8973 +CONFIG_AMD_IOMMU=y
8974 +CONFIG_AMD_IOMMU_V2=m
8975 +CONFIG_DMAR_TABLE=y
8976 +CONFIG_INTEL_IOMMU=y
8977 +CONFIG_INTEL_IOMMU_SVM=y
8978 +# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
8979 +CONFIG_INTEL_IOMMU_FLOPPY_WA=y
8980 +# CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON is not set
8981 +CONFIG_IRQ_REMAP=y
8982 +CONFIG_HYPERV_IOMMU=y
8985 +# Remoteproc drivers
8987 +CONFIG_REMOTEPROC=y
8988 +CONFIG_REMOTEPROC_CDEV=y
8989 +# end of Remoteproc drivers
8992 +# Rpmsg drivers
8994 +CONFIG_RPMSG=m
8995 +CONFIG_RPMSG_CHAR=m
8996 +CONFIG_RPMSG_NS=m
8997 +CONFIG_RPMSG_QCOM_GLINK=m
8998 +CONFIG_RPMSG_QCOM_GLINK_RPM=m
8999 +CONFIG_RPMSG_VIRTIO=m
9000 +# end of Rpmsg drivers
9002 +CONFIG_SOUNDWIRE=m
9005 +# SoundWire Devices
9007 +CONFIG_SOUNDWIRE_CADENCE=m
9008 +CONFIG_SOUNDWIRE_INTEL=m
9009 +CONFIG_SOUNDWIRE_QCOM=m
9010 +CONFIG_SOUNDWIRE_GENERIC_ALLOCATION=m
9013 +# SOC (System On Chip) specific Drivers
9017 +# Amlogic SoC drivers
9019 +# end of Amlogic SoC drivers
9022 +# Broadcom SoC drivers
9024 +# end of Broadcom SoC drivers
9027 +# NXP/Freescale QorIQ SoC drivers
9029 +# end of NXP/Freescale QorIQ SoC drivers
9032 +# i.MX SoC drivers
9034 +# end of i.MX SoC drivers
9037 +# Enable LiteX SoC Builder specific drivers
9039 +# end of Enable LiteX SoC Builder specific drivers
9042 +# Qualcomm SoC drivers
9044 +CONFIG_QCOM_QMI_HELPERS=m
9045 +# end of Qualcomm SoC drivers
9047 +CONFIG_SOC_TI=y
9050 +# Xilinx SoC drivers
9052 +# end of Xilinx SoC drivers
9053 +# end of SOC (System On Chip) specific Drivers
9055 +CONFIG_PM_DEVFREQ=y
9058 +# DEVFREQ Governors
9060 +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
9061 +CONFIG_DEVFREQ_GOV_PERFORMANCE=y
9062 +CONFIG_DEVFREQ_GOV_POWERSAVE=y
9063 +CONFIG_DEVFREQ_GOV_USERSPACE=y
9064 +CONFIG_DEVFREQ_GOV_PASSIVE=y
9067 +# DEVFREQ Drivers
9069 +CONFIG_PM_DEVFREQ_EVENT=y
9070 +CONFIG_EXTCON=y
9073 +# Extcon Device Drivers
9075 +CONFIG_EXTCON_ADC_JACK=m
9076 +CONFIG_EXTCON_ARIZONA=m
9077 +CONFIG_EXTCON_AXP288=m
9078 +CONFIG_EXTCON_FSA9480=m
9079 +CONFIG_EXTCON_GPIO=m
9080 +CONFIG_EXTCON_INTEL_INT3496=m
9081 +CONFIG_EXTCON_INTEL_CHT_WC=m
9082 +CONFIG_EXTCON_INTEL_MRFLD=m
9083 +CONFIG_EXTCON_MAX14577=m
9084 +CONFIG_EXTCON_MAX3355=m
9085 +CONFIG_EXTCON_MAX77693=m
9086 +CONFIG_EXTCON_MAX77843=m
9087 +CONFIG_EXTCON_MAX8997=m
9088 +CONFIG_EXTCON_PALMAS=m
9089 +CONFIG_EXTCON_PTN5150=m
9090 +CONFIG_EXTCON_RT8973A=m
9091 +CONFIG_EXTCON_SM5502=m
9092 +CONFIG_EXTCON_USB_GPIO=m
9093 +CONFIG_EXTCON_USBC_CROS_EC=m
9094 +CONFIG_EXTCON_USBC_TUSB320=m
9095 +CONFIG_MEMORY=y
9096 +CONFIG_FPGA_DFL_EMIF=m
9097 +CONFIG_IIO=m
9098 +CONFIG_IIO_BUFFER=y
9099 +CONFIG_IIO_BUFFER_CB=m
9100 +CONFIG_IIO_BUFFER_DMA=m
9101 +CONFIG_IIO_BUFFER_DMAENGINE=m
9102 +CONFIG_IIO_BUFFER_HW_CONSUMER=m
9103 +CONFIG_IIO_KFIFO_BUF=m
9104 +CONFIG_IIO_TRIGGERED_BUFFER=m
9105 +CONFIG_IIO_CONFIGFS=m
9106 +CONFIG_IIO_TRIGGER=y
9107 +CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
9108 +CONFIG_IIO_SW_DEVICE=m
9109 +CONFIG_IIO_SW_TRIGGER=m
9110 +CONFIG_IIO_TRIGGERED_EVENT=m
9113 +# Accelerometers
9115 +CONFIG_ADIS16201=m
9116 +CONFIG_ADIS16209=m
9117 +CONFIG_ADXL372=m
9118 +CONFIG_ADXL372_SPI=m
9119 +CONFIG_ADXL372_I2C=m
9120 +CONFIG_BMA220=m
9121 +CONFIG_BMA400=m
9122 +CONFIG_BMA400_I2C=m
9123 +CONFIG_BMA400_SPI=m
9124 +CONFIG_BMC150_ACCEL=m
9125 +CONFIG_BMC150_ACCEL_I2C=m
9126 +CONFIG_BMC150_ACCEL_SPI=m
9127 +CONFIG_DA280=m
9128 +CONFIG_DA311=m
9129 +CONFIG_DMARD09=m
9130 +CONFIG_DMARD10=m
9131 +CONFIG_HID_SENSOR_ACCEL_3D=m
9132 +CONFIG_IIO_CROS_EC_ACCEL_LEGACY=m
9133 +CONFIG_IIO_ST_ACCEL_3AXIS=m
9134 +CONFIG_IIO_ST_ACCEL_I2C_3AXIS=m
9135 +CONFIG_IIO_ST_ACCEL_SPI_3AXIS=m
9136 +CONFIG_KXSD9=m
9137 +CONFIG_KXSD9_SPI=m
9138 +CONFIG_KXSD9_I2C=m
9139 +CONFIG_KXCJK1013=m
9140 +CONFIG_MC3230=m
9141 +CONFIG_MMA7455=m
9142 +CONFIG_MMA7455_I2C=m
9143 +CONFIG_MMA7455_SPI=m
9144 +CONFIG_MMA7660=m
9145 +CONFIG_MMA8452=m
9146 +CONFIG_MMA9551_CORE=m
9147 +CONFIG_MMA9551=m
9148 +CONFIG_MMA9553=m
9149 +CONFIG_MXC4005=m
9150 +CONFIG_MXC6255=m
9151 +CONFIG_SCA3000=m
9152 +CONFIG_STK8312=m
9153 +CONFIG_STK8BA50=m
9154 +# end of Accelerometers
9157 +# Analog to digital converters
9159 +CONFIG_AD_SIGMA_DELTA=m
9160 +CONFIG_AD7091R5=m
9161 +CONFIG_AD7124=m
9162 +CONFIG_AD7192=m
9163 +CONFIG_AD7266=m
9164 +CONFIG_AD7291=m
9165 +CONFIG_AD7292=m
9166 +CONFIG_AD7298=m
9167 +CONFIG_AD7476=m
9168 +CONFIG_AD7606=m
9169 +CONFIG_AD7606_IFACE_PARALLEL=m
9170 +CONFIG_AD7606_IFACE_SPI=m
9171 +CONFIG_AD7766=m
9172 +CONFIG_AD7768_1=m
9173 +CONFIG_AD7780=m
9174 +CONFIG_AD7791=m
9175 +CONFIG_AD7793=m
9176 +CONFIG_AD7887=m
9177 +CONFIG_AD7923=m
9178 +CONFIG_AD7949=m
9179 +CONFIG_AD799X=m
9180 +CONFIG_AD9467=m
9181 +CONFIG_ADI_AXI_ADC=m
9182 +CONFIG_AXP20X_ADC=m
9183 +CONFIG_AXP288_ADC=m
9184 +CONFIG_CC10001_ADC=m
9185 +CONFIG_DA9150_GPADC=m
9186 +CONFIG_DLN2_ADC=m
9187 +CONFIG_HI8435=m
9188 +CONFIG_HX711=m
9189 +CONFIG_INA2XX_ADC=m
9190 +CONFIG_INTEL_MRFLD_ADC=m
9191 +CONFIG_LP8788_ADC=m
9192 +CONFIG_LTC2471=m
9193 +CONFIG_LTC2485=m
9194 +CONFIG_LTC2496=m
9195 +CONFIG_LTC2497=m
9196 +CONFIG_MAX1027=m
9197 +CONFIG_MAX11100=m
9198 +CONFIG_MAX1118=m
9199 +CONFIG_MAX1241=m
9200 +CONFIG_MAX1363=m
9201 +CONFIG_MAX9611=m
9202 +CONFIG_MCP320X=m
9203 +CONFIG_MCP3422=m
9204 +CONFIG_MCP3911=m
9205 +CONFIG_MEDIATEK_MT6360_ADC=m
9206 +CONFIG_MEN_Z188_ADC=m
9207 +CONFIG_MP2629_ADC=m
9208 +CONFIG_NAU7802=m
9209 +CONFIG_PALMAS_GPADC=m
9210 +CONFIG_QCOM_VADC_COMMON=m
9211 +CONFIG_QCOM_SPMI_IADC=m
9212 +CONFIG_QCOM_SPMI_VADC=m
9213 +CONFIG_QCOM_SPMI_ADC5=m
9214 +CONFIG_STX104=m
9215 +CONFIG_TI_ADC081C=m
9216 +CONFIG_TI_ADC0832=m
9217 +CONFIG_TI_ADC084S021=m
9218 +CONFIG_TI_ADC12138=m
9219 +CONFIG_TI_ADC108S102=m
9220 +CONFIG_TI_ADC128S052=m
9221 +CONFIG_TI_ADC161S626=m
9222 +CONFIG_TI_ADS1015=m
9223 +CONFIG_TI_ADS7950=m
9224 +CONFIG_TI_AM335X_ADC=m
9225 +CONFIG_TI_TLC4541=m
9226 +CONFIG_TWL4030_MADC=m
9227 +CONFIG_TWL6030_GPADC=m
9228 +CONFIG_VIPERBOARD_ADC=m
9229 +CONFIG_XILINX_XADC=m
9230 +# end of Analog to digital converters
9233 +# Analog Front Ends
9235 +# end of Analog Front Ends
9238 +# Amplifiers
9240 +CONFIG_AD8366=m
9241 +CONFIG_HMC425=m
9242 +# end of Amplifiers
9245 +# Chemical Sensors
9247 +CONFIG_ATLAS_PH_SENSOR=m
9248 +CONFIG_ATLAS_EZO_SENSOR=m
9249 +CONFIG_BME680=m
9250 +CONFIG_BME680_I2C=m
9251 +CONFIG_BME680_SPI=m
9252 +CONFIG_CCS811=m
9253 +CONFIG_IAQCORE=m
9254 +CONFIG_PMS7003=m
9255 +CONFIG_SCD30_CORE=m
9256 +CONFIG_SCD30_I2C=m
9257 +CONFIG_SCD30_SERIAL=m
9258 +CONFIG_SENSIRION_SGP30=m
9259 +CONFIG_SPS30=m
9260 +CONFIG_VZ89X=m
9261 +# end of Chemical Sensors
9263 +CONFIG_IIO_CROS_EC_SENSORS_CORE=m
9264 +CONFIG_IIO_CROS_EC_SENSORS=m
9265 +CONFIG_IIO_CROS_EC_SENSORS_LID_ANGLE=m
9268 +# Hid Sensor IIO Common
9270 +CONFIG_HID_SENSOR_IIO_COMMON=m
9271 +CONFIG_HID_SENSOR_IIO_TRIGGER=m
9272 +# end of Hid Sensor IIO Common
9274 +CONFIG_IIO_MS_SENSORS_I2C=m
9277 +# SSP Sensor Common
9279 +CONFIG_IIO_SSP_SENSORS_COMMONS=m
9280 +CONFIG_IIO_SSP_SENSORHUB=m
9281 +# end of SSP Sensor Common
9283 +CONFIG_IIO_ST_SENSORS_I2C=m
9284 +CONFIG_IIO_ST_SENSORS_SPI=m
9285 +CONFIG_IIO_ST_SENSORS_CORE=m
9288 +# Digital to analog converters
9290 +CONFIG_AD5064=m
9291 +CONFIG_AD5360=m
9292 +CONFIG_AD5380=m
9293 +CONFIG_AD5421=m
9294 +CONFIG_AD5446=m
9295 +CONFIG_AD5449=m
9296 +CONFIG_AD5592R_BASE=m
9297 +CONFIG_AD5592R=m
9298 +CONFIG_AD5593R=m
9299 +CONFIG_AD5504=m
9300 +CONFIG_AD5624R_SPI=m
9301 +CONFIG_AD5686=m
9302 +CONFIG_AD5686_SPI=m
9303 +CONFIG_AD5696_I2C=m
9304 +CONFIG_AD5755=m
9305 +CONFIG_AD5758=m
9306 +CONFIG_AD5761=m
9307 +CONFIG_AD5764=m
9308 +CONFIG_AD5766=m
9309 +CONFIG_AD5770R=m
9310 +CONFIG_AD5791=m
9311 +CONFIG_AD7303=m
9312 +CONFIG_AD8801=m
9313 +CONFIG_CIO_DAC=m
9314 +CONFIG_DS4424=m
9315 +CONFIG_LTC1660=m
9316 +CONFIG_LTC2632=m
9317 +CONFIG_M62332=m
9318 +CONFIG_MAX517=m
9319 +CONFIG_MCP4725=m
9320 +CONFIG_MCP4922=m
9321 +CONFIG_TI_DAC082S085=m
9322 +CONFIG_TI_DAC5571=m
9323 +CONFIG_TI_DAC7311=m
9324 +CONFIG_TI_DAC7612=m
9325 +# end of Digital to analog converters
9328 +# IIO dummy driver
9330 +CONFIG_IIO_SIMPLE_DUMMY=m
9331 +# CONFIG_IIO_SIMPLE_DUMMY_EVENTS is not set
9332 +# CONFIG_IIO_SIMPLE_DUMMY_BUFFER is not set
9333 +# end of IIO dummy driver
9336 +# Frequency Synthesizers DDS/PLL
9340 +# Clock Generator/Distribution
9342 +CONFIG_AD9523=m
9343 +# end of Clock Generator/Distribution
9346 +# Phase-Locked Loop (PLL) frequency synthesizers
9348 +CONFIG_ADF4350=m
9349 +CONFIG_ADF4371=m
9350 +# end of Phase-Locked Loop (PLL) frequency synthesizers
9351 +# end of Frequency Synthesizers DDS/PLL
9354 +# Digital gyroscope sensors
9356 +CONFIG_ADIS16080=m
9357 +CONFIG_ADIS16130=m
9358 +CONFIG_ADIS16136=m
9359 +CONFIG_ADIS16260=m
9360 +CONFIG_ADXRS290=m
9361 +CONFIG_ADXRS450=m
9362 +CONFIG_BMG160=m
9363 +CONFIG_BMG160_I2C=m
9364 +CONFIG_BMG160_SPI=m
9365 +CONFIG_FXAS21002C=m
9366 +CONFIG_FXAS21002C_I2C=m
9367 +CONFIG_FXAS21002C_SPI=m
9368 +CONFIG_HID_SENSOR_GYRO_3D=m
9369 +CONFIG_MPU3050=m
9370 +CONFIG_MPU3050_I2C=m
9371 +CONFIG_IIO_ST_GYRO_3AXIS=m
9372 +CONFIG_IIO_ST_GYRO_I2C_3AXIS=m
9373 +CONFIG_IIO_ST_GYRO_SPI_3AXIS=m
9374 +CONFIG_ITG3200=m
9375 +# end of Digital gyroscope sensors
9378 +# Health Sensors
9382 +# Heart Rate Monitors
9384 +CONFIG_AFE4403=m
9385 +CONFIG_AFE4404=m
9386 +CONFIG_MAX30100=m
9387 +CONFIG_MAX30102=m
9388 +# end of Heart Rate Monitors
9389 +# end of Health Sensors
9392 +# Humidity sensors
9394 +CONFIG_AM2315=m
9395 +CONFIG_DHT11=m
9396 +CONFIG_HDC100X=m
9397 +CONFIG_HDC2010=m
9398 +CONFIG_HID_SENSOR_HUMIDITY=m
9399 +CONFIG_HTS221=m
9400 +CONFIG_HTS221_I2C=m
9401 +CONFIG_HTS221_SPI=m
9402 +CONFIG_HTU21=m
9403 +CONFIG_SI7005=m
9404 +CONFIG_SI7020=m
9405 +# end of Humidity sensors
9408 +# Inertial measurement units
9410 +CONFIG_ADIS16400=m
9411 +CONFIG_ADIS16460=m
9412 +CONFIG_ADIS16475=m
9413 +CONFIG_ADIS16480=m
9414 +CONFIG_BMI160=m
9415 +CONFIG_BMI160_I2C=m
9416 +CONFIG_BMI160_SPI=m
9417 +CONFIG_FXOS8700=m
9418 +CONFIG_FXOS8700_I2C=m
9419 +CONFIG_FXOS8700_SPI=m
9420 +CONFIG_KMX61=m
9421 +CONFIG_INV_ICM42600=m
9422 +CONFIG_INV_ICM42600_I2C=m
9423 +CONFIG_INV_ICM42600_SPI=m
9424 +CONFIG_INV_MPU6050_IIO=m
9425 +CONFIG_INV_MPU6050_I2C=m
9426 +CONFIG_INV_MPU6050_SPI=m
9427 +CONFIG_IIO_ST_LSM6DSX=m
9428 +CONFIG_IIO_ST_LSM6DSX_I2C=m
9429 +CONFIG_IIO_ST_LSM6DSX_SPI=m
9430 +CONFIG_IIO_ST_LSM6DSX_I3C=m
9431 +# end of Inertial measurement units
9433 +CONFIG_IIO_ADIS_LIB=m
9434 +CONFIG_IIO_ADIS_LIB_BUFFER=y
9437 +# Light sensors
9439 +CONFIG_ACPI_ALS=m
9440 +CONFIG_ADJD_S311=m
9441 +CONFIG_ADUX1020=m
9442 +CONFIG_AL3010=m
9443 +CONFIG_AL3320A=m
9444 +CONFIG_APDS9300=m
9445 +CONFIG_APDS9960=m
9446 +CONFIG_AS73211=m
9447 +CONFIG_BH1750=m
9448 +CONFIG_BH1780=m
9449 +CONFIG_CM32181=m
9450 +CONFIG_CM3232=m
9451 +CONFIG_CM3323=m
9452 +CONFIG_CM36651=m
9453 +CONFIG_IIO_CROS_EC_LIGHT_PROX=m
9454 +CONFIG_GP2AP002=m
9455 +CONFIG_GP2AP020A00F=m
9456 +CONFIG_IQS621_ALS=m
9457 +CONFIG_SENSORS_ISL29018=m
9458 +CONFIG_SENSORS_ISL29028=m
9459 +CONFIG_ISL29125=m
9460 +CONFIG_HID_SENSOR_ALS=m
9461 +CONFIG_HID_SENSOR_PROX=m
9462 +CONFIG_JSA1212=m
9463 +CONFIG_RPR0521=m
9464 +CONFIG_SENSORS_LM3533=m
9465 +CONFIG_LTR501=m
9466 +CONFIG_LV0104CS=m
9467 +CONFIG_MAX44000=m
9468 +CONFIG_MAX44009=m
9469 +CONFIG_NOA1305=m
9470 +CONFIG_OPT3001=m
9471 +CONFIG_PA12203001=m
9472 +CONFIG_SI1133=m
9473 +CONFIG_SI1145=m
9474 +CONFIG_STK3310=m
9475 +CONFIG_ST_UVIS25=m
9476 +CONFIG_ST_UVIS25_I2C=m
9477 +CONFIG_ST_UVIS25_SPI=m
9478 +CONFIG_TCS3414=m
9479 +CONFIG_TCS3472=m
9480 +CONFIG_SENSORS_TSL2563=m
9481 +CONFIG_TSL2583=m
9482 +CONFIG_TSL2772=m
9483 +CONFIG_TSL4531=m
9484 +CONFIG_US5182D=m
9485 +CONFIG_VCNL4000=m
9486 +CONFIG_VCNL4035=m
9487 +CONFIG_VEML6030=m
9488 +CONFIG_VEML6070=m
9489 +CONFIG_VL6180=m
9490 +CONFIG_ZOPT2201=m
9491 +# end of Light sensors
9494 +# Magnetometer sensors
9496 +CONFIG_AK8975=m
9497 +CONFIG_AK09911=m
9498 +CONFIG_BMC150_MAGN=m
9499 +CONFIG_BMC150_MAGN_I2C=m
9500 +CONFIG_BMC150_MAGN_SPI=m
9501 +CONFIG_MAG3110=m
9502 +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m
9503 +CONFIG_MMC35240=m
9504 +CONFIG_IIO_ST_MAGN_3AXIS=m
9505 +CONFIG_IIO_ST_MAGN_I2C_3AXIS=m
9506 +CONFIG_IIO_ST_MAGN_SPI_3AXIS=m
9507 +CONFIG_SENSORS_HMC5843=m
9508 +CONFIG_SENSORS_HMC5843_I2C=m
9509 +CONFIG_SENSORS_HMC5843_SPI=m
9510 +CONFIG_SENSORS_RM3100=m
9511 +CONFIG_SENSORS_RM3100_I2C=m
9512 +CONFIG_SENSORS_RM3100_SPI=m
9513 +CONFIG_YAMAHA_YAS530=m
9514 +# end of Magnetometer sensors
9517 +# Multiplexers
9519 +# end of Multiplexers
9522 +# Inclinometer sensors
9524 +CONFIG_HID_SENSOR_INCLINOMETER_3D=m
9525 +CONFIG_HID_SENSOR_DEVICE_ROTATION=m
9526 +# end of Inclinometer sensors
9529 +# Triggers - standalone
9531 +CONFIG_IIO_HRTIMER_TRIGGER=m
9532 +CONFIG_IIO_INTERRUPT_TRIGGER=m
9533 +CONFIG_IIO_TIGHTLOOP_TRIGGER=m
9534 +CONFIG_IIO_SYSFS_TRIGGER=m
9535 +# end of Triggers - standalone
9538 +# Linear and angular position sensors
9540 +CONFIG_IQS624_POS=m
9541 +CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE=m
9542 +# end of Linear and angular position sensors
9545 +# Digital potentiometers
9547 +CONFIG_AD5272=m
9548 +CONFIG_DS1803=m
9549 +CONFIG_MAX5432=m
9550 +CONFIG_MAX5481=m
9551 +CONFIG_MAX5487=m
9552 +CONFIG_MCP4018=m
9553 +CONFIG_MCP4131=m
9554 +CONFIG_MCP4531=m
9555 +CONFIG_MCP41010=m
9556 +CONFIG_TPL0102=m
9557 +# end of Digital potentiometers
9560 +# Digital potentiostats
9562 +CONFIG_LMP91000=m
9563 +# end of Digital potentiostats
9566 +# Pressure sensors
9568 +CONFIG_ABP060MG=m
9569 +CONFIG_BMP280=m
9570 +CONFIG_BMP280_I2C=m
9571 +CONFIG_BMP280_SPI=m
9572 +CONFIG_IIO_CROS_EC_BARO=m
9573 +CONFIG_DLHL60D=m
9574 +CONFIG_DPS310=m
9575 +CONFIG_HID_SENSOR_PRESS=m
9576 +CONFIG_HP03=m
9577 +CONFIG_ICP10100=m
9578 +CONFIG_MPL115=m
9579 +CONFIG_MPL115_I2C=m
9580 +CONFIG_MPL115_SPI=m
9581 +CONFIG_MPL3115=m
9582 +CONFIG_MS5611=m
9583 +CONFIG_MS5611_I2C=m
9584 +CONFIG_MS5611_SPI=m
9585 +CONFIG_MS5637=m
9586 +CONFIG_IIO_ST_PRESS=m
9587 +CONFIG_IIO_ST_PRESS_I2C=m
9588 +CONFIG_IIO_ST_PRESS_SPI=m
9589 +CONFIG_T5403=m
9590 +CONFIG_HP206C=m
9591 +CONFIG_ZPA2326=m
9592 +CONFIG_ZPA2326_I2C=m
9593 +CONFIG_ZPA2326_SPI=m
9594 +# end of Pressure sensors
9597 +# Lightning sensors
9599 +CONFIG_AS3935=m
9600 +# end of Lightning sensors
9603 +# Proximity and distance sensors
9605 +CONFIG_ISL29501=m
9606 +CONFIG_LIDAR_LITE_V2=m
9607 +CONFIG_MB1232=m
9608 +CONFIG_PING=m
9609 +CONFIG_RFD77402=m
9610 +CONFIG_SRF04=m
9611 +CONFIG_SX9310=m
9612 +CONFIG_SX9500=m
9613 +CONFIG_SRF08=m
9614 +CONFIG_VCNL3020=m
9615 +CONFIG_VL53L0X_I2C=m
9616 +# end of Proximity and distance sensors
9619 +# Resolver to digital converters
9621 +CONFIG_AD2S90=m
9622 +CONFIG_AD2S1200=m
9623 +# end of Resolver to digital converters
9626 +# Temperature sensors
9628 +CONFIG_IQS620AT_TEMP=m
9629 +CONFIG_LTC2983=m
9630 +CONFIG_MAXIM_THERMOCOUPLE=m
9631 +CONFIG_HID_SENSOR_TEMP=m
9632 +CONFIG_MLX90614=m
9633 +CONFIG_MLX90632=m
9634 +CONFIG_TMP006=m
9635 +CONFIG_TMP007=m
9636 +CONFIG_TSYS01=m
9637 +CONFIG_TSYS02D=m
9638 +CONFIG_MAX31856=m
9639 +# end of Temperature sensors
9641 +CONFIG_NTB=m
9642 +CONFIG_NTB_MSI=y
9643 +# CONFIG_NTB_AMD is not set
9644 +CONFIG_NTB_IDT=m
9645 +CONFIG_NTB_INTEL=m
9646 +CONFIG_NTB_EPF=m
9647 +CONFIG_NTB_SWITCHTEC=m
9648 +CONFIG_NTB_PINGPONG=m
9649 +CONFIG_NTB_TOOL=m
9650 +CONFIG_NTB_PERF=m
9651 +# CONFIG_NTB_MSI_TEST is not set
9652 +CONFIG_NTB_TRANSPORT=m
9653 +CONFIG_VME_BUS=y
9656 +# VME Bridge Drivers
9658 +CONFIG_VME_CA91CX42=m
9659 +CONFIG_VME_TSI148=m
9660 +CONFIG_VME_FAKE=m
9663 +# VME Board Drivers
9665 +CONFIG_VMIVME_7805=m
9668 +# VME Device Drivers
9670 +CONFIG_VME_USER=m
9671 +CONFIG_PWM=y
9672 +CONFIG_PWM_SYSFS=y
9673 +# CONFIG_PWM_DEBUG is not set
9674 +CONFIG_PWM_CRC=y
9675 +CONFIG_PWM_CROS_EC=m
9676 +CONFIG_PWM_DWC=m
9677 +CONFIG_PWM_IQS620A=m
9678 +CONFIG_PWM_LP3943=m
9679 +CONFIG_PWM_LPSS=y
9680 +CONFIG_PWM_LPSS_PCI=y
9681 +CONFIG_PWM_LPSS_PLATFORM=y
9682 +CONFIG_PWM_PCA9685=m
9683 +CONFIG_PWM_TWL=m
9684 +CONFIG_PWM_TWL_LED=m
9687 +# IRQ chip support
9689 +CONFIG_MADERA_IRQ=m
9690 +# end of IRQ chip support
9692 +CONFIG_IPACK_BUS=m
9693 +CONFIG_BOARD_TPCI200=m
9694 +CONFIG_SERIAL_IPOCTAL=m
9695 +CONFIG_RESET_CONTROLLER=y
9696 +CONFIG_RESET_BRCMSTB_RESCAL=y
9697 +CONFIG_RESET_TI_SYSCON=m
9700 +# PHY Subsystem
9702 +CONFIG_GENERIC_PHY=y
9703 +CONFIG_USB_LGM_PHY=m
9704 +CONFIG_BCM_KONA_USB2_PHY=m
9705 +CONFIG_PHY_PXA_28NM_HSIC=m
9706 +CONFIG_PHY_PXA_28NM_USB2=m
9707 +CONFIG_PHY_CPCAP_USB=m
9708 +CONFIG_PHY_QCOM_USB_HS=m
9709 +CONFIG_PHY_QCOM_USB_HSIC=m
9710 +CONFIG_PHY_SAMSUNG_USB2=m
9711 +CONFIG_PHY_TUSB1210=m
9712 +CONFIG_PHY_INTEL_LGM_EMMC=m
9713 +# end of PHY Subsystem
9715 +CONFIG_POWERCAP=y
9716 +CONFIG_INTEL_RAPL_CORE=m
9717 +CONFIG_INTEL_RAPL=m
9718 +CONFIG_IDLE_INJECT=y
9719 +CONFIG_DTPM=y
9720 +CONFIG_DTPM_CPU=y
9721 +CONFIG_MCB=m
9722 +CONFIG_MCB_PCI=m
9723 +CONFIG_MCB_LPC=m
9726 +# Performance monitor support
9728 +# end of Performance monitor support
9730 +CONFIG_RAS=y
9731 +CONFIG_RAS_CEC=y
9732 +# CONFIG_RAS_CEC_DEBUG is not set
9733 +CONFIG_USB4=m
9734 +# CONFIG_USB4_DEBUGFS_WRITE is not set
9735 +# CONFIG_USB4_DMA_TEST is not set
9738 +# Android
9740 +CONFIG_ANDROID=y
9741 +CONFIG_ANDROID_BINDER_IPC=m
9742 +CONFIG_ANDROID_BINDERFS=m
9743 +CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder,vndbinder"
9744 +# CONFIG_ANDROID_BINDER_IPC_SELFTEST is not set
9745 +# end of Android
9747 +CONFIG_LIBNVDIMM=y
9748 +CONFIG_BLK_DEV_PMEM=m
9749 +CONFIG_ND_BLK=m
9750 +CONFIG_ND_CLAIM=y
9751 +CONFIG_ND_BTT=m
9752 +CONFIG_BTT=y
9753 +CONFIG_ND_PFN=m
9754 +CONFIG_NVDIMM_PFN=y
9755 +CONFIG_NVDIMM_DAX=y
9756 +CONFIG_NVDIMM_KEYS=y
9757 +CONFIG_DAX_DRIVER=y
9758 +CONFIG_DAX=y
9759 +CONFIG_DEV_DAX=m
9760 +CONFIG_DEV_DAX_PMEM=m
9761 +CONFIG_DEV_DAX_HMEM=m
9762 +CONFIG_DEV_DAX_HMEM_DEVICES=y
9763 +CONFIG_DEV_DAX_KMEM=m
9764 +CONFIG_DEV_DAX_PMEM_COMPAT=m
9765 +CONFIG_NVMEM=y
9766 +CONFIG_NVMEM_SYSFS=y
9767 +CONFIG_NVMEM_SPMI_SDAM=m
9768 +CONFIG_RAVE_SP_EEPROM=m
9769 +CONFIG_NVMEM_RMEM=m
9772 +# HW tracing support
9774 +CONFIG_STM=m
9775 +CONFIG_STM_PROTO_BASIC=m
9776 +CONFIG_STM_PROTO_SYS_T=m
9777 +CONFIG_STM_DUMMY=m
9778 +CONFIG_STM_SOURCE_CONSOLE=m
9779 +CONFIG_STM_SOURCE_HEARTBEAT=m
9780 +CONFIG_INTEL_TH=m
9781 +CONFIG_INTEL_TH_PCI=m
9782 +CONFIG_INTEL_TH_ACPI=m
9783 +CONFIG_INTEL_TH_GTH=m
9784 +CONFIG_INTEL_TH_STH=m
9785 +CONFIG_INTEL_TH_MSU=m
9786 +CONFIG_INTEL_TH_PTI=m
9787 +# CONFIG_INTEL_TH_DEBUG is not set
9788 +# end of HW tracing support
9790 +CONFIG_FPGA=m
9791 +CONFIG_ALTERA_PR_IP_CORE=m
9792 +CONFIG_FPGA_MGR_ALTERA_PS_SPI=m
9793 +CONFIG_FPGA_MGR_ALTERA_CVP=m
9794 +CONFIG_FPGA_MGR_XILINX_SPI=m
9795 +CONFIG_FPGA_MGR_MACHXO2_SPI=m
9796 +CONFIG_FPGA_BRIDGE=m
9797 +CONFIG_ALTERA_FREEZE_BRIDGE=m
9798 +CONFIG_XILINX_PR_DECOUPLER=m
9799 +CONFIG_FPGA_REGION=m
9800 +CONFIG_FPGA_DFL=m
9801 +CONFIG_FPGA_DFL_FME=m
9802 +CONFIG_FPGA_DFL_FME_MGR=m
9803 +CONFIG_FPGA_DFL_FME_BRIDGE=m
9804 +CONFIG_FPGA_DFL_FME_REGION=m
9805 +CONFIG_FPGA_DFL_AFU=m
9806 +CONFIG_FPGA_DFL_NIOS_INTEL_PAC_N3000=m
9807 +CONFIG_FPGA_DFL_PCI=m
9808 +CONFIG_TEE=m
9811 +# TEE drivers
9813 +CONFIG_AMDTEE=m
9814 +# end of TEE drivers
9816 +CONFIG_MULTIPLEXER=m
9819 +# Multiplexer drivers
9821 +CONFIG_MUX_ADG792A=m
9822 +CONFIG_MUX_ADGS1408=m
9823 +CONFIG_MUX_GPIO=m
9824 +# end of Multiplexer drivers
9826 +CONFIG_PM_OPP=y
9827 +CONFIG_UNISYS_VISORBUS=m
9828 +CONFIG_SIOX=m
9829 +CONFIG_SIOX_BUS_GPIO=m
9830 +CONFIG_SLIMBUS=m
9831 +CONFIG_SLIM_QCOM_CTRL=m
9832 +CONFIG_INTERCONNECT=y
9833 +CONFIG_COUNTER=m
9834 +CONFIG_104_QUAD_8=m
9835 +CONFIG_MOST=m
9836 +CONFIG_MOST_USB_HDM=m
9837 +CONFIG_MOST_CDEV=m
9838 +# end of Device Drivers
9841 +# File systems
9843 +CONFIG_DCACHE_WORD_ACCESS=y
9844 +CONFIG_VALIDATE_FS_PARSER=y
9845 +CONFIG_FS_IOMAP=y
9846 +# CONFIG_EXT2_FS is not set
9847 +# CONFIG_EXT3_FS is not set
9848 +CONFIG_EXT4_FS=y
9849 +CONFIG_EXT4_USE_FOR_EXT2=y
9850 +CONFIG_EXT4_FS_POSIX_ACL=y
9851 +CONFIG_EXT4_FS_SECURITY=y
9852 +# CONFIG_EXT4_DEBUG is not set
9853 +CONFIG_JBD2=y
9854 +# CONFIG_JBD2_DEBUG is not set
9855 +CONFIG_FS_MBCACHE=y
9856 +CONFIG_REISERFS_FS=m
9857 +# CONFIG_REISERFS_CHECK is not set
9858 +# CONFIG_REISERFS_PROC_INFO is not set
9859 +CONFIG_REISERFS_FS_XATTR=y
9860 +CONFIG_REISERFS_FS_POSIX_ACL=y
9861 +CONFIG_REISERFS_FS_SECURITY=y
9862 +CONFIG_JFS_FS=m
9863 +CONFIG_JFS_POSIX_ACL=y
9864 +CONFIG_JFS_SECURITY=y
9865 +# CONFIG_JFS_DEBUG is not set
9866 +CONFIG_JFS_STATISTICS=y
9867 +CONFIG_XFS_FS=m
9868 +CONFIG_XFS_SUPPORT_V4=y
9869 +CONFIG_XFS_QUOTA=y
9870 +CONFIG_XFS_POSIX_ACL=y
9871 +CONFIG_XFS_RT=y
9872 +# CONFIG_XFS_ONLINE_SCRUB is not set
9873 +# CONFIG_XFS_WARN is not set
9874 +# CONFIG_XFS_DEBUG is not set
9875 +CONFIG_GFS2_FS=m
9876 +CONFIG_GFS2_FS_LOCKING_DLM=y
9877 +CONFIG_OCFS2_FS=m
9878 +CONFIG_OCFS2_FS_O2CB=m
9879 +CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
9880 +CONFIG_OCFS2_FS_STATS=y
9881 +CONFIG_OCFS2_DEBUG_MASKLOG=y
9882 +# CONFIG_OCFS2_DEBUG_FS is not set
9883 +CONFIG_BTRFS_FS=m
9884 +CONFIG_BTRFS_FS_POSIX_ACL=y
9885 +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set
9886 +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set
9887 +# CONFIG_BTRFS_DEBUG is not set
9888 +# CONFIG_BTRFS_ASSERT is not set
9889 +# CONFIG_BTRFS_FS_REF_VERIFY is not set
9890 +CONFIG_NILFS2_FS=m
9891 +CONFIG_F2FS_FS=m
9892 +CONFIG_F2FS_STAT_FS=y
9893 +CONFIG_F2FS_FS_XATTR=y
9894 +CONFIG_F2FS_FS_POSIX_ACL=y
9895 +CONFIG_F2FS_FS_SECURITY=y
9896 +# CONFIG_F2FS_CHECK_FS is not set
9897 +# CONFIG_F2FS_FAULT_INJECTION is not set
9898 +CONFIG_F2FS_FS_COMPRESSION=y
9899 +CONFIG_F2FS_FS_LZO=y
9900 +CONFIG_F2FS_FS_LZ4=y
9901 +CONFIG_F2FS_FS_LZ4HC=y
9902 +CONFIG_F2FS_FS_ZSTD=y
9903 +CONFIG_F2FS_FS_LZORLE=y
9904 +CONFIG_ZONEFS_FS=m
9905 +CONFIG_FS_DAX=y
9906 +CONFIG_FS_DAX_PMD=y
9907 +CONFIG_FS_POSIX_ACL=y
9908 +CONFIG_EXPORTFS=y
9909 +CONFIG_EXPORTFS_BLOCK_OPS=y
9910 +CONFIG_FILE_LOCKING=y
9911 +CONFIG_MANDATORY_FILE_LOCKING=y
9912 +CONFIG_FS_ENCRYPTION=y
9913 +CONFIG_FS_ENCRYPTION_ALGS=y
9914 +CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
9915 +CONFIG_FS_VERITY=y
9916 +# CONFIG_FS_VERITY_DEBUG is not set
9917 +CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y
9918 +CONFIG_FSNOTIFY=y
9919 +CONFIG_DNOTIFY=y
9920 +CONFIG_INOTIFY_USER=y
9921 +CONFIG_FANOTIFY=y
9922 +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
9923 +CONFIG_QUOTA=y
9924 +CONFIG_QUOTA_NETLINK_INTERFACE=y
9925 +# CONFIG_PRINT_QUOTA_WARNING is not set
9926 +# CONFIG_QUOTA_DEBUG is not set
9927 +CONFIG_QUOTA_TREE=m
9928 +CONFIG_QFMT_V1=m
9929 +CONFIG_QFMT_V2=m
9930 +CONFIG_QUOTACTL=y
9931 +CONFIG_AUTOFS4_FS=m
9932 +CONFIG_AUTOFS_FS=m
9933 +CONFIG_FUSE_FS=y
9934 +CONFIG_CUSE=m
9935 +CONFIG_VIRTIO_FS=m
9936 +CONFIG_FUSE_DAX=y
9937 +CONFIG_OVERLAY_FS=m
9938 +# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set
9939 +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y
9940 +# CONFIG_OVERLAY_FS_INDEX is not set
9941 +CONFIG_OVERLAY_FS_XINO_AUTO=y
9942 +# CONFIG_OVERLAY_FS_METACOPY is not set
9945 +# Caches
9947 +CONFIG_FSCACHE=m
9948 +CONFIG_FSCACHE_STATS=y
9949 +# CONFIG_FSCACHE_HISTOGRAM is not set
9950 +# CONFIG_FSCACHE_DEBUG is not set
9951 +# CONFIG_FSCACHE_OBJECT_LIST is not set
9952 +CONFIG_CACHEFILES=m
9953 +# CONFIG_CACHEFILES_DEBUG is not set
9954 +# CONFIG_CACHEFILES_HISTOGRAM is not set
9955 +# end of Caches
9958 +# CD-ROM/DVD Filesystems
9960 +CONFIG_ISO9660_FS=m
9961 +CONFIG_JOLIET=y
9962 +CONFIG_ZISOFS=y
9963 +CONFIG_UDF_FS=m
9964 +# end of CD-ROM/DVD Filesystems
9967 +# DOS/FAT/EXFAT/NT Filesystems
9969 +CONFIG_FAT_FS=y
9970 +CONFIG_MSDOS_FS=m
9971 +CONFIG_VFAT_FS=y
9972 +CONFIG_FAT_DEFAULT_CODEPAGE=437
9973 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
9974 +# CONFIG_FAT_DEFAULT_UTF8 is not set
9975 +CONFIG_EXFAT_FS=m
9976 +CONFIG_EXFAT_DEFAULT_IOCHARSET="utf8"
9977 +# CONFIG_NTFS_FS is not set
9978 +CONFIG_NTFS3_FS=m
9979 +# CONFIG_NTFS3_64BIT_CLUSTER is not set
9980 +CONFIG_NTFS3_LZX_XPRESS=y
9981 +# CONFIG_NTFS3_FS_POSIX_ACL is not set
9982 +# end of DOS/FAT/EXFAT/NT Filesystems
9985 +# Pseudo filesystems
9987 +CONFIG_PROC_FS=y
9988 +CONFIG_PROC_KCORE=y
9989 +CONFIG_PROC_VMCORE=y
9990 +CONFIG_PROC_VMCORE_DEVICE_DUMP=y
9991 +CONFIG_PROC_SYSCTL=y
9992 +CONFIG_PROC_PAGE_MONITOR=y
9993 +CONFIG_PROC_CHILDREN=y
9994 +CONFIG_PROC_PID_ARCH_STATUS=y
9995 +CONFIG_PROC_CPU_RESCTRL=y
9996 +CONFIG_KERNFS=y
9997 +CONFIG_SYSFS=y
9998 +CONFIG_TMPFS=y
9999 +CONFIG_TMPFS_POSIX_ACL=y
10000 +CONFIG_TMPFS_XATTR=y
10001 +CONFIG_TMPFS_INODE64=y
10002 +CONFIG_HUGETLBFS=y
10003 +CONFIG_HUGETLB_PAGE=y
10004 +CONFIG_MEMFD_CREATE=y
10005 +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y
10006 +CONFIG_CONFIGFS_FS=y
10007 +CONFIG_EFIVAR_FS=y
10008 +# end of Pseudo filesystems
10010 +CONFIG_MISC_FILESYSTEMS=y
10011 +CONFIG_ORANGEFS_FS=m
10012 +CONFIG_ADFS_FS=m
10013 +# CONFIG_ADFS_FS_RW is not set
10014 +CONFIG_AFFS_FS=m
10015 +CONFIG_ECRYPT_FS=y
10016 +CONFIG_ECRYPT_FS_MESSAGING=y
10017 +CONFIG_HFS_FS=m
10018 +CONFIG_HFSPLUS_FS=m
10019 +CONFIG_BEFS_FS=m
10020 +# CONFIG_BEFS_DEBUG is not set
10021 +CONFIG_BFS_FS=m
10022 +CONFIG_EFS_FS=m
10023 +CONFIG_JFFS2_FS=m
10024 +CONFIG_JFFS2_FS_DEBUG=0
10025 +CONFIG_JFFS2_FS_WRITEBUFFER=y
10026 +# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
10027 +# CONFIG_JFFS2_SUMMARY is not set
10028 +CONFIG_JFFS2_FS_XATTR=y
10029 +CONFIG_JFFS2_FS_POSIX_ACL=y
10030 +CONFIG_JFFS2_FS_SECURITY=y
10031 +CONFIG_JFFS2_COMPRESSION_OPTIONS=y
10032 +CONFIG_JFFS2_ZLIB=y
10033 +CONFIG_JFFS2_LZO=y
10034 +CONFIG_JFFS2_RTIME=y
10035 +# CONFIG_JFFS2_RUBIN is not set
10036 +# CONFIG_JFFS2_CMODE_NONE is not set
10037 +# CONFIG_JFFS2_CMODE_PRIORITY is not set
10038 +# CONFIG_JFFS2_CMODE_SIZE is not set
10039 +CONFIG_JFFS2_CMODE_FAVOURLZO=y
10040 +CONFIG_UBIFS_FS=m
10041 +# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
10042 +CONFIG_UBIFS_FS_LZO=y
10043 +CONFIG_UBIFS_FS_ZLIB=y
10044 +CONFIG_UBIFS_FS_ZSTD=y
10045 +# CONFIG_UBIFS_ATIME_SUPPORT is not set
10046 +CONFIG_UBIFS_FS_XATTR=y
10047 +CONFIG_UBIFS_FS_SECURITY=y
10048 +CONFIG_UBIFS_FS_AUTHENTICATION=y
10049 +CONFIG_CRAMFS=m
10050 +CONFIG_CRAMFS_BLOCKDEV=y
10051 +CONFIG_CRAMFS_MTD=y
10052 +CONFIG_SQUASHFS=y
10053 +# CONFIG_SQUASHFS_FILE_CACHE is not set
10054 +CONFIG_SQUASHFS_FILE_DIRECT=y
10055 +CONFIG_SQUASHFS_DECOMP_SINGLE=y
10056 +# CONFIG_SQUASHFS_DECOMP_MULTI is not set
10057 +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set
10058 +CONFIG_SQUASHFS_XATTR=y
10059 +CONFIG_SQUASHFS_ZLIB=y
10060 +CONFIG_SQUASHFS_LZ4=y
10061 +CONFIG_SQUASHFS_LZO=y
10062 +CONFIG_SQUASHFS_XZ=y
10063 +CONFIG_SQUASHFS_ZSTD=y
10064 +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
10065 +# CONFIG_SQUASHFS_EMBEDDED is not set
10066 +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
10067 +CONFIG_VXFS_FS=m
10068 +CONFIG_MINIX_FS=m
10069 +CONFIG_OMFS_FS=m
10070 +CONFIG_HPFS_FS=m
10071 +CONFIG_QNX4FS_FS=m
10072 +CONFIG_QNX6FS_FS=m
10073 +# CONFIG_QNX6FS_DEBUG is not set
10074 +CONFIG_ROMFS_FS=m
10075 +CONFIG_ROMFS_BACKED_BY_BLOCK=y
10076 +# CONFIG_ROMFS_BACKED_BY_MTD is not set
10077 +# CONFIG_ROMFS_BACKED_BY_BOTH is not set
10078 +CONFIG_ROMFS_ON_BLOCK=y
10079 +CONFIG_PSTORE=y
10080 +CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240
10081 +# CONFIG_PSTORE_DEFLATE_COMPRESS is not set
10082 +# CONFIG_PSTORE_LZO_COMPRESS is not set
10083 +# CONFIG_PSTORE_LZ4_COMPRESS is not set
10084 +# CONFIG_PSTORE_LZ4HC_COMPRESS is not set
10085 +# CONFIG_PSTORE_842_COMPRESS is not set
10086 +CONFIG_PSTORE_ZSTD_COMPRESS=y
10087 +CONFIG_PSTORE_COMPRESS=y
10088 +CONFIG_PSTORE_ZSTD_COMPRESS_DEFAULT=y
10089 +CONFIG_PSTORE_COMPRESS_DEFAULT="zstd"
10090 +# CONFIG_PSTORE_CONSOLE is not set
10091 +# CONFIG_PSTORE_PMSG is not set
10092 +CONFIG_PSTORE_RAM=m
10093 +CONFIG_PSTORE_ZONE=m
10094 +CONFIG_PSTORE_BLK=m
10095 +CONFIG_PSTORE_BLK_BLKDEV=""
10096 +CONFIG_PSTORE_BLK_KMSG_SIZE=64
10097 +CONFIG_PSTORE_BLK_MAX_REASON=2
10098 +CONFIG_SYSV_FS=m
10099 +CONFIG_UFS_FS=m
10100 +# CONFIG_UFS_FS_WRITE is not set
10101 +# CONFIG_UFS_DEBUG is not set
10102 +CONFIG_EROFS_FS=m
10103 +# CONFIG_EROFS_FS_DEBUG is not set
10104 +CONFIG_EROFS_FS_XATTR=y
10105 +CONFIG_EROFS_FS_POSIX_ACL=y
10106 +CONFIG_EROFS_FS_SECURITY=y
10107 +CONFIG_EROFS_FS_ZIP=y
10108 +CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT=1
10109 +CONFIG_VBOXSF_FS=m
10110 +CONFIG_NETWORK_FILESYSTEMS=y
10111 +CONFIG_NFS_FS=m
10112 +CONFIG_NFS_V2=m
10113 +CONFIG_NFS_V3=m
10114 +CONFIG_NFS_V3_ACL=y
10115 +CONFIG_NFS_V4=m
10116 +CONFIG_NFS_SWAP=y
10117 +CONFIG_NFS_V4_1=y
10118 +CONFIG_NFS_V4_2=y
10119 +CONFIG_PNFS_FILE_LAYOUT=m
10120 +CONFIG_PNFS_BLOCK=m
10121 +CONFIG_PNFS_FLEXFILE_LAYOUT=m
10122 +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org"
10123 +CONFIG_NFS_V4_1_MIGRATION=y
10124 +CONFIG_NFS_V4_SECURITY_LABEL=y
10125 +CONFIG_NFS_FSCACHE=y
10126 +# CONFIG_NFS_USE_LEGACY_DNS is not set
10127 +CONFIG_NFS_USE_KERNEL_DNS=y
10128 +CONFIG_NFS_DEBUG=y
10129 +CONFIG_NFS_DISABLE_UDP_SUPPORT=y
10130 +# CONFIG_NFS_V4_2_READ_PLUS is not set
10131 +CONFIG_NFSD=m
10132 +CONFIG_NFSD_V2_ACL=y
10133 +CONFIG_NFSD_V3=y
10134 +CONFIG_NFSD_V3_ACL=y
10135 +CONFIG_NFSD_V4=y
10136 +CONFIG_NFSD_PNFS=y
10137 +CONFIG_NFSD_BLOCKLAYOUT=y
10138 +CONFIG_NFSD_SCSILAYOUT=y
10139 +CONFIG_NFSD_FLEXFILELAYOUT=y
10140 +CONFIG_NFSD_V4_2_INTER_SSC=y
10141 +CONFIG_NFSD_V4_SECURITY_LABEL=y
10142 +CONFIG_GRACE_PERIOD=m
10143 +CONFIG_LOCKD=m
10144 +CONFIG_LOCKD_V4=y
10145 +CONFIG_NFS_ACL_SUPPORT=m
10146 +CONFIG_NFS_COMMON=y
10147 +CONFIG_NFS_V4_2_SSC_HELPER=m
10148 +CONFIG_SUNRPC=m
10149 +CONFIG_SUNRPC_GSS=m
10150 +CONFIG_SUNRPC_BACKCHANNEL=y
10151 +CONFIG_SUNRPC_SWAP=y
10152 +CONFIG_RPCSEC_GSS_KRB5=m
10153 +# CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES is not set
10154 +CONFIG_SUNRPC_DEBUG=y
10155 +CONFIG_SUNRPC_XPRT_RDMA=m
10156 +CONFIG_CEPH_FS=m
10157 +CONFIG_CEPH_FSCACHE=y
10158 +CONFIG_CEPH_FS_POSIX_ACL=y
10159 +CONFIG_CEPH_FS_SECURITY_LABEL=y
10160 +CONFIG_CIFS=m
10161 +# CONFIG_CIFS_STATS2 is not set
10162 +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y
10163 +CONFIG_CIFS_WEAK_PW_HASH=y
10164 +CONFIG_CIFS_UPCALL=y
10165 +CONFIG_CIFS_XATTR=y
10166 +CONFIG_CIFS_POSIX=y
10167 +CONFIG_CIFS_DEBUG=y
10168 +# CONFIG_CIFS_DEBUG2 is not set
10169 +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set
10170 +CONFIG_CIFS_DFS_UPCALL=y
10171 +CONFIG_CIFS_SWN_UPCALL=y
10172 +# CONFIG_CIFS_SMB_DIRECT is not set
10173 +CONFIG_CIFS_FSCACHE=y
10174 +CONFIG_CODA_FS=m
10175 +CONFIG_AFS_FS=m
10176 +# CONFIG_AFS_DEBUG is not set
10177 +CONFIG_AFS_FSCACHE=y
10178 +# CONFIG_AFS_DEBUG_CURSOR is not set
10179 +CONFIG_9P_FS=m
10180 +CONFIG_9P_FSCACHE=y
10181 +CONFIG_9P_FS_POSIX_ACL=y
10182 +CONFIG_9P_FS_SECURITY=y
10183 +CONFIG_NLS=y
10184 +CONFIG_NLS_DEFAULT="utf8"
10185 +CONFIG_NLS_CODEPAGE_437=y
10186 +CONFIG_NLS_CODEPAGE_737=m
10187 +CONFIG_NLS_CODEPAGE_775=m
10188 +CONFIG_NLS_CODEPAGE_850=m
10189 +CONFIG_NLS_CODEPAGE_852=m
10190 +CONFIG_NLS_CODEPAGE_855=m
10191 +CONFIG_NLS_CODEPAGE_857=m
10192 +CONFIG_NLS_CODEPAGE_860=m
10193 +CONFIG_NLS_CODEPAGE_861=m
10194 +CONFIG_NLS_CODEPAGE_862=m
10195 +CONFIG_NLS_CODEPAGE_863=m
10196 +CONFIG_NLS_CODEPAGE_864=m
10197 +CONFIG_NLS_CODEPAGE_865=m
10198 +CONFIG_NLS_CODEPAGE_866=m
10199 +CONFIG_NLS_CODEPAGE_869=m
10200 +CONFIG_NLS_CODEPAGE_936=m
10201 +CONFIG_NLS_CODEPAGE_950=m
10202 +CONFIG_NLS_CODEPAGE_932=m
10203 +CONFIG_NLS_CODEPAGE_949=m
10204 +CONFIG_NLS_CODEPAGE_874=m
10205 +CONFIG_NLS_ISO8859_8=m
10206 +CONFIG_NLS_CODEPAGE_1250=m
10207 +CONFIG_NLS_CODEPAGE_1251=m
10208 +CONFIG_NLS_ASCII=m
10209 +CONFIG_NLS_ISO8859_1=m
10210 +CONFIG_NLS_ISO8859_2=m
10211 +CONFIG_NLS_ISO8859_3=m
10212 +CONFIG_NLS_ISO8859_4=m
10213 +CONFIG_NLS_ISO8859_5=m
10214 +CONFIG_NLS_ISO8859_6=m
10215 +CONFIG_NLS_ISO8859_7=m
10216 +CONFIG_NLS_ISO8859_9=m
10217 +CONFIG_NLS_ISO8859_13=m
10218 +CONFIG_NLS_ISO8859_14=m
10219 +CONFIG_NLS_ISO8859_15=m
10220 +CONFIG_NLS_KOI8_R=m
10221 +CONFIG_NLS_KOI8_U=m
10222 +CONFIG_NLS_MAC_ROMAN=m
10223 +CONFIG_NLS_MAC_CELTIC=m
10224 +CONFIG_NLS_MAC_CENTEURO=m
10225 +CONFIG_NLS_MAC_CROATIAN=m
10226 +CONFIG_NLS_MAC_CYRILLIC=m
10227 +CONFIG_NLS_MAC_GAELIC=m
10228 +CONFIG_NLS_MAC_GREEK=m
10229 +CONFIG_NLS_MAC_ICELAND=m
10230 +CONFIG_NLS_MAC_INUIT=m
10231 +CONFIG_NLS_MAC_ROMANIAN=m
10232 +CONFIG_NLS_MAC_TURKISH=m
10233 +CONFIG_NLS_UTF8=m
10234 +CONFIG_DLM=m
10235 +# CONFIG_DLM_DEBUG is not set
10236 +CONFIG_UNICODE=y
10237 +# CONFIG_UNICODE_NORMALIZATION_SELFTEST is not set
10238 +CONFIG_IO_WQ=y
10239 +# end of File systems
10242 +# Security options
10244 +CONFIG_KEYS=y
10245 +CONFIG_KEYS_REQUEST_CACHE=y
10246 +CONFIG_PERSISTENT_KEYRINGS=y
10247 +CONFIG_TRUSTED_KEYS=y
10248 +CONFIG_ENCRYPTED_KEYS=y
10249 +CONFIG_KEY_DH_OPERATIONS=y
10250 +CONFIG_KEY_NOTIFICATIONS=y
10251 +CONFIG_SECURITY_DMESG_RESTRICT=y
10252 +CONFIG_SECURITY=y
10253 +CONFIG_SECURITYFS=y
10254 +CONFIG_SECURITY_NETWORK=y
10255 +CONFIG_PAGE_TABLE_ISOLATION=y
10256 +CONFIG_SECURITY_INFINIBAND=y
10257 +CONFIG_SECURITY_NETWORK_XFRM=y
10258 +CONFIG_SECURITY_PATH=y
10259 +CONFIG_INTEL_TXT=y
10260 +CONFIG_LSM_MMAP_MIN_ADDR=0
10261 +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y
10262 +CONFIG_HARDENED_USERCOPY=y
10263 +CONFIG_HARDENED_USERCOPY_FALLBACK=y
10264 +# CONFIG_HARDENED_USERCOPY_PAGESPAN is not set
10265 +CONFIG_FORTIFY_SOURCE=y
10266 +# CONFIG_STATIC_USERMODEHELPER is not set
10267 +CONFIG_SECURITY_SELINUX=y
10268 +CONFIG_SECURITY_SELINUX_BOOTPARAM=y
10269 +# CONFIG_SECURITY_SELINUX_DISABLE is not set
10270 +CONFIG_SECURITY_SELINUX_DEVELOP=y
10271 +CONFIG_SECURITY_SELINUX_AVC_STATS=y
10272 +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
10273 +CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9
10274 +CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256
10275 +CONFIG_SECURITY_SMACK=y
10276 +# CONFIG_SECURITY_SMACK_BRINGUP is not set
10277 +CONFIG_SECURITY_SMACK_NETFILTER=y
10278 +CONFIG_SECURITY_SMACK_APPEND_SIGNALS=y
10279 +CONFIG_SECURITY_TOMOYO=y
10280 +CONFIG_SECURITY_TOMOYO_MAX_ACCEPT_ENTRY=2048
10281 +CONFIG_SECURITY_TOMOYO_MAX_AUDIT_LOG=1024
10282 +# CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER is not set
10283 +CONFIG_SECURITY_TOMOYO_POLICY_LOADER="/sbin/tomoyo-init"
10284 +CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER="/sbin/init"
10285 +# CONFIG_SECURITY_TOMOYO_INSECURE_BUILTIN_SETTING is not set
10286 +CONFIG_SECURITY_APPARMOR=y
10287 +CONFIG_SECURITY_APPARMOR_HASH=y
10288 +CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y
10289 +# CONFIG_SECURITY_APPARMOR_DEBUG is not set
10290 +# CONFIG_SECURITY_LOADPIN is not set
10291 +CONFIG_SECURITY_YAMA=y
10292 +CONFIG_SECURITY_SAFESETID=y
10293 +CONFIG_SECURITY_LOCKDOWN_LSM=y
10294 +CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
10295 +CONFIG_LOCK_DOWN_KERNEL_FORCE_NONE=y
10296 +# CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY is not set
10297 +# CONFIG_LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY is not set
10298 +CONFIG_INTEGRITY=y
10299 +CONFIG_INTEGRITY_SIGNATURE=y
10300 +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
10301 +CONFIG_INTEGRITY_TRUSTED_KEYRING=y
10302 +CONFIG_INTEGRITY_PLATFORM_KEYRING=y
10303 +CONFIG_LOAD_UEFI_KEYS=y
10304 +CONFIG_INTEGRITY_AUDIT=y
10305 +CONFIG_IMA=y
10306 +CONFIG_IMA_MEASURE_PCR_IDX=10
10307 +CONFIG_IMA_LSM_RULES=y
10308 +# CONFIG_IMA_TEMPLATE is not set
10309 +CONFIG_IMA_NG_TEMPLATE=y
10310 +# CONFIG_IMA_SIG_TEMPLATE is not set
10311 +CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng"
10312 +CONFIG_IMA_DEFAULT_HASH_SHA1=y
10313 +# CONFIG_IMA_DEFAULT_HASH_SHA256 is not set
10314 +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set
10315 +CONFIG_IMA_DEFAULT_HASH="sha1"
10316 +# CONFIG_IMA_WRITE_POLICY is not set
10317 +# CONFIG_IMA_READ_POLICY is not set
10318 +CONFIG_IMA_APPRAISE=y
10319 +# CONFIG_IMA_ARCH_POLICY is not set
10320 +# CONFIG_IMA_APPRAISE_BUILD_POLICY is not set
10321 +CONFIG_IMA_APPRAISE_BOOTPARAM=y
10322 +CONFIG_IMA_APPRAISE_MODSIG=y
10323 +CONFIG_IMA_TRUSTED_KEYRING=y
10324 +# CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY is not set
10325 +# CONFIG_IMA_BLACKLIST_KEYRING is not set
10326 +# CONFIG_IMA_LOAD_X509 is not set
10327 +CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y
10328 +CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y
10329 +# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set
10330 +CONFIG_EVM=y
10331 +CONFIG_EVM_ATTR_FSUUID=y
10332 +CONFIG_EVM_EXTRA_SMACK_XATTRS=y
10333 +CONFIG_EVM_ADD_XATTRS=y
10334 +# CONFIG_EVM_LOAD_X509 is not set
10335 +# CONFIG_DEFAULT_SECURITY_SELINUX is not set
10336 +# CONFIG_DEFAULT_SECURITY_SMACK is not set
10337 +# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
10338 +CONFIG_DEFAULT_SECURITY_APPARMOR=y
10339 +# CONFIG_DEFAULT_SECURITY_DAC is not set
10340 +CONFIG_LSM="lockdown,yama,integrity,apparmor"
10343 +# Kernel hardening options
10347 +# Memory initialization
10349 +CONFIG_INIT_STACK_NONE=y
10350 +CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
10351 +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set
10352 +# end of Memory initialization
10353 +# end of Kernel hardening options
10354 +# end of Security options
10356 +CONFIG_XOR_BLOCKS=m
10357 +CONFIG_ASYNC_CORE=m
10358 +CONFIG_ASYNC_MEMCPY=m
10359 +CONFIG_ASYNC_XOR=m
10360 +CONFIG_ASYNC_PQ=m
10361 +CONFIG_ASYNC_RAID6_RECOV=m
10362 +CONFIG_CRYPTO=y
10365 +# Crypto core or helper
10367 +CONFIG_CRYPTO_ALGAPI=y
10368 +CONFIG_CRYPTO_ALGAPI2=y
10369 +CONFIG_CRYPTO_AEAD=y
10370 +CONFIG_CRYPTO_AEAD2=y
10371 +CONFIG_CRYPTO_SKCIPHER=y
10372 +CONFIG_CRYPTO_SKCIPHER2=y
10373 +CONFIG_CRYPTO_HASH=y
10374 +CONFIG_CRYPTO_HASH2=y
10375 +CONFIG_CRYPTO_RNG=y
10376 +CONFIG_CRYPTO_RNG2=y
10377 +CONFIG_CRYPTO_RNG_DEFAULT=y
10378 +CONFIG_CRYPTO_AKCIPHER2=y
10379 +CONFIG_CRYPTO_AKCIPHER=y
10380 +CONFIG_CRYPTO_KPP2=y
10381 +CONFIG_CRYPTO_KPP=y
10382 +CONFIG_CRYPTO_ACOMP2=y
10383 +CONFIG_CRYPTO_MANAGER=y
10384 +CONFIG_CRYPTO_MANAGER2=y
10385 +CONFIG_CRYPTO_USER=m
10386 +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
10387 +CONFIG_CRYPTO_GF128MUL=y
10388 +CONFIG_CRYPTO_NULL=y
10389 +CONFIG_CRYPTO_NULL2=y
10390 +CONFIG_CRYPTO_PCRYPT=m
10391 +CONFIG_CRYPTO_CRYPTD=m
10392 +CONFIG_CRYPTO_AUTHENC=m
10393 +CONFIG_CRYPTO_TEST=m
10394 +CONFIG_CRYPTO_SIMD=m
10395 +CONFIG_CRYPTO_ENGINE=m
10398 +# Public-key cryptography
10400 +CONFIG_CRYPTO_RSA=y
10401 +CONFIG_CRYPTO_DH=y
10402 +CONFIG_CRYPTO_ECC=m
10403 +CONFIG_CRYPTO_ECDH=m
10404 +CONFIG_CRYPTO_ECRDSA=m
10405 +CONFIG_CRYPTO_SM2=m
10406 +CONFIG_CRYPTO_CURVE25519=m
10407 +CONFIG_CRYPTO_CURVE25519_X86=m
10410 +# Authenticated Encryption with Associated Data
10412 +CONFIG_CRYPTO_CCM=m
10413 +CONFIG_CRYPTO_GCM=y
10414 +CONFIG_CRYPTO_CHACHA20POLY1305=m
10415 +CONFIG_CRYPTO_AEGIS128=m
10416 +CONFIG_CRYPTO_AEGIS128_AESNI_SSE2=m
10417 +CONFIG_CRYPTO_SEQIV=y
10418 +CONFIG_CRYPTO_ECHAINIV=m
10421 +# Block modes
10423 +CONFIG_CRYPTO_CBC=y
10424 +CONFIG_CRYPTO_CFB=m
10425 +CONFIG_CRYPTO_CTR=y
10426 +CONFIG_CRYPTO_CTS=y
10427 +CONFIG_CRYPTO_ECB=y
10428 +CONFIG_CRYPTO_LRW=m
10429 +CONFIG_CRYPTO_OFB=m
10430 +CONFIG_CRYPTO_PCBC=m
10431 +CONFIG_CRYPTO_XTS=y
10432 +CONFIG_CRYPTO_KEYWRAP=m
10433 +CONFIG_CRYPTO_NHPOLY1305=m
10434 +CONFIG_CRYPTO_NHPOLY1305_SSE2=m
10435 +CONFIG_CRYPTO_NHPOLY1305_AVX2=m
10436 +CONFIG_CRYPTO_ADIANTUM=m
10437 +CONFIG_CRYPTO_ESSIV=m
10440 +# Hash modes
10442 +CONFIG_CRYPTO_CMAC=m
10443 +CONFIG_CRYPTO_HMAC=y
10444 +CONFIG_CRYPTO_XCBC=m
10445 +CONFIG_CRYPTO_VMAC=m
10448 +# Digest
10450 +CONFIG_CRYPTO_CRC32C=y
10451 +CONFIG_CRYPTO_CRC32C_INTEL=y
10452 +CONFIG_CRYPTO_CRC32=m
10453 +CONFIG_CRYPTO_CRC32_PCLMUL=m
10454 +CONFIG_CRYPTO_XXHASH=m
10455 +CONFIG_CRYPTO_BLAKE2B=m
10456 +CONFIG_CRYPTO_BLAKE2S=m
10457 +CONFIG_CRYPTO_BLAKE2S_X86=m
10458 +CONFIG_CRYPTO_CRCT10DIF=y
10459 +CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m
10460 +CONFIG_CRYPTO_GHASH=y
10461 +CONFIG_CRYPTO_POLY1305=m
10462 +CONFIG_CRYPTO_POLY1305_X86_64=m
10463 +CONFIG_CRYPTO_MD4=m
10464 +CONFIG_CRYPTO_MD5=y
10465 +CONFIG_CRYPTO_MICHAEL_MIC=m
10466 +CONFIG_CRYPTO_RMD160=m
10467 +CONFIG_CRYPTO_SHA1=y
10468 +CONFIG_CRYPTO_SHA1_SSSE3=m
10469 +CONFIG_CRYPTO_SHA256_SSSE3=m
10470 +CONFIG_CRYPTO_SHA512_SSSE3=m
10471 +CONFIG_CRYPTO_SHA256=y
10472 +CONFIG_CRYPTO_SHA512=y
10473 +CONFIG_CRYPTO_SHA3=m
10474 +CONFIG_CRYPTO_SM3=m
10475 +CONFIG_CRYPTO_STREEBOG=m
10476 +CONFIG_CRYPTO_WP512=m
10477 +CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m
10480 +# Ciphers
10482 +CONFIG_CRYPTO_AES=y
10483 +CONFIG_CRYPTO_AES_TI=m
10484 +CONFIG_CRYPTO_AES_NI_INTEL=m
10485 +CONFIG_CRYPTO_BLOWFISH=m
10486 +CONFIG_CRYPTO_BLOWFISH_COMMON=m
10487 +CONFIG_CRYPTO_BLOWFISH_X86_64=m
10488 +CONFIG_CRYPTO_CAMELLIA=m
10489 +CONFIG_CRYPTO_CAMELLIA_X86_64=m
10490 +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m
10491 +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m
10492 +CONFIG_CRYPTO_CAST_COMMON=m
10493 +CONFIG_CRYPTO_CAST5=m
10494 +CONFIG_CRYPTO_CAST5_AVX_X86_64=m
10495 +CONFIG_CRYPTO_CAST6=m
10496 +CONFIG_CRYPTO_CAST6_AVX_X86_64=m
10497 +CONFIG_CRYPTO_DES=m
10498 +CONFIG_CRYPTO_DES3_EDE_X86_64=m
10499 +CONFIG_CRYPTO_FCRYPT=m
10500 +CONFIG_CRYPTO_CHACHA20=m
10501 +CONFIG_CRYPTO_CHACHA20_X86_64=m
10502 +CONFIG_CRYPTO_SERPENT=m
10503 +CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m
10504 +CONFIG_CRYPTO_SERPENT_AVX_X86_64=m
10505 +CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m
10506 +CONFIG_CRYPTO_SM4=m
10507 +CONFIG_CRYPTO_TWOFISH=m
10508 +CONFIG_CRYPTO_TWOFISH_COMMON=m
10509 +CONFIG_CRYPTO_TWOFISH_X86_64=m
10510 +CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m
10511 +CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m
10514 +# Compression
10516 +CONFIG_CRYPTO_DEFLATE=y
10517 +CONFIG_CRYPTO_LZO=y
10518 +CONFIG_CRYPTO_842=m
10519 +CONFIG_CRYPTO_LZ4=y
10520 +CONFIG_CRYPTO_LZ4HC=m
10521 +CONFIG_CRYPTO_ZSTD=y
10524 +# Random Number Generation
10526 +CONFIG_CRYPTO_ANSI_CPRNG=m
10527 +CONFIG_CRYPTO_DRBG_MENU=y
10528 +CONFIG_CRYPTO_DRBG_HMAC=y
10529 +CONFIG_CRYPTO_DRBG_HASH=y
10530 +CONFIG_CRYPTO_DRBG_CTR=y
10531 +CONFIG_CRYPTO_DRBG=y
10532 +CONFIG_CRYPTO_JITTERENTROPY=y
10533 +CONFIG_CRYPTO_USER_API=m
10534 +CONFIG_CRYPTO_USER_API_HASH=m
10535 +CONFIG_CRYPTO_USER_API_SKCIPHER=m
10536 +CONFIG_CRYPTO_USER_API_RNG=m
10537 +# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set
10538 +CONFIG_CRYPTO_USER_API_AEAD=m
10539 +# CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE is not set
10540 +CONFIG_CRYPTO_STATS=y
10541 +CONFIG_CRYPTO_HASH_INFO=y
10544 +# Crypto library routines
10546 +CONFIG_CRYPTO_LIB_AES=y
10547 +CONFIG_CRYPTO_LIB_ARC4=m
10548 +CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S=m
10549 +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=m
10550 +CONFIG_CRYPTO_LIB_BLAKE2S=m
10551 +CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=m
10552 +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m
10553 +CONFIG_CRYPTO_LIB_CHACHA=m
10554 +CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519=m
10555 +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m
10556 +CONFIG_CRYPTO_LIB_CURVE25519=m
10557 +CONFIG_CRYPTO_LIB_DES=m
10558 +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=11
10559 +CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=m
10560 +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m
10561 +CONFIG_CRYPTO_LIB_POLY1305=m
10562 +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
10563 +CONFIG_CRYPTO_LIB_SHA256=y
10564 +CONFIG_CRYPTO_HW=y
10565 +CONFIG_CRYPTO_DEV_PADLOCK=y
10566 +CONFIG_CRYPTO_DEV_PADLOCK_AES=m
10567 +CONFIG_CRYPTO_DEV_PADLOCK_SHA=m
10568 +CONFIG_CRYPTO_DEV_ATMEL_I2C=m
10569 +CONFIG_CRYPTO_DEV_ATMEL_ECC=m
10570 +CONFIG_CRYPTO_DEV_ATMEL_SHA204A=m
10571 +CONFIG_CRYPTO_DEV_CCP=y
10572 +CONFIG_CRYPTO_DEV_CCP_DD=m
10573 +CONFIG_CRYPTO_DEV_SP_CCP=y
10574 +CONFIG_CRYPTO_DEV_CCP_CRYPTO=m
10575 +CONFIG_CRYPTO_DEV_SP_PSP=y
10576 +# CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set
10577 +CONFIG_CRYPTO_DEV_QAT=m
10578 +CONFIG_CRYPTO_DEV_QAT_DH895xCC=m
10579 +CONFIG_CRYPTO_DEV_QAT_C3XXX=m
10580 +CONFIG_CRYPTO_DEV_QAT_C62X=m
10581 +CONFIG_CRYPTO_DEV_QAT_4XXX=m
10582 +CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m
10583 +CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m
10584 +CONFIG_CRYPTO_DEV_QAT_C62XVF=m
10585 +CONFIG_CRYPTO_DEV_NITROX=m
10586 +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m
10587 +CONFIG_CRYPTO_DEV_CHELSIO=m
10588 +CONFIG_CRYPTO_DEV_VIRTIO=m
10589 +CONFIG_CRYPTO_DEV_SAFEXCEL=m
10590 +CONFIG_CRYPTO_DEV_AMLOGIC_GXL=m
10591 +# CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG is not set
10592 +CONFIG_ASYMMETRIC_KEY_TYPE=y
10593 +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
10594 +CONFIG_ASYMMETRIC_TPM_KEY_SUBTYPE=m
10595 +CONFIG_X509_CERTIFICATE_PARSER=y
10596 +CONFIG_PKCS8_PRIVATE_KEY_PARSER=m
10597 +CONFIG_TPM_KEY_PARSER=m
10598 +CONFIG_PKCS7_MESSAGE_PARSER=y
10599 +CONFIG_PKCS7_TEST_KEY=m
10600 +CONFIG_SIGNED_PE_FILE_VERIFICATION=y
10603 +# Certificates for signature checking
10605 +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem"
10606 +CONFIG_SYSTEM_TRUSTED_KEYRING=y
10607 +CONFIG_SYSTEM_TRUSTED_KEYS=""
10608 +CONFIG_SYSTEM_EXTRA_CERTIFICATE=y
10609 +CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=4096
10610 +CONFIG_SECONDARY_TRUSTED_KEYRING=y
10611 +CONFIG_SYSTEM_BLACKLIST_KEYRING=y
10612 +CONFIG_SYSTEM_BLACKLIST_HASH_LIST=""
10613 +# end of Certificates for signature checking
10616 +# Library routines
10618 +CONFIG_RAID6_PQ=m
10619 +CONFIG_RAID6_PQ_BENCHMARK=y
10620 +CONFIG_LINEAR_RANGES=y
10621 +CONFIG_PACKING=y
10622 +CONFIG_BITREVERSE=y
10623 +CONFIG_GENERIC_STRNCPY_FROM_USER=y
10624 +CONFIG_GENERIC_STRNLEN_USER=y
10625 +CONFIG_GENERIC_NET_UTILS=y
10626 +CONFIG_GENERIC_FIND_FIRST_BIT=y
10627 +CONFIG_CORDIC=m
10628 +# CONFIG_PRIME_NUMBERS is not set
10629 +CONFIG_RATIONAL=y
10630 +CONFIG_GENERIC_PCI_IOMAP=y
10631 +CONFIG_GENERIC_IOMAP=y
10632 +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
10633 +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y
10634 +CONFIG_ARCH_USE_SYM_ANNOTATIONS=y
10635 +CONFIG_CRC_CCITT=y
10636 +CONFIG_CRC16=y
10637 +CONFIG_CRC_T10DIF=y
10638 +CONFIG_CRC_ITU_T=m
10639 +CONFIG_CRC32=y
10640 +# CONFIG_CRC32_SELFTEST is not set
10641 +CONFIG_CRC32_SLICEBY8=y
10642 +# CONFIG_CRC32_SLICEBY4 is not set
10643 +# CONFIG_CRC32_SARWATE is not set
10644 +# CONFIG_CRC32_BIT is not set
10645 +CONFIG_CRC64=m
10646 +CONFIG_CRC4=m
10647 +CONFIG_CRC7=m
10648 +CONFIG_LIBCRC32C=m
10649 +CONFIG_CRC8=m
10650 +CONFIG_XXHASH=y
10651 +# CONFIG_RANDOM32_SELFTEST is not set
10652 +CONFIG_842_COMPRESS=m
10653 +CONFIG_842_DECOMPRESS=m
10654 +CONFIG_ZLIB_INFLATE=y
10655 +CONFIG_ZLIB_DEFLATE=y
10656 +CONFIG_LZO_COMPRESS=y
10657 +CONFIG_LZO_DECOMPRESS=y
10658 +CONFIG_LZ4_COMPRESS=y
10659 +CONFIG_LZ4HC_COMPRESS=y
10660 +CONFIG_LZ4_DECOMPRESS=y
10661 +CONFIG_ZSTD_COMPRESS=y
10662 +CONFIG_ZSTD_DECOMPRESS=y
10663 +CONFIG_XZ_DEC=y
10664 +CONFIG_XZ_DEC_X86=y
10665 +CONFIG_XZ_DEC_POWERPC=y
10666 +CONFIG_XZ_DEC_IA64=y
10667 +CONFIG_XZ_DEC_ARM=y
10668 +CONFIG_XZ_DEC_ARMTHUMB=y
10669 +CONFIG_XZ_DEC_SPARC=y
10670 +CONFIG_XZ_DEC_BCJ=y
10671 +CONFIG_XZ_DEC_TEST=m
10672 +CONFIG_DECOMPRESS_GZIP=y
10673 +CONFIG_DECOMPRESS_BZIP2=y
10674 +CONFIG_DECOMPRESS_LZMA=y
10675 +CONFIG_DECOMPRESS_XZ=y
10676 +CONFIG_DECOMPRESS_LZO=y
10677 +CONFIG_DECOMPRESS_LZ4=y
10678 +CONFIG_DECOMPRESS_ZSTD=y
10679 +CONFIG_GENERIC_ALLOCATOR=y
10680 +CONFIG_REED_SOLOMON=m
10681 +CONFIG_REED_SOLOMON_ENC8=y
10682 +CONFIG_REED_SOLOMON_DEC8=y
10683 +CONFIG_REED_SOLOMON_DEC16=y
10684 +CONFIG_BCH=m
10685 +CONFIG_TEXTSEARCH=y
10686 +CONFIG_TEXTSEARCH_KMP=m
10687 +CONFIG_TEXTSEARCH_BM=m
10688 +CONFIG_TEXTSEARCH_FSM=m
10689 +CONFIG_BTREE=y
10690 +CONFIG_INTERVAL_TREE=y
10691 +CONFIG_XARRAY_MULTI=y
10692 +CONFIG_ASSOCIATIVE_ARRAY=y
10693 +CONFIG_HAS_IOMEM=y
10694 +CONFIG_HAS_IOPORT_MAP=y
10695 +CONFIG_HAS_DMA=y
10696 +CONFIG_DMA_OPS=y
10697 +CONFIG_NEED_SG_DMA_LENGTH=y
10698 +CONFIG_NEED_DMA_MAP_STATE=y
10699 +CONFIG_ARCH_DMA_ADDR_T_64BIT=y
10700 +CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED=y
10701 +CONFIG_SWIOTLB=y
10702 +CONFIG_DMA_COHERENT_POOL=y
10703 +# CONFIG_DMA_API_DEBUG is not set
10704 +# CONFIG_DMA_MAP_BENCHMARK is not set
10705 +CONFIG_SGL_ALLOC=y
10706 +CONFIG_IOMMU_HELPER=y
10707 +CONFIG_CHECK_SIGNATURE=y
10708 +CONFIG_CPU_RMAP=y
10709 +CONFIG_DQL=y
10710 +CONFIG_GLOB=y
10711 +# CONFIG_GLOB_SELFTEST is not set
10712 +CONFIG_NLATTR=y
10713 +CONFIG_LRU_CACHE=m
10714 +CONFIG_CLZ_TAB=y
10715 +CONFIG_IRQ_POLL=y
10716 +CONFIG_MPILIB=y
10717 +CONFIG_SIGNATURE=y
10718 +CONFIG_DIMLIB=y
10719 +CONFIG_OID_REGISTRY=y
10720 +CONFIG_UCS2_STRING=y
10721 +CONFIG_HAVE_GENERIC_VDSO=y
10722 +CONFIG_GENERIC_GETTIMEOFDAY=y
10723 +CONFIG_GENERIC_VDSO_TIME_NS=y
10724 +CONFIG_FONT_SUPPORT=y
10725 +CONFIG_FONTS=y
10726 +CONFIG_FONT_8x8=y
10727 +CONFIG_FONT_8x16=y
10728 +# CONFIG_FONT_6x11 is not set
10729 +# CONFIG_FONT_7x14 is not set
10730 +# CONFIG_FONT_PEARL_8x8 is not set
10731 +CONFIG_FONT_ACORN_8x8=y
10732 +# CONFIG_FONT_MINI_4x6 is not set
10733 +CONFIG_FONT_6x10=y
10734 +# CONFIG_FONT_10x18 is not set
10735 +# CONFIG_FONT_SUN8x16 is not set
10736 +# CONFIG_FONT_SUN12x22 is not set
10737 +CONFIG_FONT_TER16x32=y
10738 +# CONFIG_FONT_6x8 is not set
10739 +CONFIG_SG_POOL=y
10740 +CONFIG_ARCH_HAS_PMEM_API=y
10741 +CONFIG_MEMREGION=y
10742 +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y
10743 +CONFIG_ARCH_HAS_COPY_MC=y
10744 +CONFIG_ARCH_STACKWALK=y
10745 +CONFIG_SBITMAP=y
10746 +CONFIG_PARMAN=m
10747 +CONFIG_OBJAGG=m
10748 +# CONFIG_STRING_SELFTEST is not set
10749 +# end of Library routines
10751 +CONFIG_PLDMFW=y
10754 +# Kernel hacking
10758 +# printk and dmesg options
10760 +CONFIG_PRINTK_TIME=y
10761 +# CONFIG_PRINTK_CALLER is not set
10762 +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7
10763 +CONFIG_CONSOLE_LOGLEVEL_QUIET=3
10764 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4
10765 +CONFIG_BOOT_PRINTK_DELAY=y
10766 +CONFIG_DYNAMIC_DEBUG=y
10767 +CONFIG_DYNAMIC_DEBUG_CORE=y
10768 +# CONFIG_SYMBOLIC_ERRNAME is not set
10769 +# CONFIG_DEBUG_BUGVERBOSE is not set
10770 +# end of printk and dmesg options
10773 +# Compile-time checks and compiler options
10775 +# CONFIG_DEBUG_INFO is not set
10776 +CONFIG_FRAME_WARN=1024
10777 +# CONFIG_STRIP_ASM_SYMS is not set
10778 +# CONFIG_READABLE_ASM is not set
10779 +# CONFIG_HEADERS_INSTALL is not set
10780 +# CONFIG_DEBUG_SECTION_MISMATCH is not set
10781 +CONFIG_SECTION_MISMATCH_WARN_ONLY=y
10782 +# CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_32B is not set
10783 +CONFIG_STACK_VALIDATION=y
10784 +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
10785 +# end of Compile-time checks and compiler options
10788 +# Generic Kernel Debugging Instruments
10790 +CONFIG_MAGIC_SYSRQ=y
10791 +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x01b6
10792 +CONFIG_MAGIC_SYSRQ_SERIAL=y
10793 +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE=""
10794 +CONFIG_DEBUG_FS=y
10795 +CONFIG_DEBUG_FS_ALLOW_ALL=y
10796 +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set
10797 +# CONFIG_DEBUG_FS_ALLOW_NONE is not set
10798 +CONFIG_HAVE_ARCH_KGDB=y
10799 +CONFIG_KGDB=y
10800 +CONFIG_KGDB_HONOUR_BLOCKLIST=y
10801 +CONFIG_KGDB_SERIAL_CONSOLE=y
10802 +# CONFIG_KGDB_TESTS is not set
10803 +CONFIG_KGDB_LOW_LEVEL_TRAP=y
10804 +CONFIG_KGDB_KDB=y
10805 +CONFIG_KDB_DEFAULT_ENABLE=0x1
10806 +CONFIG_KDB_KEYBOARD=y
10807 +CONFIG_KDB_CONTINUE_CATASTROPHIC=0
10808 +CONFIG_ARCH_HAS_EARLY_DEBUG=y
10809 +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y
10810 +# CONFIG_UBSAN is not set
10811 +CONFIG_HAVE_ARCH_KCSAN=y
10812 +CONFIG_HAVE_KCSAN_COMPILER=y
10813 +# CONFIG_KCSAN is not set
10814 +# end of Generic Kernel Debugging Instruments
10816 +CONFIG_DEBUG_KERNEL=y
10817 +CONFIG_DEBUG_MISC=y
10820 +# Memory Debugging
10822 +# CONFIG_PAGE_EXTENSION is not set
10823 +# CONFIG_DEBUG_PAGEALLOC is not set
10824 +# CONFIG_PAGE_OWNER is not set
10825 +CONFIG_PAGE_POISONING=y
10826 +# CONFIG_DEBUG_RODATA_TEST is not set
10827 +CONFIG_ARCH_HAS_DEBUG_WX=y
10828 +CONFIG_DEBUG_WX=y
10829 +CONFIG_GENERIC_PTDUMP=y
10830 +CONFIG_PTDUMP_CORE=y
10831 +# CONFIG_PTDUMP_DEBUGFS is not set
10832 +# CONFIG_DEBUG_OBJECTS is not set
10833 +# CONFIG_SLUB_DEBUG_ON is not set
10834 +# CONFIG_SLUB_STATS is not set
10835 +CONFIG_HAVE_DEBUG_KMEMLEAK=y
10836 +# CONFIG_DEBUG_KMEMLEAK is not set
10837 +# CONFIG_DEBUG_STACK_USAGE is not set
10838 +CONFIG_SCHED_STACK_END_CHECK=y
10839 +CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y
10840 +# CONFIG_DEBUG_VM is not set
10841 +# CONFIG_DEBUG_VM_PGTABLE is not set
10842 +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y
10843 +# CONFIG_DEBUG_VIRTUAL is not set
10844 +# CONFIG_DEBUG_MEMORY_INIT is not set
10845 +CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
10846 +# CONFIG_DEBUG_PER_CPU_MAPS is not set
10847 +CONFIG_ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP=y
10848 +# CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP is not set
10849 +CONFIG_HAVE_ARCH_KASAN=y
10850 +CONFIG_HAVE_ARCH_KASAN_VMALLOC=y
10851 +CONFIG_CC_HAS_KASAN_GENERIC=y
10852 +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y
10853 +# CONFIG_KASAN is not set
10854 +CONFIG_HAVE_ARCH_KFENCE=y
10855 +CONFIG_KFENCE=y
10856 +CONFIG_KFENCE_STATIC_KEYS=y
10857 +CONFIG_KFENCE_SAMPLE_INTERVAL=0
10858 +CONFIG_KFENCE_NUM_OBJECTS=255
10859 +CONFIG_KFENCE_STRESS_TEST_FAULTS=0
10860 +# end of Memory Debugging
10862 +# CONFIG_DEBUG_SHIRQ is not set
10865 +# Debug Oops, Lockups and Hangs
10867 +# CONFIG_PANIC_ON_OOPS is not set
10868 +CONFIG_PANIC_ON_OOPS_VALUE=0
10869 +CONFIG_PANIC_TIMEOUT=0
10870 +CONFIG_LOCKUP_DETECTOR=y
10871 +CONFIG_SOFTLOCKUP_DETECTOR=y
10872 +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
10873 +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
10874 +CONFIG_HARDLOCKUP_DETECTOR_PERF=y
10875 +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y
10876 +CONFIG_HARDLOCKUP_DETECTOR=y
10877 +# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set
10878 +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0
10879 +CONFIG_DETECT_HUNG_TASK=y
10880 +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
10881 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
10882 +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
10883 +# CONFIG_WQ_WATCHDOG is not set
10884 +# CONFIG_TEST_LOCKUP is not set
10885 +# end of Debug Oops, Lockups and Hangs
10888 +# Scheduler Debugging
10890 +CONFIG_SCHED_DEBUG=y
10891 +CONFIG_SCHED_INFO=y
10892 +CONFIG_SCHEDSTATS=y
10893 +# end of Scheduler Debugging
10895 +# CONFIG_DEBUG_TIMEKEEPING is not set
10896 +# CONFIG_DEBUG_PREEMPT is not set
10899 +# Lock Debugging (spinlocks, mutexes, etc...)
10901 +CONFIG_LOCK_DEBUGGING_SUPPORT=y
10902 +# CONFIG_PROVE_LOCKING is not set
10903 +# CONFIG_LOCK_STAT is not set
10904 +# CONFIG_DEBUG_RT_MUTEXES is not set
10905 +# CONFIG_DEBUG_SPINLOCK is not set
10906 +# CONFIG_DEBUG_MUTEXES is not set
10907 +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set
10908 +# CONFIG_DEBUG_RWSEMS is not set
10909 +# CONFIG_DEBUG_LOCK_ALLOC is not set
10910 +# CONFIG_DEBUG_ATOMIC_SLEEP is not set
10911 +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
10912 +# CONFIG_LOCK_TORTURE_TEST is not set
10913 +# CONFIG_WW_MUTEX_SELFTEST is not set
10914 +# CONFIG_SCF_TORTURE_TEST is not set
10915 +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set
10916 +# end of Lock Debugging (spinlocks, mutexes, etc...)
10918 +# CONFIG_DEBUG_IRQFLAGS is not set
10919 +CONFIG_STACKTRACE=y
10920 +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set
10921 +# CONFIG_DEBUG_KOBJECT is not set
10924 +# Debug kernel data structures
10926 +# CONFIG_DEBUG_LIST is not set
10927 +# CONFIG_DEBUG_PLIST is not set
10928 +# CONFIG_DEBUG_SG is not set
10929 +# CONFIG_DEBUG_NOTIFIERS is not set
10930 +# CONFIG_BUG_ON_DATA_CORRUPTION is not set
10931 +# end of Debug kernel data structures
10933 +# CONFIG_DEBUG_CREDENTIALS is not set
10936 +# RCU Debugging
10938 +# CONFIG_RCU_SCALE_TEST is not set
10939 +# CONFIG_RCU_TORTURE_TEST is not set
10940 +# CONFIG_RCU_REF_SCALE_TEST is not set
10941 +CONFIG_RCU_CPU_STALL_TIMEOUT=60
10942 +# CONFIG_RCU_TRACE is not set
10943 +# CONFIG_RCU_EQS_DEBUG is not set
10944 +# CONFIG_RCU_STRICT_GRACE_PERIOD is not set
10945 +# end of RCU Debugging
10947 +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set
10948 +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
10949 +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set
10950 +CONFIG_LATENCYTOP=y
10951 +CONFIG_USER_STACKTRACE_SUPPORT=y
10952 +CONFIG_HAVE_FUNCTION_TRACER=y
10953 +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
10954 +CONFIG_HAVE_DYNAMIC_FTRACE=y
10955 +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
10956 +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y
10957 +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y
10958 +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
10959 +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
10960 +CONFIG_HAVE_FENTRY=y
10961 +CONFIG_HAVE_OBJTOOL_MCOUNT=y
10962 +CONFIG_HAVE_C_RECORDMCOUNT=y
10963 +CONFIG_TRACING_SUPPORT=y
10964 +# CONFIG_FTRACE is not set
10965 +# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
10966 +CONFIG_SAMPLES=y
10967 +# CONFIG_SAMPLE_AUXDISPLAY is not set
10968 +# CONFIG_SAMPLE_KOBJECT is not set
10969 +# CONFIG_SAMPLE_KPROBES is not set
10970 +# CONFIG_SAMPLE_HW_BREAKPOINT is not set
10971 +# CONFIG_SAMPLE_KFIFO is not set
10972 +# CONFIG_SAMPLE_KDB is not set
10973 +# CONFIG_SAMPLE_RPMSG_CLIENT is not set
10974 +# CONFIG_SAMPLE_CONFIGFS is not set
10975 +# CONFIG_SAMPLE_VFIO_MDEV_MTTY is not set
10976 +# CONFIG_SAMPLE_VFIO_MDEV_MDPY is not set
10977 +# CONFIG_SAMPLE_VFIO_MDEV_MDPY_FB is not set
10978 +# CONFIG_SAMPLE_VFIO_MDEV_MBOCHS is not set
10979 +# CONFIG_SAMPLE_WATCHDOG is not set
10980 +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y
10981 +CONFIG_STRICT_DEVMEM=y
10982 +# CONFIG_IO_STRICT_DEVMEM is not set
10985 +# x86 Debugging
10987 +CONFIG_TRACE_IRQFLAGS_SUPPORT=y
10988 +CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y
10989 +CONFIG_EARLY_PRINTK_USB=y
10990 +# CONFIG_X86_VERBOSE_BOOTUP is not set
10991 +CONFIG_EARLY_PRINTK=y
10992 +CONFIG_EARLY_PRINTK_DBGP=y
10993 +CONFIG_EARLY_PRINTK_USB_XDBC=y
10994 +# CONFIG_EFI_PGT_DUMP is not set
10995 +# CONFIG_DEBUG_TLBFLUSH is not set
10996 +# CONFIG_IOMMU_DEBUG is not set
10997 +CONFIG_HAVE_MMIOTRACE_SUPPORT=y
10998 +# CONFIG_X86_DECODER_SELFTEST is not set
10999 +# CONFIG_IO_DELAY_0X80 is not set
11000 +CONFIG_IO_DELAY_0XED=y
11001 +# CONFIG_IO_DELAY_UDELAY is not set
11002 +# CONFIG_IO_DELAY_NONE is not set
11003 +# CONFIG_DEBUG_BOOT_PARAMS is not set
11004 +# CONFIG_CPA_DEBUG is not set
11005 +# CONFIG_DEBUG_ENTRY is not set
11006 +# CONFIG_DEBUG_NMI_SELFTEST is not set
11007 +CONFIG_X86_DEBUG_FPU=y
11008 +CONFIG_PUNIT_ATOM_DEBUG=m
11009 +CONFIG_UNWINDER_ORC=y
11010 +# CONFIG_UNWINDER_FRAME_POINTER is not set
11011 +# CONFIG_UNWINDER_GUESS is not set
11012 +# end of x86 Debugging
11015 +# Kernel Testing and Coverage
11017 +# CONFIG_KUNIT is not set
11018 +CONFIG_NOTIFIER_ERROR_INJECTION=m
11019 +CONFIG_PM_NOTIFIER_ERROR_INJECT=m
11020 +# CONFIG_NETDEV_NOTIFIER_ERROR_INJECT is not set
11021 +CONFIG_FUNCTION_ERROR_INJECTION=y
11022 +# CONFIG_FAULT_INJECTION is not set
11023 +CONFIG_ARCH_HAS_KCOV=y
11024 +CONFIG_CC_HAS_SANCOV_TRACE_PC=y
11025 +# CONFIG_KCOV is not set
11026 +CONFIG_RUNTIME_TESTING_MENU=y
11027 +# CONFIG_LKDTM is not set
11028 +# CONFIG_TEST_LIST_SORT is not set
11029 +# CONFIG_TEST_MIN_HEAP is not set
11030 +# CONFIG_TEST_SORT is not set
11031 +# CONFIG_KPROBES_SANITY_TEST is not set
11032 +# CONFIG_BACKTRACE_SELF_TEST is not set
11033 +# CONFIG_RBTREE_TEST is not set
11034 +# CONFIG_REED_SOLOMON_TEST is not set
11035 +# CONFIG_INTERVAL_TREE_TEST is not set
11036 +# CONFIG_PERCPU_TEST is not set
11037 +# CONFIG_ATOMIC64_SELFTEST is not set
11038 +# CONFIG_ASYNC_RAID6_TEST is not set
11039 +# CONFIG_TEST_HEXDUMP is not set
11040 +# CONFIG_TEST_STRING_HELPERS is not set
11041 +# CONFIG_TEST_STRSCPY is not set
11042 +# CONFIG_TEST_KSTRTOX is not set
11043 +# CONFIG_TEST_PRINTF is not set
11044 +# CONFIG_TEST_BITMAP is not set
11045 +# CONFIG_TEST_UUID is not set
11046 +# CONFIG_TEST_XARRAY is not set
11047 +# CONFIG_TEST_OVERFLOW is not set
11048 +# CONFIG_TEST_RHASHTABLE is not set
11049 +# CONFIG_TEST_HASH is not set
11050 +# CONFIG_TEST_IDA is not set
11051 +# CONFIG_TEST_PARMAN is not set
11052 +# CONFIG_TEST_LKM is not set
11053 +# CONFIG_TEST_BITOPS is not set
11054 +# CONFIG_TEST_VMALLOC is not set
11055 +# CONFIG_TEST_USER_COPY is not set
11056 +CONFIG_TEST_BPF=m
11057 +CONFIG_TEST_BLACKHOLE_DEV=m
11058 +# CONFIG_FIND_BIT_BENCHMARK is not set
11059 +# CONFIG_TEST_FIRMWARE is not set
11060 +# CONFIG_TEST_SYSCTL is not set
11061 +# CONFIG_TEST_UDELAY is not set
11062 +# CONFIG_TEST_STATIC_KEYS is not set
11063 +# CONFIG_TEST_KMOD is not set
11064 +# CONFIG_TEST_MEMCAT_P is not set
11065 +# CONFIG_TEST_OBJAGG is not set
11066 +# CONFIG_TEST_STACKINIT is not set
11067 +# CONFIG_TEST_MEMINIT is not set
11068 +# CONFIG_TEST_HMM is not set
11069 +# CONFIG_TEST_FREE_PAGES is not set
11070 +# CONFIG_TEST_FPU is not set
11071 +CONFIG_MEMTEST=y
11072 +# CONFIG_HYPERV_TESTING is not set
11073 +# end of Kernel Testing and Coverage
11074 +# end of Kernel hacking
11075 diff --git a/.gitignore b/.gitignore
11076 index 3af66272d6f1..127012c1f717 100644
11077 --- a/.gitignore
11078 +++ b/.gitignore
11079 @@ -57,6 +57,7 @@ modules.order
11080  /tags
11081  /TAGS
11082  /linux
11083 +/modules-only.symvers
11084  /vmlinux
11085  /vmlinux.32
11086  /vmlinux.symvers
11087 diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
11088 index 04545725f187..e38e2c55b2fa 100644
11089 --- a/Documentation/admin-guide/kernel-parameters.txt
11090 +++ b/Documentation/admin-guide/kernel-parameters.txt
11091 @@ -358,6 +358,10 @@
11092         autoconf=       [IPV6]
11093                         See Documentation/networking/ipv6.rst.
11095 +       autogroup=      [KNL] Enable or disable scheduler automatic task group
11096 +                       creation.
11097 +                       Format: <bool>
11099         show_lapic=     [APIC,X86] Advanced Programmable Interrupt Controller
11100                         Limit apic dumping. The parameter defines the maximal
11101                         number of local apics being dumped. Also it is possible
11102 @@ -1869,13 +1873,6 @@
11103                         bypassed by not enabling DMAR with this option. In
11104                         this case, gfx device will use physical address for
11105                         DMA.
11106 -               forcedac [X86-64]
11107 -                       With this option iommu will not optimize to look
11108 -                       for io virtual address below 32-bit forcing dual
11109 -                       address cycle on pci bus for cards supporting greater
11110 -                       than 32-bit addressing. The default is to look
11111 -                       for translation below 32-bit and if not available
11112 -                       then look in the higher range.
11113                 strict [Default Off]
11114                         With this option on every unmap_single operation will
11115                         result in a hardware IOTLB flush operation as opposed
11116 @@ -1964,6 +1961,14 @@
11117                 nobypass        [PPC/POWERNV]
11118                         Disable IOMMU bypass, using IOMMU for PCI devices.
11120 +       iommu.forcedac= [ARM64, X86] Control IOVA allocation for PCI devices.
11121 +                       Format: { "0" | "1" }
11122 +                       0 - Try to allocate a 32-bit DMA address first, before
11123 +                         falling back to the full range if needed.
11124 +                       1 - Allocate directly from the full usable range,
11125 +                         forcing Dual Address Cycle for PCI cards supporting
11126 +                         greater than 32-bit addressing.
11128         iommu.strict=   [ARM64] Configure TLB invalidation behaviour
11129                         Format: { "0" | "1" }
11130                         0 - Lazy mode.
11131 @@ -3196,8 +3201,6 @@
11132         noapic          [SMP,APIC] Tells the kernel to not make use of any
11133                         IOAPICs that may be present in the system.
11135 -       noautogroup     Disable scheduler automatic task group creation.
11137         nobats          [PPC] Do not use BATs for mapping kernel lowmem
11138                         on "Classic" PPC cores.
11140 @@ -3660,6 +3663,15 @@
11141                 nomsi           [MSI] If the PCI_MSI kernel config parameter is
11142                                 enabled, this kernel boot option can be used to
11143                                 disable the use of MSI interrupts system-wide.
11144 +               pcie_acs_override=
11145 +                                       [PCIE] Override missing PCIe ACS support for:
11146 +                               downstream
11147 +                                       All downstream ports - full ACS capabilities
11148 +                               multifunction
11149 +                                       All multifunction devices - multifunction ACS subset
11150 +                               id:nnnn:nnnn
11151 +                                       Specific device - full ACS capabilities
11152 +                                       Specified as vid:did (vendor/device ID) in hex
11153                 noioapicquirk   [APIC] Disable all boot interrupt quirks.
11154                                 Safety option to keep boot IRQs enabled. This
11155                                 should never be necessary.
11156 diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst
11157 index 586cd4b86428..cf4a90d7a058 100644
11158 --- a/Documentation/admin-guide/sysctl/vm.rst
11159 +++ b/Documentation/admin-guide/sysctl/vm.rst
11160 @@ -26,6 +26,8 @@ Currently, these files are in /proc/sys/vm:
11162  - admin_reserve_kbytes
11163  - block_dump
11164 +- clean_low_kbytes
11165 +- clean_min_kbytes
11166  - compact_memory
11167  - compaction_proactiveness
11168  - compact_unevictable_allowed
11169 @@ -113,6 +115,41 @@ block_dump enables block I/O debugging when set to a nonzero value. More
11170  information on block I/O debugging is in Documentation/admin-guide/laptops/laptop-mode.rst.
11173 +clean_low_kbytes
11174 +=====================
11176 +This knob provides *best-effort* protection of clean file pages. The clean file
11177 +pages on the current node won't be reclaimed under memory pressure when their
11178 +amount is below vm.clean_low_kbytes *unless* we are about to OOM, have no
11179 +free swap space, or vm.swappiness=0.
11181 +Protection of clean file pages may be used to prevent thrashing and
11182 +reduce I/O under low-memory conditions.
11184 +Setting it to a high value may result in an early eviction of anonymous pages
11185 +into the swap space by attempting to hold the protected amount of clean file
11186 +pages in memory.
11188 +The default value is defined by CONFIG_CLEAN_LOW_KBYTES.
11191 +clean_min_kbytes
11192 +=====================
11194 +This knob provides *hard* protection of clean file pages. The clean file pages
11195 +on the current node won't be reclaimed under memory pressure when their amount
11196 +is below vm.clean_min_kbytes.
11198 +Hard protection of clean file pages may be used to avoid high latency and
11199 +prevent livelock in near-OOM conditions.
11201 +Setting it to a high value may result in an early out-of-memory condition due to
11202 +the inability to reclaim the protected amount of clean file pages when other
11203 +types of pages cannot be reclaimed.
11205 +The default value is defined by CONFIG_CLEAN_MIN_KBYTES.
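Both knobs are regular sysctls under /proc/sys/vm. A minimal C sketch of
setting them at runtime follows; the values are arbitrary examples, and the
files only exist on a kernel built with this patch::

    #include <stdio.h>

    /* Write a value to a sysctl file; returns 0 on success. */
    static int write_sysctl(const char *path, const char *val)
    {
            FILE *f = fopen(path, "w");

            if (!f)
                    return -1;
            fputs(val, f);
            return fclose(f);
    }

    int main(void)
    {
            /* Best-effort protection: keep ~150 MiB of clean file pages. */
            write_sysctl("/proc/sys/vm/clean_low_kbytes", "153600");
            /* Hard protection: never reclaim the last ~64 MiB. */
            write_sysctl("/proc/sys/vm/clean_min_kbytes", "65536");
            return 0;
    }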
11208  compact_memory
11209  ==============
11211 diff --git a/Documentation/devicetree/bindings/media/renesas,vin.yaml b/Documentation/devicetree/bindings/media/renesas,vin.yaml
11212 index fe7c4cbfe4ba..dd1a5ce5896c 100644
11213 --- a/Documentation/devicetree/bindings/media/renesas,vin.yaml
11214 +++ b/Documentation/devicetree/bindings/media/renesas,vin.yaml
11215 @@ -193,23 +193,35 @@ required:
11216    - interrupts
11217    - clocks
11218    - power-domains
11219 -  - resets
11221 -if:
11222 -  properties:
11223 -    compatible:
11224 -      contains:
11225 -        enum:
11226 -          - renesas,vin-r8a7778
11227 -          - renesas,vin-r8a7779
11228 -          - renesas,rcar-gen2-vin
11229 -then:
11230 -  required:
11231 -    - port
11232 -else:
11233 -  required:
11234 -    - renesas,id
11235 -    - ports
11237 +allOf:
11238 +  - if:
11239 +      not:
11240 +        properties:
11241 +          compatible:
11242 +            contains:
11243 +              enum:
11244 +                - renesas,vin-r8a7778
11245 +                - renesas,vin-r8a7779
11246 +    then:
11247 +      required:
11248 +        - resets
11250 +  - if:
11251 +      properties:
11252 +        compatible:
11253 +          contains:
11254 +            enum:
11255 +              - renesas,vin-r8a7778
11256 +              - renesas,vin-r8a7779
11257 +              - renesas,rcar-gen2-vin
11258 +    then:
11259 +      required:
11260 +        - port
11261 +    else:
11262 +      required:
11263 +        - renesas,id
11264 +        - ports
11266  additionalProperties: false
11268 diff --git a/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml b/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
11269 index 4a2bcc0158e2..8fdfbc763d70 100644
11270 --- a/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
11271 +++ b/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
11272 @@ -17,6 +17,7 @@ allOf:
11273  properties:
11274    compatible:
11275      oneOf:
11276 +      - const: renesas,pcie-r8a7779       # R-Car H1
11277        - items:
11278            - enum:
11279                - renesas,pcie-r8a7742      # RZ/G1H
11280 @@ -74,7 +75,16 @@ required:
11281    - clocks
11282    - clock-names
11283    - power-domains
11284 -  - resets
11286 +if:
11287 +  not:
11288 +    properties:
11289 +      compatible:
11290 +        contains:
11291 +          const: renesas,pcie-r8a7779
11292 +then:
11293 +  required:
11294 +    - resets
11296  unevaluatedProperties: false
11298 diff --git a/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
11299 index 626447fee092..7808ec8bc712 100644
11300 --- a/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
11301 +++ b/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
11302 @@ -25,11 +25,13 @@ properties:
11303        - qcom,msm8998-qmp-pcie-phy
11304        - qcom,msm8998-qmp-ufs-phy
11305        - qcom,msm8998-qmp-usb3-phy
11306 +      - qcom,sc7180-qmp-usb3-phy
11307        - qcom,sc8180x-qmp-ufs-phy
11308        - qcom,sc8180x-qmp-usb3-phy
11309        - qcom,sdm845-qhp-pcie-phy
11310        - qcom,sdm845-qmp-pcie-phy
11311        - qcom,sdm845-qmp-ufs-phy
11312 +      - qcom,sdm845-qmp-usb3-phy
11313        - qcom,sdm845-qmp-usb3-uni-phy
11314        - qcom,sm8150-qmp-ufs-phy
11315        - qcom,sm8150-qmp-usb3-phy
11316 diff --git a/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml
11317 index 33974ad10afe..62c0179d1765 100644
11318 --- a/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml
11319 +++ b/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml
11320 @@ -14,9 +14,7 @@ properties:
11321    compatible:
11322      enum:
11323        - qcom,sc7180-qmp-usb3-dp-phy
11324 -      - qcom,sc7180-qmp-usb3-phy
11325        - qcom,sdm845-qmp-usb3-dp-phy
11326 -      - qcom,sdm845-qmp-usb3-phy
11327    reg:
11328      items:
11329        - description: Address and length of PHY's USB serdes block.
11330 diff --git a/Documentation/devicetree/bindings/serial/8250.yaml b/Documentation/devicetree/bindings/serial/8250.yaml
11331 index f54cae9ff7b2..d3f87f2bfdc2 100644
11332 --- a/Documentation/devicetree/bindings/serial/8250.yaml
11333 +++ b/Documentation/devicetree/bindings/serial/8250.yaml
11334 @@ -93,11 +93,6 @@ properties:
11335                - mediatek,mt7622-btif
11336                - mediatek,mt7623-btif
11337            - const: mediatek,mtk-btif
11338 -      - items:
11339 -          - enum:
11340 -              - mediatek,mt7622-btif
11341 -              - mediatek,mt7623-btif
11342 -          - const: mediatek,mtk-btif
11343        - items:
11344            - const: mrvl,mmp-uart
11345            - const: intel,xscale-uart
11346 diff --git a/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml b/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
11347 index 8631678283f9..865be05083c3 100644
11348 --- a/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
11349 +++ b/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
11350 @@ -80,7 +80,8 @@ required:
11351    - interrupts
11352    - clocks
11354 -additionalProperties: false
11355 +additionalProperties:
11356 +  type: object
11358  examples:
11359    - |
11360 diff --git a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
11361 index b33a76eeac4e..f963204e0b16 100644
11362 --- a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
11363 +++ b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
11364 @@ -28,14 +28,7 @@ properties:
11365        - renesas,r8a77980-thermal # R-Car V3H
11366        - renesas,r8a779a0-thermal # R-Car V3U
11368 -  reg:
11369 -    minItems: 2
11370 -    maxItems: 4
11371 -    items:
11372 -      - description: TSC1 registers
11373 -      - description: TSC2 registers
11374 -      - description: TSC3 registers
11375 -      - description: TSC4 registers
11376 +  reg: true
11378    interrupts:
11379      items:
11380 @@ -71,8 +64,25 @@ if:
11381            enum:
11382              - renesas,r8a779a0-thermal
11383  then:
11384 +  properties:
11385 +    reg:
11386 +      minItems: 2
11387 +      maxItems: 3
11388 +      items:
11389 +        - description: TSC1 registers
11390 +        - description: TSC2 registers
11391 +        - description: TSC3 registers
11392    required:
11393      - interrupts
11394 +else:
11395 +  properties:
11396 +    reg:
11397 +      items:
11398 +        - description: TSC0 registers
11399 +        - description: TSC1 registers
11400 +        - description: TSC2 registers
11401 +        - description: TSC3 registers
11402 +        - description: TSC4 registers
11404  additionalProperties: false
11406 @@ -111,3 +121,20 @@ examples:
11407                      };
11408              };
11409      };
11410 +  - |
11411 +    #include <dt-bindings/clock/r8a779a0-cpg-mssr.h>
11412 +    #include <dt-bindings/interrupt-controller/arm-gic.h>
11413 +    #include <dt-bindings/power/r8a779a0-sysc.h>
11415 +    tsc_r8a779a0: thermal@e6190000 {
11416 +            compatible = "renesas,r8a779a0-thermal";
11417 +            reg = <0xe6190000 0x200>,
11418 +                  <0xe6198000 0x200>,
11419 +                  <0xe61a0000 0x200>,
11420 +                  <0xe61a8000 0x200>,
11421 +                  <0xe61b0000 0x200>;
11422 +            clocks = <&cpg CPG_MOD 919>;
11423 +            power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
11424 +            resets = <&cpg 919>;
11425 +            #thermal-sensor-cells = <1>;
11426 +    };
11427 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
11428 index e361fc95ca29..82e3eee7363b 100644
11429 --- a/Documentation/dontdiff
11430 +++ b/Documentation/dontdiff
11431 @@ -178,6 +178,7 @@ mktables
11432  mktree
11433  mkutf8data
11434  modpost
11435 +modules-only.symvers
11436  modules.builtin
11437  modules.builtin.modinfo
11438  modules.nsdeps
11439 diff --git a/Documentation/driver-api/xilinx/eemi.rst b/Documentation/driver-api/xilinx/eemi.rst
11440 index 9dcbc6f18d75..c1bc47b9000d 100644
11441 --- a/Documentation/driver-api/xilinx/eemi.rst
11442 +++ b/Documentation/driver-api/xilinx/eemi.rst
11443 @@ -16,35 +16,8 @@ components running across different processing clusters on a chip or
11444  device to communicate with a power management controller (PMC) on a
11445  device to issue or respond to power management requests.
11447 -EEMI ops is a structure containing all eemi APIs supported by Zynq MPSoC.
11448 -The zynqmp-firmware driver maintain all EEMI APIs in zynqmp_eemi_ops
11449 -structure. Any driver who want to communicate with PMC using EEMI APIs
11450 -can call zynqmp_pm_get_eemi_ops().
11452 -Example of EEMI ops::
11454 -       /* zynqmp-firmware driver maintain all EEMI APIs */
11455 -       struct zynqmp_eemi_ops {
11456 -               int (*get_api_version)(u32 *version);
11457 -               int (*query_data)(struct zynqmp_pm_query_data qdata, u32 *out);
11458 -       };
11460 -       static const struct zynqmp_eemi_ops eemi_ops = {
11461 -               .get_api_version = zynqmp_pm_get_api_version,
11462 -               .query_data = zynqmp_pm_query_data,
11463 -       };
11465 -Example of EEMI ops usage::
11467 -       static const struct zynqmp_eemi_ops *eemi_ops;
11468 -       u32 ret_payload[PAYLOAD_ARG_CNT];
11469 -       int ret;
11471 -       eemi_ops = zynqmp_pm_get_eemi_ops();
11472 -       if (IS_ERR(eemi_ops))
11473 -               return PTR_ERR(eemi_ops);
11475 -       ret = eemi_ops->query_data(qdata, ret_payload);
11476 +Any driver that wants to communicate with the PMC using EEMI APIs can use
11477 +the functions provided for each EEMI call.
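Where a driver previously looked up the ops table, it now calls the exported
helpers directly. A brief sketch of the new style (kernel-module context
assumed)::

    #include <linux/printk.h>
    #include <linux/firmware/xlnx-zynqmp.h>

    static int report_pmc_version(void)
    {
            u32 version;
            int ret;

            /* Direct call instead of eemi_ops->get_api_version(). */
            ret = zynqmp_pm_get_api_version(&version);
            if (ret)
                    return ret;
            pr_info("EEMI API version: %u\n", version);
            return 0;
    }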
11479  IOCTL
11480  ------
11481 diff --git a/Documentation/filesystems/ntfs3.rst b/Documentation/filesystems/ntfs3.rst
11482 new file mode 100644
11483 index 000000000000..ffe9ea0c1499
11484 --- /dev/null
11485 +++ b/Documentation/filesystems/ntfs3.rst
11486 @@ -0,0 +1,106 @@
11487 +.. SPDX-License-Identifier: GPL-2.0
11489 +=====
11490 +NTFS3
11491 +=====
11494 +Summary and Features
11495 +====================
11497 +NTFS3 is a fully functional NTFS read-write driver. The driver works with
11498 +NTFS versions up to 3.1 and handles normal/compressed/sparse files
11499 +and journal replaying. The file system type to use on mount is 'ntfs3'.
11501 +- This driver implements NTFS read/write support for normal, sparse and
11502 +  compressed files.
11503 +- Supports native journal replaying;
11504 +- Supports extended attributes
11505 +       Predefined extended attributes:
11506 +       - 'system.ntfs_security' gets/sets security
11507 +                       descriptor (SECURITY_DESCRIPTOR_RELATIVE)
11508 +       - 'system.ntfs_attrib' gets/sets ntfs file/dir attributes.
11509 +               Note: applied to empty files, this allows switching the type between
11510 +               sparse(0x200), compressed(0x800) and normal;
11511 +- Supports NFS export of mounted NTFS volumes.
11513 +Mount Options
11514 +=============
11516 +The list below describes mount options supported by the NTFS3 driver in addition to
11517 +generic ones.
11519 +===============================================================================
11521 +nls=name               This option informs the driver how to interpret path
11522 +                       strings and translate them to Unicode and back. If
11523 +                       this option is not set, the default codepage will be
11524 +                       used (CONFIG_NLS_DEFAULT).
11525 +                       Examples:
11526 +                               'nls=utf8'
11528 +uid=
11529 +gid=
11530 +umask=                 Controls the default permissions for files/directories created
11531 +                       after the NTFS volume is mounted.
11533 +fmask=
11534 +dmask=                 Instead of specifying umask which applies both to
11535 +                       files and directories, fmask applies only to files and
11536 +                       dmask only to directories.
11538 +nohidden               Files with the Windows-specific HIDDEN (FILE_ATTRIBUTE_HIDDEN)
11539 +                       attribute will not be shown under Linux.
11541 +sys_immutable          Files with the Windows-specific SYSTEM
11542 +                       (FILE_ATTRIBUTE_SYSTEM) attribute will be marked as system
11543 +                       immutable files.
11545 +discard                        Enable support for the TRIM command for improved performance
11546 +                       on delete operations, which is recommended for use with
11547 +                       solid-state drives (SSDs).
11549 +force                  Forces the driver to mount partitions even if the 'dirty' flag
11550 +                       (volume dirty) is set. Not recommended for use.
11552 +sparse                 Create new files as "sparse".
11554 +showmeta               Use this parameter to show all meta-files (System Files) on
11555 +                       a mounted NTFS partition.
11556 +                       By default, all meta-files are hidden.
11558 +prealloc               Aggressively preallocate space for files whose size is
11559 +                       increasing on writes. Decreases fragmentation in case of
11560 +                       parallel write operations to different files.
11562 +no_acs_rules           "No access rules" mount option sets access rights for
11563 +                       files/folders to 777 and owner/group to root. This mount
11564 +                       option overrides all other permissions:
11565 +                       - permission changes for files/folders will be reported
11566 +                               as successful, but they will remain 777;
11567 +                       - owner/group changes will be reported as successful, but
11568 +                               they will stay root.
11570 +acl                    Support POSIX ACLs (Access Control Lists). Effective only if
11571 +                       supported by the kernel. Not to be confused with NTFS ACLs.
11574 +noatime                        Files and directories will not update their last access
11575 +                       time attribute if the partition is mounted with this parameter.
11576 +                       This option can speed up file system operation.
11578 +===============================================================================
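As an illustration (not part of the driver documentation), a volume can be
mounted with these options from C via mount(2); the device and mount point
below are assumptions::

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* Hypothetical device and mount point; options from the table
             * above: UTF-8 names, TRIM support, sparse files. */
            if (mount("/dev/sdb1", "/mnt/ntfs", "ntfs3", 0,
                      "nls=utf8,discard,sparse")) {
                    perror("mount");
                    return 1;
            }
            return 0;
    }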
11580 +ToDo list
11581 +=========
11583 +- Full journaling support (currently journal replaying is supported) over JBD.
11586 +References
11587 +==========
11588 +https://www.paragon-software.com/home/ntfs-linux-professional/
11589 +       - Commercial version of the NTFS driver for Linux.
11591 +almaz.alexandrovich@paragon-software.com
11592 +       - Direct e-mail address for feedback and requests on the NTFS3 implementation.
11593 diff --git a/Documentation/locking/futex2.rst b/Documentation/locking/futex2.rst
11594 new file mode 100644
11595 index 000000000000..3ab49f0e741c
11596 --- /dev/null
11597 +++ b/Documentation/locking/futex2.rst
11598 @@ -0,0 +1,198 @@
11599 +.. SPDX-License-Identifier: GPL-2.0
11601 +======
11602 +futex2
11603 +======
11605 +:Author: André Almeida <andrealmeid@collabora.com>
11607 +futex, or fast user mutex, is a set of syscalls that allows userspace to create
11608 +performant synchronization mechanisms, such as mutexes, semaphores and
11609 +condition variables, in userspace. C standard libraries, like glibc, use it
11610 +as a means to implement higher-level interfaces like pthreads.
11612 +The interface
11613 +=============
11615 +uAPI functions
11616 +--------------
11618 +.. kernel-doc:: kernel/futex2.c
11619 +   :identifiers: sys_futex_wait sys_futex_wake sys_futex_waitv sys_futex_requeue
11621 +uAPI structures
11622 +---------------
11624 +.. kernel-doc:: include/uapi/linux/futex.h
11626 +The ``flag`` argument
11627 +---------------------
11629 +The flag is used to specify the size of the futex word
11630 +(FUTEX_[8, 16, 32]). It's mandatory to define one, since there's no
11631 +default size.
11633 +By default, the timeout uses a monotonic clock, but a realtime clock can be
11634 +selected with the FUTEX_REALTIME_CLOCK flag.
11636 +By default, futexes are of the private type, which means that the user address
11637 +will only be accessed by threads that share the same memory region. This allows
11638 +for some internal optimizations, so they are faster. However, if the address
11639 +needs to be shared with different processes (like using ``mmap()`` or ``shm()``),
11640 +they need to be defined as shared by setting the flag FUTEX_SHARED_FLAG.
11642 +By default, the operation has no NUMA-awareness, meaning that the user can't
11643 +choose the memory node where the kernel-side futex data will be stored. The
11644 +user can choose the node where it wants to operate by setting the
11645 +FUTEX_NUMA_FLAG and using the following structure (where X can be 8, 16, or
11646 +32)::
11648 + struct futexX_numa {
11649 +         __uX value;
11650 +         __sX hint;
11651 + };
11653 +This structure should be passed as the ``void *uaddr`` of futex functions. The
11654 +address of the structure is what will be waited on/woken on, and the
11655 +``value`` will be compared to ``val`` as usual. The ``hint`` member is used to
11656 +define which node the futex will use. When waiting, the futex will be
11657 +registered on a kernel-side table stored on that node; when waking, the futex
11658 +will be searched for on that given table. That means that there's no redundancy
11659 +between tables, and a wrong ``hint`` value will lead to undesired behavior.
11660 +Userspace is responsible for dealing with node migration issues that may
11661 +occur. ``hint`` can range from [0, MAX_NUMA_NODES), for specifying a node, or
11662 +-1, to use the same node the current process is using.
11664 +When not using FUTEX_NUMA_FLAG on a NUMA system, the futex will be stored in a
11665 +global table allocated on the first node.
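A minimal sketch of declaring such a structure in userspace follows; the
flag names come from the text above, and the actual syscall is omitted
since no glibc wrapper exists::

    #include <stdint.h>
    #include <stdio.h>

    /* Layout from the text above, for a 32-bit futex word. */
    struct futex32_numa {
            uint32_t value;  /* compared against 'val', as usual */
            int32_t  hint;   /* NUMA node, or -1 for the current node */
    };

    int main(void)
    {
            struct futex32_numa f = { .value = 0, .hint = -1 };

            /* One would pass &f as the 'uaddr' of futex_wait()/futex_wake()
             * together with FUTEX_NUMA_FLAG and the 32-bit size flag. */
            printf("futex word at %p, hint %d\n", (void *)&f, f.hint);
            return 0;
    }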
11667 +The ``timo`` argument
11668 +---------------------
11670 +As per the Y2038 work done in the kernel, new interfaces shouldn't add timeout
11671 +options known to be buggy. Given that, ``timo`` should be a 64-bit timeout on
11672 +all platforms, using an absolute timeout value.
11674 +Implementation
11675 +==============
11677 +The internal implementation follows a similar design to the original futex.
11678 +Given that we want to replicate the same external behavior of current futex,
11679 +this should be somewhat expected.
11681 +Waiting
11682 +-------
11684 +All wait operations are treated as if you want to wait on N
11685 +futexes, so the path for futex_wait and futex_waitv is basically the same.
11686 +For both syscalls, the first step is to prepare an internal list
11687 +of futexes to wait for (using struct futexv_head). For futex_wait() calls, this
11688 +list will have a single object.
11690 +We have a hash table, where waiters register themselves before sleeping. Then
11691 +the wake function checks this table looking for waiters at uaddr.  The hash
11692 +bucket to be used is determined by a struct futex_key, that stores information
11693 +to uniquely identify an address from a given process. Given the huge address
11694 +space, there'll be hash collisions, so we store information to be used later
11695 +for collision handling.
11697 +First, for every futex we want to wait on, we check if (``*uaddr == val``).
11698 +This check is done holding the bucket lock, so we are correctly serialized with
11699 +any futex_wake() calls. If any waiter fails the check above, we dequeue all
11700 +futexes. The check (``*uaddr == val``) can fail for two reasons:
11702 +- The values are different, and we return -EAGAIN. However, if while
11703 +  dequeueing we found that some futexes were awakened, we prioritize this
11704 +  and return success.
11706 +- When trying to access the user address, we do so with page faults
11707 +  disabled because we are holding a bucket's spin lock (and can't sleep
11708 +  while holding a spin lock). If there's an error, it might be a page
11709 +  fault, or an invalid address. We release the lock, dequeue everyone
11710 +  (because it's illegal to sleep while there are futexes enqueued, we
11711 +  could lose wakeups) and try again with page fault enabled. If we
11712 +  succeed, this means that the address is valid, but we need to do
11713 +  all the work again. For serialization reasons, we need to have the
11714 +  spin lock when getting the user value. Additionally, for shared
11715 +  futexes, we also need to recalculate the hash, since the underlying
11716 +  mapping mechanisms could have changed when dealing with page fault.
11717 +  If, even with page fault enabled, we can't access the address, it
11718 +  means it's an invalid user address, and we return -EFAULT. For this
11719 +  case, we prioritize the error, even if some futexes were awakened.
11721 +If the check is OK, the futex is enqueued on a linked list in its bucket, and
11722 +we proceed to the next one. If all waiters succeed, we put the thread to sleep
11723 +until a futex_wake() call, the timeout expires, or we get a signal. After waking up,
11724 +we dequeue everyone, and check if some futex was awakened. This dequeue is done
11725 +by iteratively walking each element of the struct futex_head list.
11727 +All enqueuing/dequeuing operations require holding the bucket lock, to avoid
11728 +racing while modifying the list.
11730 +Waking
11731 +------
11733 +We get the bucket that's storing the waiters at uaddr, and wake the required
11734 +number of waiters, checking for hash collisions.
11736 +There's an optimization that makes futex_wake() not take the bucket lock if
11737 +there's no one to be woken on that bucket. It checks an atomic counter that each
11738 +bucket has; if it reads 0, the syscall exits. In order for this to work, the
11739 +waiter thread increases it before taking the lock, so the wake thread will
11740 +correctly see that there's someone waiting and will continue the path to take
11741 +the bucket lock. To get the correct serialization, the waiter issues a memory
11742 +barrier after increasing the bucket counter and the waker issues a memory
11743 +barrier before checking it.
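The counter-plus-barriers protocol can be sketched with C11 atomics; this is
an illustration of the idea, not the kernel code::

    #include <stdatomic.h>
    #include <stdbool.h>

    struct bucket {
            atomic_uint waiters;
            /* bucket lock and waiter list omitted */
    };

    static void waiter_register(struct bucket *b)
    {
            atomic_fetch_add(&b->waiters, 1);
            /* Pairs with the fence in waker_should_lock(): the waker must
             * observe the increment, or it could skip a real waiter. */
            atomic_thread_fence(memory_order_seq_cst);
            /* ... take the bucket lock, enqueue, sleep ... */
    }

    static bool waker_should_lock(struct bucket *b)
    {
            atomic_thread_fence(memory_order_seq_cst);
            /* Zero means nobody registered: skip the bucket lock. */
            return atomic_load(&b->waiters) != 0;
    }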
11745 +Requeuing
11746 +---------
11748 +The requeue path first checks for each struct futex_requeue and their flags.
11749 +Then, it will compare the expected value with the one at uaddr1::uaddr.
11750 +Following the same serialization explained at Waking_, we increase the atomic
11751 +counter for the bucket of uaddr2 before taking the lock. We need to have both
11752 +buckets' locks at the same time so we don't race with other futex operations. To
11753 +ensure the locks are taken in the same order for all threads (and thus avoiding
11754 +deadlocks), every requeue operation takes the "smaller" bucket first, when
11755 +comparing both addresses.
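Ordering the pair of locks by address is a standard deadlock-avoidance
pattern; a generic sketch (illustrative, not the kernel implementation)::

    #include <pthread.h>

    /* Always take the lower-addressed lock first, so every thread
     * acquires any pair in the same global order. */
    static void lock_two(pthread_mutex_t *a, pthread_mutex_t *b)
    {
            if (a == b) {
                    pthread_mutex_lock(a);
                    return;
            }
            if (a > b) {
                    pthread_mutex_t *tmp = a;

                    a = b;
                    b = tmp;
            }
            pthread_mutex_lock(a);
            pthread_mutex_lock(b);
    }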
11757 +If the compare with user value succeeds, we proceed by waking ``nr_wake``
11758 +futexes, and then requeuing ``nr_requeue`` from the bucket of uaddr1 to uaddr2.
11759 +This consists of a simple list deletion/addition and replacing the old futex key
11760 +with the new one.
11762 +Futex keys
11763 +----------
11765 +There are two types of futexes: private and shared ones. Private futexes are
11766 +meant to be used by threads that share the same memory space, are easier to
11767 +identify uniquely and thus can have some performance optimizations. The
11768 +elements for identifying one are: the start address of the page where the
11769 +address is, the address offset within the page and the current->mm pointer.
11771 +Now, for uniquely identifying a shared futex:
11773 +- If the page containing the user address is an anonymous page, we can
11774 +  just use the same data used for private futexes (the start address of
11775 +  the page, the address offset within the page and the current->mm
11776 +  pointer); that is enough to uniquely identify such a futex. We
11777 +  also set one bit in the key to differentiate it from a private futex
11778 +  used on the same address (mixing shared and private calls does not
11779 +  work).
11781 +- If the page is file-backed, current->mm may not be the same for
11782 +  every user of this futex, so we need to use other data: the
11783 +  page->index, a UUID for the struct inode and the offset within the
11784 +  page.
11786 +Note that the members of futex_key don't have any particular meaning after they
11787 +are part of the struct - they are just bytes to identify a futex. Given that,
11788 +we don't need to use a particular name or type that matches the original data;
11789 +we only need to care about the bitsize of each component and make both private
11790 +and shared fit in the same memory space.
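+As an illustration, both variants can share one layout along these lines
+(a sketch with made-up field names, not the in-tree definition)::
+  struct futex_key {
+          u64 pointer;            /* mm pointer, or the inode UUID */
+          unsigned long index;    /* page start address, or page->index */
+          unsigned int offset;    /* offset within the page + "shared" bit */
+  };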
11792 +Source code documentation
11793 +=========================
11795 +.. kernel-doc:: kernel/futex2.c
11796 +   :no-identifiers: sys_futex_wait sys_futex_wake sys_futex_waitv sys_futex_requeue
11797 diff --git a/Documentation/locking/index.rst b/Documentation/locking/index.rst
11798 index 7003bd5aeff4..9bf03c7fa1ec 100644
11799 --- a/Documentation/locking/index.rst
11800 +++ b/Documentation/locking/index.rst
11801 @@ -24,6 +24,7 @@ locking
11802      percpu-rw-semaphore
11803      robust-futexes
11804      robust-futex-ABI
11805 +    futex2
11807  .. only::  subproject and html
11809 diff --git a/Documentation/powerpc/syscall64-abi.rst b/Documentation/powerpc/syscall64-abi.rst
11810 index dabee3729e5a..56490c4c0c07 100644
11811 --- a/Documentation/powerpc/syscall64-abi.rst
11812 +++ b/Documentation/powerpc/syscall64-abi.rst
11813 @@ -109,6 +109,16 @@ auxiliary vector.
11815  scv 0 syscalls will always behave as PPC_FEATURE2_HTM_NOSC.
11817 +ptrace
11818 +------
11819 +When ptracing system calls (PTRACE_SYSCALL), the pt_regs.trap value contains
11820 +the system call type, which can be used to distinguish between sc and scv 0
11821 +system calls so that the different register conventions can be accounted for.
11823 +If the value of (pt_regs.trap & 0xfff0) is 0xc00, the system call was
11824 +performed with the sc instruction; if it is 0x3000, the system call was
11825 +performed with the scv 0 instruction.
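+A tracer can read this out as sketched below; this is a minimal
+illustration that assumes a tracee stopped at a syscall and omits all
+error handling::
+  #include <sys/ptrace.h>
+  #include <asm/ptrace.h>         /* powerpc struct pt_regs with trap field */
+  struct pt_regs regs;
+  ptrace(PTRACE_GETREGS, pid, NULL, &regs);
+  if ((regs.trap & 0xfff0) == 0xc00)
+          ;       /* sc: cr0.SO set in case of error */
+  else if ((regs.trap & 0xfff0) == 0x3000)
+          ;       /* scv 0: negative return value in r3 means -errno */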
11827  vsyscall
11828  ========
11830 diff --git a/Documentation/userspace-api/media/v4l/subdev-formats.rst b/Documentation/userspace-api/media/v4l/subdev-formats.rst
11831 index 7f16cbe46e5c..e6a9faa81197 100644
11832 --- a/Documentation/userspace-api/media/v4l/subdev-formats.rst
11833 +++ b/Documentation/userspace-api/media/v4l/subdev-formats.rst
11834 @@ -1567,8 +1567,8 @@ The following tables list existing packed RGB formats.
11835        - MEDIA_BUS_FMT_RGB101010_1X30
11836        - 0x1018
11837        -
11838 -      - 0
11839 -      - 0
11840 +      -
11841 +      -
11842        - r\ :sub:`9`
11843        - r\ :sub:`8`
11844        - r\ :sub:`7`
11845 diff --git a/Documentation/vm/index.rst b/Documentation/vm/index.rst
11846 index eff5fbd492d0..c353b3f55924 100644
11847 --- a/Documentation/vm/index.rst
11848 +++ b/Documentation/vm/index.rst
11849 @@ -17,6 +17,7 @@ various features of the Linux memory management
11851     swap_numa
11852     zswap
11853 +   multigen_lru
11855  Kernel developers MM documentation
11856  ==================================
11857 diff --git a/Documentation/vm/multigen_lru.rst b/Documentation/vm/multigen_lru.rst
11858 new file mode 100644
11859 index 000000000000..cf772aeca317
11860 --- /dev/null
11861 +++ b/Documentation/vm/multigen_lru.rst
11862 @@ -0,0 +1,192 @@
11863 +=====================
11864 +Multigenerational LRU
11865 +=====================
11867 +Quick Start
11868 +===========
11869 +Build Options
11870 +-------------
11871 +:Required: Set ``CONFIG_LRU_GEN=y``.
11873 +:Optional: Change ``CONFIG_NR_LRU_GENS`` to a number ``X`` to support
11874 + a maximum of ``X`` generations.
11876 +:Optional: Change ``CONFIG_TIERS_PER_GEN`` to a number ``Y`` to support
11877 + a maximum of ``Y`` tiers per generation.
11879 +:Optional: Set ``CONFIG_LRU_GEN_ENABLED=y`` to turn the feature on by
11880 + default.
11882 +Runtime Options
11883 +---------------
11884 +:Required: Write ``1`` to ``/sys/kernel/mm/lru_gen/enable`` if the
11885 + feature was not turned on by default.
11887 +:Optional: Change ``/sys/kernel/mm/lru_gen/spread`` to a number ``N``
11888 + to spread pages out across ``N+1`` generations. ``N`` should be less
11889 + than ``X``. Larger values make the background aging more aggressive.
11891 +:Optional: Read ``/sys/kernel/debug/lru_gen`` to verify the feature.
11892 + This file has the following output:
11896 +  memcg  memcg_id  memcg_path
11897 +    node  node_id
11898 +      min_gen  birth_time  anon_size  file_size
11899 +      ...
11900 +      max_gen  birth_time  anon_size  file_size
11902 +Given a memcg and a node, ``min_gen`` is the oldest generation
11903 +number and ``max_gen`` is the youngest. ``birth_time`` is in
11904 +milliseconds. ``anon_size`` and ``file_size`` are in pages.
11906 +Recipes
11907 +-------
11908 +:Android on ARMv8.1+: ``X=4``, ``N=0``
11910 +:Android on pre-ARMv8.1 CPUs: Not recommended due to the lack of
11911 + ``ARM64_HW_AFDBM``
11913 +:Laptops running Chrome on x86_64: ``X=7``, ``N=2``
11915 +:Working set estimation: Write ``+ memcg_id node_id gen [swappiness]``
11916 + to ``/sys/kernel/debug/lru_gen`` to account referenced pages to
11917 + generation ``max_gen`` and create the next generation ``max_gen+1``.
11918 + ``gen`` should be equal to ``max_gen``. A swap file and a non-zero
11919 + ``swappiness`` are required to scan the anon type. If swapping is not
11920 + desired, set ``vm.swappiness`` to ``0`` (see the sketch after this list).
11922 +:Proactive reclaim: Write ``- memcg_id node_id gen [swappiness]
11923 + [nr_to_reclaim]`` to ``/sys/kernel/debug/lru_gen`` to evict
11924 + generations less than or equal to ``gen``. ``gen`` should be less
11925 + than ``max_gen-1`` as ``max_gen`` and ``max_gen-1`` are active
11926 + generations and therefore protected from eviction. Use
11927 + ``nr_to_reclaim`` to limit the number of pages to be evicted.
11928 + Multiple command lines are supported, as is concatenation with
11929 + the delimiters ``,`` and ``;``.
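+For example, the working set estimation recipe above boils down to a
+single write; in this sketch, ``memcg_id``, ``node_id`` and ``max_gen``
+are assumed to have been parsed from the file beforehand::
+  FILE *f = fopen("/sys/kernel/debug/lru_gen", "w");
+  /* account referenced pages to max_gen and create max_gen+1 */
+  fprintf(f, "+ %d %d %d\n", memcg_id, node_id, max_gen);
+  fclose(f);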
11931 +Framework
11932 +=========
11933 +For each ``lruvec``, evictable pages are divided into multiple
11934 +generations. The youngest generation number is stored in ``max_seq``
11935 +for both anon and file types as they are aged on an equal footing. The
11936 +oldest generation numbers are stored in ``min_seq[2]`` separately for
11937 +anon and file types as clean file pages can be evicted regardless of
11938 +swap and write-back constraints. Generation numbers are truncated into
11939 +``order_base_2(CONFIG_NR_LRU_GENS+1)`` bits in order to fit into
11940 +``page->flags``. The sliding window technique is used to prevent
11941 +truncated generation numbers from overlapping. Each truncated
11942 +generation number is an index to an array of per-type and per-zone
11943 +lists. Evictable pages are added to the per-zone lists indexed by
11944 +``max_seq`` or ``min_seq[2]`` (modulo ``CONFIG_NR_LRU_GENS``),
11945 +depending on whether they are being faulted in.
11947 +Each generation is then divided into multiple tiers. Tiers represent
11948 +levels of usage from file descriptors only. Pages accessed N times via
11949 +file descriptors belong to tier ``order_base_2(N)``. In contrast to moving
11950 +across generations, which requires the lru lock, moving across tiers
11951 +only involves an atomic operation on ``page->flags`` and therefore has
11952 +a negligible cost.
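+In code terms, the bookkeeping above can be sketched as follows (the
+names are illustrative, not the in-tree ones)::
+  /* truncated generation number selects the per-type, per-zone list */
+  gen = seq % CONFIG_NR_LRU_GENS;
+  list = &lruvec->lists[gen][type][zone];
+  /* a page accessed N times via file descriptors sits in this tier */
+  tier = order_base_2(N);         /* i.e. ceil(log2(N)) */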
11954 +The workflow comprises two conceptually independent functions: the
11955 +aging and the eviction.
11957 +Aging
11958 +-----
11959 +The aging produces young generations. Given an ``lruvec``, the aging
11960 +scans page tables for referenced pages of this ``lruvec``. Upon
11961 +finding one, the aging updates its generation number to ``max_seq``.
11962 +After each round of scan, the aging increments ``max_seq``.
11964 +The aging maintains either a system-wide ``mm_struct`` list or
11965 +per-memcg ``mm_struct`` lists, and it only scans page tables of
11966 +processes that have been scheduled since the last scan. Since scans
11967 +are differential with respect to referenced pages, the cost is roughly
11968 +proportional to their number.
11970 +The aging is due when both elements of ``min_seq[2]`` reach ``max_seq-1``,
11971 +assuming both anon and file types are reclaimable.
11973 +Eviction
11974 +--------
11975 +The eviction consumes old generations. Given an ``lruvec``, the
11976 +eviction scans the pages on the per-zone lists indexed by either element
11977 +of ``min_seq[2]``. It first tries to select a type based on the values of
11978 +``min_seq[2]``. When anon and file types are both available from the
11979 +same generation, it selects the one that has a lower refault rate.
11981 +During a scan, the eviction sorts pages according to their generation
11982 +numbers if the aging has found them referenced. It also moves pages
11983 +from the tiers that have higher refault rates than tier 0 to the next
11984 +generation.
11986 +When it finds that all the per-zone lists of the selected type are empty,
11987 +the eviction increments the element of ``min_seq[2]`` indexed by that type.
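+A sketch of this selection and bookkeeping, with hypothetical helpers
+(``refault_rate()``, ``scan()``, ``lists_empty()``)::
+  /* prefer the older generation; break ties by refault rate */
+  if (min_seq[ANON] != min_seq[FILE])
+          type = min_seq[ANON] < min_seq[FILE] ? ANON : FILE;
+  else
+          type = refault_rate(ANON) <= refault_rate(FILE) ? ANON : FILE;
+  scan(lruvec, type, min_seq[type] % CONFIG_NR_LRU_GENS);
+  if (lists_empty(lruvec, type))
+          min_seq[type]++;        /* this generation has been consumed */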
11989 +Rationale
11990 +=========
11991 +Limitations of Current Implementation
11992 +-------------------------------------
11993 +Notion of Active/Inactive
11994 +~~~~~~~~~~~~~~~~~~~~~~~~~
11995 +For servers equipped with hundreds of gigabytes of memory, the
11996 +granularity of the active/inactive lists is too coarse to be useful for job
11997 +scheduling. False active/inactive rates are relatively high, and thus
11998 +the assumed savings may not materialize.
12000 +For phones and laptops, executable pages are frequently evicted
12001 +despite the fact that there are many less recently used anon pages.
12002 +Major faults on executable pages cause ``janks`` (slow UI renderings)
12003 +and negatively impact user experience.
12005 +For ``lruvec``\s from different memcgs or nodes, comparisons are
12006 +impossible due to the lack of a common frame of reference.
12008 +Incremental Scans via ``rmap``
12009 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
12010 +Each incremental scan picks up where the last scan left off and
12011 +stops after it has found a handful of unreferenced pages. For
12012 +workloads using a large amount of anon memory, incremental scans lose
12013 +the advantage under sustained memory pressure due to high ratios of
12014 +the number of scanned pages to the number of reclaimed pages. On top
12015 +of that, the ``rmap`` has poor memory locality due to its complex data
12016 +structures. The combined effects typically result in a high amount of
12017 +CPU usage in the reclaim path.
12019 +Benefits of Multigenerational LRU
12020 +---------------------------------
12021 +Notion of Generation Numbers
12022 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~
12023 +The notion of generation numbers introduces a quantitative approach to
12024 +memory overcommit. A larger number of pages can be spread out across
12025 +configurable generations, and thus they have relatively low false
12026 +active/inactive rates. Each generation includes all pages that have
12027 +been referenced since the last generation.
12029 +Given an ``lruvec``, scans and the selections between anon and file
12030 +types are all based on generation numbers, which are simple and yet
12031 +effective. For different ``lruvec``\s, comparisons are still possible
12032 +based on birth times of generations.
12034 +Differential Scans via Page Tables
12035 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
12036 +Each differential scan discovers all pages that have been referenced
12037 +since the last scan. Specifically, it walks the ``mm_struct`` list
12038 +associated with an ``lruvec`` to scan page tables of processes that
12039 +have been scheduled since the last scan. The cost of each differential
12040 +scan is roughly proportional to the number of referenced pages it
12041 +discovers. Unless address spaces are extremely sparse, page tables
12042 +usually have better memory locality than the ``rmap``. The end result
12043 +is generally a significant reduction in CPU usage for workloads
12044 +using a large amount of anon memory.
12046 +To-do List
12047 +==========
12048 +KVM Optimization
12049 +----------------
12050 +Support shadow page table scanning.
12052 +NUMA Optimization
12053 +-----------------
12054 +Support NUMA policies and per-node RSS counters.
12055 diff --git a/MAINTAINERS b/MAINTAINERS
12056 index 9450e052f1b1..b7a2162d159a 100644
12057 --- a/MAINTAINERS
12058 +++ b/MAINTAINERS
12059 @@ -7377,7 +7377,7 @@ F:        Documentation/locking/*futex*
12060  F:     include/asm-generic/futex.h
12061  F:     include/linux/futex.h
12062  F:     include/uapi/linux/futex.h
12063 -F:     kernel/futex.c
12064 +F:     kernel/futex*
12065  F:     tools/perf/bench/futex*
12066  F:     tools/testing/selftests/futex/
12068 @@ -12775,6 +12775,13 @@ T:     git git://git.kernel.org/pub/scm/linux/kernel/git/aia21/ntfs.git
12069  F:     Documentation/filesystems/ntfs.rst
12070  F:     fs/ntfs/
12072 +NTFS3 FILESYSTEM
12073 +M:     Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
12074 +S:     Supported
12075 +W:     http://www.paragon-software.com/
12076 +F:     Documentation/filesystems/ntfs3.rst
12077 +F:     fs/ntfs3/
12079  NUBUS SUBSYSTEM
12080  M:     Finn Thain <fthain@telegraphics.com.au>
12081  L:     linux-m68k@lists.linux-m68k.org
12082 @@ -19912,6 +19919,18 @@ F:     Documentation/vm/zsmalloc.rst
12083  F:     include/linux/zsmalloc.h
12084  F:     mm/zsmalloc.c
12086 +ZSTD
12087 +M:     Nick Terrell <terrelln@fb.com>
12088 +S:     Maintained
12089 +B:     https://github.com/facebook/zstd/issues
12090 +T:     git git://github.com/terrelln/linux.git
12091 +F:     include/linux/zstd*
12092 +F:     lib/zstd/
12093 +F:     lib/decompress_unzstd.c
12094 +F:     crypto/zstd.c
12095 +N:     zstd
12096 +K:     zstd
12098  ZSWAP COMPRESSED SWAP CACHING
12099  M:     Seth Jennings <sjenning@redhat.com>
12100  M:     Dan Streetman <ddstreet@ieee.org>
12101 diff --git a/Makefile b/Makefile
12102 index 3a10a8e08b6d..ef62c826e868 100644
12103 --- a/Makefile
12104 +++ b/Makefile
12105 @@ -1,7 +1,7 @@
12106  # SPDX-License-Identifier: GPL-2.0
12107  VERSION = 5
12108  PATCHLEVEL = 12
12109 -SUBLEVEL = 0
12110 +SUBLEVEL = 8
12111  EXTRAVERSION =
12112  NAME = Frozen Wasteland
12114 @@ -775,16 +775,16 @@ KBUILD_CFLAGS += -Wno-gnu
12115  KBUILD_CFLAGS += -mno-global-merge
12116  else
12118 -# These warnings generated too much noise in a regular build.
12119 -# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
12120 -KBUILD_CFLAGS += -Wno-unused-but-set-variable
12122  # Warn about unmarked fall-throughs in switch statement.
12123  # Disabled for clang while comment to attribute conversion happens and
12124  # https://github.com/ClangBuiltLinux/linux/issues/636 is discussed.
12125  KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough,)
12126  endif
12128 +# These warnings generated too much noise in a regular build.
12129 +# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
12130 +KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
12132  KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
12133  ifdef CONFIG_FRAME_POINTER
12134  KBUILD_CFLAGS  += -fno-omit-frame-pointer -fno-optimize-sibling-calls
12135 @@ -1066,8 +1066,8 @@ endif # INSTALL_MOD_STRIP
12136  export mod_strip_cmd
12138  # CONFIG_MODULE_COMPRESS, if defined, will cause module to be compressed
12139 -# after they are installed in agreement with CONFIG_MODULE_COMPRESS_GZIP
12140 -# or CONFIG_MODULE_COMPRESS_XZ.
12141 +# after they are installed in agreement with CONFIG_MODULE_COMPRESS_GZIP,
12142 +# CONFIG_MODULE_COMPRESS_XZ, or CONFIG_MODULE_COMPRESS_ZSTD.
12144  mod_compress_cmd = true
12145  ifdef CONFIG_MODULE_COMPRESS
12146 @@ -1077,6 +1077,9 @@ ifdef CONFIG_MODULE_COMPRESS
12147    ifdef CONFIG_MODULE_COMPRESS_XZ
12148      mod_compress_cmd = $(XZ) --lzma2=dict=2MiB -f
12149    endif # CONFIG_MODULE_COMPRESS_XZ
12150 +  ifdef CONFIG_MODULE_COMPRESS_ZSTD
12151 +    mod_compress_cmd = $(ZSTD) -T0 --rm -f -q
12152 +  endif # CONFIG_MODULE_COMPRESS_ZSTD
12153  endif # CONFIG_MODULE_COMPRESS
12154  export mod_compress_cmd
12156 @@ -1513,7 +1516,7 @@ endif # CONFIG_MODULES
12157  # make distclean Remove editor backup files, patch leftover files and the like
12159  # Directories & files removed with 'make clean'
12160 -CLEAN_FILES += include/ksym vmlinux.symvers \
12161 +CLEAN_FILES += include/ksym vmlinux.symvers modules-only.symvers \
12162                modules.builtin modules.builtin.modinfo modules.nsdeps \
12163                compile_commands.json .thinlto-cache
12165 diff --git a/arch/Kconfig b/arch/Kconfig
12166 index ecfd3520b676..cbd7f66734ee 100644
12167 --- a/arch/Kconfig
12168 +++ b/arch/Kconfig
12169 @@ -782,6 +782,15 @@ config HAVE_ARCH_TRANSPARENT_HUGEPAGE
12170  config HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
12171         bool
12173 +config HAVE_ARCH_PARENT_PMD_YOUNG
12174 +       bool
12175 +       depends on PGTABLE_LEVELS > 2
12176 +       help
12177 +         Architectures that select this are able to set the accessed bit on
12178 +         non-leaf PMD entries in addition to leaf PTE entries where pages are
12179 +         mapped. For them, page table walkers that clear the accessed bit may
12180 +         stop at non-leaf PMD entries when they do not see the accessed bit.
12182  config HAVE_ARCH_HUGE_VMAP
12183         bool
12185 diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
12186 index ad9b7fe4dba3..4a9d33372fe2 100644
12187 --- a/arch/arc/include/asm/page.h
12188 +++ b/arch/arc/include/asm/page.h
12189 @@ -7,6 +7,18 @@
12191  #include <uapi/asm/page.h>
12193 +#ifdef CONFIG_ARC_HAS_PAE40
12195 +#define MAX_POSSIBLE_PHYSMEM_BITS      40
12196 +#define PAGE_MASK_PHYS                 (0xff00000000ull | PAGE_MASK)
12198 +#else /* CONFIG_ARC_HAS_PAE40 */
12200 +#define MAX_POSSIBLE_PHYSMEM_BITS      32
12201 +#define PAGE_MASK_PHYS                 PAGE_MASK
12203 +#endif /* CONFIG_ARC_HAS_PAE40 */
12205  #ifndef __ASSEMBLY__
12207  #define clear_page(paddr)              memset((paddr), 0, PAGE_SIZE)
12208 diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
12209 index 163641726a2b..5878846f00cf 100644
12210 --- a/arch/arc/include/asm/pgtable.h
12211 +++ b/arch/arc/include/asm/pgtable.h
12212 @@ -107,8 +107,8 @@
12213  #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
12215  /* Set of bits not changed in pte_modify */
12216 -#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
12218 +#define _PAGE_CHG_MASK (PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
12219 +                                                          _PAGE_SPECIAL)
12220  /* More Abbrevaited helpers */
12221  #define PAGE_U_NONE     __pgprot(___DEF)
12222  #define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
12223 @@ -132,13 +132,7 @@
12224  #define PTE_BITS_IN_PD0                (_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
12225  #define PTE_BITS_RWX           (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
12227 -#ifdef CONFIG_ARC_HAS_PAE40
12228 -#define PTE_BITS_NON_RWX_IN_PD1        (0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
12229 -#define MAX_POSSIBLE_PHYSMEM_BITS 40
12230 -#else
12231 -#define PTE_BITS_NON_RWX_IN_PD1        (PAGE_MASK | _PAGE_CACHEABLE)
12232 -#define MAX_POSSIBLE_PHYSMEM_BITS 32
12233 -#endif
12234 +#define PTE_BITS_NON_RWX_IN_PD1        (PAGE_MASK_PHYS | _PAGE_CACHEABLE)
12236  /**************************************************************************
12237   * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
12238 diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h
12239 index 2a97e2718a21..2a4ad619abfb 100644
12240 --- a/arch/arc/include/uapi/asm/page.h
12241 +++ b/arch/arc/include/uapi/asm/page.h
12242 @@ -33,5 +33,4 @@
12244  #define PAGE_MASK      (~(PAGE_SIZE-1))
12247  #endif /* _UAPI__ASM_ARC_PAGE_H */
12248 diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
12249 index 1743506081da..2cb8dfe866b6 100644
12250 --- a/arch/arc/kernel/entry.S
12251 +++ b/arch/arc/kernel/entry.S
12252 @@ -177,7 +177,7 @@ tracesys:
12254         ; Do the Sys Call as we normally would.
12255         ; Validate the Sys Call number
12256 -       cmp     r8,  NR_syscalls
12257 +       cmp     r8,  NR_syscalls - 1
12258         mov.hi  r0, -ENOSYS
12259         bhi     tracesys_exit
12261 @@ -255,7 +255,7 @@ ENTRY(EV_Trap)
12262         ;============ Normal syscall case
12264         ; syscall num shd not exceed the total system calls avail
12265 -       cmp     r8,  NR_syscalls
12266 +       cmp     r8,  NR_syscalls - 1
12267         mov.hi  r0, -ENOSYS
12268         bhi     .Lret_from_system_call
12270 diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
12271 index ce07e697916c..1bcc6985b9a0 100644
12272 --- a/arch/arc/mm/init.c
12273 +++ b/arch/arc/mm/init.c
12274 @@ -157,7 +157,16 @@ void __init setup_arch_memory(void)
12275         min_high_pfn = PFN_DOWN(high_mem_start);
12276         max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
12278 -       max_zone_pfn[ZONE_HIGHMEM] = min_low_pfn;
12279 +       /*
12280 +        * max_high_pfn should be ok here for both HIGHMEM and HIGHMEM+PAE.
12281 +        * For HIGHMEM without PAE max_high_pfn should be less than
12282 +        * min_low_pfn to guarantee that these two regions don't overlap.
12283 +        * For PAE case highmem is greater than lowmem, so it is natural
12284 +        * to use max_high_pfn.
12285 +        *
12286 +        * In both cases, holes should be handled by pfn_valid().
12287 +        */
12288 +       max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
12290         high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
12292 diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
12293 index fac4adc90204..95c649fbc95a 100644
12294 --- a/arch/arc/mm/ioremap.c
12295 +++ b/arch/arc/mm/ioremap.c
12296 @@ -53,9 +53,10 @@ EXPORT_SYMBOL(ioremap);
12297  void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
12298                            unsigned long flags)
12300 +       unsigned int off;
12301         unsigned long vaddr;
12302         struct vm_struct *area;
12303 -       phys_addr_t off, end;
12304 +       phys_addr_t end;
12305         pgprot_t prot = __pgprot(flags);
12307         /* Don't allow wraparound, zero size */
12308 @@ -72,7 +73,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
12310         /* Mappings have to be page-aligned */
12311         off = paddr & ~PAGE_MASK;
12312 -       paddr &= PAGE_MASK;
12313 +       paddr &= PAGE_MASK_PHYS;
12314         size = PAGE_ALIGN(end + 1) - paddr;
12316         /*
12317 diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
12318 index 9bb3c24f3677..9c7c68247289 100644
12319 --- a/arch/arc/mm/tlb.c
12320 +++ b/arch/arc/mm/tlb.c
12321 @@ -576,7 +576,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
12322                       pte_t *ptep)
12324         unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
12325 -       phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
12326 +       phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
12327         struct page *page = pfn_to_page(pte_pfn(*ptep));
12329         create_tlb(vma, vaddr, ptep);
12330 diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
12331 index fd94e27ba4fa..c1f804768621 100644
12332 --- a/arch/arm/boot/compressed/Makefile
12333 +++ b/arch/arm/boot/compressed/Makefile
12334 @@ -118,8 +118,8 @@ asflags-y := -DZIMAGE
12336  # Supply kernel BSS size to the decompressor via a linker symbol.
12337  KBSS_SZ = $(shell echo $$(($$($(NM) $(obj)/../../../../vmlinux | \
12338 -               sed -n -e 's/^\([^ ]*\) [AB] __bss_start$$/-0x\1/p' \
12339 -                      -e 's/^\([^ ]*\) [AB] __bss_stop$$/+0x\1/p') )) )
12340 +               sed -n -e 's/^\([^ ]*\) [ABD] __bss_start$$/-0x\1/p' \
12341 +                      -e 's/^\([^ ]*\) [ABD] __bss_stop$$/+0x\1/p') )) )
12342  LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ)
12343  # Supply ZRELADDR to the decompressor via a linker symbol.
12344  ifneq ($(CONFIG_AUTO_ZRELADDR),y)
12345 diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
12346 index 6c9804d2f3b4..6df1ce545061 100644
12347 --- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
12348 +++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
12349 @@ -713,9 +713,9 @@ &i2c7 {
12350         multi-master;
12351         status = "okay";
12353 -       si7021-a20@20 {
12354 +       si7021-a20@40 {
12355                 compatible = "silabs,si7020";
12356 -               reg = <0x20>;
12357 +               reg = <0x40>;
12358         };
12360         tmp275@48 {
12361 diff --git a/arch/arm/boot/dts/at91-sam9x60ek.dts b/arch/arm/boot/dts/at91-sam9x60ek.dts
12362 index 775ceb3acb6c..edca66c232c1 100644
12363 --- a/arch/arm/boot/dts/at91-sam9x60ek.dts
12364 +++ b/arch/arm/boot/dts/at91-sam9x60ek.dts
12365 @@ -8,6 +8,7 @@
12366   */
12367  /dts-v1/;
12368  #include "sam9x60.dtsi"
12369 +#include <dt-bindings/input/input.h>
12371  / {
12372         model = "Microchip SAM9X60-EK";
12373 @@ -84,7 +85,7 @@ gpio_keys {
12374                 sw1 {
12375                         label = "SW1";
12376                         gpios = <&pioD 18 GPIO_ACTIVE_LOW>;
12377 -                       linux,code=<0x104>;
12378 +                       linux,code=<KEY_PROG1>;
12379                         wakeup-source;
12380                 };
12381         };
12382 diff --git a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
12383 index 84e1180f3e89..a9e6fee55a2a 100644
12384 --- a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
12385 +++ b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
12386 @@ -11,6 +11,7 @@
12387  #include "at91-sama5d27_som1.dtsi"
12388  #include <dt-bindings/mfd/atmel-flexcom.h>
12389  #include <dt-bindings/gpio/gpio.h>
12390 +#include <dt-bindings/input/input.h>
12392  / {
12393         model = "Atmel SAMA5D27 SOM1 EK";
12394 @@ -466,7 +467,7 @@ gpio_keys {
12395                 pb4 {
12396                         label = "USER";
12397                         gpios = <&pioA PIN_PA29 GPIO_ACTIVE_LOW>;
12398 -                       linux,code = <0x104>;
12399 +                       linux,code = <KEY_PROG1>;
12400                         wakeup-source;
12401                 };
12402         };
12403 diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
12404 index 180a08765cb8..ff83967fd008 100644
12405 --- a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
12406 +++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
12407 @@ -8,6 +8,7 @@
12408   */
12409  /dts-v1/;
12410  #include "at91-sama5d27_wlsom1.dtsi"
12411 +#include <dt-bindings/input/input.h>
12413  / {
12414         model = "Microchip SAMA5D27 WLSOM1 EK";
12415 @@ -35,7 +36,7 @@ gpio_keys {
12416                 sw4 {
12417                         label = "USER BUTTON";
12418                         gpios = <&pioA PIN_PB2 GPIO_ACTIVE_LOW>;
12419 -                       linux,code = <0x104>;
12420 +                       linux,code = <KEY_PROG1>;
12421                         wakeup-source;
12422                 };
12423         };
12424 diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts
12425 index 46722a163184..bd64721fa23c 100644
12426 --- a/arch/arm/boot/dts/at91-sama5d2_icp.dts
12427 +++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts
12428 @@ -12,6 +12,7 @@
12429  #include "sama5d2.dtsi"
12430  #include "sama5d2-pinfunc.h"
12431  #include <dt-bindings/gpio/gpio.h>
12432 +#include <dt-bindings/input/input.h>
12433  #include <dt-bindings/mfd/atmel-flexcom.h>
12435  / {
12436 @@ -51,7 +52,7 @@ gpio_keys {
12437                 sw4 {
12438                         label = "USER_PB1";
12439                         gpios = <&pioA PIN_PD0 GPIO_ACTIVE_LOW>;
12440 -                       linux,code = <0x104>;
12441 +                       linux,code = <KEY_PROG1>;
12442                         wakeup-source;
12443                 };
12444         };
12445 diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
12446 index 8de57d164acd..dfd150eb0fd8 100644
12447 --- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
12448 +++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
12449 @@ -11,6 +11,7 @@
12450  #include "sama5d2-pinfunc.h"
12451  #include <dt-bindings/mfd/atmel-flexcom.h>
12452  #include <dt-bindings/gpio/gpio.h>
12453 +#include <dt-bindings/input/input.h>
12454  #include <dt-bindings/pinctrl/at91.h>
12456  / {
12457 @@ -402,7 +403,7 @@ gpio_keys {
12458                 bp1 {
12459                         label = "PB_USER";
12460                         gpios = <&pioA PIN_PA10 GPIO_ACTIVE_LOW>;
12461 -                       linux,code = <0x104>;
12462 +                       linux,code = <KEY_PROG1>;
12463                         wakeup-source;
12464                 };
12465         };
12466 diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
12467 index 4e7cf21f124c..509c732a0d8b 100644
12468 --- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
12469 +++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
12470 @@ -10,6 +10,7 @@
12471  #include "sama5d2-pinfunc.h"
12472  #include <dt-bindings/mfd/atmel-flexcom.h>
12473  #include <dt-bindings/gpio/gpio.h>
12474 +#include <dt-bindings/input/input.h>
12475  #include <dt-bindings/regulator/active-semi,8945a-regulator.h>
12477  / {
12478 @@ -712,7 +713,7 @@ gpio_keys {
12479                 bp1 {
12480                         label = "PB_USER";
12481                         gpios = <&pioA PIN_PB9 GPIO_ACTIVE_LOW>;
12482 -                       linux,code = <0x104>;
12483 +                       linux,code = <KEY_PROG1>;
12484                         wakeup-source;
12485                 };
12486         };
12487 diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
12488 index 5179258f9247..9c55a921263b 100644
12489 --- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
12490 +++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
12491 @@ -7,6 +7,7 @@
12492   */
12493  /dts-v1/;
12494  #include "sama5d36.dtsi"
12495 +#include <dt-bindings/input/input.h>
12497  / {
12498         model = "SAMA5D3 Xplained";
12499 @@ -354,7 +355,7 @@ gpio_keys {
12500                 bp3 {
12501                         label = "PB_USER";
12502                         gpios = <&pioE 29 GPIO_ACTIVE_LOW>;
12503 -                       linux,code = <0x104>;
12504 +                       linux,code = <KEY_PROG1>;
12505                         wakeup-source;
12506                 };
12507         };
12508 diff --git a/arch/arm/boot/dts/at91sam9260ek.dts b/arch/arm/boot/dts/at91sam9260ek.dts
12509 index d3446e42b598..ce96345d28a3 100644
12510 --- a/arch/arm/boot/dts/at91sam9260ek.dts
12511 +++ b/arch/arm/boot/dts/at91sam9260ek.dts
12512 @@ -7,6 +7,7 @@
12513   */
12514  /dts-v1/;
12515  #include "at91sam9260.dtsi"
12516 +#include <dt-bindings/input/input.h>
12518  / {
12519         model = "Atmel at91sam9260ek";
12520 @@ -156,7 +157,7 @@ btn3 {
12521                 btn4 {
12522                         label = "Button 4";
12523                         gpios = <&pioA 31 GPIO_ACTIVE_LOW>;
12524 -                       linux,code = <0x104>;
12525 +                       linux,code = <KEY_PROG1>;
12526                         wakeup-source;
12527                 };
12528         };
12529 diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
12530 index 6e6e672c0b86..87bb39060e8b 100644
12531 --- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
12532 +++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
12533 @@ -5,6 +5,7 @@
12534   * Copyright (C) 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
12535   */
12536  #include "at91sam9g20.dtsi"
12537 +#include <dt-bindings/input/input.h>
12539  / {
12541 @@ -234,7 +235,7 @@ btn3 {
12542                 btn4 {
12543                         label = "Button 4";
12544                         gpios = <&pioA 31 GPIO_ACTIVE_LOW>;
12545 -                       linux,code = <0x104>;
12546 +                       linux,code = <KEY_PROG1>;
12547                         wakeup-source;
12548                 };
12549         };
12550 diff --git a/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts b/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
12551 index 6a96655d8626..8ed403767540 100644
12552 --- a/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
12553 +++ b/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
12554 @@ -21,8 +21,8 @@ chosen {
12556         memory@0 {
12557                 device_type = "memory";
12558 -               reg = <0x00000000 0x08000000
12559 -                      0x88000000 0x08000000>;
12560 +               reg = <0x00000000 0x08000000>,
12561 +                     <0x88000000 0x08000000>;
12562         };
12564         leds {
12565 diff --git a/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts b/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
12566 index 3b0029e61b4c..667b118ba4ee 100644
12567 --- a/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
12568 +++ b/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
12569 @@ -21,8 +21,8 @@ chosen {
12571         memory@0 {
12572                 device_type = "memory";
12573 -               reg = <0x00000000 0x08000000
12574 -                      0x88000000 0x08000000>;
12575 +               reg = <0x00000000 0x08000000>,
12576 +                     <0x88000000 0x08000000>;
12577         };
12579         leds {
12580 diff --git a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
12581 index 90f57bad6b24..ff31ce45831a 100644
12582 --- a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
12583 +++ b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
12584 @@ -21,8 +21,8 @@ chosen {
12586         memory@0 {
12587                 device_type = "memory";
12588 -               reg = <0x00000000 0x08000000
12589 -                      0x88000000 0x18000000>;
12590 +               reg = <0x00000000 0x08000000>,
12591 +                     <0x88000000 0x18000000>;
12592         };
12594         spi {
12595 diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
12596 index fed75e6ab58c..61c7b137607e 100644
12597 --- a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
12598 +++ b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
12599 @@ -22,8 +22,8 @@ chosen {
12601         memory {
12602                 device_type = "memory";
12603 -               reg = <0x00000000 0x08000000
12604 -                      0x88000000 0x08000000>;
12605 +               reg = <0x00000000 0x08000000>,
12606 +                     <0x88000000 0x08000000>;
12607         };
12609         leds {
12610 diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts b/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
12611 index 79542e18915c..4c60eda296d9 100644
12612 --- a/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
12613 +++ b/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
12614 @@ -21,8 +21,8 @@ chosen {
12616         memory@0 {
12617                 device_type = "memory";
12618 -               reg = <0x00000000 0x08000000
12619 -                      0x88000000 0x08000000>;
12620 +               reg = <0x00000000 0x08000000>,
12621 +                     <0x88000000 0x08000000>;
12622         };
12624         leds {
12625 diff --git a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
12626 index 51c64f0b2560..9ca6d1b2590d 100644
12627 --- a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
12628 +++ b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
12629 @@ -21,8 +21,8 @@ chosen {
12631         memory@0 {
12632                 device_type = "memory";
12633 -               reg = <0x00000000 0x08000000
12634 -                      0x88000000 0x08000000>;
12635 +               reg = <0x00000000 0x08000000>,
12636 +                     <0x88000000 0x08000000>;
12637         };
12639         leds {
12640 diff --git a/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts b/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
12641 index c29950b43a95..0e273c598732 100644
12642 --- a/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
12643 +++ b/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
12644 @@ -21,8 +21,8 @@ chosen {
12646         memory@0 {
12647                 device_type = "memory";
12648 -               reg = <0x00000000 0x08000000
12649 -                      0x88000000 0x08000000>;
12650 +               reg = <0x00000000 0x08000000>,
12651 +                     <0x88000000 0x08000000>;
12652         };
12654         leds {
12655 diff --git a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
12656 index 2f2d2b0a6893..d857751ec507 100644
12657 --- a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
12658 +++ b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
12659 @@ -21,8 +21,8 @@ chosen {
12661         memory@0 {
12662                 device_type = "memory";
12663 -               reg = <0x00000000 0x08000000
12664 -                      0x88000000 0x08000000>;
12665 +               reg = <0x00000000 0x08000000>,
12666 +                     <0x88000000 0x08000000>;
12667         };
12669         spi {
12670 diff --git a/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts b/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
12671 index 0e349e39f608..8b1a05a0f1a1 100644
12672 --- a/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
12673 +++ b/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
12674 @@ -21,8 +21,8 @@ chosen {
12676         memory@0 {
12677                 device_type = "memory";
12678 -               reg = <0x00000000 0x08000000
12679 -                      0x88000000 0x08000000>;
12680 +               reg = <0x00000000 0x08000000>,
12681 +                     <0x88000000 0x08000000>;
12682         };
12684         spi {
12685 diff --git a/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts b/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
12686 index 8f1e565c3db4..6c6bb7b17d27 100644
12687 --- a/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
12688 +++ b/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
12689 @@ -21,8 +21,8 @@ chosen {
12691         memory {
12692                 device_type = "memory";
12693 -               reg = <0x00000000 0x08000000
12694 -                      0x88000000 0x08000000>;
12695 +               reg = <0x00000000 0x08000000>,
12696 +                     <0x88000000 0x08000000>;
12697         };
12699         leds {
12700 diff --git a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
12701 index ce888b1835d1..d29e7f80ea6a 100644
12702 --- a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
12703 +++ b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
12704 @@ -21,8 +21,8 @@ chosen {
12706         memory {
12707                 device_type = "memory";
12708 -               reg = <0x00000000 0x08000000
12709 -                      0x88000000 0x18000000>;
12710 +               reg = <0x00000000 0x08000000>,
12711 +                     <0x88000000 0x18000000>;
12712         };
12714         leds {
12715 diff --git a/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts b/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
12716 index ed8619b54d69..38fbefdf2e4e 100644
12717 --- a/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
12718 +++ b/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
12719 @@ -18,8 +18,8 @@ chosen {
12721         memory {
12722                 device_type = "memory";
12723 -               reg = <0x00000000 0x08000000
12724 -                      0x88000000 0x08000000>;
12725 +               reg = <0x00000000 0x08000000>,
12726 +                     <0x88000000 0x08000000>;
12727         };
12729         gpio-keys {
12730 diff --git a/arch/arm/boot/dts/bcm4709-netgear-r7000.dts b/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
12731 index 1f87993eae1d..7989a53597d4 100644
12732 --- a/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
12733 +++ b/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
12734 @@ -21,8 +21,8 @@ chosen {
12736         memory {
12737                 device_type = "memory";
12738 -               reg = <0x00000000 0x08000000
12739 -                      0x88000000 0x08000000>;
12740 +               reg = <0x00000000 0x08000000>,
12741 +                     <0x88000000 0x08000000>;
12742         };
12744         leds {
12745 diff --git a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
12746 index 6c6199a53d09..87b655be674c 100644
12747 --- a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
12748 +++ b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
12749 @@ -32,8 +32,8 @@ chosen {
12751         memory {
12752                 device_type = "memory";
12753 -               reg = <0x00000000 0x08000000
12754 -                      0x88000000 0x08000000>;
12755 +               reg = <0x00000000 0x08000000>,
12756 +                     <0x88000000 0x08000000>;
12757         };
12759         leds {
12760 diff --git a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
12761 index 911c65fbf251..e635a15041dd 100644
12762 --- a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
12763 +++ b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
12764 @@ -21,8 +21,8 @@ chosen {
12766         memory@0 {
12767                 device_type = "memory";
12768 -               reg = <0x00000000 0x08000000
12769 -                      0x88000000 0x08000000>;
12770 +               reg = <0x00000000 0x08000000>,
12771 +                     <0x88000000 0x08000000>;
12772         };
12774         nand: nand@18028000 {
12775 diff --git a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
12776 index 3725f2b0d60b..4b24b25389b5 100644
12777 --- a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
12778 +++ b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
12779 @@ -18,8 +18,8 @@ chosen {
12781         memory@0 {
12782                 device_type = "memory";
12783 -               reg = <0x00000000 0x08000000
12784 -                      0x88000000 0x08000000>;
12785 +               reg = <0x00000000 0x08000000>,
12786 +                     <0x88000000 0x08000000>;
12787         };
12789         gpio-keys {
12790 diff --git a/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts b/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
12791 index 50f7cd08cfbb..a6dc99955e19 100644
12792 --- a/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
12793 +++ b/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
12794 @@ -18,8 +18,8 @@ chosen {
12796         memory@0 {
12797                 device_type = "memory";
12798 -               reg = <0x00000000 0x08000000
12799 -                      0x88000000 0x18000000>;
12800 +               reg = <0x00000000 0x08000000>,
12801 +                     <0x88000000 0x18000000>;
12802         };
12804         leds {
12805 diff --git a/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts b/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
12806 index bcc420f85b56..ff98837bc0db 100644
12807 --- a/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
12808 +++ b/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
12809 @@ -18,8 +18,8 @@ chosen {
12811         memory@0 {
12812                 device_type = "memory";
12813 -               reg = <0x00000000 0x08000000
12814 -                      0x88000000 0x18000000>;
12815 +               reg = <0x00000000 0x08000000>,
12816 +                     <0x88000000 0x18000000>;
12817         };
12819         leds {
12820 diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts b/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
12821 index 4f8d777ae18d..452b8d0ab180 100644
12822 --- a/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
12823 +++ b/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
12824 @@ -18,8 +18,8 @@ chosen {
12826         memory {
12827                 device_type = "memory";
12828 -               reg = <0x00000000 0x08000000
12829 -                      0x88000000 0x18000000>;
12830 +               reg = <0x00000000 0x08000000>,
12831 +                     <0x88000000 0x18000000>;
12832         };
12834         leds {
12835 diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
12836 index e17e9a17fb00..b76bfe6efcd4 100644
12837 --- a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
12838 +++ b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
12839 @@ -18,8 +18,8 @@ chosen {
12841         memory@0 {
12842                 device_type = "memory";
12843 -               reg = <0x00000000 0x08000000
12844 -                      0x88000000 0x08000000>;
12845 +               reg = <0x00000000 0x08000000>,
12846 +                     <0x88000000 0x08000000>;
12847         };
12849         leds {
12850 diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts b/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
12851 index 60cc87ecc7ec..32d5a50578ec 100644
12852 --- a/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
12853 +++ b/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
12854 @@ -18,8 +18,8 @@ chosen {
12856         memory@0 {
12857                 device_type = "memory";
12858 -               reg = <0x00000000 0x08000000
12859 -                      0x88000000 0x18000000>;
12860 +               reg = <0x00000000 0x08000000>,
12861 +                     <0x88000000 0x18000000>;
12862         };
12864         leds {
12865 diff --git a/arch/arm/boot/dts/bcm47094-netgear-r8500.dts b/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
12866 index f42a1703f4ab..42097a4c2659 100644
12867 --- a/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
12868 +++ b/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
12869 @@ -18,8 +18,8 @@ chosen {
12871         memory@0 {
12872                 device_type = "memory";
12873 -               reg = <0x00000000 0x08000000
12874 -                      0x88000000 0x18000000>;
12875 +               reg = <0x00000000 0x08000000>,
12876 +                     <0x88000000 0x18000000>;
12877         };
12879         leds {
12880 diff --git a/arch/arm/boot/dts/bcm47094-phicomm-k3.dts b/arch/arm/boot/dts/bcm47094-phicomm-k3.dts
12881 index ac3a4483dcb3..a2566ad4619c 100644
12882 --- a/arch/arm/boot/dts/bcm47094-phicomm-k3.dts
12883 +++ b/arch/arm/boot/dts/bcm47094-phicomm-k3.dts
12884 @@ -15,8 +15,8 @@ / {
12886         memory@0 {
12887                 device_type = "memory";
12888 -               reg = <0x00000000 0x08000000
12889 -                      0x88000000 0x18000000>;
12890 +               reg = <0x00000000 0x08000000>,
12891 +                     <0x88000000 0x18000000>;
12892         };
12894         gpio-keys {
12895 diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
12896 index 3bf90d9e3335..a294a02f2d23 100644
12897 --- a/arch/arm/boot/dts/dra7-l4.dtsi
12898 +++ b/arch/arm/boot/dts/dra7-l4.dtsi
12899 @@ -1168,7 +1168,7 @@ timer2: timer@0 {
12900                         };
12901                 };
12903 -               target-module@34000 {                   /* 0x48034000, ap 7 46.0 */
12904 +               timer3_target: target-module@34000 {    /* 0x48034000, ap 7 46.0 */
12905                         compatible = "ti,sysc-omap4-timer", "ti,sysc";
12906                         reg = <0x34000 0x4>,
12907                               <0x34010 0x4>;
12908 @@ -1195,7 +1195,7 @@ timer3: timer@0 {
12909                         };
12910                 };
12912 -               target-module@36000 {                   /* 0x48036000, ap 9 4e.0 */
12913 +               timer4_target: target-module@36000 {    /* 0x48036000, ap 9 4e.0 */
12914                         compatible = "ti,sysc-omap4-timer", "ti,sysc";
12915                         reg = <0x36000 0x4>,
12916                               <0x36010 0x4>;
12917 diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
12918 index ce1194744f84..53d68786a61f 100644
12919 --- a/arch/arm/boot/dts/dra7.dtsi
12920 +++ b/arch/arm/boot/dts/dra7.dtsi
12921 @@ -46,6 +46,7 @@ aliases {
12923         timer {
12924                 compatible = "arm,armv7-timer";
12925 +               status = "disabled";    /* See ARM architected timer wrap erratum i940 */
12926                 interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
12927                              <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
12928                              <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
12929 @@ -1241,3 +1242,22 @@ timer@0 {
12930                 assigned-clock-parents = <&sys_32k_ck>;
12931         };
12932  };
12934 +/* Local timers, see ARM architected timer wrap erratum i940 */
12935 +&timer3_target {
12936 +       ti,no-reset-on-init;
12937 +       ti,no-idle;
12938 +       timer@0 {
12939 +               assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER3_CLKCTRL 24>;
12940 +               assigned-clock-parents = <&timer_sys_clk_div>;
12941 +       };
12944 +&timer4_target {
12945 +       ti,no-reset-on-init;
12946 +       ti,no-idle;
12947 +       timer@0 {
12948 +               assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER4_CLKCTRL 24>;
12949 +               assigned-clock-parents = <&timer_sys_clk_div>;
12950 +       };
12952 diff --git a/arch/arm/boot/dts/exynos4210-i9100.dts b/arch/arm/boot/dts/exynos4210-i9100.dts
12953 index 304a8ee2364c..d98c78207aaf 100644
12954 --- a/arch/arm/boot/dts/exynos4210-i9100.dts
12955 +++ b/arch/arm/boot/dts/exynos4210-i9100.dts
12956 @@ -136,7 +136,7 @@ battery@36 {
12957                         compatible = "maxim,max17042";
12959                         interrupt-parent = <&gpx2>;
12960 -                       interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
12961 +                       interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
12963                         pinctrl-0 = <&max17042_fuel_irq>;
12964                         pinctrl-names = "default";
12965 diff --git a/arch/arm/boot/dts/exynos4412-midas.dtsi b/arch/arm/boot/dts/exynos4412-midas.dtsi
12966 index 111c32bae02c..fc77c1bfd844 100644
12967 --- a/arch/arm/boot/dts/exynos4412-midas.dtsi
12968 +++ b/arch/arm/boot/dts/exynos4412-midas.dtsi
12969 @@ -173,7 +173,7 @@ i2c_max77693: i2c-gpio-1 {
12970                 pmic@66 {
12971                         compatible = "maxim,max77693";
12972                         interrupt-parent = <&gpx1>;
12973 -                       interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
12974 +                       interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
12975                         pinctrl-names = "default";
12976                         pinctrl-0 = <&max77693_irq>;
12977                         reg = <0x66>;
12978 @@ -221,7 +221,7 @@ i2c_max77693_fuel: i2c-gpio-3 {
12979                 fuel-gauge@36 {
12980                         compatible = "maxim,max17047";
12981                         interrupt-parent = <&gpx2>;
12982 -                       interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
12983 +                       interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
12984                         pinctrl-names = "default";
12985                         pinctrl-0 = <&max77693_fuel_irq>;
12986                         reg = <0x36>;
12987 @@ -665,7 +665,7 @@ &i2c_7 {
12988         max77686: pmic@9 {
12989                 compatible = "maxim,max77686";
12990                 interrupt-parent = <&gpx0>;
12991 -               interrupts = <7 IRQ_TYPE_NONE>;
12992 +               interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
12993                 pinctrl-0 = <&max77686_irq>;
12994                 pinctrl-names = "default";
12995                 reg = <0x09>;
12996 diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
12997 index 2b20d9095d9f..eebe6a3952ce 100644
12998 --- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
12999 +++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
13000 @@ -278,7 +278,7 @@ usb3503: usb-hub@8 {
13001         max77686: pmic@9 {
13002                 compatible = "maxim,max77686";
13003                 interrupt-parent = <&gpx3>;
13004 -               interrupts = <2 IRQ_TYPE_NONE>;
13005 +               interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
13006                 pinctrl-names = "default";
13007                 pinctrl-0 = <&max77686_irq>;
13008                 reg = <0x09>;
13009 diff --git a/arch/arm/boot/dts/exynos4412-p4note.dtsi b/arch/arm/boot/dts/exynos4412-p4note.dtsi
13010 index b2f9d5448a18..9e750890edb8 100644
13011 --- a/arch/arm/boot/dts/exynos4412-p4note.dtsi
13012 +++ b/arch/arm/boot/dts/exynos4412-p4note.dtsi
13013 @@ -146,7 +146,7 @@ fuel-gauge@36 {
13014                         pinctrl-0 = <&fuel_alert_irq>;
13015                         pinctrl-names = "default";
13016                         interrupt-parent = <&gpx2>;
13017 -                       interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
13018 +                       interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
13019                         maxim,rsns-microohm = <10000>;
13020                         maxim,over-heat-temp = <600>;
13021                         maxim,over-volt = <4300>;
13022 @@ -322,7 +322,7 @@ &i2c_7 {
13023         max77686: pmic@9 {
13024                 compatible = "maxim,max77686";
13025                 interrupt-parent = <&gpx0>;
13026 -               interrupts = <7 IRQ_TYPE_NONE>;
13027 +               interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
13028                 pinctrl-0 = <&max77686_irq>;
13029                 pinctrl-names = "default";
13030                 reg = <0x09>;
13031 diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
13032 index 8b5a79a8720c..39bbe18145cf 100644
13033 --- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
13034 +++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
13035 @@ -134,7 +134,7 @@ max77686: pmic@9 {
13036                 compatible = "maxim,max77686";
13037                 reg = <0x09>;
13038                 interrupt-parent = <&gpx3>;
13039 -               interrupts = <2 IRQ_TYPE_NONE>;
13040 +               interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
13041                 pinctrl-names = "default";
13042                 pinctrl-0 = <&max77686_irq>;
13043                 #clock-cells = <1>;
13044 diff --git a/arch/arm/boot/dts/exynos5250-snow-common.dtsi b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
13045 index 6635f6184051..2335c4687349 100644
13046 --- a/arch/arm/boot/dts/exynos5250-snow-common.dtsi
13047 +++ b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
13048 @@ -292,7 +292,7 @@ &i2c_0 {
13049         max77686: pmic@9 {
13050                 compatible = "maxim,max77686";
13051                 interrupt-parent = <&gpx3>;
13052 -               interrupts = <2 IRQ_TYPE_NONE>;
13053 +               interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
13054                 pinctrl-names = "default";
13055                 pinctrl-0 = <&max77686_irq>;
13056                 wakeup-source;
13057 diff --git a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
13058 index 0cda654371ae..56ee02ceba7d 100644
13059 --- a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
13060 +++ b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
13061 @@ -575,7 +575,7 @@ fuelgauge: max17048@36 {
13062                         maxim,rcomp = /bits/ 8 <0x4d>;
13064                         interrupt-parent = <&msmgpio>;
13065 -                       interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
13066 +                       interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
13068                         pinctrl-names = "default";
13069                         pinctrl-0 = <&fuelgauge_pin>;
13070 diff --git a/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts b/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
13071 index a0f7f461f48c..2dadb836c5fe 100644
13072 --- a/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
13073 +++ b/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
13074 @@ -717,7 +717,7 @@ fuelgauge@36 {
13075                         maxim,rcomp = /bits/ 8 <0x56>;
13077                         interrupt-parent = <&pma8084_gpios>;
13078 -                       interrupts = <21 IRQ_TYPE_EDGE_FALLING>;
13079 +                       interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
13081                         pinctrl-names = "default";
13082                         pinctrl-0 = <&fuelgauge_pin>;
13083 diff --git a/arch/arm/boot/dts/r8a7790-lager.dts b/arch/arm/boot/dts/r8a7790-lager.dts
13084 index 09a152b91557..1d6f0c5d02e9 100644
13085 --- a/arch/arm/boot/dts/r8a7790-lager.dts
13086 +++ b/arch/arm/boot/dts/r8a7790-lager.dts
13087 @@ -53,6 +53,9 @@ aliases {
13088                 i2c11 = &i2cexio1;
13089                 i2c12 = &i2chdmi;
13090                 i2c13 = &i2cpwr;
13091 +               mmc0 = &mmcif1;
13092 +               mmc1 = &sdhi0;
13093 +               mmc2 = &sdhi2;
13094         };
13096         chosen {
13097 diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
13098 index f603cba5441f..6af1727b8269 100644
13099 --- a/arch/arm/boot/dts/r8a7791-koelsch.dts
13100 +++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
13101 @@ -53,6 +53,9 @@ aliases {
13102                 i2c12 = &i2cexio1;
13103                 i2c13 = &i2chdmi;
13104                 i2c14 = &i2cexio4;
13105 +               mmc0 = &sdhi0;
13106 +               mmc1 = &sdhi1;
13107 +               mmc2 = &sdhi2;
13108         };
13110         chosen {
13111 diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts
13112 index c6d563fb7ec7..bf51e29c793a 100644
13113 --- a/arch/arm/boot/dts/r8a7791-porter.dts
13114 +++ b/arch/arm/boot/dts/r8a7791-porter.dts
13115 @@ -28,6 +28,8 @@ aliases {
13116                 serial0 = &scif0;
13117                 i2c9 = &gpioi2c2;
13118                 i2c10 = &i2chdmi;
13119 +               mmc0 = &sdhi0;
13120 +               mmc1 = &sdhi2;
13121         };
13123         chosen {
13124 diff --git a/arch/arm/boot/dts/r8a7793-gose.dts b/arch/arm/boot/dts/r8a7793-gose.dts
13125 index abf487e8fe0f..2b59a0491350 100644
13126 --- a/arch/arm/boot/dts/r8a7793-gose.dts
13127 +++ b/arch/arm/boot/dts/r8a7793-gose.dts
13128 @@ -49,6 +49,9 @@ aliases {
13129                 i2c10 = &gpioi2c4;
13130                 i2c11 = &i2chdmi;
13131                 i2c12 = &i2cexio4;
13132 +               mmc0 = &sdhi0;
13133 +               mmc1 = &sdhi1;
13134 +               mmc2 = &sdhi2;
13135         };
13137         chosen {
13138 diff --git a/arch/arm/boot/dts/r8a7794-alt.dts b/arch/arm/boot/dts/r8a7794-alt.dts
13139 index 3f1cc5bbf329..32025986b3b9 100644
13140 --- a/arch/arm/boot/dts/r8a7794-alt.dts
13141 +++ b/arch/arm/boot/dts/r8a7794-alt.dts
13142 @@ -19,6 +19,9 @@ aliases {
13143                 i2c10 = &gpioi2c4;
13144                 i2c11 = &i2chdmi;
13145                 i2c12 = &i2cexio4;
13146 +               mmc0 = &mmcif0;
13147 +               mmc1 = &sdhi0;
13148 +               mmc2 = &sdhi1;
13149         };
13151         chosen {
13152 diff --git a/arch/arm/boot/dts/r8a7794-silk.dts b/arch/arm/boot/dts/r8a7794-silk.dts
13153 index 677596f6c9c9..af066ee5e275 100644
13154 --- a/arch/arm/boot/dts/r8a7794-silk.dts
13155 +++ b/arch/arm/boot/dts/r8a7794-silk.dts
13156 @@ -31,6 +31,8 @@ aliases {
13157                 serial0 = &scif2;
13158                 i2c9 = &gpioi2c1;
13159                 i2c10 = &i2chdmi;
13160 +               mmc0 = &mmcif0;
13161 +               mmc1 = &sdhi1;
13162         };
13164         chosen {
13165 diff --git a/arch/arm/boot/dts/s5pv210-fascinate4g.dts b/arch/arm/boot/dts/s5pv210-fascinate4g.dts
13166 index ca064359dd30..b47d8300e536 100644
13167 --- a/arch/arm/boot/dts/s5pv210-fascinate4g.dts
13168 +++ b/arch/arm/boot/dts/s5pv210-fascinate4g.dts
13169 @@ -115,7 +115,7 @@ &fg {
13170         compatible = "maxim,max77836-battery";
13172         interrupt-parent = <&gph3>;
13173 -       interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
13174 +       interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
13176         pinctrl-names = "default";
13177         pinctrl-0 = <&fg_irq>;
13178 diff --git a/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi b/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
13179 index cb3677f0a1cb..b580397ede83 100644
13180 --- a/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
13181 +++ b/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
13182 @@ -8,37 +8,43 @@
13183  / {
13184         soc {
13185                 i2c@80128000 {
13186 -                       /* Marked:
13187 -                        * 129
13188 -                        * M35
13189 -                        * L3GD20
13190 -                        */
13191 -                       l3gd20@6a {
13192 -                               /* Gyroscope */
13193 -                               compatible = "st,l3gd20";
13194 -                               status = "disabled";
13195 +                       accelerometer@19 {
13196 +                               compatible = "st,lsm303dlhc-accel";
13197                                 st,drdy-int-pin = <1>;
13198 -                               drive-open-drain;
13199 -                               reg = <0x6a>; // 0x6a or 0x6b
13200 +                               reg = <0x19>;
13201                                 vdd-supply = <&ab8500_ldo_aux1_reg>;
13202                                 vddio-supply = <&db8500_vsmps2_reg>;
13203 +                               interrupt-parent = <&gpio2>;
13204 +                               interrupts = <18 IRQ_TYPE_EDGE_RISING>,
13205 +                                            <19 IRQ_TYPE_EDGE_RISING>;
13206 +                               pinctrl-names = "default";
13207 +                               pinctrl-0 = <&accel_tvk_mode>;
13208                         };
13209 -                       /*
13210 -                        * Marked:
13211 -                        * 2122
13212 -                        * C3H
13213 -                        * DQEEE
13214 -                        * LIS3DH?
13215 -                        */
13216 -                       lis3dh@18 {
13217 -                               /* Accelerometer */
13218 -                               compatible = "st,lis3dh-accel";
13219 +                       magnetometer@1e {
13220 +                               compatible = "st,lsm303dlm-magn";
13221                                 st,drdy-int-pin = <1>;
13222 -                               reg = <0x18>;
13223 +                               reg = <0x1e>;
13224                                 vdd-supply = <&ab8500_ldo_aux1_reg>;
13225                                 vddio-supply = <&db8500_vsmps2_reg>;
13226 +                               // This interrupt is not properly working with the driver
13227 +                               // interrupt-parent = <&gpio1>;
13228 +                               // interrupts = <0 IRQ_TYPE_EDGE_RISING>;
13229                                 pinctrl-names = "default";
13230 -                               pinctrl-0 = <&accel_tvk_mode>;
13231 +                               pinctrl-0 = <&magn_tvk_mode>;
13232 +                       };
13233 +                       gyroscope@68 {
13234 +                               /* Gyroscope */
13235 +                               compatible = "st,l3g4200d-gyro";
13236 +                               reg = <0x68>;
13237 +                               vdd-supply = <&ab8500_ldo_aux1_reg>;
13238 +                               vddio-supply = <&db8500_vsmps2_reg>;
13239 +                       };
13240 +                       pressure@5c {
13241 +                               /* Barometer/pressure sensor */
13242 +                               compatible = "st,lps001wp-press";
13243 +                               reg = <0x5c>;
13244 +                               vdd-supply = <&ab8500_ldo_aux1_reg>;
13245 +                               vddio-supply = <&db8500_vsmps2_reg>;
13246                         };
13247                 };
13249 @@ -54,5 +60,26 @@ panel {
13250                                 };
13251                         };
13252                 };
13254 +               pinctrl {
13255 +                       accelerometer {
13256 +                               accel_tvk_mode: accel_tvk {
13257 +                                       /* Accelerometer interrupt lines 1 & 2 */
13258 +                                       tvk_cfg {
13259 +                                               pins = "GPIO82_C1", "GPIO83_D3";
13260 +                                               ste,config = <&gpio_in_pd>;
13261 +                                       };
13262 +                               };
13263 +                       };
13264 +                       magnetometer {
13265 +                               magn_tvk_mode: magn_tvk {
13266 +                                       /* GPIO 32 used for DRDY, pull this down */
13267 +                                       tvk_cfg {
13268 +                                               pins = "GPIO32_V2";
13269 +                                               ste,config = <&gpio_in_pd>;
13270 +                                       };
13271 +                               };
13272 +                       };
13273 +               };
13274         };
13275  };
13276 diff --git a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
13277 index 7b4249ed1983..060baa8b7e9d 100644
13278 --- a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
13279 +++ b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
13280 @@ -1891,10 +1891,15 @@ pins2 {
13281         usart2_idle_pins_c: usart2-idle-2 {
13282                 pins1 {
13283                         pinmux = <STM32_PINMUX('D', 5, ANALOG)>, /* USART2_TX */
13284 -                                <STM32_PINMUX('D', 4, ANALOG)>, /* USART2_RTS */
13285                                  <STM32_PINMUX('D', 3, ANALOG)>; /* USART2_CTS_NSS */
13286                 };
13287                 pins2 {
13288 +                       pinmux = <STM32_PINMUX('D', 4, AF7)>; /* USART2_RTS */
13289 +                       bias-disable;
13290 +                       drive-push-pull;
13291 +                       slew-rate = <3>;
13292 +               };
13293 +               pins3 {
13294                         pinmux = <STM32_PINMUX('D', 6, AF7)>; /* USART2_RX */
13295                         bias-disable;
13296                 };
13297 @@ -1940,10 +1945,15 @@ pins2 {
13298         usart3_idle_pins_b: usart3-idle-1 {
13299                 pins1 {
13300                         pinmux = <STM32_PINMUX('B', 10, ANALOG)>, /* USART3_TX */
13301 -                                <STM32_PINMUX('G', 8, ANALOG)>, /* USART3_RTS */
13302                                  <STM32_PINMUX('I', 10, ANALOG)>; /* USART3_CTS_NSS */
13303                 };
13304                 pins2 {
13305 +                       pinmux = <STM32_PINMUX('G', 8, AF8)>; /* USART3_RTS */
13306 +                       bias-disable;
13307 +                       drive-push-pull;
13308 +                       slew-rate = <0>;
13309 +               };
13310 +               pins3 {
13311                         pinmux = <STM32_PINMUX('B', 12, AF8)>; /* USART3_RX */
13312                         bias-disable;
13313                 };
13314 @@ -1976,10 +1986,15 @@ pins2 {
13315         usart3_idle_pins_c: usart3-idle-2 {
13316                 pins1 {
13317                         pinmux = <STM32_PINMUX('B', 10, ANALOG)>, /* USART3_TX */
13318 -                                <STM32_PINMUX('G', 8, ANALOG)>, /* USART3_RTS */
13319                                  <STM32_PINMUX('B', 13, ANALOG)>; /* USART3_CTS_NSS */
13320                 };
13321                 pins2 {
13322 +                       pinmux = <STM32_PINMUX('G', 8, AF8)>; /* USART3_RTS */
13323 +                       bias-disable;
13324 +                       drive-push-pull;
13325 +                       slew-rate = <0>;
13326 +               };
13327 +               pins3 {
13328                         pinmux = <STM32_PINMUX('B', 12, AF8)>; /* USART3_RX */
13329                         bias-disable;
13330                 };
13331 diff --git a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
13332 index d3b99535d755..f9c0f6884cc1 100644
13333 --- a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
13334 +++ b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
13335 @@ -448,7 +448,7 @@ touchscreen@4c {
13337                         reset-gpios = <&gpio TEGRA_GPIO(Q, 7) GPIO_ACTIVE_LOW>;
13339 -                       avdd-supply = <&vdd_3v3_sys>;
13340 +                       vdda-supply = <&vdd_3v3_sys>;
13341                         vdd-supply  = <&vdd_3v3_sys>;
13342                 };
13344 diff --git a/arch/arm/boot/dts/uniphier-pxs2.dtsi b/arch/arm/boot/dts/uniphier-pxs2.dtsi
13345 index b0b15c97306b..e81e5937a60a 100644
13346 --- a/arch/arm/boot/dts/uniphier-pxs2.dtsi
13347 +++ b/arch/arm/boot/dts/uniphier-pxs2.dtsi
13348 @@ -583,7 +583,7 @@ eth: ethernet@65000000 {
13349                         clocks = <&sys_clk 6>;
13350                         reset-names = "ether";
13351                         resets = <&sys_rst 6>;
13352 -                       phy-mode = "rgmii";
13353 +                       phy-mode = "rgmii-id";
13354                         local-mac-address = [00 00 00 00 00 00];
13355                         socionext,syscon-phy-mode = <&soc_glue 0>;
13357 diff --git a/arch/arm/crypto/blake2s-core.S b/arch/arm/crypto/blake2s-core.S
13358 index bed897e9a181..86345751bbf3 100644
13359 --- a/arch/arm/crypto/blake2s-core.S
13360 +++ b/arch/arm/crypto/blake2s-core.S
13361 @@ -8,6 +8,7 @@
13362   */
13364  #include <linux/linkage.h>
13365 +#include <asm/assembler.h>
13367         // Registers used to hold message words temporarily.  There aren't
13368         // enough ARM registers to hold the whole message block, so we have to
13369 @@ -38,6 +39,23 @@
13370  #endif
13371  .endm
13373 +.macro _le32_bswap     a, tmp
13374 +#ifdef __ARMEB__
13375 +       rev_l           \a, \tmp
13376 +#endif
13377 +.endm
13379 +.macro _le32_bswap_8x  a, b, c, d, e, f, g, h,  tmp
13380 +       _le32_bswap     \a, \tmp
13381 +       _le32_bswap     \b, \tmp
13382 +       _le32_bswap     \c, \tmp
13383 +       _le32_bswap     \d, \tmp
13384 +       _le32_bswap     \e, \tmp
13385 +       _le32_bswap     \f, \tmp
13386 +       _le32_bswap     \g, \tmp
13387 +       _le32_bswap     \h, \tmp
13388 +.endm
13390  // Execute a quarter-round of BLAKE2s by mixing two columns or two diagonals.
13391  // (a0, b0, c0, d0) and (a1, b1, c1, d1) give the registers containing the two
13392  // columns/diagonals.  s0-s1 are the word offsets to the message words the first
13393 @@ -180,8 +198,10 @@ ENTRY(blake2s_compress_arch)
13394         tst             r1, #3
13395         bne             .Lcopy_block_misaligned
13396         ldmia           r1!, {r2-r9}
13397 +       _le32_bswap_8x  r2, r3, r4, r5, r6, r7, r8, r9,  r14
13398         stmia           r12!, {r2-r9}
13399         ldmia           r1!, {r2-r9}
13400 +       _le32_bswap_8x  r2, r3, r4, r5, r6, r7, r8, r9,  r14
13401         stmia           r12, {r2-r9}
13402  .Lcopy_block_done:
13403         str             r1, [sp, #68]           // Update message pointer
13404 @@ -268,6 +288,7 @@ ENTRY(blake2s_compress_arch)
13405  1:
13406  #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
13407         ldr             r3, [r1], #4
13408 +       _le32_bswap     r3, r4
13409  #else
13410         ldrb            r3, [r1, #0]
13411         ldrb            r4, [r1, #1]
13412 diff --git a/arch/arm/crypto/curve25519-core.S b/arch/arm/crypto/curve25519-core.S
13413 index be18af52e7dc..b697fa5d059a 100644
13414 --- a/arch/arm/crypto/curve25519-core.S
13415 +++ b/arch/arm/crypto/curve25519-core.S
13416 @@ -10,8 +10,8 @@
13417  #include <linux/linkage.h>
13419  .text
13420 -.fpu neon
13421  .arch armv7-a
13422 +.fpu neon
13423  .align 4
13425  ENTRY(curve25519_neon)
13426 diff --git a/arch/arm/crypto/poly1305-glue.c b/arch/arm/crypto/poly1305-glue.c
13427 index 3023c1acfa19..c31bd8f7c092 100644
13428 --- a/arch/arm/crypto/poly1305-glue.c
13429 +++ b/arch/arm/crypto/poly1305-glue.c
13430 @@ -29,7 +29,7 @@ void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit)
13432  static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
13434 -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
13435 +void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
13437         poly1305_init_arm(&dctx->h, key);
13438         dctx->s[0] = get_unaligned_le32(key + 16);
13439 diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
13440 index be8050b0c3df..70993af22d80 100644
13441 --- a/arch/arm/kernel/asm-offsets.c
13442 +++ b/arch/arm/kernel/asm-offsets.c
13443 @@ -24,6 +24,7 @@
13444  #include <asm/vdso_datapage.h>
13445  #include <asm/hardware/cache-l2x0.h>
13446  #include <linux/kbuild.h>
13447 +#include <linux/arm-smccc.h>
13448  #include "signal.h"
13450  /*
13451 @@ -148,6 +149,8 @@ int main(void)
13452    DEFINE(SLEEP_SAVE_SP_PHYS,   offsetof(struct sleep_save_sp, save_ptr_stash_phys));
13453    DEFINE(SLEEP_SAVE_SP_VIRT,   offsetof(struct sleep_save_sp, save_ptr_stash));
13454  #endif
13455 +  DEFINE(ARM_SMCCC_QUIRK_ID_OFFS,      offsetof(struct arm_smccc_quirk, id));
13456 +  DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS,   offsetof(struct arm_smccc_quirk, state));
13457    BLANK();
13458    DEFINE(DMA_BIDIRECTIONAL,    DMA_BIDIRECTIONAL);
13459    DEFINE(DMA_TO_DEVICE,                DMA_TO_DEVICE);
13460 diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
13461 index 08660ae9dcbc..b1423fb130ea 100644
13462 --- a/arch/arm/kernel/hw_breakpoint.c
13463 +++ b/arch/arm/kernel/hw_breakpoint.c
13464 @@ -886,7 +886,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
13465                         info->trigger = addr;
13466                         pr_debug("breakpoint fired: address = 0x%x\n", addr);
13467                         perf_bp_event(bp, regs);
13468 -                       if (!bp->overflow_handler)
13469 +                       if (is_default_overflow_handler(bp))
13470                                 enable_single_step(bp, addr);
13471                         goto unlock;
13472                 }
13473 diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S
13474 index 00664c78faca..931df62a7831 100644
13475 --- a/arch/arm/kernel/smccc-call.S
13476 +++ b/arch/arm/kernel/smccc-call.S
13477 @@ -3,7 +3,9 @@
13478   * Copyright (c) 2015, Linaro Limited
13479   */
13480  #include <linux/linkage.h>
13481 +#include <linux/arm-smccc.h>
13483 +#include <asm/asm-offsets.h>
13484  #include <asm/opcodes-sec.h>
13485  #include <asm/opcodes-virt.h>
13486  #include <asm/unwind.h>
13487 @@ -27,7 +29,14 @@ UNWIND(      .fnstart)
13488  UNWIND(        .save   {r4-r7})
13489         ldm     r12, {r4-r7}
13490         \instr
13491 -       pop     {r4-r7}
13492 +       ldr     r4, [sp, #36]
13493 +       cmp     r4, #0
13494 +       beq     1f                      // No quirk structure
13495 +       ldr     r5, [r4, #ARM_SMCCC_QUIRK_ID_OFFS]
13496 +       cmp     r5, #ARM_SMCCC_QUIRK_QCOM_A6
13497 +       bne     1f                      // No quirk present
13498 +       str     r6, [r4, #ARM_SMCCC_QUIRK_STATE_OFFS]
13499 +1:     pop     {r4-r7}
13500         ldr     r12, [sp, #(4 * 4)]
13501         stm     r12, {r0-r3}
13502         bx      lr
13503 diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
13504 index 24bd20564be7..43f0a3ebf390 100644
13505 --- a/arch/arm/kernel/suspend.c
13506 +++ b/arch/arm/kernel/suspend.c
13507 @@ -1,4 +1,5 @@
13508  // SPDX-License-Identifier: GPL-2.0
13509 +#include <linux/ftrace.h>
13510  #include <linux/init.h>
13511  #include <linux/slab.h>
13512  #include <linux/mm_types.h>
13513 @@ -25,6 +26,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
13514         if (!idmap_pgd)
13515                 return -EINVAL;
13517 +       /*
13518 +        * Function graph tracer state gets inconsistent when the kernel
13519 +        * calls functions that never return (aka suspend finishers), hence
13520 +        * disable graph tracing during their execution.
13521 +        */
13522 +       pause_graph_tracing();
13524         /*
13525          * Provide a temporary page table with an identity mapping for
13526          * the MMU-enable code, required for resuming.  On successful
13527 @@ -32,6 +40,9 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
13528          * back to the correct page tables.
13529          */
13530         ret = __cpu_suspend(arg, fn, __mpidr);
13532 +       unpause_graph_tracing();
13534         if (ret == 0) {
13535                 cpu_switch_mm(mm->pgd, mm);
13536                 local_flush_bp_all();
13537 @@ -45,7 +56,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
13538  int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
13540         u32 __mpidr = cpu_logical_map(smp_processor_id());
13541 -       return __cpu_suspend(arg, fn, __mpidr);
13542 +       int ret;
13544 +       pause_graph_tracing();
13545 +       ret = __cpu_suspend(arg, fn, __mpidr);
13546 +       unpause_graph_tracing();
13548 +       return ret;
13550  #define        idmap_pgd       NULL
13551  #endif
13552 diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
13553 index dcc1191291a2..24a700535747 100644
13554 --- a/arch/arm/tools/syscall.tbl
13555 +++ b/arch/arm/tools/syscall.tbl
13556 @@ -456,3 +456,7 @@
13557  440    common  process_madvise                 sys_process_madvise
13558  441    common  epoll_pwait2                    sys_epoll_pwait2
13559  442    common  mount_setattr                   sys_mount_setattr
13560 +443    common  futex_wait                      sys_futex_wait
13561 +444    common  futex_wake                      sys_futex_wake
13562 +445    common  futex_waitv                     sys_futex_waitv
13563 +446    common  futex_requeue                   sys_futex_requeue
13564 diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts
13565 index 6e4ad66ff536..8d5d368dbe90 100644
13566 --- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts
13567 +++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts
13568 @@ -65,6 +65,7 @@ port@3 {
13569         port@7 {
13570                 label = "sw";
13571                 reg = <7>;
13572 +               phy-mode = "rgmii";
13574                 fixed-link {
13575                         speed = <1000>;
13576 diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
13577 index 9354077f74cd..9e799328c6db 100644
13578 --- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
13579 +++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
13580 @@ -131,7 +131,7 @@ usb@d000 {
13581                         status = "disabled";
13582                 };
13584 -               ethernet-switch@80000 {
13585 +               bus@80000 {
13586                         compatible = "simple-bus";
13587                         #size-cells = <1>;
13588                         #address-cells = <1>;
13589 diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
13590 index 0d38327043f8..cd3c3edd48fa 100644
13591 --- a/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
13592 +++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
13593 @@ -28,6 +28,10 @@ &bq25895 {
13594         ti,termination-current = <144000>;  /* uA */
13595  };
13597 +&buck3_reg {
13598 +       regulator-always-on;
13601  &proximity {
13602         proximity-near-level = <25>;
13603  };
13604 diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
13605 index 7a2df148c6a3..456dcd4a7793 100644
13606 --- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
13607 +++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
13608 @@ -156,7 +156,8 @@ uart1: serial@12200 {
13609                         };
13611                         nb_periph_clk: nb-periph-clk@13000 {
13612 -                               compatible = "marvell,armada-3700-periph-clock-nb";
13613 +                               compatible = "marvell,armada-3700-periph-clock-nb",
13614 +                                            "syscon";
13615                                 reg = <0x13000 0x100>;
13616                                 clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
13617                                 <&tbg 3>, <&xtalclk>;
13618 diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
13619 index 6dffada2e66b..28aa634c9780 100644
13620 --- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
13621 +++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
13622 @@ -294,7 +294,7 @@ &pwm0 {
13624  &pwrap {
13625         /* Only MT8173 E1 needs USB power domain */
13626 -       power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
13627 +       power-domains = <&spm MT8173_POWER_DOMAIN_USB>;
13629         pmic: mt6397 {
13630                 compatible = "mediatek,mt6397";
13631 diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
13632 index 7fa870e4386a..ecb37a7e6870 100644
13633 --- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
13634 +++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
13635 @@ -1235,7 +1235,7 @@ dsi1: dsi@1401c000 {
13636                                  <&mmsys CLK_MM_DSI1_DIGITAL>,
13637                                  <&mipi_tx1>;
13638                         clock-names = "engine", "digital", "hs";
13639 -                       phy = <&mipi_tx1>;
13640 +                       phys = <&mipi_tx1>;
13641                         phy-names = "dphy";
13642                         status = "disabled";
13643                 };
13644 diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
13645 index 80519a145f13..16f4b1fc0fb9 100644
13646 --- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
13647 +++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
13648 @@ -983,6 +983,9 @@ mmsys: syscon@14000000 {
13649                         compatible = "mediatek,mt8183-mmsys", "syscon";
13650                         reg = <0 0x14000000 0 0x1000>;
13651                         #clock-cells = <1>;
13652 +                       mboxes = <&gce 0 CMDQ_THR_PRIO_HIGHEST>,
13653 +                                <&gce 1 CMDQ_THR_PRIO_HIGHEST>;
13654 +                       mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0 0x1000>;
13655                 };
13657                 ovl0: ovl@14008000 {
13658 @@ -1058,6 +1061,7 @@ ccorr0: ccorr@1400f000 {
13659                         interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_LOW>;
13660                         power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
13661                         clocks = <&mmsys CLK_MM_DISP_CCORR0>;
13662 +                       mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0xf000 0x1000>;
13663                 };
13665                 aal0: aal@14010000 {
13666 @@ -1067,6 +1071,7 @@ aal0: aal@14010000 {
13667                         interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_LOW>;
13668                         power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
13669                         clocks = <&mmsys CLK_MM_DISP_AAL0>;
13670 +                       mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0 0x1000>;
13671                 };
13673                 gamma0: gamma@14011000 {
13674 @@ -1075,6 +1080,7 @@ gamma0: gamma@14011000 {
13675                         interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_LOW>;
13676                         power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
13677                         clocks = <&mmsys CLK_MM_DISP_GAMMA0>;
13678 +                       mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x1000 0x1000>;
13679                 };
13681                 dither0: dither@14012000 {
13682 @@ -1083,6 +1089,7 @@ dither0: dither@14012000 {
13683                         interrupts = <GIC_SPI 235 IRQ_TYPE_LEVEL_LOW>;
13684                         power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
13685                         clocks = <&mmsys CLK_MM_DISP_DITHER0>;
13686 +                       mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x2000 0x1000>;
13687                 };
13689                 dsi0: dsi@14014000 {
13690 diff --git a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
13691 index 63fd70086bb8..9f27e7ed5e22 100644
13692 --- a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
13693 +++ b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
13694 @@ -56,7 +56,7 @@ &i2c0 {
13695         tca6416: gpio@20 {
13696                 compatible = "ti,tca6416";
13697                 reg = <0x20>;
13698 -               reset-gpios = <&pio 65 GPIO_ACTIVE_HIGH>;
13699 +               reset-gpios = <&pio 65 GPIO_ACTIVE_LOW>;
13700                 pinctrl-names = "default";
13701                 pinctrl-0 = <&tca6416_pins>;
13703 diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
13704 index 07c8b2c926c0..b8f7cf5cbdab 100644
13705 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
13706 +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
13707 @@ -22,9 +22,11 @@ charger-thermal {
13708                         thermal-sensors = <&pm6150_adc_tm 1>;
13710                         trips {
13711 -                               temperature = <125000>;
13712 -                               hysteresis = <1000>;
13713 -                               type = "critical";
13714 +                               charger-crit {
13715 +                                       temperature = <125000>;
13716 +                                       hysteresis = <1000>;
13717 +                                       type = "critical";
13718 +                               };
13719                         };
13720                 };
13721         };
13722 @@ -768,17 +770,17 @@ &sdhc_2 {
13723  };
13725  &spi0 {
13726 -       pinctrl-0 = <&qup_spi0_cs_gpio>;
13727 +       pinctrl-0 = <&qup_spi0_cs_gpio_init_high>, <&qup_spi0_cs_gpio>;
13728         cs-gpios = <&tlmm 37 GPIO_ACTIVE_LOW>;
13729  };
13731  &spi6 {
13732 -       pinctrl-0 = <&qup_spi6_cs_gpio>;
13733 +       pinctrl-0 = <&qup_spi6_cs_gpio_init_high>, <&qup_spi6_cs_gpio>;
13734         cs-gpios = <&tlmm 62 GPIO_ACTIVE_LOW>;
13735  };
13737  ap_spi_fp: &spi10 {
13738 -       pinctrl-0 = <&qup_spi10_cs_gpio>;
13739 +       pinctrl-0 = <&qup_spi10_cs_gpio_init_high>, <&qup_spi10_cs_gpio>;
13740         cs-gpios = <&tlmm 89 GPIO_ACTIVE_LOW>;
13742         cros_ec_fp: ec@0 {
13743 @@ -1339,6 +1341,27 @@ pinconf {
13744                 };
13745         };
13747 +       qup_spi0_cs_gpio_init_high: qup-spi0-cs-gpio-init-high {
13748 +               pinconf {
13749 +                       pins = "gpio37";
13750 +                       output-high;
13751 +               };
13752 +       };
13754 +       qup_spi6_cs_gpio_init_high: qup-spi6-cs-gpio-init-high {
13755 +               pinconf {
13756 +                       pins = "gpio62";
13757 +                       output-high;
13758 +               };
13759 +       };
13761 +       qup_spi10_cs_gpio_init_high: qup-spi10-cs-gpio-init-high {
13762 +               pinconf {
13763 +                       pins = "gpio89";
13764 +                       output-high;
13765 +               };
13766 +       };
13768         qup_uart3_sleep: qup-uart3-sleep {
13769                 pinmux {
13770                         pins = "gpio38", "gpio39",
13771 diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
13772 index c4ac6f5dc008..96d36b38f269 100644
13773 --- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
13774 +++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
13775 @@ -1015,7 +1015,7 @@ swm: swm@c85 {
13776                 left_spkr: wsa8810-left{
13777                         compatible = "sdw10217201000";
13778                         reg = <0 1>;
13779 -                       powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
13780 +                       powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
13781                         #thermal-sensor-cells = <0>;
13782                         sound-name-prefix = "SpkrLeft";
13783                         #sound-dai-cells = <0>;
13784 @@ -1023,7 +1023,7 @@ left_spkr: wsa8810-left{
13786                 right_spkr: wsa8810-right{
13787                         compatible = "sdw10217201000";
13788 -                       powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
13789 +                       powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
13790                         reg = <0 2>;
13791                         #thermal-sensor-cells = <0>;
13792                         sound-name-prefix = "SpkrRight";
13793 diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
13794 index 454f794af547..6a2ed02d383d 100644
13795 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
13796 +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
13797 @@ -2382,7 +2382,7 @@ tlmm: pinctrl@3400000 {
13798                         #gpio-cells = <2>;
13799                         interrupt-controller;
13800                         #interrupt-cells = <2>;
13801 -                       gpio-ranges = <&tlmm 0 0 150>;
13802 +                       gpio-ranges = <&tlmm 0 0 151>;
13803                         wakeup-parent = <&pdc_intc>;
13805                         cci0_default: cci0-default {
13806 diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
13807 index e5bb17bc2f46..778613d3410b 100644
13808 --- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
13809 +++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
13810 @@ -914,7 +914,7 @@ tlmm: pinctrl@3100000 {
13811                               <0x0 0x03D00000 0x0 0x300000>;
13812                         reg-names = "west", "east", "north", "south";
13813                         interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
13814 -                       gpio-ranges = <&tlmm 0 0 175>;
13815 +                       gpio-ranges = <&tlmm 0 0 176>;
13816                         gpio-controller;
13817                         #gpio-cells = <2>;
13818                         interrupt-controller;
13819 diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
13820 index 947e1accae3a..46a6c18cea91 100644
13821 --- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
13822 +++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
13823 @@ -279,7 +279,7 @@ mmcx_reg: mmcx-reg {
13825         pmu {
13826                 compatible = "arm,armv8-pmuv3";
13827 -               interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
13828 +               interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
13829         };
13831         psci {
13832 @@ -2327,10 +2327,9 @@ mdss: mdss@ae00000 {
13833                         reg = <0 0x0ae00000 0 0x1000>;
13834                         reg-names = "mdss";
13836 -                       interconnects = <&gem_noc MASTER_AMPSS_M0 &config_noc SLAVE_DISPLAY_CFG>,
13837 -                                       <&mmss_noc MASTER_MDP_PORT0 &mc_virt SLAVE_EBI_CH0>,
13838 +                       interconnects = <&mmss_noc MASTER_MDP_PORT0 &mc_virt SLAVE_EBI_CH0>,
13839                                         <&mmss_noc MASTER_MDP_PORT1 &mc_virt SLAVE_EBI_CH0>;
13840 -                       interconnect-names = "notused", "mdp0-mem", "mdp1-mem";
13841 +                       interconnect-names = "mdp0-mem", "mdp1-mem";
13843                         power-domains = <&dispcc MDSS_GDSC>;
13845 @@ -2580,7 +2579,7 @@ opp-358000000 {
13847                 dispcc: clock-controller@af00000 {
13848                         compatible = "qcom,sm8250-dispcc";
13849 -                       reg = <0 0x0af00000 0 0x20000>;
13850 +                       reg = <0 0x0af00000 0 0x10000>;
13851                         mmcx-supply = <&mmcx_reg>;
13852                         clocks = <&rpmhcc RPMH_CXO_CLK>,
13853                                  <&dsi0_phy 0>,
13854 @@ -2588,28 +2587,14 @@ dispcc: clock-controller@af00000 {
13855                                  <&dsi1_phy 0>,
13856                                  <&dsi1_phy 1>,
13857                                  <0>,
13858 -                                <0>,
13859 -                                <0>,
13860 -                                <0>,
13861 -                                <0>,
13862 -                                <0>,
13863 -                                <0>,
13864 -                                <0>,
13865 -                                <&sleep_clk>;
13866 +                                <0>;
13867                         clock-names = "bi_tcxo",
13868                                       "dsi0_phy_pll_out_byteclk",
13869                                       "dsi0_phy_pll_out_dsiclk",
13870                                       "dsi1_phy_pll_out_byteclk",
13871                                       "dsi1_phy_pll_out_dsiclk",
13872 -                                     "dp_link_clk_divsel_ten",
13873 -                                     "dp_vco_divided_clk_src_mux",
13874 -                                     "dptx1_phy_pll_link_clk",
13875 -                                     "dptx1_phy_pll_vco_div_clk",
13876 -                                     "dptx2_phy_pll_link_clk",
13877 -                                     "dptx2_phy_pll_vco_div_clk",
13878 -                                     "edp_phy_pll_link_clk",
13879 -                                     "edp_phy_pll_vco_div_clk",
13880 -                                     "sleep_clk";
13881 +                                     "dp_phy_pll_link_clk",
13882 +                                     "dp_phy_pll_vco_div_clk";
13883                         #clock-cells = <1>;
13884                         #reset-cells = <1>;
13885                         #power-domain-cells = <1>;
13886 @@ -2689,7 +2674,7 @@ tlmm: pinctrl@f100000 {
13887                         #gpio-cells = <2>;
13888                         interrupt-controller;
13889                         #interrupt-cells = <2>;
13890 -                       gpio-ranges = <&tlmm 0 0 180>;
13891 +                       gpio-ranges = <&tlmm 0 0 181>;
13892                         wakeup-parent = <&pdc>;
13894                         pri_mi2s_active: pri-mi2s-active {
13895 @@ -3754,7 +3739,7 @@ timer {
13896                                 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
13897                              <GIC_PPI 11
13898                                 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
13899 -                            <GIC_PPI 12
13900 +                            <GIC_PPI 10
13901                                 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
13902         };
13904 diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
13905 index 5ef460458f5c..e2fca420e518 100644
13906 --- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
13907 +++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
13908 @@ -153,7 +153,7 @@ memory@80000000 {
13910         pmu {
13911                 compatible = "arm,armv8-pmuv3";
13912 -               interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
13913 +               interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
13914         };
13916         psci {
13917 @@ -382,7 +382,7 @@ tlmm: pinctrl@f100000 {
13918                         #gpio-cells = <2>;
13919                         interrupt-controller;
13920                         #interrupt-cells = <2>;
13921 -                       gpio-ranges = <&tlmm 0 0 203>;
13922 +                       gpio-ranges = <&tlmm 0 0 204>;
13924                         qup_uart3_default_state: qup-uart3-default-state {
13925                                 rx {
13926 diff --git a/arch/arm64/boot/dts/renesas/hihope-common.dtsi b/arch/arm64/boot/dts/renesas/hihope-common.dtsi
13927 index 7a3da9b06f67..0c7e6f790590 100644
13928 --- a/arch/arm64/boot/dts/renesas/hihope-common.dtsi
13929 +++ b/arch/arm64/boot/dts/renesas/hihope-common.dtsi
13930 @@ -12,6 +12,9 @@ / {
13931         aliases {
13932                 serial0 = &scif2;
13933                 serial1 = &hscif0;
13934 +               mmc0 = &sdhi3;
13935 +               mmc1 = &sdhi0;
13936 +               mmc2 = &sdhi2;
13937         };
13939         chosen {
13940 diff --git a/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts b/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
13941 index 501cb05da228..3cf2e076940f 100644
13942 --- a/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
13943 +++ b/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
13944 @@ -21,6 +21,9 @@ aliases {
13945                 serial4 = &hscif2;
13946                 serial5 = &scif5;
13947                 ethernet0 = &avb;
13948 +               mmc0 = &sdhi3;
13949 +               mmc1 = &sdhi0;
13950 +               mmc2 = &sdhi2;
13951         };
13953         chosen {
13954 diff --git a/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts b/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts
13955 index 71763f4402a7..3c0d59def8ee 100644
13956 --- a/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts
13957 +++ b/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts
13958 @@ -22,6 +22,9 @@ aliases {
13959                 serial5 = &scif5;
13960                 serial6 = &scif4;
13961                 ethernet0 = &avb;
13962 +               mmc0 = &sdhi3;
13963 +               mmc1 = &sdhi0;
13964 +               mmc2 = &sdhi2;
13965         };
13967         chosen {
13968 diff --git a/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
13969 index ea87cb5a459c..33257c6440b2 100644
13970 --- a/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
13971 +++ b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
13972 @@ -17,6 +17,8 @@ / {
13973         aliases {
13974                 serial0 = &scif2;
13975                 serial1 = &hscif2;
13976 +               mmc0 = &sdhi0;
13977 +               mmc1 = &sdhi3;
13978         };
13980         chosen {
13981 diff --git a/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts b/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts
13982 index 273f062f2909..7b6649a3ded0 100644
13983 --- a/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts
13984 +++ b/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts
13985 @@ -22,6 +22,9 @@ aliases {
13986                 serial5 = &scif5;
13987                 serial6 = &scif4;
13988                 ethernet0 = &avb;
13989 +               mmc0 = &sdhi3;
13990 +               mmc1 = &sdhi0;
13991 +               mmc2 = &sdhi2;
13992         };
13994         chosen {
13995 diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
13996 index ec7ca72399ec..1ffa4a995a7a 100644
13997 --- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi
13998 +++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
13999 @@ -992,8 +992,8 @@ port@1 {
14001                                         reg = <1>;
14003 -                                       vin4csi41: endpoint@2 {
14004 -                                               reg = <2>;
14005 +                                       vin4csi41: endpoint@3 {
14006 +                                               reg = <3>;
14007                                                 remote-endpoint = <&csi41vin4>;
14008                                         };
14009                                 };
14010 @@ -1020,8 +1020,8 @@ port@1 {
14012                                         reg = <1>;
14014 -                                       vin5csi41: endpoint@2 {
14015 -                                               reg = <2>;
14016 +                                       vin5csi41: endpoint@3 {
14017 +                                               reg = <3>;
14018                                                 remote-endpoint = <&csi41vin5>;
14019                                         };
14020                                 };
14021 @@ -1048,8 +1048,8 @@ port@1 {
14023                                         reg = <1>;
14025 -                                       vin6csi41: endpoint@2 {
14026 -                                               reg = <2>;
14027 +                                       vin6csi41: endpoint@3 {
14028 +                                               reg = <3>;
14029                                                 remote-endpoint = <&csi41vin6>;
14030                                         };
14031                                 };
14032 @@ -1076,8 +1076,8 @@ port@1 {
14034                                         reg = <1>;
14036 -                                       vin7csi41: endpoint@2 {
14037 -                                               reg = <2>;
14038 +                                       vin7csi41: endpoint@3 {
14039 +                                               reg = <3>;
14040                                                 remote-endpoint = <&csi41vin7>;
14041                                         };
14042                                 };
14043 diff --git a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
14044 index f74f8b9993f1..6d6cdc4c324b 100644
14045 --- a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
14046 +++ b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
14047 @@ -16,6 +16,9 @@ / {
14048         aliases {
14049                 serial0 = &scif2;
14050                 ethernet0 = &avb;
14051 +               mmc0 = &sdhi3;
14052 +               mmc1 = &sdhi0;
14053 +               mmc2 = &sdhi1;
14054         };
14056         chosen {
14057 diff --git a/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi
14058 index fa284a7260d6..e202e8aa6941 100644
14059 --- a/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi
14060 +++ b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi
14061 @@ -12,6 +12,14 @@ / {
14062         model = "Renesas Falcon CPU board";
14063         compatible = "renesas,falcon-cpu", "renesas,r8a779a0";
14065 +       aliases {
14066 +               serial0 = &scif0;
14067 +       };
14069 +       chosen {
14070 +               stdout-path = "serial0:115200n8";
14071 +       };
14073         memory@48000000 {
14074                 device_type = "memory";
14075                 /* first 128MB is reserved for secure area. */
14076 diff --git a/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts b/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts
14077 index 5617b81dd7dc..273857ae38f3 100644
14078 --- a/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts
14079 +++ b/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts
14080 @@ -14,11 +14,6 @@ / {
14082         aliases {
14083                 ethernet0 = &avb0;
14084 -               serial0 = &scif0;
14085 -       };
14087 -       chosen {
14088 -               stdout-path = "serial0:115200n8";
14089         };
14090  };
14092 diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
14093 index dfd6ae8b564f..86ac48e2c849 100644
14094 --- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
14095 +++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
14096 @@ -60,10 +60,7 @@ extalr_clk: extalr {
14098         pmu_a76 {
14099                 compatible = "arm,cortex-a76-pmu";
14100 -               interrupts-extended = <&gic GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
14101 -                                     <&gic GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
14102 -                                     <&gic GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
14103 -                                     <&gic GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
14104 +               interrupts-extended = <&gic GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
14105         };
14107         /* External SCIF clock - to be overridden by boards that provide it */
14108 diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
14109 index c22bb38994e8..15bb1eeb6601 100644
14110 --- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
14111 +++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
14112 @@ -36,6 +36,9 @@ aliases {
14113                 serial0 = &scif2;
14114                 serial1 = &hscif1;
14115                 ethernet0 = &avb;
14116 +               mmc0 = &sdhi2;
14117 +               mmc1 = &sdhi0;
14118 +               mmc2 = &sdhi3;
14119         };
14121         chosen {
14122 diff --git a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
14123 index e9ed2597f1c2..61bd4df09df0 100644
14124 --- a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
14125 +++ b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
14126 @@ -16,6 +16,7 @@ / {
14127         aliases {
14128                 serial1 = &hscif0;
14129                 serial2 = &scif1;
14130 +               mmc2 = &sdhi3;
14131         };
14133         clksndsel: clksndsel {
14134 diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi
14135 index a04eae55dd6c..3d88e95c65a5 100644
14136 --- a/arch/arm64/boot/dts/renesas/ulcb.dtsi
14137 +++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi
14138 @@ -23,6 +23,8 @@ / {
14139         aliases {
14140                 serial0 = &scif2;
14141                 ethernet0 = &avb;
14142 +               mmc0 = &sdhi2;
14143 +               mmc1 = &sdhi0;
14144         };
14146         chosen {
14147 diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
14148 index a87b8a678719..8f2c1c1e2c64 100644
14149 --- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
14150 +++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
14151 @@ -734,7 +734,7 @@ eth: ethernet@65000000 {
14152                         clocks = <&sys_clk 6>;
14153                         reset-names = "ether";
14154                         resets = <&sys_rst 6>;
14155 -                       phy-mode = "rgmii";
14156 +                       phy-mode = "rgmii-id";
14157                         local-mac-address = [00 00 00 00 00 00];
14158                         socionext,syscon-phy-mode = <&soc_glue 0>;
14160 diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
14161 index 0e52dadf54b3..be97da132258 100644
14162 --- a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
14163 +++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
14164 @@ -564,7 +564,7 @@ eth0: ethernet@65000000 {
14165                         clocks = <&sys_clk 6>;
14166                         reset-names = "ether";
14167                         resets = <&sys_rst 6>;
14168 -                       phy-mode = "rgmii";
14169 +                       phy-mode = "rgmii-id";
14170                         local-mac-address = [00 00 00 00 00 00];
14171                         socionext,syscon-phy-mode = <&soc_glue 0>;
14173 @@ -585,7 +585,7 @@ eth1: ethernet@65200000 {
14174                         clocks = <&sys_clk 7>;
14175                         reset-names = "ether";
14176                         resets = <&sys_rst 7>;
14177 -                       phy-mode = "rgmii";
14178 +                       phy-mode = "rgmii-id";
14179                         local-mac-address = [00 00 00 00 00 00];
14180                         socionext,syscon-phy-mode = <&soc_glue 1>;
14182 diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
14183 index 8c84dafb7125..f1e7da3dfa27 100644
14184 --- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
14185 +++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
14186 @@ -1042,13 +1042,16 @@ main_sdhci0: mmc@4f80000 {
14187                 assigned-clocks = <&k3_clks 91 1>;
14188                 assigned-clock-parents = <&k3_clks 91 2>;
14189                 bus-width = <8>;
14190 -               mmc-hs400-1_8v;
14191 +               mmc-hs200-1_8v;
14192                 mmc-ddr-1_8v;
14193                 ti,otap-del-sel-legacy = <0xf>;
14194                 ti,otap-del-sel-mmc-hs = <0xf>;
14195                 ti,otap-del-sel-ddr52 = <0x5>;
14196                 ti,otap-del-sel-hs200 = <0x6>;
14197                 ti,otap-del-sel-hs400 = <0x0>;
14198 +               ti,itap-del-sel-legacy = <0x10>;
14199 +               ti,itap-del-sel-mmc-hs = <0xa>;
14200 +               ti,itap-del-sel-ddr52 = <0x3>;
14201                 ti,trm-icp = <0x8>;
14202                 ti,strobe-sel = <0x77>;
14203                 dma-coherent;
14204 @@ -1069,9 +1072,15 @@ main_sdhci1: mmc@4fb0000 {
14205                 ti,otap-del-sel-sdr25 = <0xf>;
14206                 ti,otap-del-sel-sdr50 = <0xc>;
14207                 ti,otap-del-sel-ddr50 = <0xc>;
14208 +               ti,itap-del-sel-legacy = <0x0>;
14209 +               ti,itap-del-sel-sd-hs = <0x0>;
14210 +               ti,itap-del-sel-sdr12 = <0x0>;
14211 +               ti,itap-del-sel-sdr25 = <0x0>;
14212 +               ti,itap-del-sel-ddr50 = <0x2>;
14213                 ti,trm-icp = <0x8>;
14214                 ti,clkbuf-sel = <0x7>;
14215                 dma-coherent;
14216 +               sdhci-caps-mask = <0x2 0x0>;
14217         };
14219         main_sdhci2: mmc@4f98000 {
14220 @@ -1089,9 +1098,15 @@ main_sdhci2: mmc@4f98000 {
14221                 ti,otap-del-sel-sdr25 = <0xf>;
14222                 ti,otap-del-sel-sdr50 = <0xc>;
14223                 ti,otap-del-sel-ddr50 = <0xc>;
14224 +               ti,itap-del-sel-legacy = <0x0>;
14225 +               ti,itap-del-sel-sd-hs = <0x0>;
14226 +               ti,itap-del-sel-sdr12 = <0x0>;
14227 +               ti,itap-del-sel-sdr25 = <0x0>;
14228 +               ti,itap-del-sel-ddr50 = <0x2>;
14229                 ti,trm-icp = <0x8>;
14230                 ti,clkbuf-sel = <0x7>;
14231                 dma-coherent;
14232 +               sdhci-caps-mask = <0x2 0x0>;
14233         };
14235         usbss0: cdns-usb@4104000 {
14236 diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
14237 index bbdb54702aa7..247011356d11 100644
14238 --- a/arch/arm64/crypto/aes-modes.S
14239 +++ b/arch/arm64/crypto/aes-modes.S
14240 @@ -359,6 +359,7 @@ ST5(        mov             v4.16b, vctr.16b                )
14241         ins             vctr.d[0], x8
14243         /* apply carry to N counter blocks for N := x12 */
14244 +       cbz             x12, 2f
14245         adr             x16, 1f
14246         sub             x16, x16, x12, lsl #3
14247         br              x16
14248 diff --git a/arch/arm64/crypto/poly1305-glue.c b/arch/arm64/crypto/poly1305-glue.c
14249 index 683de671741a..9c3d86e397bf 100644
14250 --- a/arch/arm64/crypto/poly1305-glue.c
14251 +++ b/arch/arm64/crypto/poly1305-glue.c
14252 @@ -25,7 +25,7 @@ asmlinkage void poly1305_emit(void *state, u8 *digest, const u32 *nonce);
14254  static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
14256 -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
14257 +void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
14259         poly1305_init_arm64(&dctx->h, key);
14260         dctx->s[0] = get_unaligned_le32(key + 16);
14261 diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
14262 index 1c26d7baa67f..cfdde3a56805 100644
14263 --- a/arch/arm64/include/asm/daifflags.h
14264 +++ b/arch/arm64/include/asm/daifflags.h
14265 @@ -131,6 +131,9 @@ static inline void local_daif_inherit(struct pt_regs *regs)
14266         if (interrupts_enabled(regs))
14267                 trace_hardirqs_on();
14269 +       if (system_uses_irq_prio_masking())
14270 +               gic_write_pmr(regs->pmr_save);
14272         /*
14273          * We can't use local_daif_restore(regs->pstate) here as
14274          * system_has_prio_mask_debugging() won't restore the I bit if it can
14275 diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
14276 index 3d10e6527f7d..858c2fcfc043 100644
14277 --- a/arch/arm64/include/asm/kvm_host.h
14278 +++ b/arch/arm64/include/asm/kvm_host.h
14279 @@ -713,6 +713,7 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
14280  static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
14282  void kvm_arm_init_debug(void);
14283 +void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
14284  void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
14285  void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
14286  void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
14287 diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
14288 index 949788f5ba40..727bfc3be99b 100644
14289 --- a/arch/arm64/include/asm/unistd.h
14290 +++ b/arch/arm64/include/asm/unistd.h
14291 @@ -38,7 +38,7 @@
14292  #define __ARM_NR_compat_set_tls                (__ARM_NR_COMPAT_BASE + 5)
14293  #define __ARM_NR_COMPAT_END            (__ARM_NR_COMPAT_BASE + 0x800)
14295 -#define __NR_compat_syscalls           443
14296 +#define __NR_compat_syscalls           447
14297  #endif
14299  #define __ARCH_WANT_SYS_CLONE
14300 diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
14301 index 3d874f624056..e5015a2b9c94 100644
14302 --- a/arch/arm64/include/asm/unistd32.h
14303 +++ b/arch/arm64/include/asm/unistd32.h
14304 @@ -893,6 +893,14 @@ __SYSCALL(__NR_process_madvise, sys_process_madvise)
14305  __SYSCALL(__NR_epoll_pwait2, compat_sys_epoll_pwait2)
14306  #define __NR_mount_setattr 442
14307  __SYSCALL(__NR_mount_setattr, sys_mount_setattr)
14308 +#define __NR_futex_wait 443
14309 +__SYSCALL(__NR_futex_wait, sys_futex_wait)
14310 +#define __NR_futex_wake 444
14311 +__SYSCALL(__NR_futex_wake, sys_futex_wake)
14312 +#define __NR_futex_waitv 445
14313 +__SYSCALL(__NR_futex_waitv, compat_sys_futex_waitv)
14314 +#define __NR_futex_requeue 446
14315 +__SYSCALL(__NR_futex_requeue, compat_sys_futex_requeue)
14317  /*
14318   * Please add new compat syscalls above this comment and update
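
Syscall numbers 443-446 here are the out-of-tree futex2 entry points (futex_wait, futex_wake, futex_waitv, futex_requeue) carried by this patch set; they match the bump of __NR_compat_syscalls from 443 to 447 in unistd.h above. Since this ABI is not in mainline 5.12, the safest userspace check is a plain presence probe:

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    /* 443 is __NR_futex_wait per this patch; the arguments are dummies,
     * so any answer other than ENOSYS means the entry point exists. */
    long ret = syscall(443, NULL, 0UL, 0UL, NULL);

    if (ret == -1 && errno == ENOSYS)
        puts("futex2 syscalls not wired up");
    else
        puts("futex2 entry point present");
    return 0;
}
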
14319 diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
14320 index 9d3588450473..117412bae915 100644
14321 --- a/arch/arm64/kernel/entry-common.c
14322 +++ b/arch/arm64/kernel/entry-common.c
14323 @@ -226,14 +226,6 @@ static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
14324  {
14325         unsigned long far = read_sysreg(far_el1);
14327 -       /*
14328 -        * The CPU masked interrupts, and we are leaving them masked during
14329 -        * do_debug_exception(). Update PMR as if we had called
14330 -        * local_daif_mask().
14331 -        */
14332 -       if (system_uses_irq_prio_masking())
14333 -               gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
14335         arm64_enter_el1_dbg(regs);
14336         if (!cortex_a76_erratum_1463225_debug_handler(regs))
14337                 do_debug_exception(far, esr, regs);
14338 @@ -398,9 +390,6 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
14339         /* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
14340         unsigned long far = read_sysreg(far_el1);
14342 -       if (system_uses_irq_prio_masking())
14343 -               gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
14345         enter_from_user_mode();
14346         do_debug_exception(far, esr, regs);
14347         local_daif_restore(DAIF_PROCCTX_NOIRQ);
14348 @@ -408,9 +397,6 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
14350  static void noinstr el0_svc(struct pt_regs *regs)
14351  {
14352 -       if (system_uses_irq_prio_masking())
14353 -               gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
14355         enter_from_user_mode();
14356         cortex_a76_erratum_1463225_svc_handler();
14357         do_el0_svc(regs);
14358 @@ -486,9 +472,6 @@ static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
14360  static void noinstr el0_svc_compat(struct pt_regs *regs)
14361  {
14362 -       if (system_uses_irq_prio_masking())
14363 -               gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
14365         enter_from_user_mode();
14366         cortex_a76_erratum_1463225_svc_handler();
14367         do_el0_svc_compat(regs);
14368 diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
14369 index 6acfc5e6b5e0..e03fba3ae2a0 100644
14370 --- a/arch/arm64/kernel/entry.S
14371 +++ b/arch/arm64/kernel/entry.S
14372 @@ -263,16 +263,16 @@ alternative_else_nop_endif
14373         stp     lr, x21, [sp, #S_LR]
14375         /*
14376 -        * For exceptions from EL0, terminate the callchain here.
14377 +        * For exceptions from EL0, create a terminal frame record.
14378          * For exceptions from EL1, create a synthetic frame record so the
14379          * interrupted code shows up in the backtrace.
14380          */
14381         .if \el == 0
14382 -       mov     x29, xzr
14383 +       stp     xzr, xzr, [sp, #S_STACKFRAME]
14384         .else
14385         stp     x29, x22, [sp, #S_STACKFRAME]
14386 -       add     x29, sp, #S_STACKFRAME
14387         .endif
14388 +       add     x29, sp, #S_STACKFRAME
14390  #ifdef CONFIG_ARM64_SW_TTBR0_PAN
14391  alternative_if_not ARM64_HAS_PAN
14392 @@ -292,6 +292,8 @@ alternative_else_nop_endif
14393  alternative_if ARM64_HAS_IRQ_PRIO_MASKING
14394         mrs_s   x20, SYS_ICC_PMR_EL1
14395         str     x20, [sp, #S_PMR_SAVE]
14396 +       mov     x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
14397 +       msr_s   SYS_ICC_PMR_EL1, x20
14398  alternative_else_nop_endif
14400         /* Re-enable tag checking (TCO set on exception entry) */
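
The two added instructions make kernel_entry mask PMR unconditionally on every exception entry, right after saving the old value into pt_regs. That is what lets the other hunks delete the per-handler gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET) calls, the gic_prio_irq_setup macro, and the gic_prio_kentry_setup calls, with local_daif_inherit() now restoring regs->pmr_save on the way back. A C-level sketch of the discipline; register access is modeled with a plain variable, and the GIC_PRIO values are illustrative, not the kernel's definitions:

#include <stdint.h>

#define GIC_PRIO_IRQON      0xe0   /* illustrative values */
#define GIC_PRIO_PSR_I_SET  0x10

struct pt_regs { uint64_t pmr_save; };

static uint64_t pmr;               /* stands in for ICC_PMR_EL1 */

static void kernel_entry_pmr(struct pt_regs *regs)
{
    regs->pmr_save = pmr;                       /* str ..., [sp, #S_PMR_SAVE] */
    pmr = GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET;  /* new: mask at entry */
}

static void local_daif_inherit_pmr(const struct pt_regs *regs)
{
    pmr = regs->pmr_save;                       /* new: restore saved PMR */
}
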
14401 @@ -493,8 +495,8 @@ tsk .req    x28             // current thread_info
14402  /*
14403   * Interrupt handling.
14404   */
14405 -       .macro  irq_handler
14406 -       ldr_l   x1, handle_arch_irq
14407 +       .macro  irq_handler, handler:req
14408 +       ldr_l   x1, \handler
14409         mov     x0, sp
14410         irq_stack_entry
14411         blr     x1
14412 @@ -524,13 +526,41 @@ alternative_endif
14413  #endif
14414         .endm
14416 -       .macro  gic_prio_irq_setup, pmr:req, tmp:req
14417 -#ifdef CONFIG_ARM64_PSEUDO_NMI
14418 -       alternative_if ARM64_HAS_IRQ_PRIO_MASKING
14419 -       orr     \tmp, \pmr, #GIC_PRIO_PSR_I_SET
14420 -       msr_s   SYS_ICC_PMR_EL1, \tmp
14421 -       alternative_else_nop_endif
14422 +       .macro el1_interrupt_handler, handler:req
14423 +       enable_da_f
14425 +       mov     x0, sp
14426 +       bl      enter_el1_irq_or_nmi
14428 +       irq_handler     \handler
14430 +#ifdef CONFIG_PREEMPTION
14431 +       ldr     x24, [tsk, #TSK_TI_PREEMPT]     // get preempt count
14432 +alternative_if ARM64_HAS_IRQ_PRIO_MASKING
14433 +       /*
14434 +        * DA_F were cleared at start of handling. If anything is set in DAIF,
14435 +        * we come back from an NMI, so skip preemption
14436 +        */
14437 +       mrs     x0, daif
14438 +       orr     x24, x24, x0
14439 +alternative_else_nop_endif
14440 +       cbnz    x24, 1f                         // preempt count != 0 || NMI return path
14441 +       bl      arm64_preempt_schedule_irq      // irq en/disable is done inside
14442 +1:
14443  #endif
14445 +       mov     x0, sp
14446 +       bl      exit_el1_irq_or_nmi
14447 +       .endm
14449 +       .macro el0_interrupt_handler, handler:req
14450 +       user_exit_irqoff
14451 +       enable_da_f
14453 +       tbz     x22, #55, 1f
14454 +       bl      do_el0_irq_bp_hardening
14455 +1:
14456 +       irq_handler     \handler
14457         .endm
14459         .text
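
el1_interrupt_handler keeps the old el1_irq preemption gate intact: with pseudo-NMI priority masking, DAIF bits are still set when unwinding from an NMI, so OR-ing DAIF into the preempt count forces the skip path. Reduced to C; the helper names are hypothetical and schedule() is a stub:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void schedule(void) { puts("preempting"); }

static void maybe_preempt(uint64_t preempt_count, uint64_t daif,
                          bool prio_masking)
{
    if (prio_masking)
        preempt_count |= daif;  /* nonzero DAIF: NMI return path */
    if (preempt_count == 0)
        schedule();             /* bl arm64_preempt_schedule_irq */
}

int main(void)
{
    maybe_preempt(0, 0, true);      /* preempts */
    maybe_preempt(0, 0x3c0, true);  /* NMI return path: skipped */
    return 0;
}
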
14460 @@ -662,32 +692,7 @@ SYM_CODE_END(el1_sync)
14461         .align  6
14462  SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
14463         kernel_entry 1
14464 -       gic_prio_irq_setup pmr=x20, tmp=x1
14465 -       enable_da_f
14467 -       mov     x0, sp
14468 -       bl      enter_el1_irq_or_nmi
14470 -       irq_handler
14472 -#ifdef CONFIG_PREEMPTION
14473 -       ldr     x24, [tsk, #TSK_TI_PREEMPT]     // get preempt count
14474 -alternative_if ARM64_HAS_IRQ_PRIO_MASKING
14475 -       /*
14476 -        * DA_F were cleared at start of handling. If anything is set in DAIF,
14477 -        * we come back from an NMI, so skip preemption
14478 -        */
14479 -       mrs     x0, daif
14480 -       orr     x24, x24, x0
14481 -alternative_else_nop_endif
14482 -       cbnz    x24, 1f                         // preempt count != 0 || NMI return path
14483 -       bl      arm64_preempt_schedule_irq      // irq en/disable is done inside
14484 -1:
14485 -#endif
14487 -       mov     x0, sp
14488 -       bl      exit_el1_irq_or_nmi
14490 +       el1_interrupt_handler handle_arch_irq
14491         kernel_exit 1
14492  SYM_CODE_END(el1_irq)
14494 @@ -727,22 +732,13 @@ SYM_CODE_END(el0_error_compat)
14495  SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
14496         kernel_entry 0
14497  el0_irq_naked:
14498 -       gic_prio_irq_setup pmr=x20, tmp=x0
14499 -       user_exit_irqoff
14500 -       enable_da_f
14502 -       tbz     x22, #55, 1f
14503 -       bl      do_el0_irq_bp_hardening
14504 -1:
14505 -       irq_handler
14507 +       el0_interrupt_handler handle_arch_irq
14508         b       ret_to_user
14509  SYM_CODE_END(el0_irq)
14511  SYM_CODE_START_LOCAL(el1_error)
14512         kernel_entry 1
14513         mrs     x1, esr_el1
14514 -       gic_prio_kentry_setup tmp=x2
14515         enable_dbg
14516         mov     x0, sp
14517         bl      do_serror
14518 @@ -753,7 +749,6 @@ SYM_CODE_START_LOCAL(el0_error)
14519         kernel_entry 0
14520  el0_error_naked:
14521         mrs     x25, esr_el1
14522 -       gic_prio_kentry_setup tmp=x2
14523         user_exit_irqoff
14524         enable_dbg
14525         mov     x0, sp
14526 diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
14527 index d55bdfb7789c..7032a5f9e624 100644
14528 --- a/arch/arm64/kernel/stacktrace.c
14529 +++ b/arch/arm64/kernel/stacktrace.c
14530 @@ -44,10 +44,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
14531         unsigned long fp = frame->fp;
14532         struct stack_info info;
14534 -       /* Terminal record; nothing to unwind */
14535 -       if (!fp)
14536 -               return -ENOENT;
14538         if (fp & 0xf)
14539                 return -EINVAL;
14541 @@ -108,6 +104,12 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
14543         frame->pc = ptrauth_strip_insn_pac(frame->pc);
14545 +       /*
14546 +        * This is a terminal record, so we have finished unwinding.
14547 +        */
14548 +       if (!frame->fp && !frame->pc)
14549 +               return -ENOENT;
14551         return 0;
14552  }
14553  NOKPROBE_SYMBOL(unwind_frame);
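
This pairs with the entry.S hunk above: EL0 entries now push an all-zero frame record (stp xzr, xzr) and point x29 at it, instead of zeroing x29 outright. The unwinder therefore drops its up-front fp == 0 bail-out and instead stops after consuming a record whose fp and pc are both zero, so the first frame of an EL0 exception is still reported. Minimal analogue of the new termination rule; stack is a flat array standing in for stack memory:

#include <stddef.h>

struct frame { size_t fp, pc; };

static int unwind_step(struct frame *f, const size_t *stack)
{
    if (f->fp & 0xf)
        return -1;             /* misaligned frame pointer */
    f->pc = stack[f->fp + 1];  /* load the next record */
    f->fp = stack[f->fp];
    if (!f->fp && !f->pc)
        return -2;             /* terminal record consumed: unwind done */
    return 0;
}
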
14554 diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
14555 index 61dbb4c838ef..a5e61e09ea92 100644
14556 --- a/arch/arm64/kernel/vdso/vdso.lds.S
14557 +++ b/arch/arm64/kernel/vdso/vdso.lds.S
14558 @@ -31,6 +31,13 @@ SECTIONS
14559         .gnu.version_d  : { *(.gnu.version_d) }
14560         .gnu.version_r  : { *(.gnu.version_r) }
14562 +       /*
14563 +        * Discard .note.gnu.property sections which are unused and have
14564 +        * a different alignment requirement from the vDSO note sections.
14565 +        */
14566 +       /DISCARD/       : {
14567 +               *(.note.GNU-stack .note.gnu.property)
14568 +       }
14569         .note           : { *(.note.*) }                :text   :note
14571         . = ALIGN(16);
14572 @@ -48,7 +55,6 @@ SECTIONS
14573         PROVIDE(end = .);
14575         /DISCARD/       : {
14576 -               *(.note.GNU-stack)
14577                 *(.data .data.* .gnu.linkonce.d.* .sdata*)
14578                 *(.bss .sbss .dynbss .dynsbss)
14579                 *(.eh_frame .eh_frame_hdr)
14580 diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
14581 index 7f06ba76698d..84b5f79c9eab 100644
14582 --- a/arch/arm64/kvm/arm.c
14583 +++ b/arch/arm64/kvm/arm.c
14584 @@ -580,6 +580,8 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
14586         vcpu->arch.has_run_once = true;
14588 +       kvm_arm_vcpu_init_debug(vcpu);
14590         if (likely(irqchip_in_kernel(kvm))) {
14591                 /*
14592                  * Map the VGIC hardware resources before running a vcpu the
14593 @@ -1808,8 +1810,10 @@ static int init_hyp_mode(void)
14594         if (is_protected_kvm_enabled()) {
14595                 init_cpu_logical_map();
14597 -               if (!init_psci_relay())
14598 +               if (!init_psci_relay()) {
14599 +                       err = -ENODEV;
14600                         goto out_err;
14601 +               }
14602         }
14604         return 0;
14605 diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
14606 index dbc890511631..2484b2cca74b 100644
14607 --- a/arch/arm64/kvm/debug.c
14608 +++ b/arch/arm64/kvm/debug.c
14609 @@ -68,6 +68,64 @@ void kvm_arm_init_debug(void)
14610         __this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
14611  }
14613 +/**
14614 + * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
14615 + *
14616 + * @vcpu:      the vcpu pointer
14617 + *
14618 + * This ensures we will trap access to:
14619 + *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
14620 + *  - Debug ROM Address (MDCR_EL2_TDRA)
14621 + *  - OS related registers (MDCR_EL2_TDOSA)
14622 + *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
14623 + *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
14624 + */
14625 +static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
14626 +{
14627 +       /*
14628 +        * This also clears MDCR_EL2_E2PB_MASK to disable guest access
14629 +        * to the profiling buffer.
14630 +        */
14631 +       vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
14632 +       vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
14633 +                               MDCR_EL2_TPMS |
14634 +                               MDCR_EL2_TTRF |
14635 +                               MDCR_EL2_TPMCR |
14636 +                               MDCR_EL2_TDRA |
14637 +                               MDCR_EL2_TDOSA);
14639 +       /* Is the VM being debugged by userspace? */
14640 +       if (vcpu->guest_debug)
14641 +               /* Route all software debug exceptions to EL2 */
14642 +               vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
14644 +       /*
14645 +        * Trap debug register access when one of the following is true:
14646 +        *  - Userspace is using the hardware to debug the guest
14647 +        *  (KVM_GUESTDBG_USE_HW is set).
14648 +        *  - The guest is not using debug (KVM_ARM64_DEBUG_DIRTY is clear).
14649 +        */
14650 +       if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) ||
14651 +           !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
14652 +               vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
14654 +       trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
14655 +}
14657 +/**
14658 + * kvm_arm_vcpu_init_debug - setup vcpu debug traps
14659 + *
14660 + * @vcpu:      the vcpu pointer
14661 + *
14662 + * Set vcpu initial mdcr_el2 value.
14663 + */
14664 +void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
14665 +{
14666 +       preempt_disable();
14667 +       kvm_arm_setup_mdcr_el2(vcpu);
14668 +       preempt_enable();
14669 +}
14671  /**
14672   * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
14673   */
14674 @@ -83,13 +141,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
14675   * @vcpu:      the vcpu pointer
14676   *
14677   * This is called before each entry into the hypervisor to setup any
14678 - * debug related registers. Currently this just ensures we will trap
14679 - * access to:
14680 - *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
14681 - *  - Debug ROM Address (MDCR_EL2_TDRA)
14682 - *  - OS related registers (MDCR_EL2_TDOSA)
14683 - *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
14684 - *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
14685 + * debug related registers.
14686   *
14687   * Additionally, KVM only traps guest accesses to the debug registers if
14688   * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
14689 @@ -101,28 +153,14 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
14691  void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
14692  {
14693 -       bool trap_debug = !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY);
14694         unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2;
14696         trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);
14698 -       /*
14699 -        * This also clears MDCR_EL2_E2PB_MASK to disable guest access
14700 -        * to the profiling buffer.
14701 -        */
14702 -       vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
14703 -       vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
14704 -                               MDCR_EL2_TPMS |
14705 -                               MDCR_EL2_TTRF |
14706 -                               MDCR_EL2_TPMCR |
14707 -                               MDCR_EL2_TDRA |
14708 -                               MDCR_EL2_TDOSA);
14709 +       kvm_arm_setup_mdcr_el2(vcpu);
14711         /* Is Guest debugging in effect? */
14712         if (vcpu->guest_debug) {
14713 -               /* Route all software debug exceptions to EL2 */
14714 -               vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
14716                 /* Save guest debug state */
14717                 save_guest_debug_regs(vcpu);
14719 @@ -176,7 +214,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
14721                         vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
14722                         vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
14723 -                       trap_debug = true;
14725                         trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
14726                                                 &vcpu->arch.debug_ptr->dbg_bcr[0],
14727 @@ -191,10 +228,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
14728         BUG_ON(!vcpu->guest_debug &&
14729                 vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);
14731 -       /* Trap debug register access */
14732 -       if (trap_debug)
14733 -               vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
14735         /* If KDE or MDE are set, perform a full save/restore cycle. */
14736         if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
14737                 vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
14738 @@ -203,7 +236,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
14739         if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
14740                 write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
14742 -       trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
14743         trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
14745  }
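
The net effect of this kvm/debug.c refactor: the MDCR_EL2 trap-bit computation moves into kvm_arm_setup_mdcr_el2(), which runs once from kvm_arm_vcpu_init_debug() at first vcpu run (under preempt_disable(), since it samples the per-CPU mdcr_el2 value) and again from kvm_arm_setup_debug() before each guest entry, replacing the old trap_debug local. The decision table it implements, reduced to standalone C with hypothetical bit values, since only the shape matters here:

#include <stdbool.h>
#include <stdint.h>

enum {
    TPM = 1, TPMS = 2, TTRF = 4, TPMCR = 8, TDRA = 16, TDOSA = 32,
    TDE = 64, TDA = 128,
};

static uint64_t mdcr_traps(bool vm_debugged, bool use_hw_debug,
                           bool guest_debug_dirty)
{
    uint64_t m = TPM | TPMS | TTRF | TPMCR | TDRA | TDOSA;

    if (vm_debugged)
        m |= TDE;  /* route software debug exceptions to EL2 */
    if (use_hw_debug || !guest_debug_dirty)
        m |= TDA;  /* trap guest debug register access */
    return m;
}
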
14746 diff --git a/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c b/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c
14747 index ead02c6a7628..6bc88a756cb7 100644
14748 --- a/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c
14749 +++ b/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c
14750 @@ -50,6 +50,18 @@
14751  #ifndef R_AARCH64_ABS64
14752  #define R_AARCH64_ABS64                        257
14753  #endif
14754 +#ifndef R_AARCH64_PREL64
14755 +#define R_AARCH64_PREL64               260
14756 +#endif
14757 +#ifndef R_AARCH64_PREL32
14758 +#define R_AARCH64_PREL32               261
14759 +#endif
14760 +#ifndef R_AARCH64_PREL16
14761 +#define R_AARCH64_PREL16               262
14762 +#endif
14763 +#ifndef R_AARCH64_PLT32
14764 +#define R_AARCH64_PLT32                        314
14765 +#endif
14766  #ifndef R_AARCH64_LD_PREL_LO19
14767  #define R_AARCH64_LD_PREL_LO19         273
14768  #endif
14769 @@ -371,6 +383,12 @@ static void emit_rela_section(Elf64_Shdr *sh_rela)
14770                 case R_AARCH64_ABS64:
14771                         emit_rela_abs64(rela, sh_orig_name);
14772                         break;
14773 +               /* Allow position-relative data relocations. */
14774 +               case R_AARCH64_PREL64:
14775 +               case R_AARCH64_PREL32:
14776 +               case R_AARCH64_PREL16:
14777 +               case R_AARCH64_PLT32:
14778 +                       break;
14779                 /* Allow relocations to generate PC-relative addressing. */
14780                 case R_AARCH64_LD_PREL_LO19:
14781                 case R_AARCH64_ADR_PREL_LO21:
14782 diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
14783 index bd354cd45d28..4b5acd84b8c8 100644
14784 --- a/arch/arm64/kvm/reset.c
14785 +++ b/arch/arm64/kvm/reset.c
14786 @@ -242,6 +242,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
14788         /* Reset core registers */
14789         memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
14790 +       memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
14791 +       vcpu->arch.ctxt.spsr_abt = 0;
14792 +       vcpu->arch.ctxt.spsr_und = 0;
14793 +       vcpu->arch.ctxt.spsr_irq = 0;
14794 +       vcpu->arch.ctxt.spsr_fiq = 0;
14795         vcpu_gp_regs(vcpu)->pstate = pstate;
14797         /* Reset system registers */
14798 diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c
14799 index 44419679f91a..7740995de982 100644
14800 --- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
14801 +++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
14802 @@ -87,8 +87,8 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
14803                         r = vgic_v3_set_redist_base(kvm, 0, *addr, 0);
14804                         goto out;
14805                 }
14806 -               rdreg = list_first_entry(&vgic->rd_regions,
14807 -                                        struct vgic_redist_region, list);
14808 +               rdreg = list_first_entry_or_null(&vgic->rd_regions,
14809 +                                                struct vgic_redist_region, list);
14810                 if (!rdreg)
14811                         addr_ptr = &undef_value;
14812                 else
14813 @@ -226,6 +226,9 @@ static int vgic_get_common_attr(struct kvm_device *dev,
14814                 u64 addr;
14815                 unsigned long type = (unsigned long)attr->attr;
14817 +               if (copy_from_user(&addr, uaddr, sizeof(addr)))
14818 +                       return -EFAULT;
14820                 r = kvm_vgic_addr(dev->kvm, type, &addr, false);
14821                 if (r)
14822                         return (r == -ENODEV) ? -ENXIO : r;
14823 diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
14824 index ac485163a4a7..6d44c028d1c9 100644
14825 --- a/arch/arm64/mm/flush.c
14826 +++ b/arch/arm64/mm/flush.c
14827 @@ -55,8 +55,10 @@ void __sync_icache_dcache(pte_t pte)
14829         struct page *page = pte_page(pte);
14831 -       if (!test_and_set_bit(PG_dcache_clean, &page->flags))
14832 +       if (!test_bit(PG_dcache_clean, &page->flags)) {
14833                 sync_icache_aliases(page_address(page), page_size(page));
14834 +               set_bit(PG_dcache_clean, &page->flags);
14835 +       }
14836  }
14837  EXPORT_SYMBOL_GPL(__sync_icache_dcache);
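
Replacing test_and_set_bit() with test_bit() plus a set_bit() after the sync changes the ordering, not the logic: PG_dcache_clean now becomes visible only once the icache maintenance has finished, so another CPU that observes the bit can safely skip its own sync. The trade-off is that two racing callers may both run the (idempotent) sync. The same publish-after-work shape in portable C:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool dcache_clean;

static void ensure_clean(void (*do_sync)(void))
{
    if (!atomic_load(&dcache_clean)) {
        do_sync();                          /* do the work first */
        atomic_store(&dcache_clean, true);  /* then publish the flag */
    }
}
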
14839 diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
14840 index c967bfd30d2b..b183216a591c 100644
14841 --- a/arch/arm64/mm/proc.S
14842 +++ b/arch/arm64/mm/proc.S
14843 @@ -444,6 +444,18 @@ SYM_FUNC_START(__cpu_setup)
14844         mov     x10, #(SYS_GCR_EL1_RRND | SYS_GCR_EL1_EXCL_MASK)
14845         msr_s   SYS_GCR_EL1, x10
14847 +       /*
14848 +        * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
14849 +        * RGSR_EL1.SEED must be non-zero for IRG to produce
14850 +        * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
14851 +        * must initialize it.
14852 +        */
14853 +       mrs     x10, CNTVCT_EL0
14854 +       ands    x10, x10, #SYS_RGSR_EL1_SEED_MASK
14855 +       csinc   x10, x10, xzr, ne
14856 +       lsl     x10, x10, #SYS_RGSR_EL1_SEED_SHIFT
14857 +       msr_s   SYS_RGSR_EL1, x10
14859         /* clear any pending tag check faults in TFSR*_EL1 */
14860         msr_s   SYS_TFSR_EL1, xzr
14861         msr_s   SYS_TFSRE0_EL1, xzr
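
The added mrs/ands/csinc/lsl sequence seeds RGSR_EL1 from the virtual counter while guaranteeing a nonzero SEED: ands sets the flags, and csinc bumps a zero result to one. In C, assuming the SYS_RGSR_EL1_SEED_* definitions (a 16-bit field placed at bit 8 in current kernels; treat that as an assumption here):

#include <stdint.h>
#include <stdio.h>

static uint64_t rgsr_seed(uint64_t cntvct)
{
    uint64_t seed = cntvct & 0xffff;  /* ands: extract the SEED bits */

    if (seed == 0)                    /* csinc: never allow zero */
        seed = 1;
    return seed << 8;                 /* lsl: place into RGSR_EL1 */
}

int main(void)
{
    printf("0x%llx\n", (unsigned long long)rgsr_seed(0x30000));
    return 0;
}
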
14862 diff --git a/arch/ia64/include/asm/module.h b/arch/ia64/include/asm/module.h
14863 index 5a29652e6def..7271b9c5fc76 100644
14864 --- a/arch/ia64/include/asm/module.h
14865 +++ b/arch/ia64/include/asm/module.h
14866 @@ -14,16 +14,20 @@
14867  struct elf64_shdr;                     /* forward declration */
14869  struct mod_arch_specific {
14870 +       /* Used only at module load time. */
14871         struct elf64_shdr *core_plt;    /* core PLT section */
14872         struct elf64_shdr *init_plt;    /* init PLT section */
14873         struct elf64_shdr *got;         /* global offset table */
14874         struct elf64_shdr *opd;         /* official procedure descriptors */
14875         struct elf64_shdr *unwind;      /* unwind-table section */
14876         unsigned long gp;               /* global-pointer for module */
14877 +       unsigned int next_got_entry;    /* index of next available got entry */
14879 +       /* Used at module run and cleanup time. */
14880         void *core_unw_table;           /* core unwind-table cookie returned by unwinder */
14881         void *init_unw_table;           /* init unwind-table cookie returned by unwinder */
14882 -       unsigned int next_got_entry;    /* index of next available got entry */
14883 +       void *opd_addr;                 /* symbolize uses .opd to get to actual function */
14884 +       unsigned long opd_size;
14885  };
14887  #define ARCH_SHF_SMALL SHF_IA_64_SHORT
14888 diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
14889 index a5636524af76..e2af6b172200 100644
14890 --- a/arch/ia64/kernel/acpi.c
14891 +++ b/arch/ia64/kernel/acpi.c
14892 @@ -446,7 +446,8 @@ void __init acpi_numa_fixup(void)
14893         if (srat_num_cpus == 0) {
14894                 node_set_online(0);
14895                 node_cpuid[0].phys_id = hard_smp_processor_id();
14896 -               return;
14897 +               slit_distance(0, 0) = LOCAL_DISTANCE;
14898 +               goto out;
14899         }
14901         /*
14902 @@ -489,7 +490,7 @@ void __init acpi_numa_fixup(void)
14903                         for (j = 0; j < MAX_NUMNODES; j++)
14904                                 slit_distance(i, j) = i == j ?
14905                                         LOCAL_DISTANCE : REMOTE_DISTANCE;
14906 -               return;
14907 +               goto out;
14908         }
14910         memset(numa_slit, -1, sizeof(numa_slit));
14911 @@ -514,6 +515,8 @@ void __init acpi_numa_fixup(void)
14912                 printk("\n");
14913         }
14914  #endif
14915 +out:
14916 +       node_possible_map = node_online_map;
14917  }
14918  #endif                         /* CONFIG_ACPI_NUMA */
14920 diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
14921 index c5fe21de46a8..31149e41f9be 100644
14922 --- a/arch/ia64/kernel/efi.c
14923 +++ b/arch/ia64/kernel/efi.c
14924 @@ -415,10 +415,10 @@ efi_get_pal_addr (void)
14925                 mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);
14927                 printk(KERN_INFO "CPU %d: mapping PAL code "
14928 -                       "[0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
14929 -                       smp_processor_id(), md->phys_addr,
14930 -                       md->phys_addr + efi_md_size(md),
14931 -                       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
14932 +                       "[0x%llx-0x%llx) into [0x%llx-0x%llx)\n",
14933 +                       smp_processor_id(), md->phys_addr,
14934 +                       md->phys_addr + efi_md_size(md),
14935 +                       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
14936  #endif
14937                 return __va(md->phys_addr);
14938         }
14939 @@ -560,6 +560,7 @@ efi_init (void)
14940         {
14941                 efi_memory_desc_t *md;
14942                 void *p;
14943 +               unsigned int i;
14945                 for (i = 0, p = efi_map_start; p < efi_map_end;
14946                      ++i, p += efi_desc_size)
14947 @@ -586,7 +587,7 @@ efi_init (void)
14948                         }
14950                         printk("mem%02d: %s "
14951 -                              "range=[0x%016lx-0x%016lx) (%4lu%s)\n",
14952 +                              "range=[0x%016llx-0x%016llx) (%4lu%s)\n",
14953                                i, efi_md_typeattr_format(buf, sizeof(buf), md),
14954                                md->phys_addr,
14955                                md->phys_addr + efi_md_size(md), size, unit);
14956 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
14957 index 00a496cb346f..2cba53c1da82 100644
14958 --- a/arch/ia64/kernel/module.c
14959 +++ b/arch/ia64/kernel/module.c
14960 @@ -905,9 +905,31 @@ register_unwind_table (struct module *mod)
14961  int
14962  module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
14963  {
14964 +       struct mod_arch_specific *mas = &mod->arch;
14966         DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
14967 -       if (mod->arch.unwind)
14968 +       if (mas->unwind)
14969                 register_unwind_table(mod);
14971 +       /*
14972 +        * ".opd" was already relocated to the final destination. Store
14973 +        * it's address for use in symbolizer.
14974 +        */
14975 +       mas->opd_addr = (void *)mas->opd->sh_addr;
14976 +       mas->opd_size = mas->opd->sh_size;
14978 +       /*
14979 +        * Module relocation was already done at this point. Section
14980 +        * headers are about to be deleted. Wipe out load-time context.
14981 +        */
14982 +       mas->core_plt = NULL;
14983 +       mas->init_plt = NULL;
14984 +       mas->got = NULL;
14985 +       mas->opd = NULL;
14986 +       mas->unwind = NULL;
14987 +       mas->gp = 0;
14988 +       mas->next_got_entry = 0;
14990         return 0;
14991  }
14993 @@ -926,10 +948,9 @@ module_arch_cleanup (struct module *mod)
14995  void *dereference_module_function_descriptor(struct module *mod, void *ptr)
14996  {
14997 -       Elf64_Shdr *opd = mod->arch.opd;
14998 +       struct mod_arch_specific *mas = &mod->arch;
15000 -       if (ptr < (void *)opd->sh_addr ||
15001 -                       ptr >= (void *)(opd->sh_addr + opd->sh_size))
15002 +       if (ptr < mas->opd_addr || ptr >= mas->opd_addr + mas->opd_size)
15003                 return ptr;
15005         return dereference_function_descriptor(ptr);
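
module_finalize() now caches the .opd section's address and size in mod_arch_specific and nulls the load-time section-header pointers, so dereference_module_function_descriptor(), which can run long after the section headers are freed, stops reading through mod->arch.opd. The cache-then-drop pattern in isolation (a hypothetical reduction, not the ia64 structures):

#include <stddef.h>

struct arch_state {
    void   *opd_sechdr;  /* valid only while the module loads */
    void   *opd_addr;    /* cached for later symbolization */
    size_t  opd_size;
};

static void finalize(struct arch_state *s, void *addr, size_t size)
{
    s->opd_addr = addr;    /* snapshot before the headers go away */
    s->opd_size = size;
    s->opd_sechdr = NULL;  /* wipe load-time context */
}

static int in_opd(const struct arch_state *s, const void *p)
{
    const char *base = (const char *)s->opd_addr;

    return (const char *)p >= base && (const char *)p < base + s->opd_size;
}
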
15006 diff --git a/arch/m68k/include/asm/mvme147hw.h b/arch/m68k/include/asm/mvme147hw.h
15007 index 257b29184af9..e28eb1c0e0bf 100644
15008 --- a/arch/m68k/include/asm/mvme147hw.h
15009 +++ b/arch/m68k/include/asm/mvme147hw.h
15010 @@ -66,6 +66,9 @@ struct pcc_regs {
15011  #define PCC_INT_ENAB           0x08
15013  #define PCC_TIMER_INT_CLR      0x80
15015 +#define PCC_TIMER_TIC_EN       0x01
15016 +#define PCC_TIMER_COC_EN       0x02
15017  #define PCC_TIMER_CLR_OVF      0x04
15019  #define PCC_LEVEL_ABORT                0x07
15020 diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
15021 index 1c235d8f53f3..f55bdcb8e4f1 100644
15022 --- a/arch/m68k/kernel/sys_m68k.c
15023 +++ b/arch/m68k/kernel/sys_m68k.c
15024 @@ -388,6 +388,8 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
15025                 ret = -EPERM;
15026                 if (!capable(CAP_SYS_ADMIN))
15027                         goto out;
15029 +               mmap_read_lock(current->mm);
15030         } else {
15031                 struct vm_area_struct *vma;
15033 diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
15034 index cfdc7f912e14..e1e90c49a496 100644
15035 --- a/arch/m68k/mvme147/config.c
15036 +++ b/arch/m68k/mvme147/config.c
15037 @@ -114,8 +114,10 @@ static irqreturn_t mvme147_timer_int (int irq, void *dev_id)
15038         unsigned long flags;
15040         local_irq_save(flags);
15041 -       m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;
15042 -       m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF;
15043 +       m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF | PCC_TIMER_COC_EN |
15044 +                            PCC_TIMER_TIC_EN;
15045 +       m147_pcc->t1_int_cntrl = PCC_INT_ENAB | PCC_TIMER_INT_CLR |
15046 +                                PCC_LEVEL_TIMER1;
15047         clk_total += PCC_TIMER_CYCLES;
15048         legacy_timer_tick(1);
15049         local_irq_restore(flags);
15050 @@ -133,10 +135,10 @@ void mvme147_sched_init (void)
15051         /* Init the clock with a value */
15052         /* The clock counter increments until 0xFFFF then reloads */
15053         m147_pcc->t1_preload = PCC_TIMER_PRELOAD;
15054 -       m147_pcc->t1_cntrl = 0x0;       /* clear timer */
15055 -       m147_pcc->t1_cntrl = 0x3;       /* start timer */
15056 -       m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;  /* clear pending ints */
15057 -       m147_pcc->t1_int_cntrl = PCC_INT_ENAB|PCC_LEVEL_TIMER1;
15058 +       m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF | PCC_TIMER_COC_EN |
15059 +                            PCC_TIMER_TIC_EN;
15060 +       m147_pcc->t1_int_cntrl = PCC_INT_ENAB | PCC_TIMER_INT_CLR |
15061 +                                PCC_LEVEL_TIMER1;
15063         clocksource_register_hz(&mvme147_clk, PCC_TIMER_CLOCK_FREQ);
15065 diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
15066 index 30357fe4ba6c..b59593c7cfb9 100644
15067 --- a/arch/m68k/mvme16x/config.c
15068 +++ b/arch/m68k/mvme16x/config.c
15069 @@ -366,6 +366,7 @@ static u32 clk_total;
15070  #define PCCTOVR1_COC_EN      0x02
15071  #define PCCTOVR1_OVR_CLR     0x04
15073 +#define PCCTIC1_INT_LEVEL    6
15074  #define PCCTIC1_INT_CLR      0x08
15075  #define PCCTIC1_INT_EN       0x10
15077 @@ -374,8 +375,8 @@ static irqreturn_t mvme16x_timer_int (int irq, void *dev_id)
15078         unsigned long flags;
15080         local_irq_save(flags);
15081 -       out_8(PCCTIC1, in_8(PCCTIC1) | PCCTIC1_INT_CLR);
15082 -       out_8(PCCTOVR1, PCCTOVR1_OVR_CLR);
15083 +       out_8(PCCTOVR1, PCCTOVR1_OVR_CLR | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
15084 +       out_8(PCCTIC1, PCCTIC1_INT_EN | PCCTIC1_INT_CLR | PCCTIC1_INT_LEVEL);
15085         clk_total += PCC_TIMER_CYCLES;
15086         legacy_timer_tick(1);
15087         local_irq_restore(flags);
15088 @@ -389,14 +390,15 @@ void mvme16x_sched_init(void)
15089      int irq;
15091      /* Using PCCchip2 or MC2 chip tick timer 1 */
15092 -    out_be32(PCCTCNT1, 0);
15093 -    out_be32(PCCTCMP1, PCC_TIMER_CYCLES);
15094 -    out_8(PCCTOVR1, in_8(PCCTOVR1) | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
15095 -    out_8(PCCTIC1, PCCTIC1_INT_EN | 6);
15096      if (request_irq(MVME16x_IRQ_TIMER, mvme16x_timer_int, IRQF_TIMER, "timer",
15097                      NULL))
15098         panic ("Couldn't register timer int");
15100 +    out_be32(PCCTCNT1, 0);
15101 +    out_be32(PCCTCMP1, PCC_TIMER_CYCLES);
15102 +    out_8(PCCTOVR1, PCCTOVR1_OVR_CLR | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
15103 +    out_8(PCCTIC1, PCCTIC1_INT_EN | PCCTIC1_INT_CLR | PCCTIC1_INT_LEVEL);
15105      clocksource_register_hz(&mvme16x_clk, PCC_TIMER_CLOCK_FREQ);
15107      if (brdno == 0x0162 || brdno == 0x172)
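
Both MVME boards get the same two repairs, as I read these hunks: every write to the PCC timer control and interrupt-control registers now carries the complete intended state, so acknowledging a tick can no longer wipe the enable bits (the old ack-only writes of PCC_TIMER_INT_CLR and PCC_TIMER_CLR_OVF cleared PCC_INT_ENAB and the TIC/COC enables as a side effect), and on mvme16x the timer is started only after request_irq() succeeds. The invariant, using this file's own bit definitions:

/* Full-state ack: clear the overflow *and* keep the timer running. */
#define PCC_TIMER_TIC_EN   0x01
#define PCC_TIMER_COC_EN   0x02
#define PCC_TIMER_CLR_OVF  0x04

static unsigned char timer_ack(void)
{
    return PCC_TIMER_CLR_OVF | PCC_TIMER_COC_EN | PCC_TIMER_TIC_EN;
}
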
15108 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
15109 index d89efba3d8a4..e89d63cd92d1 100644
15110 --- a/arch/mips/Kconfig
15111 +++ b/arch/mips/Kconfig
15112 @@ -6,6 +6,7 @@ config MIPS
15113         select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
15114         select ARCH_HAS_FORTIFY_SOURCE
15115         select ARCH_HAS_KCOV
15116 +       select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE if !EVA
15117         select ARCH_HAS_PTE_SPECIAL if !(32BIT && CPU_HAS_RIXI)
15118         select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
15119         select ARCH_HAS_UBSAN_SANITIZE_ALL
15120 diff --git a/arch/mips/boot/dts/brcm/bcm3368.dtsi b/arch/mips/boot/dts/brcm/bcm3368.dtsi
15121 index 69cbef472377..d4b2b430dad0 100644
15122 --- a/arch/mips/boot/dts/brcm/bcm3368.dtsi
15123 +++ b/arch/mips/boot/dts/brcm/bcm3368.dtsi
15124 @@ -59,7 +59,7 @@ clkctl: clock-controller@fff8c004 {
15126                 periph_cntl: syscon@fff8c008 {
15127                         compatible = "syscon";
15128 -                       reg = <0xfff8c000 0x4>;
15129 +                       reg = <0xfff8c008 0x4>;
15130                         native-endian;
15131                 };
15133 diff --git a/arch/mips/boot/dts/brcm/bcm63268.dtsi b/arch/mips/boot/dts/brcm/bcm63268.dtsi
15134 index e0021ff9f144..940594436872 100644
15135 --- a/arch/mips/boot/dts/brcm/bcm63268.dtsi
15136 +++ b/arch/mips/boot/dts/brcm/bcm63268.dtsi
15137 @@ -59,7 +59,7 @@ clkctl: clock-controller@10000004 {
15139                 periph_cntl: syscon@10000008 {
15140                         compatible = "syscon";
15141 -                       reg = <0x10000000 0xc>;
15142 +                       reg = <0x10000008 0x4>;
15143                         native-endian;
15144                 };
15146 diff --git a/arch/mips/boot/dts/brcm/bcm6358.dtsi b/arch/mips/boot/dts/brcm/bcm6358.dtsi
15147 index 9d93e7f5e6fc..d79c88c2fc9c 100644
15148 --- a/arch/mips/boot/dts/brcm/bcm6358.dtsi
15149 +++ b/arch/mips/boot/dts/brcm/bcm6358.dtsi
15150 @@ -59,7 +59,7 @@ clkctl: clock-controller@fffe0004 {
15152                 periph_cntl: syscon@fffe0008 {
15153                         compatible = "syscon";
15154 -                       reg = <0xfffe0000 0x4>;
15155 +                       reg = <0xfffe0008 0x4>;
15156                         native-endian;
15157                 };
15159 diff --git a/arch/mips/boot/dts/brcm/bcm6362.dtsi b/arch/mips/boot/dts/brcm/bcm6362.dtsi
15160 index eb10341b75ba..8a21cb761ffd 100644
15161 --- a/arch/mips/boot/dts/brcm/bcm6362.dtsi
15162 +++ b/arch/mips/boot/dts/brcm/bcm6362.dtsi
15163 @@ -59,7 +59,7 @@ clkctl: clock-controller@10000004 {
15165                 periph_cntl: syscon@10000008 {
15166                         compatible = "syscon";
15167 -                       reg = <0x10000000 0xc>;
15168 +                       reg = <0x10000008 0x4>;
15169                         native-endian;
15170                 };
15172 diff --git a/arch/mips/boot/dts/brcm/bcm6368.dtsi b/arch/mips/boot/dts/brcm/bcm6368.dtsi
15173 index 52c19f40b9cc..8e87867ebc04 100644
15174 --- a/arch/mips/boot/dts/brcm/bcm6368.dtsi
15175 +++ b/arch/mips/boot/dts/brcm/bcm6368.dtsi
15176 @@ -59,7 +59,7 @@ clkctl: clock-controller@10000004 {
15178                 periph_cntl: syscon@100000008 {
15179                         compatible = "syscon";
15180 -                       reg = <0x10000000 0xc>;
15181 +                       reg = <0x10000008 0x4>;
15182                         native-endian;
15183                 };
15185 diff --git a/arch/mips/crypto/poly1305-glue.c b/arch/mips/crypto/poly1305-glue.c
15186 index fc881b46d911..bc6110fb98e0 100644
15187 --- a/arch/mips/crypto/poly1305-glue.c
15188 +++ b/arch/mips/crypto/poly1305-glue.c
15189 @@ -17,7 +17,7 @@ asmlinkage void poly1305_init_mips(void *state, const u8 *key);
15190  asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit);
15191  asmlinkage void poly1305_emit_mips(void *state, u8 *digest, const u32 *nonce);
15193 -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
15194 +void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
15195  {
15196         poly1305_init_mips(&dctx->h, key);
15197         dctx->s[0] = get_unaligned_le32(key + 16);
15198 diff --git a/arch/mips/generic/board-boston.its.S b/arch/mips/generic/board-boston.its.S
15199 index a7f51f97b910..c45ad2759421 100644
15200 --- a/arch/mips/generic/board-boston.its.S
15201 +++ b/arch/mips/generic/board-boston.its.S
15202 @@ -1,22 +1,22 @@
15203  / {
15204         images {
15205 -               fdt@boston {
15206 +               fdt-boston {
15207                         description = "img,boston Device Tree";
15208                         data = /incbin/("boot/dts/img/boston.dtb");
15209                         type = "flat_dt";
15210                         arch = "mips";
15211                         compression = "none";
15212 -                       hash@0 {
15213 +                       hash {
15214                                 algo = "sha1";
15215                         };
15216                 };
15217         };
15219         configurations {
15220 -               conf@boston {
15221 +               conf-boston {
15222                         description = "Boston Linux kernel";
15223 -                       kernel = "kernel@0";
15224 -                       fdt = "fdt@boston";
15225 +                       kernel = "kernel";
15226 +                       fdt = "fdt-boston";
15227                 };
15228         };
15229  };
15230 diff --git a/arch/mips/generic/board-jaguar2.its.S b/arch/mips/generic/board-jaguar2.its.S
15231 index fb0e589eeff7..c2b8d479b26c 100644
15232 --- a/arch/mips/generic/board-jaguar2.its.S
15233 +++ b/arch/mips/generic/board-jaguar2.its.S
15234 @@ -1,23 +1,23 @@
15235  /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
15236  / {
15237         images {
15238 -               fdt@jaguar2_pcb110 {
15239 +               fdt-jaguar2_pcb110 {
15240                         description = "MSCC Jaguar2 PCB110 Device Tree";
15241                         data = /incbin/("boot/dts/mscc/jaguar2_pcb110.dtb");
15242                         type = "flat_dt";
15243                         arch = "mips";
15244                         compression = "none";
15245 -                       hash@0 {
15246 +                       hash {
15247                                 algo = "sha1";
15248                         };
15249                 };
15250 -               fdt@jaguar2_pcb111 {
15251 +               fdt-jaguar2_pcb111 {
15252                         description = "MSCC Jaguar2 PCB111 Device Tree";
15253                         data = /incbin/("boot/dts/mscc/jaguar2_pcb111.dtb");
15254                         type = "flat_dt";
15255                         arch = "mips";
15256                         compression = "none";
15257 -                       hash@0 {
15258 +                       hash {
15259                                 algo = "sha1";
15260                         };
15261                 };
15262 @@ -26,14 +26,14 @@
15263         configurations {
15264                 pcb110 {
15265                         description = "Jaguar2 Linux kernel";
15266 -                       kernel = "kernel@0";
15267 -                       fdt = "fdt@jaguar2_pcb110";
15268 +                       kernel = "kernel";
15269 +                       fdt = "fdt-jaguar2_pcb110";
15270                         ramdisk = "ramdisk";
15271                 };
15272                 pcb111 {
15273                         description = "Jaguar2 Linux kernel";
15274 -                       kernel = "kernel@0";
15275 -                       fdt = "fdt@jaguar2_pcb111";
15276 +                       kernel = "kernel";
15277 +                       fdt = "fdt-jaguar2_pcb111";
15278                         ramdisk = "ramdisk";
15279                 };
15280         };
15281 diff --git a/arch/mips/generic/board-luton.its.S b/arch/mips/generic/board-luton.its.S
15282 index 39a543f62f25..bd9837c9af97 100644
15283 --- a/arch/mips/generic/board-luton.its.S
15284 +++ b/arch/mips/generic/board-luton.its.S
15285 @@ -1,13 +1,13 @@
15286  /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
15287  / {
15288         images {
15289 -               fdt@luton_pcb091 {
15290 +               fdt-luton_pcb091 {
15291                         description = "MSCC Luton PCB091 Device Tree";
15292                         data = /incbin/("boot/dts/mscc/luton_pcb091.dtb");
15293                         type = "flat_dt";
15294                         arch = "mips";
15295                         compression = "none";
15296 -                       hash@0 {
15297 +                       hash {
15298                                 algo = "sha1";
15299                         };
15300                 };
15301 @@ -16,8 +16,8 @@
15302         configurations {
15303                 pcb091 {
15304                         description = "Luton Linux kernel";
15305 -                       kernel = "kernel@0";
15306 -                       fdt = "fdt@luton_pcb091";
15307 +                       kernel = "kernel";
15308 +                       fdt = "fdt-luton_pcb091";
15309                 };
15310         };
15311  };
15312 diff --git a/arch/mips/generic/board-ni169445.its.S b/arch/mips/generic/board-ni169445.its.S
15313 index e4cb4f95a8cc..0a2e8f7a8526 100644
15314 --- a/arch/mips/generic/board-ni169445.its.S
15315 +++ b/arch/mips/generic/board-ni169445.its.S
15316 @@ -1,22 +1,22 @@
15317  / {
15318         images {
15319 -               fdt@ni169445 {
15320 +               fdt-ni169445 {
15321                         description = "NI 169445 device tree";
15322                         data = /incbin/("boot/dts/ni/169445.dtb");
15323                         type = "flat_dt";
15324                         arch = "mips";
15325                         compression = "none";
15326 -                       hash@0 {
15327 +                       hash {
15328                                 algo = "sha1";
15329                         };
15330                 };
15331         };
15333         configurations {
15334 -               conf@ni169445 {
15335 +               conf-ni169445 {
15336                         description = "NI 169445 Linux Kernel";
15337 -                       kernel = "kernel@0";
15338 -                       fdt = "fdt@ni169445";
15339 +                       kernel = "kernel";
15340 +                       fdt = "fdt-ni169445";
15341                 };
15342         };
15343  };
15344 diff --git a/arch/mips/generic/board-ocelot.its.S b/arch/mips/generic/board-ocelot.its.S
15345 index 3da23988149a..8c7e3a1b68d3 100644
15346 --- a/arch/mips/generic/board-ocelot.its.S
15347 +++ b/arch/mips/generic/board-ocelot.its.S
15348 @@ -1,40 +1,40 @@
15349  /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
15350  / {
15351         images {
15352 -               fdt@ocelot_pcb123 {
15353 +               fdt-ocelot_pcb123 {
15354                         description = "MSCC Ocelot PCB123 Device Tree";
15355                         data = /incbin/("boot/dts/mscc/ocelot_pcb123.dtb");
15356                         type = "flat_dt";
15357                         arch = "mips";
15358                         compression = "none";
15359 -                       hash@0 {
15360 +                       hash {
15361                                 algo = "sha1";
15362                         };
15363                 };
15365 -               fdt@ocelot_pcb120 {
15366 +               fdt-ocelot_pcb120 {
15367                         description = "MSCC Ocelot PCB120 Device Tree";
15368                         data = /incbin/("boot/dts/mscc/ocelot_pcb120.dtb");
15369                         type = "flat_dt";
15370                         arch = "mips";
15371                         compression = "none";
15372 -                       hash@0 {
15373 +                       hash {
15374                                 algo = "sha1";
15375                         };
15376                 };
15377         };
15379         configurations {
15380 -               conf@ocelot_pcb123 {
15381 +               conf-ocelot_pcb123 {
15382                         description = "Ocelot Linux kernel";
15383 -                       kernel = "kernel@0";
15384 -                       fdt = "fdt@ocelot_pcb123";
15385 +                       kernel = "kernel";
15386 +                       fdt = "fdt-ocelot_pcb123";
15387                 };
15389 -               conf@ocelot_pcb120 {
15390 +               conf-ocelot_pcb120 {
15391                         description = "Ocelot Linux kernel";
15392 -                       kernel = "kernel@0";
15393 -                       fdt = "fdt@ocelot_pcb120";
15394 +                       kernel = "kernel";
15395 +                       fdt = "fdt-ocelot_pcb120";
15396                 };
15397         };
15398  };
15399 diff --git a/arch/mips/generic/board-serval.its.S b/arch/mips/generic/board-serval.its.S
15400 index 4ea4fc9d757f..dde833efe980 100644
15401 --- a/arch/mips/generic/board-serval.its.S
15402 +++ b/arch/mips/generic/board-serval.its.S
15403 @@ -1,13 +1,13 @@
15404  /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
15405  / {
15406         images {
15407 -               fdt@serval_pcb105 {
15408 +               fdt-serval_pcb105 {
15409                         description = "MSCC Serval PCB105 Device Tree";
15410                         data = /incbin/("boot/dts/mscc/serval_pcb105.dtb");
15411                         type = "flat_dt";
15412                         arch = "mips";
15413                         compression = "none";
15414 -                       hash@0 {
15415 +                       hash {
15416                                 algo = "sha1";
15417                         };
15418                 };
15419 @@ -16,8 +16,8 @@
15420         configurations {
15421                 pcb105 {
15422                         description = "Serval Linux kernel";
15423 -                       kernel = "kernel@0";
15424 -                       fdt = "fdt@serval_pcb105";
15425 +                       kernel = "kernel";
15426 +                       fdt = "fdt-serval_pcb105";
15427                         ramdisk = "ramdisk";
15428                 };
15429         };
15430 diff --git a/arch/mips/generic/board-xilfpga.its.S b/arch/mips/generic/board-xilfpga.its.S
15431 index a2e773d3f14f..08c1e900eb4e 100644
15432 --- a/arch/mips/generic/board-xilfpga.its.S
15433 +++ b/arch/mips/generic/board-xilfpga.its.S
15434 @@ -1,22 +1,22 @@
15435  / {
15436         images {
15437 -               fdt@xilfpga {
15438 +               fdt-xilfpga {
15439                         description = "MIPSfpga (xilfpga) Device Tree";
15440                         data = /incbin/("boot/dts/xilfpga/nexys4ddr.dtb");
15441                         type = "flat_dt";
15442                         arch = "mips";
15443                         compression = "none";
15444 -                       hash@0 {
15445 +                       hash {
15446                                 algo = "sha1";
15447                         };
15448                 };
15449         };
15451         configurations {
15452 -               conf@xilfpga {
15453 +               conf-xilfpga {
15454                         description = "MIPSfpga Linux kernel";
15455 -                       kernel = "kernel@0";
15456 -                       fdt = "fdt@xilfpga";
15457 +                       kernel = "kernel";
15458 +                       fdt = "fdt-xilfpga";
15459                 };
15460         };
15461  };
15462 diff --git a/arch/mips/generic/vmlinux.its.S b/arch/mips/generic/vmlinux.its.S
15463 index 1a08438fd893..3e254676540f 100644
15464 --- a/arch/mips/generic/vmlinux.its.S
15465 +++ b/arch/mips/generic/vmlinux.its.S
15466 @@ -6,7 +6,7 @@
15467         #address-cells = <ADDR_CELLS>;
15469         images {
15470 -               kernel@0 {
15471 +               kernel {
15472                         description = KERNEL_NAME;
15473                         data = /incbin/(VMLINUX_BINARY);
15474                         type = "kernel";
15475 @@ -15,18 +15,18 @@
15476                         compression = VMLINUX_COMPRESSION;
15477                         load = /bits/ ADDR_BITS <VMLINUX_LOAD_ADDRESS>;
15478                         entry = /bits/ ADDR_BITS <VMLINUX_ENTRY_ADDRESS>;
15479 -                       hash@0 {
15480 +                       hash {
15481                                 algo = "sha1";
15482                         };
15483                 };
15484         };
15486         configurations {
15487 -               default = "conf@default";
15488 +               default = "conf-default";
15490 -               conf@default {
15491 +               conf-default {
15492                         description = "Generic Linux kernel";
15493 -                       kernel = "kernel@0";
15494 +                       kernel = "kernel";
15495                 };
15496         };
15497  };
15498 diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
15499 index 86f2323ebe6b..ca83ada7015f 100644
15500 --- a/arch/mips/include/asm/asmmacro.h
15501 +++ b/arch/mips/include/asm/asmmacro.h
15502 @@ -44,8 +44,7 @@
15503         .endm
15504  #endif
15506 -#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
15507 -    defined(CONFIG_CPU_MIPSR6)
15508 +#ifdef CONFIG_CPU_HAS_DIEI
15509         .macro  local_irq_enable reg=t0
15510         ei
15511         irq_enable_hazard
15512 diff --git a/arch/mips/include/asm/div64.h b/arch/mips/include/asm/div64.h
15513 index dc5ea5736440..ceece76fc971 100644
15514 --- a/arch/mips/include/asm/div64.h
15515 +++ b/arch/mips/include/asm/div64.h
15516 @@ -1,5 +1,5 @@
15517  /*
15518 - * Copyright (C) 2000, 2004  Maciej W. Rozycki
15519 + * Copyright (C) 2000, 2004, 2021  Maciej W. Rozycki
15520   * Copyright (C) 2003, 07 Ralf Baechle (ralf@linux-mips.org)
15521   *
15522   * This file is subject to the terms and conditions of the GNU General Public
15523 @@ -9,25 +9,18 @@
15524  #ifndef __ASM_DIV64_H
15525  #define __ASM_DIV64_H
15527 -#include <asm-generic/div64.h>
15529 -#if BITS_PER_LONG == 64
15530 +#include <asm/bitsperlong.h>
15532 -#include <linux/types.h>
15533 +#if BITS_PER_LONG == 32
15535  /*
15536   * No traps on overflows for any of these...
15537   */
15539 -#define __div64_32(n, base)                                            \
15540 -({                                                                     \
15541 +#define do_div64_32(res, high, low, base) ({                           \
15542         unsigned long __cf, __tmp, __tmp2, __i;                         \
15543         unsigned long __quot32, __mod32;                                \
15544 -       unsigned long __high, __low;                                    \
15545 -       unsigned long long __n;                                         \
15546                                                                         \
15547 -       __high = *__n >> 32;                                            \
15548 -       __low = __n;                                                    \
15549         __asm__(                                                        \
15550         "       .set    push                                    \n"     \
15551         "       .set    noat                                    \n"     \
15552 @@ -51,18 +44,48 @@
15553         "       subu    %0, %0, %z6                             \n"     \
15554         "       addiu   %2, %2, 1                               \n"     \
15555         "3:                                                     \n"     \
15556 -       "       bnez    %4, 0b\n\t"                                     \
15557 -       "        srl    %5, %1, 0x1f\n\t"                               \
15558 +       "       bnez    %4, 0b                                  \n"     \
15559 +       "        srl    %5, %1, 0x1f                            \n"     \
15560         "       .set    pop"                                            \
15561         : "=&r" (__mod32), "=&r" (__tmp),                               \
15562           "=&r" (__quot32), "=&r" (__cf),                               \
15563           "=&r" (__i), "=&r" (__tmp2)                                   \
15564 -       : "Jr" (base), "0" (__high), "1" (__low));                      \
15565 +       : "Jr" (base), "0" (high), "1" (low));                          \
15566                                                                         \
15567 -       (__n) = __quot32;                                               \
15568 +       (res) = __quot32;                                               \
15569         __mod32;                                                        \
15570  })
15572 -#endif /* BITS_PER_LONG == 64 */
15573 +#define __div64_32(n, base) ({                                         \
15574 +       unsigned long __upper, __low, __high, __radix;                  \
15575 +       unsigned long long __quot;                                      \
15576 +       unsigned long long __div;                                       \
15577 +       unsigned long __mod;                                            \
15578 +                                                                       \
15579 +       __div = (*n);                                                   \
15580 +       __radix = (base);                                               \
15581 +                                                                       \
15582 +       __high = __div >> 32;                                           \
15583 +       __low = __div;                                                  \
15584 +                                                                       \
15585 +       if (__high < __radix) {                                         \
15586 +               __upper = __high;                                       \
15587 +               __high = 0;                                             \
15588 +       } else {                                                        \
15589 +               __upper = __high % __radix;                             \
15590 +               __high /= __radix;                                      \
15591 +       }                                                               \
15592 +                                                                       \
15593 +       __mod = do_div64_32(__low, __upper, __low, __radix);            \
15594 +                                                                       \
15595 +       __quot = __high;                                                \
15596 +       __quot = __quot << 32 | __low;                                  \
15597 +       (*n) = __quot;                                                  \
15598 +       __mod;                                                          \
15601 +#endif /* BITS_PER_LONG == 32 */
15603 +#include <asm-generic/div64.h>
15605  #endif /* __ASM_DIV64_H */
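
The rewritten __div64_32() above splits the 64-bit dividend into high and low words, reduces the high word with an ordinary 32/32 division, and hands only the remainder-carrying low part to the assembly helper. A minimal portable C sketch of the same decomposition (illustrative name; C's native 64-bit division stands in for the MIPS shift-and-subtract loop):

    #include <stdint.h>

    static uint32_t div64_32_sketch(uint64_t *n, uint32_t base)
    {
            uint32_t high = (uint32_t)(*n >> 32);
            uint32_t low  = (uint32_t)*n;
            uint32_t upper, qhigh;
            uint64_t partial;

            if (high < base) {              /* high word divides to zero */
                    upper = high;
                    qhigh = 0;
            } else {
                    upper = high % base;    /* remainder feeds the next step */
                    qhigh = high / base;
            }

            /* upper < base, so partial / base is guaranteed to fit in 32 bits */
            partial = ((uint64_t)upper << 32) | low;
            *n = ((uint64_t)qhigh << 32) | (uint32_t)(partial / base);
            return (uint32_t)(partial % base);      /* modulus, as before */
    }
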
15606 diff --git a/arch/mips/include/asm/vdso/gettimeofday.h b/arch/mips/include/asm/vdso/gettimeofday.h
15607 index 2203e2d0ae2a..44a45f3fa4b0 100644
15608 --- a/arch/mips/include/asm/vdso/gettimeofday.h
15609 +++ b/arch/mips/include/asm/vdso/gettimeofday.h
15610 @@ -20,6 +20,12 @@
15612  #define VDSO_HAS_CLOCK_GETRES          1
15614 +#if MIPS_ISA_REV < 6
15615 +#define VDSO_SYSCALL_CLOBBERS "hi", "lo",
15616 +#else
15617 +#define VDSO_SYSCALL_CLOBBERS
15618 +#endif
15620  static __always_inline long gettimeofday_fallback(
15621                                 struct __kernel_old_timeval *_tv,
15622                                 struct timezone *_tz)
15623 @@ -35,7 +41,9 @@ static __always_inline long gettimeofday_fallback(
15624         : "=r" (ret), "=r" (error)
15625         : "r" (tv), "r" (tz), "r" (nr)
15626         : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
15627 -         "$14", "$15", "$24", "$25", "hi", "lo", "memory");
15628 +         "$14", "$15", "$24", "$25",
15629 +         VDSO_SYSCALL_CLOBBERS
15630 +         "memory");
15632         return error ? -ret : ret;
15634 @@ -59,7 +67,9 @@ static __always_inline long clock_gettime_fallback(
15635         : "=r" (ret), "=r" (error)
15636         : "r" (clkid), "r" (ts), "r" (nr)
15637         : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
15638 -         "$14", "$15", "$24", "$25", "hi", "lo", "memory");
15639 +         "$14", "$15", "$24", "$25",
15640 +         VDSO_SYSCALL_CLOBBERS
15641 +         "memory");
15643         return error ? -ret : ret;
15645 @@ -83,7 +93,9 @@ static __always_inline int clock_getres_fallback(
15646         : "=r" (ret), "=r" (error)
15647         : "r" (clkid), "r" (ts), "r" (nr)
15648         : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
15649 -         "$14", "$15", "$24", "$25", "hi", "lo", "memory");
15650 +         "$14", "$15", "$24", "$25",
15651 +         VDSO_SYSCALL_CLOBBERS
15652 +         "memory");
15654         return error ? -ret : ret;
15656 @@ -105,7 +117,9 @@ static __always_inline long clock_gettime32_fallback(
15657         : "=r" (ret), "=r" (error)
15658         : "r" (clkid), "r" (ts), "r" (nr)
15659         : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
15660 -         "$14", "$15", "$24", "$25", "hi", "lo", "memory");
15661 +         "$14", "$15", "$24", "$25",
15662 +         VDSO_SYSCALL_CLOBBERS
15663 +         "memory");
15665         return error ? -ret : ret;
15667 @@ -125,7 +139,9 @@ static __always_inline int clock_getres32_fallback(
15668         : "=r" (ret), "=r" (error)
15669         : "r" (clkid), "r" (ts), "r" (nr)
15670         : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
15671 -         "$14", "$15", "$24", "$25", "hi", "lo", "memory");
15672 +         "$14", "$15", "$24", "$25",
15673 +         VDSO_SYSCALL_CLOBBERS
15674 +         "memory");
15676         return error ? -ret : ret;
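
All five syscall fallbacks above gain the same conditional clobber list: MIPS R6 removed the hi/lo accumulator registers, so naming them in an asm clobber breaks R6 builds. A reduced sketch of the trailing-comma technique (illustrative macro names; only the shape matters):

    #if defined(__mips__) && (__mips_isa_rev < 6)
    #define SYSCALL_CLOBBERS "hi", "lo",    /* trailing comma lives in the macro */
    #else
    #define SYSCALL_CLOBBERS                /* R6: hi/lo no longer exist */
    #endif

    /* SYSCALL_CLOBBERS "memory" expands to either
     *   "hi", "lo", "memory"   (pre-R6)
     * or
     *   "memory"               (R6 and later)
     * so the clobber list stays syntactically valid in both cases. */
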
15678 diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
15679 index b71892064f27..0ef240adefb5 100644
15680 --- a/arch/mips/kernel/cpu-probe.c
15681 +++ b/arch/mips/kernel/cpu-probe.c
15682 @@ -1752,7 +1752,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
15683                         set_isa(c, MIPS_CPU_ISA_M64R2);
15684                         break;
15685                 }
15686 -               c->writecombine = _CACHE_UNCACHED_ACCELERATED;
15687                 c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_EXT |
15688                                 MIPS_ASE_LOONGSON_EXT2);
15689                 break;
15690 @@ -1782,7 +1781,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
15691                  * register, we correct it here.
15692                  */
15693                 c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
15694 -               c->writecombine = _CACHE_UNCACHED_ACCELERATED;
15695                 c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
15696                         MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
15697                 c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */
15698 @@ -1793,7 +1791,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
15699                 set_elf_platform(cpu, "loongson3a");
15700                 set_isa(c, MIPS_CPU_ISA_M64R2);
15701                 decode_cpucfg(c);
15702 -               c->writecombine = _CACHE_UNCACHED_ACCELERATED;
15703                 break;
15704         default:
15705                 panic("Unknown Loongson Processor ID!");
15706 diff --git a/arch/mips/loongson64/init.c b/arch/mips/loongson64/init.c
15707 index cfa788bca871..1c664b23c0f9 100644
15708 --- a/arch/mips/loongson64/init.c
15709 +++ b/arch/mips/loongson64/init.c
15710 @@ -126,7 +126,7 @@ static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, resource_size_
15711                 return -ENOMEM;
15713         range->fwnode = fwnode;
15714 -       range->size = size;
15715 +       range->size = size = round_up(size, PAGE_SIZE);
15716         range->hw_start = hw_start;
15717         range->flags = LOGIC_PIO_CPU_MMIO;
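
The one-line change above pads each legacy ISA I/O range to a whole page before it is registered, presumably because the LOGIC_PIO machinery maps and allocates in page-sized units. For a power-of-two boundary, round_up() reduces to mask arithmetic; a stand-alone equivalent:

    #include <stdint.h>
    #include <assert.h>

    /* equivalent of the kernel's round_up() for power-of-two 'align' */
    static inline uint64_t round_up_pow2(uint64_t x, uint64_t align)
    {
            return (x + align - 1) & ~(align - 1);
    }

    /* e.g. with 4 KiB pages: 0x1001 bytes round up to two pages */
    static_assert(((0x1001ULL + 0xFFF) & ~0xFFFULL) == 0x2000, "pads to next page");
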
15719 diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
15720 index 39052de915f3..3a909194284a 100644
15721 --- a/arch/mips/pci/pci-legacy.c
15722 +++ b/arch/mips/pci/pci-legacy.c
15723 @@ -166,8 +166,13 @@ void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
15724                         res = hose->mem_resource;
15725                         break;
15726                 }
15727 -               if (res != NULL)
15728 -                       of_pci_range_to_resource(&range, node, res);
15729 +               if (res != NULL) {
15730 +                       res->name = node->full_name;
15731 +                       res->flags = range.flags;
15732 +                       res->start = range.cpu_addr;
15733 +                       res->end = range.cpu_addr + range.size - 1;
15734 +                       res->parent = res->child = res->sibling = NULL;
15735 +               }
15736         }
15739 diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c
15740 index d36061603752..e032932348d6 100644
15741 --- a/arch/mips/pci/pci-mt7620.c
15742 +++ b/arch/mips/pci/pci-mt7620.c
15743 @@ -30,6 +30,7 @@
15744  #define RALINK_GPIOMODE                        0x60
15746  #define PPLL_CFG1                      0x9c
15747 +#define PPLL_LD                                BIT(23)
15749  #define PPLL_DRV                       0xa0
15750  #define PDRV_SW_SET                    BIT(31)
15751 @@ -239,8 +240,8 @@ static int mt7620_pci_hw_init(struct platform_device *pdev)
15752         rt_sysc_m32(0, RALINK_PCIE0_CLK_EN, RALINK_CLKCFG1);
15753         mdelay(100);
15755 -       if (!(rt_sysc_r32(PPLL_CFG1) & PDRV_SW_SET)) {
15756 -               dev_err(&pdev->dev, "MT7620 PPLL unlock\n");
15757 +       if (!(rt_sysc_r32(PPLL_CFG1) & PPLL_LD)) {
15758 +               dev_err(&pdev->dev, "pcie PLL not locked, aborting init\n");
15759                 reset_control_assert(rstpcie0);
15760                 rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1);
15761                 return -1;
15762 diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c
15763 index e1f12e398136..f1538d2be89e 100644
15764 --- a/arch/mips/pci/pci-rt2880.c
15765 +++ b/arch/mips/pci/pci-rt2880.c
15766 @@ -180,7 +180,6 @@ static inline void rt2880_pci_write_u32(unsigned long reg, u32 val)
15768  int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
15770 -       u16 cmd;
15771         int irq = -1;
15773         if (dev->bus->number != 0)
15774 @@ -188,8 +187,6 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
15776         switch (PCI_SLOT(dev->devfn)) {
15777         case 0x00:
15778 -               rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000);
15779 -               (void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0);
15780                 break;
15781         case 0x11:
15782                 irq = RT288X_CPU_IRQ_PCI;
15783 @@ -201,16 +198,6 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
15784                 break;
15785         }
15787 -       pci_write_config_byte((struct pci_dev *) dev,
15788 -               PCI_CACHE_LINE_SIZE, 0x14);
15789 -       pci_write_config_byte((struct pci_dev *) dev, PCI_LATENCY_TIMER, 0xFF);
15790 -       pci_read_config_word((struct pci_dev *) dev, PCI_COMMAND, &cmd);
15791 -       cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
15792 -               PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK |
15793 -               PCI_COMMAND_SERR | PCI_COMMAND_WAIT | PCI_COMMAND_PARITY;
15794 -       pci_write_config_word((struct pci_dev *) dev, PCI_COMMAND, cmd);
15795 -       pci_write_config_byte((struct pci_dev *) dev, PCI_INTERRUPT_LINE,
15796 -                             dev->irq);
15797         return irq;
15800 @@ -251,6 +238,30 @@ static int rt288x_pci_probe(struct platform_device *pdev)
15802  int pcibios_plat_dev_init(struct pci_dev *dev)
15804 +       static bool slot0_init;
15806 +       /*
15807 +        * Nobody seems to initialize slot 0, but this platform requires it, so
15808 +        * do it once when some other slot is being enabled. The PCI subsystem
15809 +        * should configure other slots properly, so no need to do anything
15810 +        * special for those.
15811 +        */
15812 +       if (!slot0_init && dev->bus->number == 0) {
15813 +               u16 cmd;
15814 +               u32 bar0;
15816 +               slot0_init = true;
15818 +               pci_bus_write_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0,
15819 +                                          0x08000000);
15820 +               pci_bus_read_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0,
15821 +                                         &bar0);
15823 +               pci_bus_read_config_word(dev->bus, 0, PCI_COMMAND, &cmd);
15824 +               cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
15825 +               pci_bus_write_config_word(dev->bus, 0, PCI_COMMAND, cmd);
15826 +       }
15828         return 0;
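
Two details of the replacement are easy to miss: the function-local static makes the slot-0 setup a strict one-shot, and the BAR0 read-back preserves the original code's discarded read, which presumably forces the posted config write to land before the COMMAND register is touched. Schematically (kernel-style fragment, not stand-alone):

    static bool done;

    if (!done && dev->bus->number == 0) {
            u32 bar0;

            done = true;
            pci_bus_write_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0, 0x08000000);
            pci_bus_read_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0, &bar0);
            /* only then enable the COMMAND bits */
    }
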
15831 diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
15832 index 2416a9f91533..c6f9e7b9f7cb 100644
15833 --- a/arch/openrisc/kernel/setup.c
15834 +++ b/arch/openrisc/kernel/setup.c
15835 @@ -278,6 +278,8 @@ void calibrate_delay(void)
15836         pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
15837                 loops_per_jiffy / (500000 / HZ),
15838                 (loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy);
15840 +       of_node_put(cpu);
15843  void __init setup_arch(char **cmdline_p)
15844 diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
15845 index bf9b2310fc93..f3fa02b8838a 100644
15846 --- a/arch/openrisc/mm/init.c
15847 +++ b/arch/openrisc/mm/init.c
15848 @@ -75,7 +75,6 @@ static void __init map_ram(void)
15849         /* These mark extents of read-only kernel pages...
15850          * ...from vmlinux.lds.S
15851          */
15852 -       struct memblock_region *region;
15854         v = PAGE_OFFSET;
15856 @@ -121,7 +120,7 @@ static void __init map_ram(void)
15857                 }
15859                 printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
15860 -                      region->base, region->base + region->size);
15861 +                      start, end);
15862         }
15865 diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
15866 index 386ae12d8523..57c0ab71d51e 100644
15867 --- a/arch/powerpc/Kconfig
15868 +++ b/arch/powerpc/Kconfig
15869 @@ -224,7 +224,7 @@ config PPC
15870         select HAVE_LIVEPATCH                   if HAVE_DYNAMIC_FTRACE_WITH_REGS
15871         select HAVE_MOD_ARCH_SPECIFIC
15872         select HAVE_NMI                         if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
15873 -       select HAVE_HARDLOCKUP_DETECTOR_ARCH    if (PPC64 && PPC_BOOK3S)
15874 +       select HAVE_HARDLOCKUP_DETECTOR_ARCH    if PPC64 && PPC_BOOK3S && SMP
15875         select HAVE_OPTPROBES                   if PPC64
15876         select HAVE_PERF_EVENTS
15877         select HAVE_PERF_EVENTS_NMI             if PPC64
15878 diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
15879 index ae084357994e..6342f9da4545 100644
15880 --- a/arch/powerpc/Kconfig.debug
15881 +++ b/arch/powerpc/Kconfig.debug
15882 @@ -353,6 +353,7 @@ config PPC_EARLY_DEBUG_CPM_ADDR
15883  config FAIL_IOMMU
15884         bool "Fault-injection capability for IOMMU"
15885         depends on FAULT_INJECTION
15886 +       depends on PCI || IBMVIO
15887         help
15888           Provide fault-injection capability for IOMMU. Each device can
15889           be selectively enabled via the fail_iommu property.
15890 diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
15891 index 058601efbc8a..b703330459b8 100644
15892 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
15893 +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
15894 @@ -7,6 +7,7 @@
15895  #ifndef __ASSEMBLY__
15896  #include <linux/mmdebug.h>
15897  #include <linux/bug.h>
15898 +#include <linux/sizes.h>
15899  #endif
15901  /*
15902 @@ -323,7 +324,8 @@ extern unsigned long pci_io_base;
15903  #define  PHB_IO_END    (KERN_IO_START + FULL_IO_SIZE)
15904  #define IOREMAP_BASE   (PHB_IO_END)
15905  #define IOREMAP_START  (ioremap_bot)
15906 -#define IOREMAP_END    (KERN_IO_END)
15907 +#define IOREMAP_END    (KERN_IO_END - FIXADDR_SIZE)
15908 +#define FIXADDR_SIZE   SZ_32M
15910  /* Advertise special mapping type for AGP */
15911  #define HAVE_PAGE_AGP
15912 diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
15913 index c7813dc628fc..59cab558e2f0 100644
15914 --- a/arch/powerpc/include/asm/book3s/64/radix.h
15915 +++ b/arch/powerpc/include/asm/book3s/64/radix.h
15916 @@ -222,8 +222,10 @@ static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr,
15917          * from ptesync, it should probably go into update_mmu_cache, rather
15918          * than set_pte_at (which is used to set ptes unrelated to faults).
15919          *
15920 -        * Spurious faults to vmalloc region are not tolerated, so there is
15921 -        * a ptesync in flush_cache_vmap.
15922 +        * Spurious faults from the kernel memory are not tolerated, so there
15923 +        * is a ptesync in flush_cache_vmap, and __map_kernel_page() follows
15924 +        * the pte update sequence from ISA Book III 6.10 Translation Table
15925 +        * Update Synchronization Requirements.
15926          */
15929 diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
15930 index 8d03c16a3663..947b5b9c4424 100644
15931 --- a/arch/powerpc/include/asm/fixmap.h
15932 +++ b/arch/powerpc/include/asm/fixmap.h
15933 @@ -23,12 +23,17 @@
15934  #include <asm/kmap_size.h>
15935  #endif
15937 +#ifdef CONFIG_PPC64
15938 +#define FIXADDR_TOP    (IOREMAP_END + FIXADDR_SIZE)
15939 +#else
15940 +#define FIXADDR_SIZE   0
15941  #ifdef CONFIG_KASAN
15942  #include <asm/kasan.h>
15943  #define FIXADDR_TOP    (KASAN_SHADOW_START - PAGE_SIZE)
15944  #else
15945  #define FIXADDR_TOP    ((unsigned long)(-PAGE_SIZE))
15946  #endif
15947 +#endif
15949  /*
15950   * Here we define all the compile-time 'special' virtual
15951 @@ -50,6 +55,7 @@
15952   */
15953  enum fixed_addresses {
15954         FIX_HOLE,
15955 +#ifdef CONFIG_PPC32
15956         /* reserve the top 128K for early debugging purposes */
15957         FIX_EARLY_DEBUG_TOP = FIX_HOLE,
15958         FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1,
15959 @@ -72,6 +78,7 @@ enum fixed_addresses {
15960                        FIX_IMMR_SIZE,
15961  #endif
15962         /* FIX_PCIE_MCFG, */
15963 +#endif /* CONFIG_PPC32 */
15964         __end_of_permanent_fixed_addresses,
15966  #define NR_FIX_BTMAPS          (SZ_256K / PAGE_SIZE)
15967 @@ -98,6 +105,8 @@ enum fixed_addresses {
15968  static inline void __set_fixmap(enum fixed_addresses idx,
15969                                 phys_addr_t phys, pgprot_t flags)
15971 +       BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC64) && __FIXADDR_SIZE > FIXADDR_SIZE);
15973         if (__builtin_constant_p(idx))
15974                 BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
15975         else if (WARN_ON(idx >= __end_of_fixed_addresses))
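
Taken together, the pgtable.h and fixmap.h hunks carve the top 32 MB out of the 64-bit ioremap window and hand it to the fixmap, with the new BUILD_BUG_ON guaranteeing the fixmap entries actually fit in the carved-out region. The arithmetic, checked stand-alone with an illustrative placeholder for the window top:

    #include <assert.h>

    #define SZ_32M        (32ULL << 20)
    #define KERN_IO_END   0xc008000000000000ULL   /* illustrative placeholder */
    #define IOREMAP_END   (KERN_IO_END - SZ_32M)  /* ioremap loses the top 32M */
    #define FIXADDR_TOP   (IOREMAP_END + SZ_32M)  /* fixmap takes it over */

    static_assert(FIXADDR_TOP == KERN_IO_END, "fixmap sits flush against the old top");
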
15976 diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
15977 index ed6086d57b22..0c92b01a3c3c 100644
15978 --- a/arch/powerpc/include/asm/hvcall.h
15979 +++ b/arch/powerpc/include/asm/hvcall.h
15980 @@ -446,6 +446,9 @@
15981   */
15982  long plpar_hcall_norets(unsigned long opcode, ...);
15984 +/* Variant which does not do hcall tracing */
15985 +long plpar_hcall_norets_notrace(unsigned long opcode, ...);
15987  /**
15988   * plpar_hcall: - Make a pseries hypervisor call
15989   * @opcode: The hypervisor call to make.
15990 diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
15991 index e8d09a841373..31ed5356590a 100644
15992 --- a/arch/powerpc/include/asm/interrupt.h
15993 +++ b/arch/powerpc/include/asm/interrupt.h
15994 @@ -138,6 +138,13 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
15995         local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
15996         local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
15998 +       if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !(regs->msr & MSR_PR) &&
15999 +                               regs->nip < (unsigned long)__end_interrupts) {
16000 +               // Kernel code running below __end_interrupts is
16001 +               // implicitly soft-masked.
16002 +               regs->softe = IRQS_ALL_DISABLED;
16003 +       }
16005         /* Don't do any per-CPU operations until interrupt state is fixed */
16006  #endif
16007         /* Allow DEC and PMI to be traced when they are soft-NMI */
16008 diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
16009 index 652ce85f9410..4bc45d3ed8b0 100644
16010 --- a/arch/powerpc/include/asm/mmu_context.h
16011 +++ b/arch/powerpc/include/asm/mmu_context.h
16012 @@ -263,7 +263,7 @@ extern void arch_exit_mmap(struct mm_struct *mm);
16013  static inline void arch_unmap(struct mm_struct *mm,
16014                               unsigned long start, unsigned long end)
16016 -       unsigned long vdso_base = (unsigned long)mm->context.vdso - PAGE_SIZE;
16017 +       unsigned long vdso_base = (unsigned long)mm->context.vdso;
16019         if (start <= vdso_base && vdso_base < end)
16020                 mm->context.vdso = NULL;
16021 diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
16022 index 6cb8aa357191..57cd3892bfe0 100644
16023 --- a/arch/powerpc/include/asm/nohash/64/pgtable.h
16024 +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
16025 @@ -6,6 +6,8 @@
16026   * the ppc64 non-hashed page table.
16027   */
16029 +#include <linux/sizes.h>
16031  #include <asm/nohash/64/pgtable-4k.h>
16032  #include <asm/barrier.h>
16033  #include <asm/asm-const.h>
16034 @@ -54,7 +56,8 @@
16035  #define  PHB_IO_END    (KERN_IO_START + FULL_IO_SIZE)
16036  #define IOREMAP_BASE   (PHB_IO_END)
16037  #define IOREMAP_START  (ioremap_bot)
16038 -#define IOREMAP_END    (KERN_VIRT_START + KERN_VIRT_SIZE)
16039 +#define IOREMAP_END    (KERN_VIRT_START + KERN_VIRT_SIZE - FIXADDR_SIZE)
16040 +#define FIXADDR_SIZE   SZ_32M
16043  /*
16044 diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
16045 index 5d1726bb28e7..bcb7b5f917be 100644
16046 --- a/arch/powerpc/include/asm/paravirt.h
16047 +++ b/arch/powerpc/include/asm/paravirt.h
16048 @@ -28,19 +28,35 @@ static inline u32 yield_count_of(int cpu)
16049         return be32_to_cpu(yield_count);
16053 + * Spinlock code confers and prods, so don't trace the hcalls because the
16054 + * tracing code takes spinlocks which can cause recursion deadlocks.
16055 + *
16056 + * These calls are made while the lock is not held: the lock slowpath yields if
16057 + * it cannot acquire the lock, and the unlock slow path might prod if a waiter has
16058 + * yielded. So this may not be a problem for simple spin locks because the
16059 + * tracing does not technically recurse on the lock, but we avoid it anyway.
16060 + *
16061 + * However the queued spin lock contended path is more strictly ordered: the
16062 + * H_CONFER hcall is made after the task has queued itself on the lock, so then
16063 + * recursing on that lock will cause the task to then queue up again behind the
16064 + * first instance (or worse: queued spinlocks use tricks that assume a context
16065 + * never waits on more than one spinlock, so such recursion may cause random
16066 + * corruption in the lock code).
16067 + */
16068  static inline void yield_to_preempted(int cpu, u32 yield_count)
16070 -       plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
16071 +       plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
16074  static inline void prod_cpu(int cpu)
16076 -       plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
16077 +       plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
16080  static inline void yield_to_any(void)
16082 -       plpar_hcall_norets(H_CONFER, -1, 0);
16083 +       plpar_hcall_norets_notrace(H_CONFER, -1, 0);
16085  #else
16086  static inline bool is_shared_processor(void)
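
The comment above is the heart of this change: hcall tracepoints can take locks, so hcalls made from the lock slowpath must bypass tracing. A stand-alone sketch of the traced/untraced twin pattern (raw_hcall and trace_event are stand-ins for the real powerpc primitives):

    static long raw_hcall(unsigned long opcode) { return (long)opcode; } /* stand-in */
    static void trace_event(const char *what)   { (void)what; /* may take a lock */ }

    static long hcall(unsigned long opcode)         /* traced: ordinary callers */
    {
            long rc;

            trace_event("hcall_entry");
            rc = raw_hcall(opcode);
            trace_event("hcall_exit");
            return rc;
    }

    static long hcall_notrace(unsigned long opcode) /* for lock slowpaths */
    {
            return raw_hcall(opcode);   /* cannot re-enter the lock code */
    }
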
16087 diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
16088 index 1499e928ea6a..5d8d397e928a 100644
16089 --- a/arch/powerpc/include/asm/ptrace.h
16090 +++ b/arch/powerpc/include/asm/ptrace.h
16091 @@ -19,6 +19,7 @@
16092  #ifndef _ASM_POWERPC_PTRACE_H
16093  #define _ASM_POWERPC_PTRACE_H
16095 +#include <linux/err.h>
16096  #include <uapi/asm/ptrace.h>
16097  #include <asm/asm-const.h>
16099 @@ -152,25 +153,6 @@ extern unsigned long profile_pc(struct pt_regs *regs);
16100  long do_syscall_trace_enter(struct pt_regs *regs);
16101  void do_syscall_trace_leave(struct pt_regs *regs);
16103 -#define kernel_stack_pointer(regs) ((regs)->gpr[1])
16104 -static inline int is_syscall_success(struct pt_regs *regs)
16106 -       return !(regs->ccr & 0x10000000);
16109 -static inline long regs_return_value(struct pt_regs *regs)
16111 -       if (is_syscall_success(regs))
16112 -               return regs->gpr[3];
16113 -       else
16114 -               return -regs->gpr[3];
16117 -static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
16119 -       regs->gpr[3] = rc;
16122  #ifdef __powerpc64__
16123  #define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
16124  #else
16125 @@ -252,6 +234,31 @@ static inline void set_trap_norestart(struct pt_regs *regs)
16126         regs->trap |= 0x10;
16129 +#define kernel_stack_pointer(regs) ((regs)->gpr[1])
16130 +static inline int is_syscall_success(struct pt_regs *regs)
16132 +       if (trap_is_scv(regs))
16133 +               return !IS_ERR_VALUE((unsigned long)regs->gpr[3]);
16134 +       else
16135 +               return !(regs->ccr & 0x10000000);
16138 +static inline long regs_return_value(struct pt_regs *regs)
16140 +       if (trap_is_scv(regs))
16141 +               return regs->gpr[3];
16143 +       if (is_syscall_success(regs))
16144 +               return regs->gpr[3];
16145 +       else
16146 +               return -regs->gpr[3];
16149 +static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
16151 +       regs->gpr[3] = rc;
16154  #define arch_has_single_step() (1)
16155  #define arch_has_block_step()  (true)
16156  #define ARCH_HAS_USER_SINGLE_STEP_REPORT
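
The relocated helpers now distinguish the two syscall ABIs: the legacy 'sc' instruction flags failure via CR0[SO] (the 0x10000000 bit of CCR) with a positive errno left in r3, while 'scv' returns a negative errno directly, like most other architectures. A stand-alone decoder over a reduced register struct (not the kernel's pt_regs):

    #include <stdbool.h>

    struct fake_regs {
            unsigned long gpr3;     /* syscall return register */
            unsigned long ccr;      /* condition register image */
            bool          is_scv;   /* which syscall instruction trapped */
    };

    static long decode_return(const struct fake_regs *r)
    {
            if (r->is_scv)          /* scv: a negative value already means error */
                    return (long)r->gpr3;
            /* sc: CR0[SO] set means gpr3 holds a positive errno */
            return (r->ccr & 0x10000000UL) ? -(long)r->gpr3 : (long)r->gpr3;
    }
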
16157 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
16158 index da103e92c112..37d0b8c76a59 100644
16159 --- a/arch/powerpc/include/asm/reg.h
16160 +++ b/arch/powerpc/include/asm/reg.h
16161 @@ -441,6 +441,7 @@
16162  #define   LPCR_VRMA_LP1                ASM_CONST(0x0000800000000000)
16163  #define   LPCR_RMLS            0x1C000000      /* Implementation dependent RMO limit sel */
16164  #define   LPCR_RMLS_SH         26
16165 +#define   LPCR_HAIL            ASM_CONST(0x0000000004000000)   /* HV AIL (ISAv3.1) */
16166  #define   LPCR_ILE             ASM_CONST(0x0000000002000000)   /* !HV irqs set MSR:LE */
16167  #define   LPCR_AIL             ASM_CONST(0x0000000001800000)   /* Alternate interrupt location */
16168  #define   LPCR_AIL_0           ASM_CONST(0x0000000000000000)   /* MMU off exception offset 0x0 */
16169 diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
16170 index 7a13bc20f0a0..47081a9e13ca 100644
16171 --- a/arch/powerpc/include/asm/smp.h
16172 +++ b/arch/powerpc/include/asm/smp.h
16173 @@ -121,6 +121,11 @@ static inline struct cpumask *cpu_sibling_mask(int cpu)
16174         return per_cpu(cpu_sibling_map, cpu);
16177 +static inline struct cpumask *cpu_core_mask(int cpu)
16179 +       return per_cpu(cpu_core_map, cpu);
16182  static inline struct cpumask *cpu_l2_cache_mask(int cpu)
16184         return per_cpu(cpu_l2_cache_map, cpu);
16185 diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
16186 index fd1b518eed17..ba0f88f3a30d 100644
16187 --- a/arch/powerpc/include/asm/syscall.h
16188 +++ b/arch/powerpc/include/asm/syscall.h
16189 @@ -41,11 +41,17 @@ static inline void syscall_rollback(struct task_struct *task,
16190  static inline long syscall_get_error(struct task_struct *task,
16191                                      struct pt_regs *regs)
16193 -       /*
16194 -        * If the system call failed,
16195 -        * regs->gpr[3] contains a positive ERRORCODE.
16196 -        */
16197 -       return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
16198 +       if (trap_is_scv(regs)) {
16199 +               unsigned long error = regs->gpr[3];
16201 +               return IS_ERR_VALUE(error) ? error : 0;
16202 +       } else {
16203 +               /*
16204 +                * If the system call failed,
16205 +                * regs->gpr[3] contains a positive ERRORCODE.
16206 +                */
16207 +               return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
16208 +       }
16211  static inline long syscall_get_return_value(struct task_struct *task,
16212 @@ -58,18 +64,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
16213                                             struct pt_regs *regs,
16214                                             int error, long val)
16216 -       /*
16217 -        * In the general case it's not obvious that we must deal with CCR
16218 -        * here, as the syscall exit path will also do that for us. However
16219 -        * there are some places, eg. the signal code, which check ccr to
16220 -        * decide if the value in r3 is actually an error.
16221 -        */
16222 -       if (error) {
16223 -               regs->ccr |= 0x10000000L;
16224 -               regs->gpr[3] = error;
16225 +       if (trap_is_scv(regs)) {
16226 +               regs->gpr[3] = (long) error ?: val;
16227         } else {
16228 -               regs->ccr &= ~0x10000000L;
16229 -               regs->gpr[3] = val;
16230 +               /*
16231 +                * In the general case it's not obvious that we must deal with
16232 +                * CCR here, as the syscall exit path will also do that for us.
16233 +                * However there are some places, eg. the signal code, which
16234 +                * check ccr to decide if the value in r3 is actually an error.
16235 +                */
16236 +               if (error) {
16237 +                       regs->ccr |= 0x10000000L;
16238 +                       regs->gpr[3] = error;
16239 +               } else {
16240 +                       regs->ccr &= ~0x10000000L;
16241 +                       regs->gpr[3] = val;
16242 +               }
16243         }
16246 diff --git a/arch/powerpc/include/uapi/asm/errno.h b/arch/powerpc/include/uapi/asm/errno.h
16247 index cc79856896a1..4ba87de32be0 100644
16248 --- a/arch/powerpc/include/uapi/asm/errno.h
16249 +++ b/arch/powerpc/include/uapi/asm/errno.h
16250 @@ -2,6 +2,7 @@
16251  #ifndef _ASM_POWERPC_ERRNO_H
16252  #define _ASM_POWERPC_ERRNO_H
16254 +#undef EDEADLOCK
16255  #include <asm-generic/errno.h>
16257  #undef EDEADLOCK
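
The extra #undef before the include guards against a redefinition warning when some earlier header (a libc errno.h, for example) has already defined EDEADLOCK, after which the pre-existing #undef/#define pair restores powerpc's own value. The pattern in isolation:

    #undef  EDEADLOCK                /* drop any definition inherited so far */
    #include <asm-generic/errno.h>   /* the generic header defines EDEADLOCK too */

    #undef  EDEADLOCK                /* drop the generic value again ...         */
    #define EDEADLOCK 58             /* ... and restore powerpc's historical one */
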
16258 diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
16259 index cd60bc1c8701..7040e430a124 100644
16260 --- a/arch/powerpc/kernel/eeh.c
16261 +++ b/arch/powerpc/kernel/eeh.c
16262 @@ -362,14 +362,11 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
16263         pa = pte_pfn(*ptep);
16265         /* On radix we can do hugepage mappings for io, so handle that */
16266 -       if (hugepage_shift) {
16267 -               pa <<= hugepage_shift;
16268 -               pa |= token & ((1ul << hugepage_shift) - 1);
16269 -       } else {
16270 -               pa <<= PAGE_SHIFT;
16271 -               pa |= token & (PAGE_SIZE - 1);
16272 -       }
16273 +       if (!hugepage_shift)
16274 +               hugepage_shift = PAGE_SHIFT;
16276 +       pa <<= PAGE_SHIFT;
16277 +       pa |= token & ((1ul << hugepage_shift) - 1);
16278         return pa;
16281 diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
16282 index 8482739d42f3..eddf362caedc 100644
16283 --- a/arch/powerpc/kernel/fadump.c
16284 +++ b/arch/powerpc/kernel/fadump.c
16285 @@ -292,7 +292,7 @@ static void fadump_show_config(void)
16286   * that is required for a kernel to boot successfully.
16287   *
16288   */
16289 -static inline u64 fadump_calculate_reserve_size(void)
16290 +static __init u64 fadump_calculate_reserve_size(void)
16292         u64 base, size, bootmem_min;
16293         int ret;
16294 diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
16295 index 5d4706c14572..cf8ca08295bf 100644
16296 --- a/arch/powerpc/kernel/head_32.h
16297 +++ b/arch/powerpc/kernel/head_32.h
16298 @@ -261,11 +261,7 @@
16299         lis     r1, emergency_ctx@ha
16300  #endif
16301         lwz     r1, emergency_ctx@l(r1)
16302 -       cmpwi   cr1, r1, 0
16303 -       bne     cr1, 1f
16304 -       lis     r1, init_thread_union@ha
16305 -       addi    r1, r1, init_thread_union@l
16306 -1:     addi    r1, r1, THREAD_SIZE - INT_FRAME_SIZE
16307 +       addi    r1, r1, THREAD_SIZE - INT_FRAME_SIZE
16308         EXCEPTION_PROLOG_2
16309         SAVE_NVGPRS(r11)
16310         addi    r3, r1, STACK_FRAME_OVERHEAD
16311 diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
16312 index c475a229a42a..352346e14a08 100644
16313 --- a/arch/powerpc/kernel/interrupt.c
16314 +++ b/arch/powerpc/kernel/interrupt.c
16315 @@ -34,11 +34,11 @@ notrace long system_call_exception(long r3, long r4, long r5,
16316         if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
16317                 BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
16319 +       trace_hardirqs_off(); /* finish reconciling */
16321         CT_WARN_ON(ct_state() == CONTEXT_KERNEL);
16322         user_exit_irqoff();
16324 -       trace_hardirqs_off(); /* finish reconciling */
16326         if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x))
16327                 BUG_ON(!(regs->msr & MSR_RI));
16328         BUG_ON(!(regs->msr & MSR_PR));
16329 diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
16330 index c00214a4355c..4023f91defa6 100644
16331 --- a/arch/powerpc/kernel/iommu.c
16332 +++ b/arch/powerpc/kernel/iommu.c
16333 @@ -1096,7 +1096,7 @@ int iommu_take_ownership(struct iommu_table *tbl)
16335         spin_lock_irqsave(&tbl->large_pool.lock, flags);
16336         for (i = 0; i < tbl->nr_pools; i++)
16337 -               spin_lock(&tbl->pools[i].lock);
16338 +               spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
16340         iommu_table_release_pages(tbl);
16342 @@ -1124,7 +1124,7 @@ void iommu_release_ownership(struct iommu_table *tbl)
16344         spin_lock_irqsave(&tbl->large_pool.lock, flags);
16345         for (i = 0; i < tbl->nr_pools; i++)
16346 -               spin_lock(&tbl->pools[i].lock);
16347 +               spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
16349         memset(tbl->it_map, 0, sz);
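
spin_lock_nest_lock() is purely a lockdep annotation: taking many locks of the same class normally looks like a self-deadlock to lockdep, and the annotation records that each pool lock is acquired under the already-held large_pool lock. Schematically (kernel-style fragment, not stand-alone):

    spin_lock_irqsave(&tbl->large_pool.lock, flags);
    for (i = 0; i < tbl->nr_pools; i++)     /* same lock class, many instances */
            spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);

    /* ... table fully quiesced here ... */

    for (i = tbl->nr_pools - 1; i >= 0; i--)
            spin_unlock(&tbl->pools[i].lock);
    spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
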
16351 diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
16352 index 9a4797d1d40d..a8b2d6bfc1ca 100644
16353 --- a/arch/powerpc/kernel/prom.c
16354 +++ b/arch/powerpc/kernel/prom.c
16355 @@ -267,7 +267,7 @@ static struct feature_property {
16356  };
16358  #if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
16359 -static inline void identical_pvr_fixup(unsigned long node)
16360 +static __init void identical_pvr_fixup(unsigned long node)
16362         unsigned int pvr;
16363         const char *model = of_get_flat_dt_prop(node, "model", NULL);
16364 diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
16365 index 8ba49a6bf515..d7c1f92152af 100644
16366 --- a/arch/powerpc/kernel/setup_32.c
16367 +++ b/arch/powerpc/kernel/setup_32.c
16368 @@ -164,7 +164,7 @@ void __init irqstack_early_init(void)
16371  #ifdef CONFIG_VMAP_STACK
16372 -void *emergency_ctx[NR_CPUS] __ro_after_init;
16373 +void *emergency_ctx[NR_CPUS] __ro_after_init = {[0] = &init_stack};
16375  void __init emergency_stack_init(void)
16377 diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
16378 index 560ed8b975e7..c914fe8a2c67 100644
16379 --- a/arch/powerpc/kernel/setup_64.c
16380 +++ b/arch/powerpc/kernel/setup_64.c
16381 @@ -232,10 +232,23 @@ static void cpu_ready_for_interrupts(void)
16382          * If we are not in hypervisor mode the job is done once for
16383          * the whole partition in configure_exceptions().
16384          */
16385 -       if (cpu_has_feature(CPU_FTR_HVMODE) &&
16386 -           cpu_has_feature(CPU_FTR_ARCH_207S)) {
16387 +       if (cpu_has_feature(CPU_FTR_HVMODE)) {
16388                 unsigned long lpcr = mfspr(SPRN_LPCR);
16389 -               mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
16390 +               unsigned long new_lpcr = lpcr;
16392 +               if (cpu_has_feature(CPU_FTR_ARCH_31)) {
16393 +                       /* P10 DD1 does not have HAIL */
16394 +                       if (pvr_version_is(PVR_POWER10) &&
16395 +                                       (mfspr(SPRN_PVR) & 0xf00) == 0x100)
16396 +                               new_lpcr |= LPCR_AIL_3;
16397 +                       else
16398 +                               new_lpcr |= LPCR_HAIL;
16399 +               } else if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
16400 +                       new_lpcr |= LPCR_AIL_3;
16401 +               }
16403 +               if (new_lpcr != lpcr)
16404 +                       mtspr(SPRN_LPCR, new_lpcr);
16405         }
16407         /*
16408 @@ -356,11 +369,11 @@ void __init early_setup(unsigned long dt_ptr)
16409         apply_feature_fixups();
16410         setup_feature_keys();
16412 -       early_ioremap_setup();
16414         /* Initialize the hash table or TLB handling */
16415         early_init_mmu();
16417 +       early_ioremap_setup();
16419         /*
16420          * After firmware and early platform setup code has set things up,
16421          * we note the SPR values for configurable control/performance
16422 diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
16423 index 5a4d59a1070d..c2473e20f5f5 100644
16424 --- a/arch/powerpc/kernel/smp.c
16425 +++ b/arch/powerpc/kernel/smp.c
16426 @@ -1057,17 +1057,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
16427                                 local_memory_node(numa_cpu_lookup_table[cpu]));
16428                 }
16429  #endif
16430 -               /*
16431 -                * cpu_core_map is now more updated and exists only since
16432 -                * its been exported for long. It only will have a snapshot
16433 -                * of cpu_cpu_mask.
16434 -                */
16435 -               cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
16436         }
16438         /* Init the cpumasks so the boot CPU is related to itself */
16439         cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
16440         cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
16441 +       cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
16443         if (has_coregroup_support())
16444                 cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));
16445 @@ -1408,6 +1403,9 @@ static void remove_cpu_from_masks(int cpu)
16446                         set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
16447         }
16449 +       for_each_cpu(i, cpu_core_mask(cpu))
16450 +               set_cpus_unrelated(cpu, i, cpu_core_mask);
16452         if (has_coregroup_support()) {
16453                 for_each_cpu(i, cpu_coregroup_mask(cpu))
16454                         set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
16455 @@ -1468,8 +1466,11 @@ static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
16457  static void add_cpu_to_masks(int cpu)
16459 +       struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
16460         int first_thread = cpu_first_thread_sibling(cpu);
16461 +       int chip_id = cpu_to_chip_id(cpu);
16462         cpumask_var_t mask;
16463 +       bool ret;
16464         int i;
16466         /*
16467 @@ -1485,12 +1486,36 @@ static void add_cpu_to_masks(int cpu)
16468         add_cpu_to_smallcore_masks(cpu);
16470         /* In CPU-hotplug path, hence use GFP_ATOMIC */
16471 -       alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
16472 +       ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
16473         update_mask_by_l2(cpu, &mask);
16475         if (has_coregroup_support())
16476                 update_coregroup_mask(cpu, &mask);
16478 +       if (chip_id == -1 || !ret) {
16479 +               cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
16480 +               goto out;
16481 +       }
16483 +       if (shared_caches)
16484 +               submask_fn = cpu_l2_cache_mask;
16486 +       /* Update core_mask with all the CPUs that are part of submask */
16487 +       or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);
16489 +       /* Skip all CPUs already part of current CPU core mask */
16490 +       cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
16492 +       for_each_cpu(i, mask) {
16493 +               if (chip_id == cpu_to_chip_id(i)) {
16494 +                       or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
16495 +                       cpumask_andnot(mask, mask, submask_fn(i));
16496 +               } else {
16497 +                       cpumask_andnot(mask, mask, cpu_core_mask(i));
16498 +               }
16499 +       }
16501 +out:
16502         free_cpumask_var(mask);
16505 @@ -1521,6 +1546,9 @@ void start_secondary(void *unused)
16507         vdso_getcpu_init();
16508  #endif
16509 +       set_numa_node(numa_cpu_lookup_table[cpu]);
16510 +       set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
16512         /* Update topology CPU masks */
16513         add_cpu_to_masks(cpu);
16515 @@ -1539,9 +1567,6 @@ void start_secondary(void *unused)
16516                         shared_caches = true;
16517         }
16519 -       set_numa_node(numa_cpu_lookup_table[cpu]);
16520 -       set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
16522         smp_wmb();
16523         notify_cpu_starting(cpu);
16524         set_cpu_online(cpu, true);
16525 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
16526 index e839a906fdf2..b14907209822 100644
16527 --- a/arch/powerpc/kernel/vdso.c
16528 +++ b/arch/powerpc/kernel/vdso.c
16529 @@ -55,10 +55,10 @@ static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struc
16531         unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
16533 -       if (new_size != text_size + PAGE_SIZE)
16534 +       if (new_size != text_size)
16535                 return -EINVAL;
16537 -       current->mm->context.vdso = (void __user *)new_vma->vm_start + PAGE_SIZE;
16538 +       current->mm->context.vdso = (void __user *)new_vma->vm_start;
16540         return 0;
16542 @@ -73,6 +73,10 @@ static int vdso64_mremap(const struct vm_special_mapping *sm, struct vm_area_str
16543         return vdso_mremap(sm, new_vma, &vdso64_end - &vdso64_start);
16546 +static struct vm_special_mapping vvar_spec __ro_after_init = {
16547 +       .name = "[vvar]",
16550  static struct vm_special_mapping vdso32_spec __ro_after_init = {
16551         .name = "[vdso]",
16552         .mremap = vdso32_mremap,
16553 @@ -89,11 +93,11 @@ static struct vm_special_mapping vdso64_spec __ro_after_init = {
16554   */
16555  static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
16557 -       struct mm_struct *mm = current->mm;
16558 +       unsigned long vdso_size, vdso_base, mappings_size;
16559         struct vm_special_mapping *vdso_spec;
16560 +       unsigned long vvar_size = PAGE_SIZE;
16561 +       struct mm_struct *mm = current->mm;
16562         struct vm_area_struct *vma;
16563 -       unsigned long vdso_size;
16564 -       unsigned long vdso_base;
16566         if (is_32bit_task()) {
16567                 vdso_spec = &vdso32_spec;
16568 @@ -110,8 +114,8 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
16569                 vdso_base = 0;
16570         }
16572 -       /* Add a page to the vdso size for the data page */
16573 -       vdso_size += PAGE_SIZE;
16574 +       mappings_size = vdso_size + vvar_size;
16575 +       mappings_size += (VDSO_ALIGNMENT - 1) & PAGE_MASK;
16577         /*
16578          * pick a base address for the vDSO in process space. We try to put it
16579 @@ -119,9 +123,7 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
16580          * and end up putting it elsewhere.
16581          * Add enough to the size so that the result can be aligned.
16582          */
16583 -       vdso_base = get_unmapped_area(NULL, vdso_base,
16584 -                                     vdso_size + ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
16585 -                                     0, 0);
16586 +       vdso_base = get_unmapped_area(NULL, vdso_base, mappings_size, 0, 0);
16587         if (IS_ERR_VALUE(vdso_base))
16588                 return vdso_base;
16590 @@ -133,7 +135,13 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
16591          * install_special_mapping or the perf counter mmap tracking code
16592          * will fail to recognise it as a vDSO.
16593          */
16594 -       mm->context.vdso = (void __user *)vdso_base + PAGE_SIZE;
16595 +       mm->context.vdso = (void __user *)vdso_base + vvar_size;
16597 +       vma = _install_special_mapping(mm, vdso_base, vvar_size,
16598 +                                      VM_READ | VM_MAYREAD | VM_IO |
16599 +                                      VM_DONTDUMP | VM_PFNMAP, &vvar_spec);
16600 +       if (IS_ERR(vma))
16601 +               return PTR_ERR(vma);
16603         /*
16604          * our vma flags don't have VM_WRITE so by default, the process isn't
16605 @@ -145,9 +153,12 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
16606          * It's fine to use that for setting breakpoints in the vDSO code
16607          * pages though.
16608          */
16609 -       vma = _install_special_mapping(mm, vdso_base, vdso_size,
16610 +       vma = _install_special_mapping(mm, vdso_base + vvar_size, vdso_size,
16611                                        VM_READ | VM_EXEC | VM_MAYREAD |
16612                                        VM_MAYWRITE | VM_MAYEXEC, vdso_spec);
16613 +       if (IS_ERR(vma))
16614 +               do_munmap(mm, vdso_base, vvar_size, NULL);
16616         return PTR_ERR_OR_ZERO(vma);
16619 @@ -249,11 +260,22 @@ static struct page ** __init vdso_setup_pages(void *start, void *end)
16620         if (!pagelist)
16621                 panic("%s: Cannot allocate page list for VDSO", __func__);
16623 -       pagelist[0] = virt_to_page(vdso_data);
16625         for (i = 0; i < pages; i++)
16626 -               pagelist[i + 1] = virt_to_page(start + i * PAGE_SIZE);
16627 +               pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
16629 +       return pagelist;
16632 +static struct page ** __init vvar_setup_pages(void)
16634 +       struct page **pagelist;
16636 +       /* .pages is NULL-terminated */
16637 +       pagelist = kcalloc(2, sizeof(struct page *), GFP_KERNEL);
16638 +       if (!pagelist)
16639 +               panic("%s: Cannot allocate page list for VVAR", __func__);
16641 +       pagelist[0] = virt_to_page(vdso_data);
16642         return pagelist;
16645 @@ -295,6 +317,8 @@ static int __init vdso_init(void)
16646         if (IS_ENABLED(CONFIG_PPC64))
16647                 vdso64_spec.pages = vdso_setup_pages(&vdso64_start, &vdso64_end);
16649 +       vvar_spec.pages = vvar_setup_pages();
16651         smp_wmb();
16653         return 0;
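
The split works because a vm_special_mapping's .pages array is NULL-terminated (as the added comment notes): the vDSO text list no longer smuggles the data page in slot 0, and the new single-page [vvar] list needs two slots so the terminator survives. In stand-alone miniature:

    #include <stdlib.h>

    /* one payload entry plus a NULL terminator */
    static void **make_single_entry_list(void *page)
    {
            void **list = calloc(2, sizeof(*list)); /* calloc zeroes list[1] */

            if (list)
                    list[0] = page;
            return list;                            /* list[1] == NULL terminates */
    }
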
16654 diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
16655 index 02b9e4d0dc40..a8a7cb71086b 100644
16656 --- a/arch/powerpc/kexec/file_load_64.c
16657 +++ b/arch/powerpc/kexec/file_load_64.c
16658 @@ -960,6 +960,93 @@ unsigned int kexec_fdt_totalsize_ppc64(struct kimage *image)
16659         return fdt_size;
16662 +/**
16663 + * add_node_props - Reads node properties from a device node structure and adds
16664 + *                  them to fdt.
16665 + * @fdt:            Flattened device tree of the kernel
16666 + * @node_offset:    offset of the node to add a property at
16667 + * @dn:             device node pointer
16668 + *
16669 + * Returns 0 on success, negative errno on error.
16670 + */
16671 +static int add_node_props(void *fdt, int node_offset, const struct device_node *dn)
16673 +       int ret = 0;
16674 +       struct property *pp;
16676 +       if (!dn)
16677 +               return -EINVAL;
16679 +       for_each_property_of_node(dn, pp) {
16680 +               ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length);
16681 +               if (ret < 0) {
16682 +                       pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret));
16683 +                       return ret;
16684 +               }
16685 +       }
16686 +       return ret;
16689 +/**
16690 + * update_cpus_node - Update cpus node of flattened device tree using of_root
16691 + *                    device node.
16692 + * @fdt:              Flattened device tree of the kernel.
16693 + *
16694 + * Returns 0 on success, negative errno on error.
16695 + */
16696 +static int update_cpus_node(void *fdt)
16698 +       struct device_node *cpus_node, *dn;
16699 +       int cpus_offset, cpus_subnode_offset, ret = 0;
16701 +       cpus_offset = fdt_path_offset(fdt, "/cpus");
16702 +       if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) {
16703 +               pr_err("Malformed device tree: error reading /cpus node: %s\n",
16704 +                      fdt_strerror(cpus_offset));
16705 +               return cpus_offset;
16706 +       }
16708 +       if (cpus_offset > 0) {
16709 +               ret = fdt_del_node(fdt, cpus_offset);
16710 +               if (ret < 0) {
16711 +                       pr_err("Error deleting /cpus node: %s\n", fdt_strerror(ret));
16712 +                       return -EINVAL;
16713 +               }
16714 +       }
16716 +       /* Add cpus node to fdt */
16717 +       cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), "cpus");
16718 +       if (cpus_offset < 0) {
16719 +               pr_err("Error creating /cpus node: %s\n", fdt_strerror(cpus_offset));
16720 +               return -EINVAL;
16721 +       }
16723 +       /* Add cpus node properties */
16724 +       cpus_node = of_find_node_by_path("/cpus");
16725 +       ret = add_node_props(fdt, cpus_offset, cpus_node);
16726 +       of_node_put(cpus_node);
16727 +       if (ret < 0)
16728 +               return ret;
16730 +       /* Loop through all subnodes of cpus and add them to fdt */
16731 +       for_each_node_by_type(dn, "cpu") {
16732 +               cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name);
16733 +               if (cpus_subnode_offset < 0) {
16734 +                       pr_err("Unable to add %s subnode: %s\n", dn->full_name,
16735 +                              fdt_strerror(cpus_subnode_offset));
16736 +                       ret = cpus_subnode_offset;
16737 +                       goto out;
16738 +               }
16740 +               ret = add_node_props(fdt, cpus_subnode_offset, dn);
16741 +               if (ret < 0)
16742 +                       goto out;
16743 +       }
16744 +out:
16745 +       of_node_put(dn);
16746 +       return ret;
16749  /**
16750   * setup_new_fdt_ppc64 - Update the flattened device-tree of the kernel
16751   *                       being loaded.
16752 @@ -1020,6 +1107,11 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
16753                 }
16754         }
16756 +       /* Update the cpus node to account for hot-plugged CPUs. */
16757 +       ret =  update_cpus_node(fdt);
16758 +       if (ret < 0)
16759 +               goto out;
16761         /* Update memory reserve map */
16762         ret = get_reserved_memory_ranges(&rmem);
16763         if (ret)
16764 diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
16765 index e452158a18d7..c3e31fef0be1 100644
16766 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c
16767 +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
16768 @@ -8,6 +8,7 @@
16769   */
16771  #include <linux/kvm_host.h>
16772 +#include <linux/pkeys.h>
16774  #include <asm/kvm_ppc.h>
16775  #include <asm/kvm_book3s.h>
16776 @@ -133,6 +134,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
16777         else
16778                 kvmppc_mmu_flush_icache(pfn);
16780 +       rflags |= pte_to_hpte_pkey_bits(0, HPTE_USE_KERNEL_KEY);
16781         rflags = (rflags & ~HPTE_R_WIMG) | orig_pte->wimg;
16783         /*
16784 diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
16785 index 13bad6bf4c95..208a053c9adf 100644
16786 --- a/arch/powerpc/kvm/book3s_hv.c
16787 +++ b/arch/powerpc/kvm/book3s_hv.c
16788 @@ -3728,7 +3728,10 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
16789         vcpu->arch.dec_expires = dec + tb;
16790         vcpu->cpu = -1;
16791         vcpu->arch.thread_cpu = -1;
16792 +       /* Save guest CTRL register, set runlatch to 1 */
16793         vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
16794 +       if (!(vcpu->arch.ctrl & 1))
16795 +               mtspr(SPRN_CTRLT, vcpu->arch.ctrl | 1);
16797         vcpu->arch.iamr = mfspr(SPRN_IAMR);
16798         vcpu->arch.pspb = mfspr(SPRN_PSPB);
16799 diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
16800 index d4efc182662a..248f7c9e36fc 100644
16801 --- a/arch/powerpc/lib/Makefile
16802 +++ b/arch/powerpc/lib/Makefile
16803 @@ -5,6 +5,9 @@
16805  ccflags-$(CONFIG_PPC64)        := $(NO_MINIMAL_TOC)
16807 +CFLAGS_code-patching.o += -fno-stack-protector
16808 +CFLAGS_feature-fixups.o += -fno-stack-protector
16810  CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE)
16811  CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)
16813 diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
16814 index 1fd31b4b0e13..0aefa6a4a259 100644
16815 --- a/arch/powerpc/lib/feature-fixups.c
16816 +++ b/arch/powerpc/lib/feature-fixups.c
16817 @@ -14,6 +14,7 @@
16818  #include <linux/string.h>
16819  #include <linux/init.h>
16820  #include <linux/sched/mm.h>
16821 +#include <linux/stop_machine.h>
16822  #include <asm/cputable.h>
16823  #include <asm/code-patching.h>
16824  #include <asm/page.h>
16825 @@ -227,11 +228,25 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
16826                                                            : "unknown");
16829 +static int __do_stf_barrier_fixups(void *data)
16831 +       enum stf_barrier_type *types = data;
16833 +       do_stf_entry_barrier_fixups(*types);
16834 +       do_stf_exit_barrier_fixups(*types);
16836 +       return 0;
16839  void do_stf_barrier_fixups(enum stf_barrier_type types)
16841 -       do_stf_entry_barrier_fixups(types);
16842 -       do_stf_exit_barrier_fixups(types);
16843 +       /*
16844 +        * The call to the fallback entry flush, and the fallback/sync-ori exit
16845 +        * flush cannot be safely patched in/out while other CPUs are executing
16846 +        * them. So call __do_stf_barrier_fixups() on one CPU while all other CPUs
16847 +        * spin in the stop machine core with interrupts hard disabled.
16848 +        */
16849 +       stop_machine(__do_stf_barrier_fixups, &types, NULL);
16852  void do_uaccess_flush_fixups(enum l1d_flush_type types)
16853 @@ -284,8 +299,9 @@ void do_uaccess_flush_fixups(enum l1d_flush_type types)
16854                                                 : "unknown");
16857 -void do_entry_flush_fixups(enum l1d_flush_type types)
16858 +static int __do_entry_flush_fixups(void *data)
16860 +       enum l1d_flush_type types = *(enum l1d_flush_type *)data;
16861         unsigned int instrs[3], *dest;
16862         long *start, *end;
16863         int i;
16864 @@ -354,6 +370,19 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
16865                                                         : "ori type" :
16866                 (types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
16867                                                 : "unknown");
16869 +       return 0;
16872 +void do_entry_flush_fixups(enum l1d_flush_type types)
16874 +       /*
16875 +        * The call to the fallback flush cannot be safely patched in/out while
16876 +        * other CPUs are executing it. So call __do_entry_flush_fixups() on one
16877 +        * CPU while all other CPUs spin in the stop machine core with interrupts
16878 +        * hard disabled.
16879 +        */
16880 +       stop_machine(__do_entry_flush_fixups, &types, NULL);
16883  void do_rfi_flush_fixups(enum l1d_flush_type types)
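
Both fixups are converted to the same shape: the patching body moves into an int (*)(void *) callback and runs under stop_machine(), so no other CPU can be executing the instructions while they are rewritten. The pattern (kernel-style sketch; the NULL cpumask means "run on one CPU, park the rest"):

    static int __do_patch(void *data)
    {
            enum l1d_flush_type *types = data;

            /* all other CPUs spin with interrupts hard-disabled here */
            patch_instructions(*types);     /* illustrative helper */
            return 0;
    }

    void do_patch(enum l1d_flush_type types)
    {
            stop_machine(__do_patch, &types, NULL);
    }
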
16884 diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c
16885 index 567e0c6b3978..03819c259f0a 100644
16886 --- a/arch/powerpc/mm/book3s64/hash_pgtable.c
16887 +++ b/arch/powerpc/mm/book3s64/hash_pgtable.c
16888 @@ -428,12 +428,14 @@ static bool hash__change_memory_range(unsigned long start, unsigned long end,
16890  void hash__mark_rodata_ro(void)
16892 -       unsigned long start, end;
16893 +       unsigned long start, end, pp;
16895         start = (unsigned long)_stext;
16896         end = (unsigned long)__init_begin;
16898 -       WARN_ON(!hash__change_memory_range(start, end, PP_RXXX));
16899 +       pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL_ROX), HPTE_USE_KERNEL_KEY);
16901 +       WARN_ON(!hash__change_memory_range(start, end, pp));
16904  void hash__mark_initmem_nx(void)
16905 diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
16906 index 581b20a2feaf..12de1906e97b 100644
16907 --- a/arch/powerpc/mm/book3s64/hash_utils.c
16908 +++ b/arch/powerpc/mm/book3s64/hash_utils.c
16909 @@ -338,7 +338,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
16910  int htab_remove_mapping(unsigned long vstart, unsigned long vend,
16911                       int psize, int ssize)
16913 -       unsigned long vaddr;
16914 +       unsigned long vaddr, time_limit;
16915         unsigned int step, shift;
16916         int rc;
16917         int ret = 0;
16918 @@ -351,8 +351,19 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend,
16920         /* Unmap the full range specified */
16921         vaddr = ALIGN_DOWN(vstart, step);
16922 +       time_limit = jiffies + HZ;
16924         for (;vaddr < vend; vaddr += step) {
16925                 rc = mmu_hash_ops.hpte_removebolted(vaddr, psize, ssize);
16927 +               /*
16928 +                * For a large number of mappings, introduce a cond_resched()
16929 +                * to prevent softlockup warnings.
16930 +                */
16931 +               if (time_after(jiffies, time_limit)) {
16932 +                       cond_resched();
16933 +                       time_limit = jiffies + HZ;
16934 +               }
16935                 if (rc == -ENOENT) {
16936                         ret = -ENOENT;
16937                         continue;
16938 @@ -1545,10 +1556,10 @@ DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault)
16939         if (user_mode(regs) || (region_id == USER_REGION_ID))
16940                 access &= ~_PAGE_PRIVILEGED;
16942 -       if (regs->trap == 0x400)
16943 +       if (TRAP(regs) == 0x400)
16944                 access |= _PAGE_EXEC;
16946 -       err = hash_page_mm(mm, ea, access, regs->trap, flags);
16947 +       err = hash_page_mm(mm, ea, access, TRAP(regs), flags);
16948         if (unlikely(err < 0)) {
16949                 // failed to insert a hash PTE due to a hypervisor error
16950                 if (user_mode(regs)) {
16951 diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
16952 index 98f0b243c1ab..39d488a212a0 100644
16953 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c
16954 +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
16955 @@ -108,7 +108,7 @@ static int early_map_kernel_page(unsigned long ea, unsigned long pa,
16957  set_the_pte:
16958         set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
16959 -       smp_wmb();
16960 +       asm volatile("ptesync": : :"memory");
16961         return 0;
16964 @@ -168,7 +168,7 @@ static int __map_kernel_page(unsigned long ea, unsigned long pa,
16966  set_the_pte:
16967         set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
16968 -       smp_wmb();
16969 +       asm volatile("ptesync": : :"memory");
16970         return 0;
16973 diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
16974 index 4e8ce6d85232..7a59a5c9aa5d 100644
16975 --- a/arch/powerpc/mm/mem.c
16976 +++ b/arch/powerpc/mm/mem.c
16977 @@ -54,7 +54,6 @@
16979  #include <mm/mmu_decl.h>
16981 -static DEFINE_MUTEX(linear_mapping_mutex);
16982  unsigned long long memory_limit;
16983  bool init_mem_is_free;
16985 @@ -72,6 +71,7 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
16986  EXPORT_SYMBOL(phys_mem_access_prot);
16988  #ifdef CONFIG_MEMORY_HOTPLUG
16989 +static DEFINE_MUTEX(linear_mapping_mutex);
16991  #ifdef CONFIG_NUMA
16992  int memory_add_physaddr_to_nid(u64 start)
16993 diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
16994 index e4f577da33d8..8b5eeb6fb2fb 100644
16995 --- a/arch/powerpc/perf/isa207-common.c
16996 +++ b/arch/powerpc/perf/isa207-common.c
16997 @@ -447,8 +447,8 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp,
16998          * EBB events are pinned & exclusive, so this should never actually
16999          * hit, but we leave it as a fallback in case.
17000          */
17001 -       mask  |= CNST_EBB_VAL(ebb);
17002 -       value |= CNST_EBB_MASK;
17003 +       mask  |= CNST_EBB_MASK;
17004 +       value |= CNST_EBB_VAL(ebb);
17006         *maskp = mask;
17007         *valp = value;
17008 diff --git a/arch/powerpc/perf/power10-events-list.h b/arch/powerpc/perf/power10-events-list.h
17009 index e45dafe818ed..93be7197d250 100644
17010 --- a/arch/powerpc/perf/power10-events-list.h
17011 +++ b/arch/powerpc/perf/power10-events-list.h
17012 @@ -75,5 +75,5 @@ EVENT(PM_RUN_INST_CMPL_ALT,                   0x00002);
17013   *     thresh end (TE)
17014   */
17016 -EVENT(MEM_LOADS,                               0x34340401e0);
17017 -EVENT(MEM_STORES,                              0x343c0401e0);
17018 +EVENT(MEM_LOADS,                               0x35340401e0);
17019 +EVENT(MEM_STORES,                              0x353c0401e0);
17020 diff --git a/arch/powerpc/platforms/52xx/lite5200_sleep.S b/arch/powerpc/platforms/52xx/lite5200_sleep.S
17021 index 11475c58ea43..afee8b1515a8 100644
17022 --- a/arch/powerpc/platforms/52xx/lite5200_sleep.S
17023 +++ b/arch/powerpc/platforms/52xx/lite5200_sleep.S
17024 @@ -181,7 +181,7 @@ sram_code:
17025    udelay: /* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */
17026         mullw   r12, r12, r11
17027         mftb    r13     /* start */
17028 -       addi    r12, r13, r12 /* end */
17029 +       add     r12, r13, r12 /* end */
17030      1:
17031         mftb    r13     /* current */
17032         cmp     cr0, r13, r12
17033 diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
17034 index 019669eb21d2..4ab7c3ef5826 100644
17035 --- a/arch/powerpc/platforms/powernv/memtrace.c
17036 +++ b/arch/powerpc/platforms/powernv/memtrace.c
17037 @@ -88,8 +88,8 @@ static void memtrace_clear_range(unsigned long start_pfn,
17038          * Before we go ahead and use this range as cache inhibited range
17039          * flush the cache.
17040          */
17041 -       flush_dcache_range_chunked(PFN_PHYS(start_pfn),
17042 -                                  PFN_PHYS(start_pfn + nr_pages),
17043 +       flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn),
17044 +                                  (unsigned long)pfn_to_kaddr(start_pfn + nr_pages),
17045                                    FLUSH_CHUNK_SIZE);
17048 diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
17049 index 12cbffd3c2e3..325f3b220f36 100644
17050 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
17051 +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
17052 @@ -47,9 +47,6 @@ static void rtas_stop_self(void)
17054         BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
17056 -       printk("cpu %u (hwid %u) Ready to die...\n",
17057 -              smp_processor_id(), hard_smp_processor_id());
17059         rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);
17061         panic("Alas, I survived.\n");
17062 diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
17063 index 2136e42833af..8a2b8d64265b 100644
17064 --- a/arch/powerpc/platforms/pseries/hvCall.S
17065 +++ b/arch/powerpc/platforms/pseries/hvCall.S
17066 @@ -102,6 +102,16 @@ END_FTR_SECTION(0, 1);                                             \
17067  #define HCALL_BRANCH(LABEL)
17068  #endif
17070 +_GLOBAL_TOC(plpar_hcall_norets_notrace)
17071 +       HMT_MEDIUM
17073 +       mfcr    r0
17074 +       stw     r0,8(r1)
17075 +       HVSC                            /* invoke the hypervisor */
17076 +       lwz     r0,8(r1)
17077 +       mtcrf   0xff,r0
17078 +       blr                             /* return r3 = status */
17080  _GLOBAL_TOC(plpar_hcall_norets)
17081         HMT_MEDIUM
17083 diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
17084 index 9fc5217f0c8e..836cbbe0ecc5 100644
17085 --- a/arch/powerpc/platforms/pseries/iommu.c
17086 +++ b/arch/powerpc/platforms/pseries/iommu.c
17087 @@ -1229,7 +1229,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
17088         if (pmem_present) {
17089                 if (query.largest_available_block >=
17090                     (1ULL << (MAX_PHYSMEM_BITS - page_shift)))
17091 -                       len = MAX_PHYSMEM_BITS - page_shift;
17092 +                       len = MAX_PHYSMEM_BITS;
17093                 else
17094                         dev_info(&dev->dev, "Skipping ibm,pmemory");
17095         }
17096 diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
17097 index 3805519a6469..d4aa6a46e1fa 100644
17098 --- a/arch/powerpc/platforms/pseries/lpar.c
17099 +++ b/arch/powerpc/platforms/pseries/lpar.c
17100 @@ -977,11 +977,13 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
17101         slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
17102         BUG_ON(slot == -1);
17104 -       flags = newpp & 7;
17105 +       flags = newpp & (HPTE_R_PP | HPTE_R_N);
17106         if (mmu_has_feature(MMU_FTR_KERNEL_RO))
17107                 /* Move pp0 into bit 8 (IBM 55) */
17108                 flags |= (newpp & HPTE_R_PP0) >> 55;
17110 +       flags |= ((newpp & HPTE_R_KEY_HI) >> 48) | (newpp & HPTE_R_KEY_LO);
17112         lpar_rc = plpar_pte_protect(flags, slot, 0);
17114         BUG_ON(lpar_rc != H_SUCCESS);
17115 @@ -1828,8 +1830,7 @@ void hcall_tracepoint_unregfunc(void)
17117  /*
17118   * Since the tracing code might execute hcalls we need to guard against
17119 - * recursion. One example of this are spinlocks calling H_YIELD on
17120 - * shared processor partitions.
17121 + * recursion.
17122   */
17123  static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
17125 diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
17126 index f9ae17e8a0f4..a8f9140a24fa 100644
17127 --- a/arch/powerpc/platforms/pseries/pci_dlpar.c
17128 +++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
17129 @@ -50,6 +50,7 @@ EXPORT_SYMBOL_GPL(init_phb_dynamic);
17130  int remove_phb_dynamic(struct pci_controller *phb)
17132         struct pci_bus *b = phb->bus;
17133 +       struct pci_host_bridge *host_bridge = to_pci_host_bridge(b->bridge);
17134         struct resource *res;
17135         int rc, i;
17137 @@ -76,7 +77,8 @@ int remove_phb_dynamic(struct pci_controller *phb)
17138         /* Remove the PCI bus and unregister the bridge device from sysfs */
17139         phb->bus = NULL;
17140         pci_remove_bus(b);
17141 -       device_unregister(b->bridge);
17142 +       host_bridge->bus = NULL;
17143 +       device_unregister(&host_bridge->dev);
17145         /* Now release the IO resource */
17146         if (res->flags & IORESOURCE_IO)
17147 diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
17148 index 9cb4fc839fd5..429053d0402a 100644
17149 --- a/arch/powerpc/platforms/pseries/vio.c
17150 +++ b/arch/powerpc/platforms/pseries/vio.c
17151 @@ -1285,6 +1285,10 @@ static int vio_bus_remove(struct device *dev)
17152  int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
17153                           const char *mod_name)
17155 +       // vio_bus_type is only initialised for pseries
17156 +       if (!machine_is(pseries))
17157 +               return -ENODEV;
17159         pr_debug("%s: driver %s registering\n", __func__, viodrv->name);
17161         /* fill in 'struct driver' fields */
17162 diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
17163 index 595310e056f4..31b657c37735 100644
17164 --- a/arch/powerpc/sysdev/xive/common.c
17165 +++ b/arch/powerpc/sysdev/xive/common.c
17166 @@ -253,17 +253,20 @@ notrace void xmon_xive_do_dump(int cpu)
17167         xmon_printf("\n");
17170 +static struct irq_data *xive_get_irq_data(u32 hw_irq)
17172 +       unsigned int irq = irq_find_mapping(xive_irq_domain, hw_irq);
17174 +       return irq ? irq_get_irq_data(irq) : NULL;
17177  int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
17179 -       struct irq_chip *chip = irq_data_get_irq_chip(d);
17180         int rc;
17181         u32 target;
17182         u8 prio;
17183         u32 lirq;
17185 -       if (!is_xive_irq(chip))
17186 -               return -EINVAL;
17188         rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
17189         if (rc) {
17190                 xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
17191 @@ -273,6 +276,9 @@ int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
17192         xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
17193                     hw_irq, target, prio, lirq);
17195 +       if (!d)
17196 +               d = xive_get_irq_data(hw_irq);
17198         if (d) {
17199                 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
17200                 u64 val = xive_esb_read(xd, XIVE_ESB_GET);
17201 @@ -1335,17 +1341,14 @@ static int xive_prepare_cpu(unsigned int cpu)
17203         xc = per_cpu(xive_cpu, cpu);
17204         if (!xc) {
17205 -               struct device_node *np;
17207                 xc = kzalloc_node(sizeof(struct xive_cpu),
17208                                   GFP_KERNEL, cpu_to_node(cpu));
17209                 if (!xc)
17210                         return -ENOMEM;
17211 -               np = of_get_cpu_node(cpu, NULL);
17212 -               if (np)
17213 -                       xc->chip_id = of_get_ibm_chip_id(np);
17214 -               of_node_put(np);
17215                 xc->hw_ipi = XIVE_BAD_IRQ;
17216 +               xc->chip_id = XIVE_INVALID_CHIP_ID;
17217 +               if (xive_ops->prepare_cpu)
17218 +                       xive_ops->prepare_cpu(cpu, xc);
17220                 per_cpu(xive_cpu, cpu) = xc;
17221         }
17222 @@ -1599,6 +1602,8 @@ static void xive_debug_show_irq(struct seq_file *m, u32 hw_irq, struct irq_data
17223         u32 target;
17224         u8 prio;
17225         u32 lirq;
17226 +       struct xive_irq_data *xd;
17227 +       u64 val;
17229         if (!is_xive_irq(chip))
17230                 return;
17231 @@ -1612,17 +1617,14 @@ static void xive_debug_show_irq(struct seq_file *m, u32 hw_irq, struct irq_data
17232         seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
17233                    hw_irq, target, prio, lirq);
17235 -       if (d) {
17236 -               struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
17237 -               u64 val = xive_esb_read(xd, XIVE_ESB_GET);
17239 -               seq_printf(m, "flags=%c%c%c PQ=%c%c",
17240 -                          xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
17241 -                          xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
17242 -                          xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
17243 -                          val & XIVE_ESB_VAL_P ? 'P' : '-',
17244 -                          val & XIVE_ESB_VAL_Q ? 'Q' : '-');
17245 -       }
17246 +       xd = irq_data_get_irq_handler_data(d);
17247 +       val = xive_esb_read(xd, XIVE_ESB_GET);
17248 +       seq_printf(m, "flags=%c%c%c PQ=%c%c",
17249 +                  xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
17250 +                  xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
17251 +                  xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
17252 +                  val & XIVE_ESB_VAL_P ? 'P' : '-',
17253 +                  val & XIVE_ESB_VAL_Q ? 'Q' : '-');
17254         seq_puts(m, "\n");
17257 diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
17258 index 05a800a3104e..57e3f1540435 100644
17259 --- a/arch/powerpc/sysdev/xive/native.c
17260 +++ b/arch/powerpc/sysdev/xive/native.c
17261 @@ -380,6 +380,11 @@ static void xive_native_update_pending(struct xive_cpu *xc)
17262         }
17265 +static void xive_native_prepare_cpu(unsigned int cpu, struct xive_cpu *xc)
17267 +       xc->chip_id = cpu_to_chip_id(cpu);
17270  static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
17272         s64 rc;
17273 @@ -462,6 +467,7 @@ static const struct xive_ops xive_native_ops = {
17274         .match                  = xive_native_match,
17275         .shutdown               = xive_native_shutdown,
17276         .update_pending         = xive_native_update_pending,
17277 +       .prepare_cpu            = xive_native_prepare_cpu,
17278         .setup_cpu              = xive_native_setup_cpu,
17279         .teardown_cpu           = xive_native_teardown_cpu,
17280         .sync_source            = xive_native_sync_source,
17281 diff --git a/arch/powerpc/sysdev/xive/xive-internal.h b/arch/powerpc/sysdev/xive/xive-internal.h
17282 index 9cf57c722faa..6478be19b4d3 100644
17283 --- a/arch/powerpc/sysdev/xive/xive-internal.h
17284 +++ b/arch/powerpc/sysdev/xive/xive-internal.h
17285 @@ -46,6 +46,7 @@ struct xive_ops {
17286                                   u32 *sw_irq);
17287         int     (*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
17288         void    (*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
17289 +       void    (*prepare_cpu)(unsigned int cpu, struct xive_cpu *xc);
17290         void    (*setup_cpu)(unsigned int cpu, struct xive_cpu *xc);
17291         void    (*teardown_cpu)(unsigned int cpu, struct xive_cpu *xc);
17292         bool    (*match)(struct device_node *np);
17293 diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
17294 index 4515a10c5d22..d9522fc35ca5 100644
17295 --- a/arch/riscv/Kconfig
17296 +++ b/arch/riscv/Kconfig
17297 @@ -227,7 +227,7 @@ config ARCH_RV64I
17298         bool "RV64I"
17299         select 64BIT
17300         select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
17301 -       select HAVE_DYNAMIC_FTRACE if MMU
17302 +       select HAVE_DYNAMIC_FTRACE if MMU && $(cc-option,-fpatchable-function-entry=8)
17303         select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
17304         select HAVE_FTRACE_MCOUNT_RECORD
17305         select HAVE_FUNCTION_GRAPH_TRACER
17306 diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
17307 index 845002cc2e57..04dad3380041 100644
17308 --- a/arch/riscv/include/asm/ftrace.h
17309 +++ b/arch/riscv/include/asm/ftrace.h
17310 @@ -13,9 +13,19 @@
17311  #endif
17312  #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
17315 + * Clang prior to 13 had "mcount" instead of "_mcount":
17316 + * https://reviews.llvm.org/D98881
17317 + */
17318 +#if defined(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 130000
17319 +#define MCOUNT_NAME _mcount
17320 +#else
17321 +#define MCOUNT_NAME mcount
17322 +#endif
17324  #define ARCH_SUPPORTS_FTRACE_OPS 1
17325  #ifndef __ASSEMBLY__
17326 -void _mcount(void);
17327 +void MCOUNT_NAME(void);
17328  static inline unsigned long ftrace_call_adjust(unsigned long addr)
17330         return addr;
17331 @@ -36,7 +46,7 @@ struct dyn_arch_ftrace {
17332   * both auipc and jalr at the same time.
17333   */
17335 -#define MCOUNT_ADDR            ((unsigned long)_mcount)
17336 +#define MCOUNT_ADDR            ((unsigned long)MCOUNT_NAME)
17337  #define JALR_SIGN_MASK         (0x00000800)
17338  #define JALR_OFFSET_MASK       (0x00000fff)
17339  #define AUIPC_OFFSET_MASK      (0xfffff000)
17340 diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S
17341 index 8a5593ff9ff3..6d462681c9c0 100644
17342 --- a/arch/riscv/kernel/mcount.S
17343 +++ b/arch/riscv/kernel/mcount.S
17344 @@ -47,8 +47,8 @@
17346  ENTRY(ftrace_stub)
17347  #ifdef CONFIG_DYNAMIC_FTRACE
17348 -       .global _mcount
17349 -       .set    _mcount, ftrace_stub
17350 +       .global MCOUNT_NAME
17351 +       .set    MCOUNT_NAME, ftrace_stub
17352  #endif
17353         ret
17354  ENDPROC(ftrace_stub)
17355 @@ -78,7 +78,7 @@ ENDPROC(return_to_handler)
17356  #endif
17358  #ifndef CONFIG_DYNAMIC_FTRACE
17359 -ENTRY(_mcount)
17360 +ENTRY(MCOUNT_NAME)
17361         la      t4, ftrace_stub
17362  #ifdef CONFIG_FUNCTION_GRAPH_TRACER
17363         la      t0, ftrace_graph_return
17364 @@ -124,6 +124,6 @@ do_trace:
17365         jalr    t5
17366         RESTORE_ABI_STATE
17367         ret
17368 -ENDPROC(_mcount)
17369 +ENDPROC(MCOUNT_NAME)
17370  #endif
17371 -EXPORT_SYMBOL(_mcount)
17372 +EXPORT_SYMBOL(MCOUNT_NAME)
17373 diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
17374 index 7e2c78e2ca6b..d71f7c49a721 100644
17375 --- a/arch/riscv/kernel/probes/kprobes.c
17376 +++ b/arch/riscv/kernel/probes/kprobes.c
17377 @@ -260,8 +260,10 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
17379                 if (kcb->kprobe_status == KPROBE_REENTER)
17380                         restore_previous_kprobe(kcb);
17381 -               else
17382 +               else {
17383 +                       kprobes_restore_local_irqflag(kcb, regs);
17384                         reset_current_kprobe();
17385 +               }
17387                 break;
17388         case KPROBE_HIT_ACTIVE:
17389 diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
17390 index ea028d9e0d24..d44567490d91 100644
17391 --- a/arch/riscv/kernel/smp.c
17392 +++ b/arch/riscv/kernel/smp.c
17393 @@ -54,7 +54,7 @@ int riscv_hartid_to_cpuid(int hartid)
17394                         return i;
17396         pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
17397 -       return i;
17398 +       return -ENOENT;
17401  void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
17402 diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
17403 index 71a315e73cbe..ca2b40dfd24b 100644
17404 --- a/arch/riscv/kernel/vdso/Makefile
17405 +++ b/arch/riscv/kernel/vdso/Makefile
17406 @@ -41,11 +41,10 @@ KASAN_SANITIZE := n
17407  $(obj)/vdso.o: $(obj)/vdso.so
17409  # link rule for the .so file, .lds has to be first
17410 -SYSCFLAGS_vdso.so.dbg = $(c_flags)
17411  $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
17412         $(call if_changed,vdsold)
17413 -SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
17414 -       -Wl,--build-id=sha1 -Wl,--hash-style=both
17415 +LDFLAGS_vdso.so.dbg = -shared -s -soname=linux-vdso.so.1 \
17416 +       --build-id=sha1 --hash-style=both --eh-frame-hdr
17418  # We also create a special relocatable object that should mirror the symbol
17419  # table and layout of the linked DSO. With ld --just-symbols we can then
17420 @@ -60,13 +59,10 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
17422  # actual build commands
17423  # The DSO images are built using a special linker script
17424 -# Add -lgcc so rv32 gets static muldi3 and lshrdi3 definitions.
17425  # Make sure only to export the intended __vdso_xxx symbol offsets.
17426  quiet_cmd_vdsold = VDSOLD  $@
17427 -      cmd_vdsold = $(CC) $(KBUILD_CFLAGS) $(call cc-option, -no-pie) -nostdlib -nostartfiles $(SYSCFLAGS_$(@F)) \
17428 -                           -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp && \
17429 -                   $(CROSS_COMPILE)objcopy \
17430 -                           $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
17431 +      cmd_vdsold = $(LD) $(ld_flags) -T $(filter-out FORCE,$^) -o $@.tmp && \
17432 +                   $(OBJCOPY) $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
17433                     rm $@.tmp
17435  # Extracts symbol offsets from the VDSO, converting them into an assembly file
17436 diff --git a/arch/s390/crypto/arch_random.c b/arch/s390/crypto/arch_random.c
17437 index 7b947728d57e..56007c763902 100644
17438 --- a/arch/s390/crypto/arch_random.c
17439 +++ b/arch/s390/crypto/arch_random.c
17440 @@ -54,6 +54,10 @@ static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer);
17442  bool s390_arch_random_generate(u8 *buf, unsigned int nbytes)
17444 +       /* max hunk is ARCH_RNG_BUF_SIZE */
17445 +       if (nbytes > ARCH_RNG_BUF_SIZE)
17446 +               return false;
17448         /* lock rng buffer */
17449         if (!spin_trylock(&arch_rng_lock))
17450                 return false;
17451 diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
17452 index d9215c7106f0..8fc52679543d 100644
17453 --- a/arch/s390/include/asm/qdio.h
17454 +++ b/arch/s390/include/asm/qdio.h
17455 @@ -246,21 +246,8 @@ struct slsb {
17456         u8 val[QDIO_MAX_BUFFERS_PER_Q];
17457  } __attribute__ ((packed, aligned(256)));
17459 -/**
17460 - * struct qdio_outbuf_state - SBAL related asynchronous operation information
17461 - *   (for communication with upper layer programs)
17462 - *   (only required for use with completion queues)
17463 - * @user: pointer to upper layer program's state information related to SBAL
17464 - *        (stored in user1 data of QAOB)
17465 - */
17466 -struct qdio_outbuf_state {
17467 -       void *user;
17470 -#define CHSC_AC1_INITIATE_INPUTQ       0x80
17473  /* qdio adapter-characteristics-1 flag */
17474 +#define CHSC_AC1_INITIATE_INPUTQ       0x80
17475  #define AC1_SIGA_INPUT_NEEDED          0x40    /* process input queues */
17476  #define AC1_SIGA_OUTPUT_NEEDED         0x20    /* process output queues */
17477  #define AC1_SIGA_SYNC_NEEDED           0x10    /* ask hypervisor to sync */
17478 @@ -338,7 +325,6 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
17479   * @int_parm: interruption parameter
17480   * @input_sbal_addr_array:  per-queue array, each element points to 128 SBALs
17481   * @output_sbal_addr_array: per-queue array, each element points to 128 SBALs
17482 - * @output_sbal_state_array: no_output_qs * 128 state info (for CQ or NULL)
17483   */
17484  struct qdio_initialize {
17485         unsigned char q_format;
17486 @@ -357,7 +343,6 @@ struct qdio_initialize {
17487         unsigned long int_parm;
17488         struct qdio_buffer ***input_sbal_addr_array;
17489         struct qdio_buffer ***output_sbal_addr_array;
17490 -       struct qdio_outbuf_state *output_sbal_state_array;
17491  };
17493  #define QDIO_STATE_INACTIVE            0x00000002 /* after qdio_cleanup */
17494 @@ -378,9 +363,10 @@ extern int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
17495  extern int qdio_establish(struct ccw_device *cdev,
17496                           struct qdio_initialize *init_data);
17497  extern int qdio_activate(struct ccw_device *);
17498 +extern struct qaob *qdio_allocate_aob(void);
17499  extern void qdio_release_aob(struct qaob *);
17500 -extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int,
17501 -                  unsigned int);
17502 +extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags, int q_nr,
17503 +                  unsigned int bufnr, unsigned int count, struct qaob *aob);
17504  extern int qdio_start_irq(struct ccw_device *cdev);
17505  extern int qdio_stop_irq(struct ccw_device *cdev);
17506  extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
17507 diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
17508 index a7eab7be4db0..5412efe328f8 100644
17509 --- a/arch/s390/kernel/dis.c
17510 +++ b/arch/s390/kernel/dis.c
17511 @@ -563,7 +563,7 @@ void show_code(struct pt_regs *regs)
17513  void print_fn_code(unsigned char *code, unsigned long len)
17515 -       char buffer[64], *ptr;
17516 +       char buffer[128], *ptr;
17517         int opsize, i;
17519         while (len) {
17520 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
17521 index 72134f9f6ff5..5aab59ad5688 100644
17522 --- a/arch/s390/kernel/setup.c
17523 +++ b/arch/s390/kernel/setup.c
17524 @@ -937,9 +937,9 @@ static int __init setup_hwcaps(void)
17525         if (MACHINE_HAS_VX) {
17526                 elf_hwcap |= HWCAP_S390_VXRS;
17527                 if (test_facility(134))
17528 -                       elf_hwcap |= HWCAP_S390_VXRS_EXT;
17529 -               if (test_facility(135))
17530                         elf_hwcap |= HWCAP_S390_VXRS_BCD;
17531 +               if (test_facility(135))
17532 +                       elf_hwcap |= HWCAP_S390_VXRS_EXT;
17533                 if (test_facility(148))
17534                         elf_hwcap |= HWCAP_S390_VXRS_EXT2;
17535                 if (test_facility(152))
17536 diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
17537 index 6d6b57059493..b9f85b2dc053 100644
17538 --- a/arch/s390/kvm/gaccess.c
17539 +++ b/arch/s390/kvm/gaccess.c
17540 @@ -976,7 +976,9 @@ int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
17541   * kvm_s390_shadow_tables - walk the guest page table and create shadow tables
17542   * @sg: pointer to the shadow guest address space structure
17543   * @saddr: faulting address in the shadow gmap
17544 - * @pgt: pointer to the page table address result
17545 + * @pgt: pointer to the beginning of the page table for the given address if
17546 + *      successful (return value 0), or to the first invalid DAT entry in
17547 + *      case of exceptions (return value > 0)
17548   * @fake: pgt references contiguous guest memory block, not a pgtable
17549   */
17550  static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17551 @@ -1034,6 +1036,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17552                         rfte.val = ptr;
17553                         goto shadow_r2t;
17554                 }
17555 +               *pgt = ptr + vaddr.rfx * 8;
17556                 rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
17557                 if (rc)
17558                         return rc;
17559 @@ -1060,6 +1063,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17560                         rste.val = ptr;
17561                         goto shadow_r3t;
17562                 }
17563 +               *pgt = ptr + vaddr.rsx * 8;
17564                 rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
17565                 if (rc)
17566                         return rc;
17567 @@ -1087,6 +1091,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17568                         rtte.val = ptr;
17569                         goto shadow_sgt;
17570                 }
17571 +               *pgt = ptr + vaddr.rtx * 8;
17572                 rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
17573                 if (rc)
17574                         return rc;
17575 @@ -1123,6 +1128,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17576                         ste.val = ptr;
17577                         goto shadow_pgt;
17578                 }
17579 +               *pgt = ptr + vaddr.sx * 8;
17580                 rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
17581                 if (rc)
17582                         return rc;
17583 @@ -1157,6 +1163,8 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17584   * @vcpu: virtual cpu
17585   * @sg: pointer to the shadow guest address space structure
17586   * @saddr: faulting address in the shadow gmap
17587 + * @datptr: will contain the address of the faulting DAT table entry, or of
17588 + *         the valid leaf, plus some flags
17589   *
17590   * Returns: - 0 if the shadow fault was successfully resolved
17591   *         - > 0 (pgm exception code) on exceptions while faulting
17592 @@ -1165,11 +1173,11 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17593   *         - -ENOMEM if out of memory
17594   */
17595  int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
17596 -                         unsigned long saddr)
17597 +                         unsigned long saddr, unsigned long *datptr)
17599         union vaddress vaddr;
17600         union page_table_entry pte;
17601 -       unsigned long pgt;
17602 +       unsigned long pgt = 0;
17603         int dat_protection, fake;
17604         int rc;
17606 @@ -1191,8 +1199,20 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
17607                 pte.val = pgt + vaddr.px * PAGE_SIZE;
17608                 goto shadow_page;
17609         }
17610 -       if (!rc)
17611 -               rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
17613 +       switch (rc) {
17614 +       case PGM_SEGMENT_TRANSLATION:
17615 +       case PGM_REGION_THIRD_TRANS:
17616 +       case PGM_REGION_SECOND_TRANS:
17617 +       case PGM_REGION_FIRST_TRANS:
17618 +               pgt |= PEI_NOT_PTE;
17619 +               break;
17620 +       case 0:
17621 +               pgt += vaddr.px * 8;
17622 +               rc = gmap_read_table(sg->parent, pgt, &pte.val);
17623 +       }
17624 +       if (datptr)
17625 +               *datptr = pgt | dat_protection * PEI_DAT_PROT;
17626         if (!rc && pte.i)
17627                 rc = PGM_PAGE_TRANSLATION;
17628         if (!rc && pte.z)
17629 diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
17630 index f4c51756c462..7c72a5e3449f 100644
17631 --- a/arch/s390/kvm/gaccess.h
17632 +++ b/arch/s390/kvm/gaccess.h
17633 @@ -18,17 +18,14 @@
17635  /**
17636   * kvm_s390_real_to_abs - convert guest real address to guest absolute address
17637 - * @vcpu - guest virtual cpu
17638 + * @prefix - guest prefix
17639   * @gra - guest real address
17640   *
17641   * Returns the guest absolute address that corresponds to the passed guest real
17642 - * address @gra of a virtual guest cpu by applying its prefix.
17643 + * address @gra by applying the given prefix.
17644   */
17645 -static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
17646 -                                                unsigned long gra)
17647 +static inline unsigned long _kvm_s390_real_to_abs(u32 prefix, unsigned long gra)
17649 -       unsigned long prefix  = kvm_s390_get_prefix(vcpu);
17651         if (gra < 2 * PAGE_SIZE)
17652                 gra += prefix;
17653         else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
17654 @@ -36,6 +33,43 @@ static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
17655         return gra;
17658 +/**
17659 + * kvm_s390_real_to_abs - convert guest real address to guest absolute address
17660 + * @vcpu - guest virtual cpu
17661 + * @gra - guest real address
17662 + *
17663 + * Returns the guest absolute address that corresponds to the passed guest real
17664 + * address @gra of a virtual guest cpu by applying its prefix.
17665 + */
17666 +static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
17667 +                                                unsigned long gra)
17669 +       return _kvm_s390_real_to_abs(kvm_s390_get_prefix(vcpu), gra);
17672 +/**
17673 + * _kvm_s390_logical_to_effective - convert guest logical to effective address
17674 + * @psw: psw of the guest
17675 + * @ga: guest logical address
17676 + *
17677 + * Convert a guest logical address to an effective address by applying the
17678 + * rules of the addressing mode defined by bits 31 and 32 of the given PSW
17679 + * (extended/basic addressing mode).
17680 + *
17681 + * Depending on the addressing mode, the upper 40 bits (24 bit addressing
17682 + * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing
17683 + * mode) of @ga will be zeroed and the remaining bits will be returned.
17684 + */
17685 +static inline unsigned long _kvm_s390_logical_to_effective(psw_t *psw,
17686 +                                                          unsigned long ga)
17688 +       if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
17689 +               return ga;
17690 +       if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
17691 +               return ga & ((1UL << 31) - 1);
17692 +       return ga & ((1UL << 24) - 1);
17695  /**
17696   * kvm_s390_logical_to_effective - convert guest logical to effective address
17697   * @vcpu: guest virtual cpu
17698 @@ -52,13 +86,7 @@ static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
17699  static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu,
17700                                                           unsigned long ga)
17702 -       psw_t *psw = &vcpu->arch.sie_block->gpsw;
17704 -       if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
17705 -               return ga;
17706 -       if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
17707 -               return ga & ((1UL << 31) - 1);
17708 -       return ga & ((1UL << 24) - 1);
17709 +       return _kvm_s390_logical_to_effective(&vcpu->arch.sie_block->gpsw, ga);
17712  /*
17713 @@ -359,7 +387,11 @@ void ipte_unlock(struct kvm_vcpu *vcpu);
17714  int ipte_lock_held(struct kvm_vcpu *vcpu);
17715  int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
17717 +/* MVPG PEI indication bits */
17718 +#define PEI_DAT_PROT 2
17719 +#define PEI_NOT_PTE 4
17721  int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *shadow,
17722 -                         unsigned long saddr);
17723 +                         unsigned long saddr, unsigned long *datptr);
17725  #endif /* __KVM_S390_GACCESS_H */
17726 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
17727 index 2f09e9d7dc95..24ad447e648c 100644
17728 --- a/arch/s390/kvm/kvm-s390.c
17729 +++ b/arch/s390/kvm/kvm-s390.c
17730 @@ -4307,16 +4307,16 @@ static void store_regs_fmt2(struct kvm_vcpu *vcpu)
17731         kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
17732         kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
17733         if (MACHINE_HAS_GS) {
17734 +               preempt_disable();
17735                 __ctl_set_bit(2, 4);
17736                 if (vcpu->arch.gs_enabled)
17737                         save_gs_cb(current->thread.gs_cb);
17738 -               preempt_disable();
17739                 current->thread.gs_cb = vcpu->arch.host_gscb;
17740                 restore_gs_cb(vcpu->arch.host_gscb);
17741 -               preempt_enable();
17742                 if (!vcpu->arch.host_gscb)
17743                         __ctl_clear_bit(2, 4);
17744                 vcpu->arch.host_gscb = NULL;
17745 +               preempt_enable();
17746         }
17747         /* SIE will save etoken directly into SDNX and therefore kvm_run */
17749 diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
17750 index bd803e091918..4002a24bc43a 100644
17751 --- a/arch/s390/kvm/vsie.c
17752 +++ b/arch/s390/kvm/vsie.c
17753 @@ -417,11 +417,6 @@ static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
17754                 memcpy((void *)((u64)scb_o + 0xc0),
17755                        (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
17756                 break;
17757 -       case ICPT_PARTEXEC:
17758 -               /* MVPG only */
17759 -               memcpy((void *)((u64)scb_o + 0xc0),
17760 -                      (void *)((u64)scb_s + 0xc0), 0xd0 - 0xc0);
17761 -               break;
17762         }
17764         if (scb_s->ihcpu != 0xffffU)
17765 @@ -620,10 +615,10 @@ static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
17766         /* with mso/msl, the prefix lies at offset *mso* */
17767         prefix += scb_s->mso;
17769 -       rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
17770 +       rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix, NULL);
17771         if (!rc && (scb_s->ecb & ECB_TE))
17772                 rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
17773 -                                          prefix + PAGE_SIZE);
17774 +                                          prefix + PAGE_SIZE, NULL);
17775         /*
17776          * We don't have to mprotect, we will be called for all unshadows.
17777          * SIE will detect if protection applies and trigger a validity.
17778 @@ -914,7 +909,7 @@ static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
17779                                     current->thread.gmap_addr, 1);
17781         rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
17782 -                                  current->thread.gmap_addr);
17783 +                                  current->thread.gmap_addr, NULL);
17784         if (rc > 0) {
17785                 rc = inject_fault(vcpu, rc,
17786                                   current->thread.gmap_addr,
17787 @@ -936,7 +931,7 @@ static void handle_last_fault(struct kvm_vcpu *vcpu,
17789         if (vsie_page->fault_addr)
17790                 kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
17791 -                                     vsie_page->fault_addr);
17792 +                                     vsie_page->fault_addr, NULL);
17793         vsie_page->fault_addr = 0;
17796 @@ -983,6 +978,98 @@ static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
17797         return 0;
17801 + * Get a register for a nested guest.
17802 + * @vcpu the vcpu of the guest
17803 + * @vsie_page the vsie_page for the nested guest
17804 + * @reg the register number, the upper 4 bits are ignored.
17805 + * returns: the value of the register.
17806 + */
17807 +static u64 vsie_get_register(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, u8 reg)
17809 +       /* no need to validate the parameter and/or perform error handling */
17810 +       reg &= 0xf;
17811 +       switch (reg) {
17812 +       case 15:
17813 +               return vsie_page->scb_s.gg15;
17814 +       case 14:
17815 +               return vsie_page->scb_s.gg14;
17816 +       default:
17817 +               return vcpu->run->s.regs.gprs[reg];
17818 +       }
17821 +static int vsie_handle_mvpg(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
17823 +       struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
17824 +       unsigned long pei_dest, pei_src, src, dest, mask, prefix;
17825 +       u64 *pei_block = &vsie_page->scb_o->mcic;
17826 +       int edat, rc_dest, rc_src;
17827 +       union ctlreg0 cr0;
17829 +       cr0.val = vcpu->arch.sie_block->gcr[0];
17830 +       edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
17831 +       mask = _kvm_s390_logical_to_effective(&scb_s->gpsw, PAGE_MASK);
17832 +       prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
17834 +       dest = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 20) & mask;
17835 +       dest = _kvm_s390_real_to_abs(prefix, dest) + scb_s->mso;
17836 +       src = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 16) & mask;
17837 +       src = _kvm_s390_real_to_abs(prefix, src) + scb_s->mso;
17839 +       rc_dest = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, dest, &pei_dest);
17840 +       rc_src = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, src, &pei_src);
17841 +       /*
17842 +        * Either everything went well, or something non-critical went wrong
17843 +        * e.g. because of a race. In either case, simply retry.
17844 +        */
17845 +       if (rc_dest == -EAGAIN || rc_src == -EAGAIN || (!rc_dest && !rc_src)) {
17846 +               retry_vsie_icpt(vsie_page);
17847 +               return -EAGAIN;
17848 +       }
17849 +       /* Something more serious went wrong, propagate the error */
17850 +       if (rc_dest < 0)
17851 +               return rc_dest;
17852 +       if (rc_src < 0)
17853 +               return rc_src;
17855 +       /* The only possible suppressing exception: just deliver it */
17856 +       if (rc_dest == PGM_TRANSLATION_SPEC || rc_src == PGM_TRANSLATION_SPEC) {
17857 +               clear_vsie_icpt(vsie_page);
17858 +               rc_dest = kvm_s390_inject_program_int(vcpu, PGM_TRANSLATION_SPEC);
17859 +               WARN_ON_ONCE(rc_dest);
17860 +               return 1;
17861 +       }
17863 +       /*
17864 +        * Forward the PEI intercept to the guest if it was a page fault, or
17865 +        * also for segment and region table faults if EDAT applies.
17866 +        */
17867 +       if (edat) {
17868 +               rc_dest = rc_dest == PGM_ASCE_TYPE ? rc_dest : 0;
17869 +               rc_src = rc_src == PGM_ASCE_TYPE ? rc_src : 0;
17870 +       } else {
17871 +               rc_dest = rc_dest != PGM_PAGE_TRANSLATION ? rc_dest : 0;
17872 +               rc_src = rc_src != PGM_PAGE_TRANSLATION ? rc_src : 0;
17873 +       }
17874 +       if (!rc_dest && !rc_src) {
17875 +               pei_block[0] = pei_dest;
17876 +               pei_block[1] = pei_src;
17877 +               return 1;
17878 +       }
17880 +       retry_vsie_icpt(vsie_page);
17882 +       /*
17883 +        * The host has edat, and the guest does not, or it was an ASCE type
17884 +        * exception. The host needs to inject the appropriate DAT interrupts
17885 +        * into the guest.
17886 +        */
17887 +       if (rc_dest)
17888 +               return inject_fault(vcpu, rc_dest, dest, 1);
17889 +       return inject_fault(vcpu, rc_src, src, 0);
17892  /*
17893   * Run the vsie on a shadow scb and a shadow gmap, without any further
17894   * sanity checks, handling SIE faults.
17895 @@ -1071,6 +1158,10 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
17896                 if ((scb_s->ipa & 0xf000) != 0xf000)
17897                         scb_s->ipa += 0x1000;
17898                 break;
17899 +       case ICPT_PARTEXEC:
17900 +               if (scb_s->ipa == 0xb254)
17901 +                       rc = vsie_handle_mvpg(vcpu, vsie_page);
17902 +               break;
17903         }
17904         return rc;
17906 diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
17907 index f5beecdac693..e76b22157099 100644
17908 --- a/arch/sh/kernel/traps.c
17909 +++ b/arch/sh/kernel/traps.c
17910 @@ -180,7 +180,6 @@ static inline void arch_ftrace_nmi_exit(void) { }
17912  BUILD_TRAP_HANDLER(nmi)
17914 -       unsigned int cpu = smp_processor_id();
17915         TRAP_HANDLER_DECL;
17917         arch_ftrace_nmi_enter();
17918 diff --git a/arch/um/Kconfig.debug b/arch/um/Kconfig.debug
17919 index 315d368e63ad..1dfb2959c73b 100644
17920 --- a/arch/um/Kconfig.debug
17921 +++ b/arch/um/Kconfig.debug
17922 @@ -17,6 +17,7 @@ config GCOV
17923         bool "Enable gcov support"
17924         depends on DEBUG_INFO
17925         depends on !KCOV
17926 +       depends on !MODULES
17927         help
17928           This option allows developers to retrieve coverage data from a UML
17929           session.
17930 diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
17931 index 5aa882011e04..e698e0c7dbdc 100644
17932 --- a/arch/um/kernel/Makefile
17933 +++ b/arch/um/kernel/Makefile
17934 @@ -21,7 +21,6 @@ obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \
17936  obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
17937  obj-$(CONFIG_GPROF)    += gprof_syms.o
17938 -obj-$(CONFIG_GCOV)     += gmon_syms.o
17939  obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
17940  obj-$(CONFIG_STACKTRACE) += stacktrace.o
17942 diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
17943 index dacbfabf66d8..2f2a8ce92f1e 100644
17944 --- a/arch/um/kernel/dyn.lds.S
17945 +++ b/arch/um/kernel/dyn.lds.S
17946 @@ -6,6 +6,12 @@ OUTPUT_ARCH(ELF_ARCH)
17947  ENTRY(_start)
17948  jiffies = jiffies_64;
17950 +VERSION {
17951 +  {
17952 +    local: *;
17953 +  };
17956  SECTIONS
17958    PROVIDE (__executable_start = START);
17959 diff --git a/arch/um/kernel/gmon_syms.c b/arch/um/kernel/gmon_syms.c
17960 deleted file mode 100644
17961 index 9361a8eb9bf1..000000000000
17962 --- a/arch/um/kernel/gmon_syms.c
17963 +++ /dev/null
17964 @@ -1,16 +0,0 @@
17965 -// SPDX-License-Identifier: GPL-2.0
17967 - * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
17968 - */
17970 -#include <linux/module.h>
17972 -extern void __bb_init_func(void *)  __attribute__((weak));
17973 -EXPORT_SYMBOL(__bb_init_func);
17975 -extern void __gcov_init(void *)  __attribute__((weak));
17976 -EXPORT_SYMBOL(__gcov_init);
17977 -extern void __gcov_merge_add(void *, unsigned int)  __attribute__((weak));
17978 -EXPORT_SYMBOL(__gcov_merge_add);
17979 -extern void __gcov_exit(void)  __attribute__((weak));
17980 -EXPORT_SYMBOL(__gcov_exit);
17981 diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
17982 index 45d957d7004c..7a8e2b123e29 100644
17983 --- a/arch/um/kernel/uml.lds.S
17984 +++ b/arch/um/kernel/uml.lds.S
17985 @@ -7,6 +7,12 @@ OUTPUT_ARCH(ELF_ARCH)
17986  ENTRY(_start)
17987  jiffies = jiffies_64;
17989 +VERSION {
17990 +  {
17991 +    local: *;
17992 +  };
17995  SECTIONS
17997    /* This must contain the right address - not quite the default ELF one.*/
17998 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
17999 index 2792879d398e..ab2e8502c27c 100644
18000 --- a/arch/x86/Kconfig
18001 +++ b/arch/x86/Kconfig
18002 @@ -163,6 +163,7 @@ config X86
18003         select HAVE_ARCH_TRACEHOOK
18004         select HAVE_ARCH_TRANSPARENT_HUGEPAGE
18005         select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64
18006 +       select HAVE_ARCH_PARENT_PMD_YOUNG       if X86_64
18007         select HAVE_ARCH_USERFAULTFD_WP         if X86_64 && USERFAULTFD
18008         select HAVE_ARCH_VMAP_STACK             if X86_64
18009         select HAVE_ARCH_WITHIN_STACK_FRAMES
18010 @@ -571,6 +572,7 @@ config X86_UV
18011         depends on X86_EXTENDED_PLATFORM
18012         depends on NUMA
18013         depends on EFI
18014 +       depends on KEXEC_CORE
18015         depends on X86_X2APIC
18016         depends on PCI
18017         help
18018 @@ -1406,7 +1408,7 @@ config HIGHMEM4G
18020  config HIGHMEM64G
18021         bool "64GB"
18022 -       depends on !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !MWINCHIP3D && !MK6
18023 +       depends on !M486SX && !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !MWINCHIP3D && !MK6
18024         select X86_PAE
18025         help
18026           Select this if you have a 32-bit processor and more than 4
18027 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
18028 index 814fe0d349b0..872b9cf598e3 100644
18029 --- a/arch/x86/Kconfig.cpu
18030 +++ b/arch/x86/Kconfig.cpu
18031 @@ -157,7 +157,7 @@ config MPENTIUM4
18034  config MK6
18035 -       bool "K6/K6-II/K6-III"
18036 +       bool "AMD K6/K6-II/K6-III"
18037         depends on X86_32
18038         help
18039           Select this for an AMD K6-family processor.  Enables use of
18040 @@ -165,7 +165,7 @@ config MK6
18041           flags to GCC.
18043  config MK7
18044 -       bool "Athlon/Duron/K7"
18045 +       bool "AMD Athlon/Duron/K7"
18046         depends on X86_32
18047         help
18048           Select this for an AMD Athlon K7-family processor.  Enables use of
18049 @@ -173,12 +173,98 @@ config MK7
18050           flags to GCC.
18052  config MK8
18053 -       bool "Opteron/Athlon64/Hammer/K8"
18054 +       bool "AMD Opteron/Athlon64/Hammer/K8"
18055         help
18056           Select this for an AMD Opteron or Athlon64 Hammer-family processor.
18057           Enables use of some extended instructions, and passes appropriate
18058           optimization flags to GCC.
18060 +config MK8SSE3
18061 +       bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3"
18062 +       help
18063 +         Select this for improved AMD Opteron or Athlon64 Hammer-family processors.
18064 +         Enables use of some extended instructions, and passes appropriate
18065 +         optimization flags to GCC.
18067 +config MK10
18068 +       bool "AMD 61xx/7x50/PhenomX3/X4/II/K10"
18069 +       help
18070 +         Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50,
18071 +         Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
18072 +         Enables use of some extended instructions, and passes appropriate
18073 +         optimization flags to GCC.
18075 +config MBARCELONA
18076 +       bool "AMD Barcelona"
18077 +       help
18078 +         Select this for AMD Family 10h Barcelona processors.
18080 +         Enables -march=barcelona
18082 +config MBOBCAT
18083 +       bool "AMD Bobcat"
18084 +       help
18085 +         Select this for AMD Family 14h Bobcat processors.
18087 +         Enables -march=btver1
18089 +config MJAGUAR
18090 +       bool "AMD Jaguar"
18091 +       help
18092 +         Select this for AMD Family 16h Jaguar processors.
18094 +         Enables -march=btver2
18096 +config MBULLDOZER
18097 +       bool "AMD Bulldozer"
18098 +       help
18099 +         Select this for AMD Family 15h Bulldozer processors.
18101 +         Enables -march=bdver1
18103 +config MPILEDRIVER
18104 +       bool "AMD Piledriver"
18105 +       help
18106 +         Select this for AMD Family 15h Piledriver processors.
18108 +         Enables -march=bdver2
18110 +config MSTEAMROLLER
18111 +       bool "AMD Steamroller"
18112 +       help
18113 +         Select this for AMD Family 15h Steamroller processors.
18115 +         Enables -march=bdver3
18117 +config MEXCAVATOR
18118 +       bool "AMD Excavator"
18119 +       help
18120 +         Select this for AMD Family 15h Excavator processors.
18122 +         Enables -march=bdver4
18124 +config MZEN
18125 +       bool "AMD Zen"
18126 +       help
18127 +         Select this for AMD Family 17h Zen processors.
18129 +         Enables -march=znver1
18131 +config MZEN2
18132 +       bool "AMD Zen 2"
18133 +       help
18134 +         Select this for AMD Family 17h Zen 2 processors.
18136 +         Enables -march=znver2
18138 +config MZEN3
18139 +       bool "AMD Zen 3"
18140 +       depends on GCC_VERSION > 100300
18141 +       help
18142 +         Select this for AMD Family 19h Zen 3 processors.
18144 +         Enables -march=znver3
18146  config MCRUSOE
18147         bool "Crusoe"
18148         depends on X86_32
18149 @@ -270,7 +356,7 @@ config MPSC
18150           in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
18152  config MCORE2
18153 -       bool "Core 2/newer Xeon"
18154 +       bool "Intel Core 2"
18155         help
18157           Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
18158 @@ -278,6 +364,8 @@ config MCORE2
18159           family in /proc/cpuinfo. Newer ones have 6 and older ones 15
18160           (not a typo)
18162 +         Enables -march=core2
18164  config MATOM
18165         bool "Intel Atom"
18166         help
18167 @@ -287,6 +375,182 @@ config MATOM
18168           accordingly optimized code. Use a recent GCC with specific Atom
18169           support in order to fully benefit from selecting this option.
18171 +config MNEHALEM
18172 +       bool "Intel Nehalem"
18173 +       select X86_P6_NOP
18174 +       help
18176 +         Select this for 1st Gen Core processors in the Nehalem family.
18178 +         Enables -march=nehalem
18180 +config MWESTMERE
18181 +       bool "Intel Westmere"
18182 +       select X86_P6_NOP
18183 +       help
18185 +         Select this for the Intel Westmere (formerly Nehalem-C) family.
18187 +         Enables -march=westmere
18189 +config MSILVERMONT
18190 +       bool "Intel Silvermont"
18191 +       select X86_P6_NOP
18192 +       help
18194 +         Select this for the Intel Silvermont platform.
18196 +         Enables -march=silvermont
18198 +config MGOLDMONT
18199 +       bool "Intel Goldmont"
18200 +       select X86_P6_NOP
18201 +       help
18203 +         Select this for the Intel Goldmont platform including Apollo Lake and Denverton.
18205 +         Enables -march=goldmont
18207 +config MGOLDMONTPLUS
18208 +       bool "Intel Goldmont Plus"
18209 +       select X86_P6_NOP
18210 +       help
18212 +         Select this for the Intel Goldmont Plus platform including Gemini Lake.
18214 +         Enables -march=goldmont-plus
18216 +config MSANDYBRIDGE
18217 +       bool "Intel Sandy Bridge"
18218 +       select X86_P6_NOP
18219 +       help
18221 +         Select this for 2nd Gen Core processors in the Sandy Bridge family.
18223 +         Enables -march=sandybridge
18225 +config MIVYBRIDGE
18226 +       bool "Intel Ivy Bridge"
18227 +       select X86_P6_NOP
18228 +       help
18230 +         Select this for 3rd Gen Core processors in the Ivy Bridge family.
18232 +         Enables -march=ivybridge
18234 +config MHASWELL
18235 +       bool "Intel Haswell"
18236 +       select X86_P6_NOP
18237 +       help
18239 +         Select this for 4th Gen Core processors in the Haswell family.
18241 +         Enables -march=haswell
18243 +config MBROADWELL
18244 +       bool "Intel Broadwell"
18245 +       select X86_P6_NOP
18246 +       help
18248 +         Select this for 5th Gen Core processors in the Broadwell family.
18250 +         Enables -march=broadwell
18252 +config MSKYLAKE
18253 +       bool "Intel Skylake"
18254 +       select X86_P6_NOP
18255 +       help
18257 +         Select this for 6th Gen Core processors in the Skylake family.
18259 +         Enables -march=skylake
18261 +config MSKYLAKEX
18262 +       bool "Intel Skylake X"
18263 +       select X86_P6_NOP
18264 +       help
18266 +         Select this for 6th Gen Core processors in the Skylake X family.
18268 +         Enables -march=skylake-avx512
18270 +config MCANNONLAKE
18271 +       bool "Intel Cannon Lake"
18272 +       select X86_P6_NOP
18273 +       help
18275 +         Select this for 8th Gen Core processors.
18277 +         Enables -march=cannonlake
18279 +config MICELAKE
18280 +       bool "Intel Ice Lake"
18281 +       select X86_P6_NOP
18282 +       help
18284 +         Select this for 10th Gen Core processors in the Ice Lake family.
18286 +         Enables -march=icelake-client
18288 +config MCASCADELAKE
18289 +       bool "Intel Cascade Lake"
18290 +       select X86_P6_NOP
18291 +       help
18293 +         Select this for Xeon processors in the Cascade Lake family.
18295 +         Enables -march=cascadelake
18297 +config MCOOPERLAKE
18298 +       bool "Intel Cooper Lake"
18299 +       depends on GCC_VERSION > 100100
18300 +       select X86_P6_NOP
18301 +       help
18303 +         Select this for Xeon processors in the Cooper Lake family.
18305 +         Enables -march=cooperlake
18307 +config MTIGERLAKE
18308 +       bool "Intel Tiger Lake"
18309 +       depends on GCC_VERSION > 100100
18310 +       select X86_P6_NOP
18311 +       help
18313 +         Select this for third-generation 10 nm processors in the Tiger Lake family.
18315 +         Enables -march=tigerlake
18317 +config MSAPPHIRERAPIDS
18318 +       bool "Intel Sapphire Rapids"
18319 +       depends on GCC_VERSION > 110000
18320 +       select X86_P6_NOP
18321 +       help
18323 +         Select this for third-generation 10 nm processors in the Sapphire Rapids family.
18325 +         Enables -march=sapphirerapids
18327 +config MROCKETLAKE
18328 +       bool "Intel Rocket Lake"
18329 +       depends on GCC_VERSION > 110000
18330 +       select X86_P6_NOP
18331 +       help
18333 +         Select this for eleventh-generation processors in the Rocket Lake family.
18335 +         Enables -march=rocketlake
18337 +config MALDERLAKE
18338 +       bool "Intel Alder Lake"
18339 +       depends on GCC_VERSION > 110000
18340 +       select X86_P6_NOP
18341 +       help
18343 +         Select this for twelfth-generation processors in the Alder Lake family.
18345 +         Enables -march=alderlake
18347  config GENERIC_CPU
18348         bool "Generic-x86-64"
18349         depends on X86_64
18350 @@ -294,6 +558,50 @@ config GENERIC_CPU
18351           Generic x86-64 CPU.
18352           Run equally well on all x86-64 CPUs.
18354 +config GENERIC_CPU2
18355 +       bool "Generic-x86-64-v2"
18356 +       depends on GCC_VERSION > 110000
18357 +       depends on X86_64
18358 +       help
18359 +         Generic x86-64 CPU with v2 instructions.
18360 +         Run equally well on all x86-64 CPUs with at least x86-64-v2 support.
18362 +config GENERIC_CPU3
18363 +       bool "Generic-x86-64-v3"
18364 +       depends on GCC_VERSION > 110000
18365 +       depends on X86_64
18366 +       help
18367 +         Generic x86-64 CPU with v3 instructions.
18368 +         Run equally well on all x86-64 CPUs with at least x86-64-v3 support.
18370 +config GENERIC_CPU4
18371 +       bool "Generic-x86-64-v4"
18372 +       depends on GCC_VERSION > 110000
18373 +       depends on X86_64
18374 +       help
18375 +         Generic x86-64 CPU with v4 instructions.
18376 +         Run equally well on all x86-64 CPUs with at least x86-64-v4 support.
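
The three GENERIC_CPU levels above correspond to the x86-64 psABI
micro-architecture feature levels (v2 is roughly the SSE4.2/POPCNT era,
v3 the AVX2 era, v4 adds AVX-512). A standalone userspace check of
which level the running CPU satisfies; this is a sketch only, not part
of this patch, and it assumes GCC 12 or newer, which added the
"x86-64-v*" names to __builtin_cpu_supports():

#include <stdio.h>

int main(void)
{
	__builtin_cpu_init();
	printf("x86-64-v2: %s\n", __builtin_cpu_supports("x86-64-v2") ? "yes" : "no");
	printf("x86-64-v3: %s\n", __builtin_cpu_supports("x86-64-v3") ? "yes" : "no");
	printf("x86-64-v4: %s\n", __builtin_cpu_supports("x86-64-v4") ? "yes" : "no");
	return 0;
}

Picking the highest level the output reports gives a kernel that still
boots on that machine without tying it to a single -march target.
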
18378 +config MNATIVE_INTEL
18379 +       bool "Intel-Native optimizations autodetected by GCC"
18380 +       help
18382 +         GCC 4.2 and above support -march=native, which automatically detects
18383 +         the optimum settings to use based on your processor. Do NOT use this
18384 +         for AMD CPUs. Intel only!
18386 +         Enables -march=native
18388 +config MNATIVE_AMD
18389 +       bool "AMD-Native optimizations autodetected by GCC"
18390 +       help
18392 +         GCC 4.2 and above support -march=native, which automatically detects
18393 +         the optimum settings to use based on your processor. Do NOT use this
18394 +         for Intel CPUs. AMD only!
18396 +         Enables -march=native
18398  endchoice
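
Note that MNATIVE_INTEL and MNATIVE_AMD pass the identical
-march=native flag (see the Makefile hunk below); the vendor split
exists because Intel-only options such as X86_P6_NOP and
X86_INTEL_USERCOPY are gated on MNATIVE_INTEL and must not be enabled
on AMD parts. A quick vendor check before choosing, as an illustrative
sketch (not part of this patch) using GCC's __builtin_cpu_is() vendor
names:

#include <stdio.h>

int main(void)
{
	__builtin_cpu_init();
	if (__builtin_cpu_is("intel"))
		puts("select MNATIVE_INTEL");
	else if (__builtin_cpu_is("amd"))
		puts("select MNATIVE_AMD");
	else
		puts("select a generic CPU option");
	return 0;
}
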
18400  config X86_GENERIC
18401 @@ -318,7 +626,7 @@ config X86_INTERNODE_CACHE_SHIFT
18402  config X86_L1_CACHE_SHIFT
18403         int
18404         default "7" if MPENTIUM4 || MPSC
18405 -       default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
18406 +       default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD || X86_GENERIC || GENERIC_CPU || GENERIC_CPU2 || GENERIC_CPU3 || GENERIC_CPU4
18407         default "4" if MELAN || M486SX || M486 || MGEODEGX1
18408         default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
18410 @@ -336,11 +644,11 @@ config X86_ALIGNMENT_16
18412  config X86_INTEL_USERCOPY
18413         def_bool y
18414 -       depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
18415 +       depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL
18417  config X86_USE_PPRO_CHECKSUM
18418         def_bool y
18419 -       depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
18420 +       depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD
18422  config X86_USE_3DNOW
18423         def_bool y
18424 @@ -360,26 +668,26 @@ config X86_USE_3DNOW
18425  config X86_P6_NOP
18426         def_bool y
18427         depends on X86_64
18428 -       depends on (MCORE2 || MPENTIUM4 || MPSC)
18429 +       depends on (MCORE2 || MPENTIUM4 || MPSC || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL)
18431  config X86_TSC
18432         def_bool y
18433 -       depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
18434 +       depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD) || X86_64
18436  config X86_CMPXCHG64
18437         def_bool y
18438 -       depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8
18439 +       depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD
18441  # this should be set for all -march=.. options where the compiler
18442  # generates cmov.
18443  config X86_CMOV
18444         def_bool y
18445 -       depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
18446 +       depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD)
18448  config X86_MINIMUM_CPU_FAMILY
18449         int
18450         default "64" if X86_64
18451 -       default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8)
18452 +       default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8 ||  MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD)
18453         default "5" if X86_32 && X86_CMPXCHG64
18454         default "4"
18456 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
18457 index 9a85eae37b17..3d7b305bc301 100644
18458 --- a/arch/x86/Makefile
18459 +++ b/arch/x86/Makefile
18460 @@ -33,6 +33,7 @@ REALMODE_CFLAGS += -ffreestanding
18461  REALMODE_CFLAGS += -fno-stack-protector
18462  REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
18463  REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
18464 +REALMODE_CFLAGS += $(CLANG_FLAGS)
18465  export REALMODE_CFLAGS
18467  # BITS is used as extension for files which are available in a 32 bit
18468 @@ -113,11 +114,48 @@ else
18469          # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
18470          cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
18471          cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
18473 -        cflags-$(CONFIG_MCORE2) += \
18474 -                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
18475 -       cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
18476 -               $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
18477 +        cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3)
18478 +        cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
18479 +        cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
18480 +        cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
18481 +        cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2)
18482 +        cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1)
18483 +        cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2)
18484 +        cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-mno-tbm)
18485 +        cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-march=bdver3)
18486 +        cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-mno-tbm)
18487 +        cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4)
18488 +        cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-mno-tbm)
18489 +        cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1)
18490 +        cflags-$(CONFIG_MZEN2) += $(call cc-option,-march=znver2)
18491 +        cflags-$(CONFIG_MZEN3) += $(call cc-option,-march=znver3)
18493 +        cflags-$(CONFIG_MNATIVE_INTEL) += $(call cc-option,-march=native)
18494 +        cflags-$(CONFIG_MNATIVE_AMD) += $(call cc-option,-march=native)
18495 +        cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell)
18496 +        cflags-$(CONFIG_MCORE2) += $(call cc-option,-march=core2)
18497 +        cflags-$(CONFIG_MNEHALEM) += $(call cc-option,-march=nehalem)
18498 +        cflags-$(CONFIG_MWESTMERE) += $(call cc-option,-march=westmere)
18499 +        cflags-$(CONFIG_MSILVERMONT) += $(call cc-option,-march=silvermont)
18500 +        cflags-$(CONFIG_MGOLDMONT) += $(call cc-option,-march=goldmont)
18501 +        cflags-$(CONFIG_MGOLDMONTPLUS) += $(call cc-option,-march=goldmont-plus)
18502 +        cflags-$(CONFIG_MSANDYBRIDGE) += $(call cc-option,-march=sandybridge)
18503 +        cflags-$(CONFIG_MIVYBRIDGE) += $(call cc-option,-march=ivybridge)
18504 +        cflags-$(CONFIG_MHASWELL) += $(call cc-option,-march=haswell)
18505 +        cflags-$(CONFIG_MBROADWELL) += $(call cc-option,-march=broadwell)
18506 +        cflags-$(CONFIG_MSKYLAKE) += $(call cc-option,-march=skylake)
18507 +        cflags-$(CONFIG_MSKYLAKEX) += $(call cc-option,-march=skylake-avx512)
18508 +        cflags-$(CONFIG_MCANNONLAKE) += $(call cc-option,-march=cannonlake)
18509 +        cflags-$(CONFIG_MICELAKE) += $(call cc-option,-march=icelake-client)
18510 +        cflags-$(CONFIG_MCASCADELAKE) += $(call cc-option,-march=cascadelake)
18511 +        cflags-$(CONFIG_MCOOPERLAKE) += $(call cc-option,-march=cooperlake)
18512 +        cflags-$(CONFIG_MTIGERLAKE) += $(call cc-option,-march=tigerlake)
18513 +        cflags-$(CONFIG_MSAPPHIRERAPIDS) += $(call cc-option,-march=sapphirerapids)
18514 +        cflags-$(CONFIG_MROCKETLAKE) += $(call cc-option,-march=rocketlake)
18515 +        cflags-$(CONFIG_MALDERLAKE) += $(call cc-option,-march=alderlake)
18516 +        cflags-$(CONFIG_GENERIC_CPU2) += $(call cc-option,-march=x86-64-v2)
18517 +        cflags-$(CONFIG_GENERIC_CPU3) += $(call cc-option,-march=x86-64-v3)
18518 +        cflags-$(CONFIG_GENERIC_CPU4) += $(call cc-option,-march=x86-64-v4)
18519          cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
18520          KBUILD_CFLAGS += $(cflags-y)
18522 @@ -169,11 +207,6 @@ ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
18523         KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,)
18524  endif
18526 -ifdef CONFIG_LTO_CLANG
18527 -KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \
18528 -                  -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
18529 -endif
18531  # Workaround for a gcc prelease that unfortunately was shipped in a suse release
18532  KBUILD_CFLAGS += -Wno-sign-compare
18534 @@ -193,7 +226,12 @@ ifdef CONFIG_RETPOLINE
18535    endif
18536  endif
18538 -KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE)
18539 +KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)
18541 +ifdef CONFIG_LTO_CLANG
18542 +KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \
18543 +                  -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
18544 +endif
18546  ifdef CONFIG_X86_NEED_RELOCS
18547  LDFLAGS_vmlinux := --emit-relocs --discard-none
18548 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
18549 index e0bc3988c3fa..6e5522aebbbd 100644
18550 --- a/arch/x86/boot/compressed/Makefile
18551 +++ b/arch/x86/boot/compressed/Makefile
18552 @@ -46,6 +46,7 @@ KBUILD_CFLAGS += -D__DISABLE_EXPORTS
18553  # Disable relocation relaxation in case the link is not PIE.
18554  KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
18555  KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h
18556 +KBUILD_CFLAGS += $(CLANG_FLAGS)
18558  # sev-es.c indirectly includes inat-table.h which is generated during
18559  # compilation and stored in $(objtree). Add the directory to the includes so
18560 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
18561 index e94874f4bbc1..ae1fe558a2d8 100644
18562 --- a/arch/x86/boot/compressed/head_64.S
18563 +++ b/arch/x86/boot/compressed/head_64.S
18564 @@ -172,11 +172,21 @@ SYM_FUNC_START(startup_32)
18565          */
18566         call    get_sev_encryption_bit
18567         xorl    %edx, %edx
18568 +#ifdef CONFIG_AMD_MEM_ENCRYPT
18569         testl   %eax, %eax
18570         jz      1f
18571         subl    $32, %eax       /* Encryption bit is always above bit 31 */
18572         bts     %eax, %edx      /* Set encryption mask for page tables */
18573 +       /*
18574 +        * Mark SEV as active in sev_status so that startup32_check_sev_cbit()
18575 +        * will do a check. The sev_status memory will be fully initialized
18576 +        * with the contents of MSR_AMD_SEV_STATUS later in
18577 +        * set_sev_encryption_mask(). For now it is sufficient to know that SEV
18578 +        * is active.
18579 +        */
18580 +       movl    $1, rva(sev_status)(%ebp)
18581  1:
18582 +#endif
18584         /* Initialize Page tables to 0 */
18585         leal    rva(pgtable)(%ebx), %edi
18586 @@ -261,6 +271,9 @@ SYM_FUNC_START(startup_32)
18587         movl    %esi, %edx
18588  1:
18589  #endif
18590 +       /* Check if the C-bit position is correct when SEV is active */
18591 +       call    startup32_check_sev_cbit
18593         pushl   $__KERNEL_CS
18594         pushl   %eax
18596 @@ -786,6 +799,78 @@ SYM_DATA_START_LOCAL(loaded_image_proto)
18597  SYM_DATA_END(loaded_image_proto)
18598  #endif
18601 + * Check for the correct C-bit position when the startup_32 boot-path is used.
18602 + *
18603 + * The check makes use of the fact that all memory is encrypted when paging is
18604 + * disabled. The function creates 64 bits of random data using the RDRAND
18605 + * instruction. RDRAND is mandatory for SEV guests, so always available. If the
18606 + * hypervisor violates that the kernel will crash right here.
18607 + *
18608 + * The 64 bits of random data are stored to a memory location and at the same
18609 + * time kept in the %eax and %ebx registers. Since encryption is always active
18610 + * when paging is off the random data will be stored encrypted in main memory.
18611 + *
18612 + * Then paging is enabled. When the C-bit position is correct all memory is
18613 + * still mapped encrypted and comparing the register values with memory will
18614 + * succeed. An incorrect C-bit position will map all memory unencrypted, so that
18615 + * the compare will use the encrypted random data and fail.
18616 + */
18617 +       __HEAD
18618 +       .code32
18619 +SYM_FUNC_START(startup32_check_sev_cbit)
18620 +#ifdef CONFIG_AMD_MEM_ENCRYPT
18621 +       pushl   %eax
18622 +       pushl   %ebx
18623 +       pushl   %ecx
18624 +       pushl   %edx
18626 +       /* Check for non-zero sev_status */
18627 +       movl    rva(sev_status)(%ebp), %eax
18628 +       testl   %eax, %eax
18629 +       jz      4f
18631 +       /*
18632 +        * Get two 32-bit random values - Don't bail out if RDRAND fails
18633 +        * because it is better to prevent forward progress if no random value
18634 +        * can be gathered.
18635 +        */
18636 +1:     rdrand  %eax
18637 +       jnc     1b
18638 +2:     rdrand  %ebx
18639 +       jnc     2b
18641 +       /* Store to memory and keep it in the registers */
18642 +       movl    %eax, rva(sev_check_data)(%ebp)
18643 +       movl    %ebx, rva(sev_check_data+4)(%ebp)
18645 +       /* Enable paging to see if encryption is active */
18646 +       movl    %cr0, %edx                       /* Backup %cr0 in %edx */
18647 +       movl    $(X86_CR0_PG | X86_CR0_PE), %ecx /* Enable Paging and Protected mode */
18648 +       movl    %ecx, %cr0
18650 +       cmpl    %eax, rva(sev_check_data)(%ebp)
18651 +       jne     3f
18652 +       cmpl    %ebx, rva(sev_check_data+4)(%ebp)
18653 +       jne     3f
18655 +       movl    %edx, %cr0      /* Restore previous %cr0 */
18657 +       jmp     4f
18659 +3:     /* Check failed - hlt the machine */
18660 +       hlt
18661 +       jmp     3b
18664 +       popl    %edx
18665 +       popl    %ecx
18666 +       popl    %ebx
18667 +       popl    %eax
18668 +#endif
18669 +       ret
18670 +SYM_FUNC_END(startup32_check_sev_cbit)
18672  /*
18673   * Stack and heap for uncompression
18674   */
18675 diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
18676 index aa561795efd1..a6dea4e8a082 100644
18677 --- a/arch/x86/boot/compressed/mem_encrypt.S
18678 +++ b/arch/x86/boot/compressed/mem_encrypt.S
18679 @@ -23,12 +23,6 @@ SYM_FUNC_START(get_sev_encryption_bit)
18680         push    %ecx
18681         push    %edx
18683 -       /* Check if running under a hypervisor */
18684 -       movl    $1, %eax
18685 -       cpuid
18686 -       bt      $31, %ecx               /* Check the hypervisor bit */
18687 -       jnc     .Lno_sev
18689         movl    $0x80000000, %eax       /* CPUID to check the highest leaf */
18690         cpuid
18691         cmpl    $0x8000001f, %eax       /* See if 0x8000001f is available */
18692 diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
18693 index 646da46e8d10..1dfb8af48a3c 100644
18694 --- a/arch/x86/crypto/poly1305_glue.c
18695 +++ b/arch/x86/crypto/poly1305_glue.c
18696 @@ -16,7 +16,7 @@
18697  #include <asm/simd.h>
18699  asmlinkage void poly1305_init_x86_64(void *ctx,
18700 -                                    const u8 key[POLY1305_KEY_SIZE]);
18701 +                                    const u8 key[POLY1305_BLOCK_SIZE]);
18702  asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp,
18703                                        const size_t len, const u32 padbit);
18704  asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
18705 @@ -81,7 +81,7 @@ static void convert_to_base2_64(void *ctx)
18706         state->is_base2_26 = 0;
18709 -static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_KEY_SIZE])
18710 +static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_BLOCK_SIZE])
18712         poly1305_init_x86_64(ctx, key);
18714 @@ -129,7 +129,7 @@ static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
18715                 poly1305_emit_avx(ctx, mac, nonce);
18718 -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
18719 +void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
18721         poly1305_simd_init(&dctx->h, key);
18722         dctx->s[0] = get_unaligned_le32(&key[16]);
18723 diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
18724 index a1c9f496fca6..4d0111f44d79 100644
18725 --- a/arch/x86/entry/syscalls/syscall_32.tbl
18726 +++ b/arch/x86/entry/syscalls/syscall_32.tbl
18727 @@ -447,3 +447,7 @@
18728  440    i386    process_madvise         sys_process_madvise
18729  441    i386    epoll_pwait2            sys_epoll_pwait2                compat_sys_epoll_pwait2
18730  442    i386    mount_setattr           sys_mount_setattr
18731 +443    i386    futex_wait              sys_futex_wait
18732 +444    i386    futex_wake              sys_futex_wake
18733 +445    i386    futex_waitv             sys_futex_waitv                 compat_sys_futex_waitv
18734 +446    i386    futex_requeue           sys_futex_requeue               compat_sys_futex_requeue
18735 diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
18736 index 7bf01cbe582f..61c0b47365e3 100644
18737 --- a/arch/x86/entry/syscalls/syscall_64.tbl
18738 +++ b/arch/x86/entry/syscalls/syscall_64.tbl
18739 @@ -364,6 +364,10 @@
18740  440    common  process_madvise         sys_process_madvise
18741  441    common  epoll_pwait2            sys_epoll_pwait2
18742  442    common  mount_setattr           sys_mount_setattr
18743 +443    common  futex_wait              sys_futex_wait
18744 +444    common  futex_wake              sys_futex_wake
18745 +445    common  futex_waitv             sys_futex_waitv
18746 +446    common  futex_requeue           sys_futex_requeue
18749  # Due to a historical design error, certain syscalls are numbered differently
18750 diff --git a/arch/x86/entry/vdso/vdso2c.h b/arch/x86/entry/vdso/vdso2c.h
18751 index 1c7cfac7e64a..5264daa8859f 100644
18752 --- a/arch/x86/entry/vdso/vdso2c.h
18753 +++ b/arch/x86/entry/vdso/vdso2c.h
18754 @@ -35,7 +35,7 @@ static void BITSFUNC(extract)(const unsigned char *data, size_t data_len,
18755         if (offset + len > data_len)
18756                 fail("section to extract overruns input data");
18758 -       fprintf(outfile, "static const unsigned char %s[%lu] = {", name, len);
18759 +       fprintf(outfile, "static const unsigned char %s[%zu] = {", name, len);
18760         BITSFUNC(copy)(outfile, data + offset, len);
18761         fprintf(outfile, "\n};\n\n");
18763 diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
18764 index be50ef8572cc..6a98a7651621 100644
18765 --- a/arch/x86/events/amd/iommu.c
18766 +++ b/arch/x86/events/amd/iommu.c
18767 @@ -81,12 +81,12 @@ static struct attribute_group amd_iommu_events_group = {
18768  };
18770  struct amd_iommu_event_desc {
18771 -       struct kobj_attribute attr;
18772 +       struct device_attribute attr;
18773         const char *event;
18774  };
18776 -static ssize_t _iommu_event_show(struct kobject *kobj,
18777 -                               struct kobj_attribute *attr, char *buf)
18778 +static ssize_t _iommu_event_show(struct device *dev,
18779 +                               struct device_attribute *attr, char *buf)
18781         struct amd_iommu_event_desc *event =
18782                 container_of(attr, struct amd_iommu_event_desc, attr);
18783 diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
18784 index 7f014d450bc2..582c0ffb5e98 100644
18785 --- a/arch/x86/events/amd/uncore.c
18786 +++ b/arch/x86/events/amd/uncore.c
18787 @@ -275,14 +275,14 @@ static struct attribute_group amd_uncore_attr_group = {
18788  };
18790  #define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)                        \
18791 -static ssize_t __uncore_##_var##_show(struct kobject *kobj,            \
18792 -                               struct kobj_attribute *attr,            \
18793 +static ssize_t __uncore_##_var##_show(struct device *dev,              \
18794 +                               struct device_attribute *attr,          \
18795                                 char *page)                             \
18796  {                                                                      \
18797         BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
18798         return sprintf(page, _format "\n");                             \
18799  }                                                                      \
18800 -static struct kobj_attribute format_attr_##_var =                      \
18801 +static struct device_attribute format_attr_##_var =                    \
18802         __ATTR(_name, 0444, __uncore_##_var##_show, NULL)
18804  DEFINE_UNCORE_FORMAT_ATTR(event12,     event,          "config:0-7,32-35");
18805 diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
18806 index c57ec8e27907..4c18e7fb58f5 100644
18807 --- a/arch/x86/events/intel/core.c
18808 +++ b/arch/x86/events/intel/core.c
18809 @@ -5741,7 +5741,7 @@ __init int intel_pmu_init(void)
18810          * Check all LBR MSRs here.
18811          * Disable LBR access if any LBR MSRs can not be accessed.
18812          */
18813 -       if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
18814 +       if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
18815                 x86_pmu.lbr_nr = 0;
18816         for (i = 0; i < x86_pmu.lbr_nr; i++) {
18817                 if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
18818 diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
18819 index 5eb3bdf36a41..06b0789d61b9 100644
18820 --- a/arch/x86/include/asm/idtentry.h
18821 +++ b/arch/x86/include/asm/idtentry.h
18822 @@ -588,6 +588,21 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_MC,  xenpv_exc_machine_check);
18823  #endif
18825  /* NMI */
18827 +#if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL)
18829 + * Special NOIST entry point for VMX which invokes this on the kernel
18830 + * stack. asm_exc_nmi() requires an IST to work correctly vs. the NMI
18831 + * 'executing' marker.
18832 + *
18833 + * On 32bit this just uses the regular NMI entry point because 32-bit does
18834 + * not have ISTs.
18835 + */
18836 +DECLARE_IDTENTRY(X86_TRAP_NMI,         exc_nmi_noist);
18837 +#else
18838 +#define asm_exc_nmi_noist              asm_exc_nmi
18839 +#endif
18841  DECLARE_IDTENTRY_NMI(X86_TRAP_NMI,     exc_nmi);
18842  #ifdef CONFIG_XEN_PV
18843  DECLARE_IDTENTRY_RAW(X86_TRAP_NMI,     xenpv_exc_nmi);
18844 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
18845 index 3768819693e5..eec2dcca2f39 100644
18846 --- a/arch/x86/include/asm/kvm_host.h
18847 +++ b/arch/x86/include/asm/kvm_host.h
18848 @@ -1753,6 +1753,7 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
18849                     unsigned long icr, int op_64_bit);
18851  void kvm_define_user_return_msr(unsigned index, u32 msr);
18852 +int kvm_probe_user_return_msr(u32 msr);
18853  int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
18855  u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
18856 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
18857 index a02c67291cfc..a6b5cfe1fc5a 100644
18858 --- a/arch/x86/include/asm/pgtable.h
18859 +++ b/arch/x86/include/asm/pgtable.h
18860 @@ -846,7 +846,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
18862  static inline int pmd_bad(pmd_t pmd)
18864 -       return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
18865 +       return ((pmd_flags(pmd) | _PAGE_ACCESSED) & ~_PAGE_USER) != _KERNPG_TABLE;
18868  static inline unsigned long pages_to_mb(unsigned long npg)
18869 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18870 index f1b9ed5efaa9..908bcaea1361 100644
18871 --- a/arch/x86/include/asm/processor.h
18872 +++ b/arch/x86/include/asm/processor.h
18873 @@ -804,8 +804,10 @@ DECLARE_PER_CPU(u64, msr_misc_features_shadow);
18875  #ifdef CONFIG_CPU_SUP_AMD
18876  extern u32 amd_get_nodes_per_socket(void);
18877 +extern u32 amd_get_highest_perf(void);
18878  #else
18879  static inline u32 amd_get_nodes_per_socket(void)       { return 0; }
18880 +static inline u32 amd_get_highest_perf(void)           { return 0; }
18881  #endif
18883  static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18884 diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h
18885 index 75884d2cdec3..4e6a08d4c7e5 100644
18886 --- a/arch/x86/include/asm/vermagic.h
18887 +++ b/arch/x86/include/asm/vermagic.h
18888 @@ -17,6 +17,48 @@
18889  #define MODULE_PROC_FAMILY "586MMX "
18890  #elif defined CONFIG_MCORE2
18891  #define MODULE_PROC_FAMILY "CORE2 "
18892 +#elif defined CONFIG_MNATIVE_INTEL
18893 +#define MODULE_PROC_FAMILY "NATIVE_INTEL "
18894 +#elif defined CONFIG_MNATIVE_AMD
18895 +#define MODULE_PROC_FAMILY "NATIVE_AMD "
18896 +#elif defined CONFIG_MNEHALEM
18897 +#define MODULE_PROC_FAMILY "NEHALEM "
18898 +#elif defined CONFIG_MWESTMERE
18899 +#define MODULE_PROC_FAMILY "WESTMERE "
18900 +#elif defined CONFIG_MSILVERMONT
18901 +#define MODULE_PROC_FAMILY "SILVERMONT "
18902 +#elif defined CONFIG_MGOLDMONT
18903 +#define MODULE_PROC_FAMILY "GOLDMONT "
18904 +#elif defined CONFIG_MGOLDMONTPLUS
18905 +#define MODULE_PROC_FAMILY "GOLDMONTPLUS "
18906 +#elif defined CONFIG_MSANDYBRIDGE
18907 +#define MODULE_PROC_FAMILY "SANDYBRIDGE "
18908 +#elif defined CONFIG_MIVYBRIDGE
18909 +#define MODULE_PROC_FAMILY "IVYBRIDGE "
18910 +#elif defined CONFIG_MHASWELL
18911 +#define MODULE_PROC_FAMILY "HASWELL "
18912 +#elif defined CONFIG_MBROADWELL
18913 +#define MODULE_PROC_FAMILY "BROADWELL "
18914 +#elif defined CONFIG_MSKYLAKE
18915 +#define MODULE_PROC_FAMILY "SKYLAKE "
18916 +#elif defined CONFIG_MSKYLAKEX
18917 +#define MODULE_PROC_FAMILY "SKYLAKEX "
18918 +#elif defined CONFIG_MCANNONLAKE
18919 +#define MODULE_PROC_FAMILY "CANNONLAKE "
18920 +#elif defined CONFIG_MICELAKE
18921 +#define MODULE_PROC_FAMILY "ICELAKE "
18922 +#elif defined CONFIG_MCASCADELAKE
18923 +#define MODULE_PROC_FAMILY "CASCADELAKE "
18924 +#elif defined CONFIG_MCOOPERLAKE
18925 +#define MODULE_PROC_FAMILY "COOPERLAKE "
18926 +#elif defined CONFIG_MTIGERLAKE
18927 +#define MODULE_PROC_FAMILY "TIGERLAKE "
18928 +#elif defined CONFIG_MSAPPHIRERAPIDS
18929 +#define MODULE_PROC_FAMILY "SAPPHIRERAPIDS "
18930 +#elif defined CONFIG_MROCKETLAKE
18931 +#define MODULE_PROC_FAMILY "ROCKETLAKE "
18932 +#elif defined CONFIG_MALDERLAKE
18933 +#define MODULE_PROC_FAMILY "ALDERLAKE "
18934  #elif defined CONFIG_MATOM
18935  #define MODULE_PROC_FAMILY "ATOM "
18936  #elif defined CONFIG_M686
18937 @@ -35,6 +77,30 @@
18938  #define MODULE_PROC_FAMILY "K7 "
18939  #elif defined CONFIG_MK8
18940  #define MODULE_PROC_FAMILY "K8 "
18941 +#elif defined CONFIG_MK8SSE3
18942 +#define MODULE_PROC_FAMILY "K8SSE3 "
18943 +#elif defined CONFIG_MK10
18944 +#define MODULE_PROC_FAMILY "K10 "
18945 +#elif defined CONFIG_MBARCELONA
18946 +#define MODULE_PROC_FAMILY "BARCELONA "
18947 +#elif defined CONFIG_MBOBCAT
18948 +#define MODULE_PROC_FAMILY "BOBCAT "
18949 +#elif defined CONFIG_MBULLDOZER
18950 +#define MODULE_PROC_FAMILY "BULLDOZER "
18951 +#elif defined CONFIG_MPILEDRIVER
18952 +#define MODULE_PROC_FAMILY "PILEDRIVER "
18953 +#elif defined CONFIG_MSTEAMROLLER
18954 +#define MODULE_PROC_FAMILY "STEAMROLLER "
18955 +#elif defined CONFIG_MJAGUAR
18956 +#define MODULE_PROC_FAMILY "JAGUAR "
18957 +#elif defined CONFIG_MEXCAVATOR
18958 +#define MODULE_PROC_FAMILY "EXCAVATOR "
18959 +#elif defined CONFIG_MZEN
18960 +#define MODULE_PROC_FAMILY "ZEN "
18961 +#elif defined CONFIG_MZEN2
18962 +#define MODULE_PROC_FAMILY "ZEN2 "
18963 +#elif defined CONFIG_MZEN3
18964 +#define MODULE_PROC_FAMILY "ZEN3 "
18965  #elif defined CONFIG_MELAN
18966  #define MODULE_PROC_FAMILY "ELAN "
18967  #elif defined CONFIG_MCRUSOE
18968 diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
18969 index 52bc217ca8c3..c9ddd233e32f 100644
18970 --- a/arch/x86/kernel/apic/x2apic_uv_x.c
18971 +++ b/arch/x86/kernel/apic/x2apic_uv_x.c
18972 @@ -1671,6 +1671,9 @@ static __init int uv_system_init_hubless(void)
18973         if (rc < 0)
18974                 return rc;
18976 +       /* Set section block size for current node memory */
18977 +       set_block_size();
18979         /* Create user access node */
18980         if (rc >= 0)
18981                 uv_setup_proc_files(1);
18982 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
18983 index 347a956f71ca..eedb2b320946 100644
18984 --- a/arch/x86/kernel/cpu/amd.c
18985 +++ b/arch/x86/kernel/cpu/amd.c
18986 @@ -1170,3 +1170,19 @@ void set_dr_addr_mask(unsigned long mask, int dr)
18987                 break;
18988         }
18991 +u32 amd_get_highest_perf(void)
18993 +       struct cpuinfo_x86 *c = &boot_cpu_data;
18995 +       if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
18996 +                              (c->x86_model >= 0x70 && c->x86_model < 0x80)))
18997 +               return 166;
18999 +       if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
19000 +                              (c->x86_model >= 0x40 && c->x86_model < 0x70)))
19001 +               return 166;
19003 +       return 255;
19005 +EXPORT_SYMBOL_GPL(amd_get_highest_perf);
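
The two magic return values here are CPPC "highest performance"
readings: 255 is the architectural default ceiling, while the matched
Zen 2 (family 17h, models 30h-3Fh and 70h-7Fh) and Zen 3 (family 19h,
models 20h-2Fh and 40h-6Fh) parts report 166. The consumer is the
frequency-invariance code (see the amd_set_max_freq_ratio() hunk
further below), which divides this value by the nominal performance to
obtain a boost ratio. A sketch of that calculation, assuming the usual
div_u64() and SCHED_CAPACITY_SHIFT kernel helpers; the hypothetical
amd_boost_ratio() wrapper is for illustration only:

#include <linux/math64.h>
#include <linux/sched.h>
#include <asm/processor.h>	/* amd_get_highest_perf(), added above */

static u64 amd_boost_ratio(u32 nominal_perf)
{
	u32 highest_perf = amd_get_highest_perf();

	if (!nominal_perf)
		return 0;

	/* Fixed-point ratio where SCHED_CAPACITY_SCALE means 1.0 */
	return div_u64((u64)highest_perf << SCHED_CAPACITY_SHIFT,
		       nominal_perf);
}
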
19006 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
19007 index ab640abe26b6..1e576cc831c1 100644
19008 --- a/arch/x86/kernel/cpu/common.c
19009 +++ b/arch/x86/kernel/cpu/common.c
19010 @@ -1850,7 +1850,7 @@ static inline void setup_getcpu(int cpu)
19011         unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
19012         struct desc_struct d = { };
19014 -       if (boot_cpu_has(X86_FEATURE_RDTSCP))
19015 +       if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID))
19016                 write_rdtscp_aux(cpudata);
19018         /* Store CPU and node number in limit. */
19019 diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
19020 index b935e1b5f115..6a6318e9590c 100644
19021 --- a/arch/x86/kernel/cpu/microcode/core.c
19022 +++ b/arch/x86/kernel/cpu/microcode/core.c
19023 @@ -629,16 +629,16 @@ static ssize_t reload_store(struct device *dev,
19024         if (val != 1)
19025                 return size;
19027 -       tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
19028 -       if (tmp_ret != UCODE_NEW)
19029 -               return size;
19031         get_online_cpus();
19033         ret = check_online_cpus();
19034         if (ret)
19035                 goto put;
19037 +       tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
19038 +       if (tmp_ret != UCODE_NEW)
19039 +               goto put;
19041         mutex_lock(&microcode_mutex);
19042         ret = microcode_reload_late();
19043         mutex_unlock(&microcode_mutex);
19044 diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
19045 index 22aad412f965..629c4994f165 100644
19046 --- a/arch/x86/kernel/e820.c
19047 +++ b/arch/x86/kernel/e820.c
19048 @@ -31,8 +31,8 @@
19049   *       - inform the user about the firmware's notion of memory layout
19050   *         via /sys/firmware/memmap
19051   *
19052 - *       - the hibernation code uses it to generate a kernel-independent MD5
19053 - *         fingerprint of the physical memory layout of a system.
19054 + *       - the hibernation code uses it to generate a kernel-independent CRC32
19055 + *         checksum of the physical memory layout of a system.
19056   *
19057   * - 'e820_table_kexec': a slightly modified (by the kernel) firmware version
19058   *   passed to us by the bootloader - the major difference between
19059 diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
19060 index df776cdca327..0bb9fe021bbe 100644
19061 --- a/arch/x86/kernel/kprobes/core.c
19062 +++ b/arch/x86/kernel/kprobes/core.c
19063 @@ -139,6 +139,8 @@ NOKPROBE_SYMBOL(synthesize_relcall);
19064  int can_boost(struct insn *insn, void *addr)
19066         kprobe_opcode_t opcode;
19067 +       insn_byte_t prefix;
19068 +       int i;
19070         if (search_exception_tables((unsigned long)addr))
19071                 return 0;       /* Page fault may occur on this address. */
19072 @@ -151,9 +153,14 @@ int can_boost(struct insn *insn, void *addr)
19073         if (insn->opcode.nbytes != 1)
19074                 return 0;
19076 -       /* Can't boost Address-size override prefix */
19077 -       if (unlikely(inat_is_address_size_prefix(insn->attr)))
19078 -               return 0;
19079 +       for_each_insn_prefix(insn, i, prefix) {
19080 +               insn_attr_t attr;
19082 +               attr = inat_get_opcode_attribute(prefix);
19083 +               /* Can't boost Address-size override prefix and CS override prefix */
19084 +               if (prefix == 0x2e || inat_is_address_size_prefix(attr))
19085 +                       return 0;
19086 +       }
19088         opcode = insn->opcode.bytes[0];
19090 @@ -178,8 +185,8 @@ int can_boost(struct insn *insn, void *addr)
19091                 /* clear and set flags are boostable */
19092                 return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
19093         default:
19094 -               /* CS override prefix and call are not boostable */
19095 -               return (opcode != 0x2e && opcode != 0x9a);
19096 +               /* call is not boostable */
19097 +               return opcode != 0x9a;
19098         }
19101 @@ -448,7 +455,11 @@ static void set_resume_flags(struct kprobe *p, struct insn *insn)
19102                 break;
19103  #endif
19104         case 0xff:
19105 -               opcode = insn->opcode.bytes[1];
19106 +               /*
19107 +                * Since the 0xff is an extended group opcode, the instruction
19108 +                * is determined by the MOD/RM byte.
19109 +                */
19110 +               opcode = insn->modrm.bytes[0];
19111                 if ((opcode & 0x30) == 0x10) {
19112                         /*
19113                          * call absolute, indirect
19114 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
19115 index bf250a339655..2ef961cf4cfc 100644
19116 --- a/arch/x86/kernel/nmi.c
19117 +++ b/arch/x86/kernel/nmi.c
19118 @@ -524,6 +524,16 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
19119                 mds_user_clear_cpu_buffers();
19122 +#if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL)
19123 +DEFINE_IDTENTRY_RAW(exc_nmi_noist)
19125 +       exc_nmi(regs);
19127 +#endif
19128 +#if IS_MODULE(CONFIG_KVM_INTEL)
19129 +EXPORT_SYMBOL_GPL(asm_exc_nmi_noist);
19130 +#endif
19132  void stop_nmi(void)
19134         ignore_nmis++;
19135 diff --git a/arch/x86/kernel/sev-es-shared.c b/arch/x86/kernel/sev-es-shared.c
19136 index cdc04d091242..ecb20b17b7df 100644
19137 --- a/arch/x86/kernel/sev-es-shared.c
19138 +++ b/arch/x86/kernel/sev-es-shared.c
19139 @@ -63,6 +63,7 @@ static bool sev_es_negotiate_protocol(void)
19141  static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb)
19143 +       ghcb->save.sw_exit_code = 0;
19144         memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
19147 @@ -186,7 +187,6 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
19148          * make it accessible to the hypervisor.
19149          *
19150          * In particular, check for:
19151 -        *      - Hypervisor CPUID bit
19152          *      - Availability of CPUID leaf 0x8000001f
19153          *      - SEV CPUID bit.
19154          *
19155 @@ -194,10 +194,7 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
19156          * can't be checked here.
19157          */
19159 -       if ((fn == 1 && !(regs->cx & BIT(31))))
19160 -               /* Hypervisor bit */
19161 -               goto fail;
19162 -       else if (fn == 0x80000000 && (regs->ax < 0x8000001f))
19163 +       if (fn == 0x80000000 && (regs->ax < 0x8000001f))
19164                 /* SEV leaf check */
19165                 goto fail;
19166         else if ((fn == 0x8000001f && !(regs->ax & BIT(1))))
19167 diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c
19168 index 04a780abb512..e0cdab7cb632 100644
19169 --- a/arch/x86/kernel/sev-es.c
19170 +++ b/arch/x86/kernel/sev-es.c
19171 @@ -191,8 +191,18 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
19172         if (unlikely(data->ghcb_active)) {
19173                 /* GHCB is already in use - save its contents */
19175 -               if (unlikely(data->backup_ghcb_active))
19176 -                       return NULL;
19177 +               if (unlikely(data->backup_ghcb_active)) {
19178 +                       /*
19179 +                        * Backup-GHCB is also already in use. There is no way
19180 +                        * to continue here so just kill the machine. To make
19181 +                        * panic() work, mark GHCBs inactive so that messages
19182 +                        * can be printed out.
19183 +                        */
19184 +                       data->ghcb_active        = false;
19185 +                       data->backup_ghcb_active = false;
19187 +                       panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
19188 +               }
19190                 /* Mark backup_ghcb active before writing to it */
19191                 data->backup_ghcb_active = true;
19192 @@ -209,24 +219,6 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
19193         return ghcb;
19196 -static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
19198 -       struct sev_es_runtime_data *data;
19199 -       struct ghcb *ghcb;
19201 -       data = this_cpu_read(runtime_data);
19202 -       ghcb = &data->ghcb_page;
19204 -       if (state->ghcb) {
19205 -               /* Restore GHCB from Backup */
19206 -               *ghcb = *state->ghcb;
19207 -               data->backup_ghcb_active = false;
19208 -               state->ghcb = NULL;
19209 -       } else {
19210 -               data->ghcb_active = false;
19211 -       }
19214  /* Needed in vc_early_forward_exception */
19215  void do_early_exception(struct pt_regs *regs, int trapnr);
19217 @@ -296,31 +288,44 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
19218         u16 d2;
19219         u8  d1;
19221 -       /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
19222 -       if (!user_mode(ctxt->regs) && !access_ok(target, size)) {
19223 -               memcpy(dst, buf, size);
19224 -               return ES_OK;
19225 -       }
19227 +       /*
19228 +        * This function uses __put_user() independent of whether kernel or user
19229 +        * memory is accessed. This works fine because __put_user() does no
19230 +        * sanity checks of the pointer being accessed. All that it does is
19231 +        * to report when the access failed.
19232 +        *
19233 +        * Also, this function runs in atomic context, so __put_user() is not
19234 +        * allowed to sleep. The page-fault handler detects that it is running
19235 +        * in atomic context and will not try to take mmap_sem and handle the
19236 +        * fault, so additional pagefault_enable()/disable() calls are not
19237 +        * needed.
19238 +        *
19239 +        * The access can't be done via copy_to_user() here because
19240 +        * vc_write_mem() must not use string instructions to access unsafe
19241 +        * memory. The reason is that MOVS is emulated by the #VC handler by
19242 +        * splitting the move up into a read and a write and taking a nested #VC
19243 +        * exception on whatever of them is the MMIO access. Using string
19244 +        * instructions here would cause infinite nesting.
19245 +        */
19246         switch (size) {
19247         case 1:
19248                 memcpy(&d1, buf, 1);
19249 -               if (put_user(d1, target))
19250 +               if (__put_user(d1, target))
19251                         goto fault;
19252                 break;
19253         case 2:
19254                 memcpy(&d2, buf, 2);
19255 -               if (put_user(d2, target))
19256 +               if (__put_user(d2, target))
19257                         goto fault;
19258                 break;
19259         case 4:
19260                 memcpy(&d4, buf, 4);
19261 -               if (put_user(d4, target))
19262 +               if (__put_user(d4, target))
19263                         goto fault;
19264                 break;
19265         case 8:
19266                 memcpy(&d8, buf, 8);
19267 -               if (put_user(d8, target))
19268 +               if (__put_user(d8, target))
19269                         goto fault;
19270                 break;
19271         default:
19272 @@ -351,30 +356,43 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
19273         u16 d2;
19274         u8  d1;
19276 -       /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
19277 -       if (!user_mode(ctxt->regs) && !access_ok(s, size)) {
19278 -               memcpy(buf, src, size);
19279 -               return ES_OK;
19280 -       }
19282 +       /*
19283 +        * This function uses __get_user() independent of whether kernel or user
19284 +        * memory is accessed. This works fine because __get_user() does no
19285 +        * sanity checks of the pointer being accessed. All that it does is
19286 +        * to report when the access failed.
19287 +        *
19288 +        * Also, this function runs in atomic context, so __get_user() is not
19289 +        * allowed to sleep. The page-fault handler detects that it is running
19290 +        * in atomic context and will not try to take mmap_sem and handle the
19291 +        * fault, so additional pagefault_enable()/disable() calls are not
19292 +        * needed.
19293 +        *
19294 +        * The access can't be done via copy_from_user() here because
19295 +        * vc_read_mem() must not use string instructions to access unsafe
19296 +        * memory. The reason is that MOVS is emulated by the #VC handler by
19297 +        * splitting the move up into a read and a write and taking a nested #VC
19298 +        * exception on whatever of them is the MMIO access. Using string
19299 +        * instructions here would cause infinite nesting.
19300 +        */
19301         switch (size) {
19302         case 1:
19303 -               if (get_user(d1, s))
19304 +               if (__get_user(d1, s))
19305                         goto fault;
19306                 memcpy(buf, &d1, 1);
19307                 break;
19308         case 2:
19309 -               if (get_user(d2, s))
19310 +               if (__get_user(d2, s))
19311                         goto fault;
19312                 memcpy(buf, &d2, 2);
19313                 break;
19314         case 4:
19315 -               if (get_user(d4, s))
19316 +               if (__get_user(d4, s))
19317                         goto fault;
19318                 memcpy(buf, &d4, 4);
19319                 break;
19320         case 8:
19321 -               if (get_user(d8, s))
19322 +               if (__get_user(d8, s))
19323                         goto fault;
19324                 memcpy(buf, &d8, 8);
19325                 break;
19326 @@ -434,6 +452,29 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
19327  /* Include code shared with pre-decompression boot stage */
19328  #include "sev-es-shared.c"
19330 +static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
19332 +       struct sev_es_runtime_data *data;
19333 +       struct ghcb *ghcb;
19335 +       data = this_cpu_read(runtime_data);
19336 +       ghcb = &data->ghcb_page;
19338 +       if (state->ghcb) {
19339 +               /* Restore GHCB from Backup */
19340 +               *ghcb = *state->ghcb;
19341 +               data->backup_ghcb_active = false;
19342 +               state->ghcb = NULL;
19343 +       } else {
19344 +               /*
19345 +                * Invalidate the GHCB so a VMGEXIT instruction issued
19346 +                * from userspace won't appear to be valid.
19347 +                */
19348 +               vc_ghcb_invalidate(ghcb);
19349 +               data->ghcb_active = false;
19350 +       }
19353  void noinstr __sev_es_nmi_complete(void)
19355         struct ghcb_state state;
19356 @@ -1228,6 +1269,10 @@ static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
19357         case X86_TRAP_UD:
19358                 exc_invalid_op(ctxt->regs);
19359                 break;
19360 +       case X86_TRAP_PF:
19361 +               write_cr2(ctxt->fi.cr2);
19362 +               exc_page_fault(ctxt->regs, error_code);
19363 +               break;
19364         case X86_TRAP_AC:
19365                 exc_alignment_check(ctxt->regs, error_code);
19366                 break;
19367 @@ -1257,7 +1302,6 @@ static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
19368   */
19369  DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
19371 -       struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
19372         irqentry_state_t irq_state;
19373         struct ghcb_state state;
19374         struct es_em_ctxt ctxt;
19375 @@ -1283,16 +1327,6 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
19376          */
19378         ghcb = sev_es_get_ghcb(&state);
19379 -       if (!ghcb) {
19380 -               /*
19381 -                * Mark GHCBs inactive so that panic() is able to print the
19382 -                * message.
19383 -                */
19384 -               data->ghcb_active        = false;
19385 -               data->backup_ghcb_active = false;
19387 -               panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
19388 -       }
19390         vc_ghcb_invalidate(ghcb);
19391         result = vc_init_em_ctxt(&ctxt, regs, error_code);
19392 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19393 index 16703c35a944..363b36bbd791 100644
19394 --- a/arch/x86/kernel/smpboot.c
19395 +++ b/arch/x86/kernel/smpboot.c
19396 @@ -458,29 +458,52 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
19397         return false;
19400 +static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
19402 +       if (c->phys_proc_id == o->phys_proc_id &&
19403 +           c->cpu_die_id == o->cpu_die_id)
19404 +               return true;
19405 +       return false;
19408  /*
19409 - * Define snc_cpu[] for SNC (Sub-NUMA Cluster) CPUs.
19410 + * Unlike the other levels, we do not enforce keeping a
19411 + * multicore group inside a NUMA node.  If this happens, we will
19412 + * discard the MC level of the topology later.
19413 + */
19414 +static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
19416 +       if (c->phys_proc_id == o->phys_proc_id)
19417 +               return true;
19418 +       return false;
19422 + * Define intel_cod_cpu[] for Intel COD (Cluster-on-Die) CPUs.
19423   *
19424 - * These are Intel CPUs that enumerate an LLC that is shared by
19425 - * multiple NUMA nodes. The LLC on these systems is shared for
19426 - * off-package data access but private to the NUMA node (half
19427 - * of the package) for on-package access.
19428 + * Any Intel CPU that has multiple nodes per package and does not
19429 + * match intel_cod_cpu[] has the SNC (Sub-NUMA Cluster) topology.
19430   *
19431 - * CPUID (the source of the information about the LLC) can only
19432 - * enumerate the cache as being shared *or* unshared, but not
19433 - * this particular configuration. The CPU in this case enumerates
19434 - * the cache to be shared across the entire package (spanning both
19435 - * NUMA nodes).
19436 + * When in SNC mode, these CPUs enumerate an LLC that is shared
19437 + * by multiple NUMA nodes. The LLC is shared for off-package data
19438 + * access but private to the NUMA node (half of the package) for
19439 + * on-package access. CPUID (the source of the information about
19440 + * the LLC) can only enumerate the cache as shared or unshared,
19441 + * but not this particular configuration.
19442   */
19444 -static const struct x86_cpu_id snc_cpu[] = {
19445 -       X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
19446 +static const struct x86_cpu_id intel_cod_cpu[] = {
19447 +       X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, 0),       /* COD */
19448 +       X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, 0),     /* COD */
19449 +       X86_MATCH_INTEL_FAM6_MODEL(ANY, 1),             /* SNC */
19450         {}
19451  };
19453  static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
19455 +       const struct x86_cpu_id *id = x86_match_cpu(intel_cod_cpu);
19456         int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
19457 +       bool intel_snc = id && id->driver_data;
19459         /* Do not match if we do not have a valid APICID for cpu: */
19460         if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
19461 @@ -495,32 +518,12 @@ static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
19462          * means 'c' does not share the LLC of 'o'. This will be
19463          * reflected to userspace.
19464          */
19465 -       if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu))
19466 +       if (match_pkg(c, o) && !topology_same_node(c, o) && intel_snc)
19467                 return false;
19469         return topology_sane(c, o, "llc");
19473 - * Unlike the other levels, we do not enforce keeping a
19474 - * multicore group inside a NUMA node.  If this happens, we will
19475 - * discard the MC level of the topology later.
19476 - */
19477 -static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
19479 -       if (c->phys_proc_id == o->phys_proc_id)
19480 -               return true;
19481 -       return false;
19484 -static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
19486 -       if ((c->phys_proc_id == o->phys_proc_id) &&
19487 -               (c->cpu_die_id == o->cpu_die_id))
19488 -               return true;
19489 -       return false;
19493  #if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
19494  static inline int x86_sched_itmt_flags(void)
19495 @@ -592,14 +595,23 @@ void set_cpu_sibling_map(int cpu)
19496         for_each_cpu(i, cpu_sibling_setup_mask) {
19497                 o = &cpu_data(i);
19499 +               if (match_pkg(c, o) && !topology_same_node(c, o))
19500 +                       x86_has_numa_in_package = true;
19502                 if ((i == cpu) || (has_smt && match_smt(c, o)))
19503                         link_mask(topology_sibling_cpumask, cpu, i);
19505                 if ((i == cpu) || (has_mp && match_llc(c, o)))
19506                         link_mask(cpu_llc_shared_mask, cpu, i);
19508 +               if ((i == cpu) || (has_mp && match_die(c, o)))
19509 +                       link_mask(topology_die_cpumask, cpu, i);
19510         }
19512 +       threads = cpumask_weight(topology_sibling_cpumask(cpu));
19513 +       if (threads > __max_smt_threads)
19514 +               __max_smt_threads = threads;
19516         /*
19517          * This needs a separate iteration over the cpus because we rely on all
19518          * topology_sibling_cpumask links to be set-up.
19519 @@ -613,8 +625,7 @@ void set_cpu_sibling_map(int cpu)
19520                         /*
19521                          *  Does this new cpu bringup a new core?
19522                          */
19523 -                       if (cpumask_weight(
19524 -                           topology_sibling_cpumask(cpu)) == 1) {
19525 +                       if (threads == 1) {
19526                                 /*
19527                                  * for each core in package, increment
19528                                  * the booted_cores for this new cpu
19529 @@ -631,16 +642,7 @@ void set_cpu_sibling_map(int cpu)
19530                         } else if (i != cpu && !c->booted_cores)
19531                                 c->booted_cores = cpu_data(i).booted_cores;
19532                 }
19533 -               if (match_pkg(c, o) && !topology_same_node(c, o))
19534 -                       x86_has_numa_in_package = true;
19536 -               if ((i == cpu) || (has_mp && match_die(c, o)))
19537 -                       link_mask(topology_die_cpumask, cpu, i);
19538         }
19540 -       threads = cpumask_weight(topology_sibling_cpumask(cpu));
19541 -       if (threads > __max_smt_threads)
19542 -               __max_smt_threads = threads;
19545  /* maps the cpu to the sched domain representing multi-core */
19546 @@ -2044,7 +2046,7 @@ static bool amd_set_max_freq_ratio(void)
19547                 return false;
19548         }
19550 -       highest_perf = perf_caps.highest_perf;
19551 +       highest_perf = amd_get_highest_perf();
19552         nominal_perf = perf_caps.nominal_perf;
19554         if (!highest_perf || !nominal_perf) {
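For orientation: the smpboot.c hunks above move match_die()/match_pkg() ahead of match_llc() so the LLC code can reuse them, restrict the SNC/COD override to CPUs that actually share a package, and hoist the SMT thread count out of the second sibling-map loop; the final hunk makes the frequency-invariance ratio use amd_get_highest_perf() instead of the raw CPPC highest_perf, which some parts over-report. The package/die predicates reduce to a pair of ID comparisons; a standalone model of that relationship (toy struct, not the kernel's cpuinfo_x86):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for the two IDs compared by match_pkg()/match_die(). */
struct topo_ids {
        int phys_proc_id; /* physical package (socket) */
        int cpu_die_id;   /* die within that package   */
};

static bool same_pkg(const struct topo_ids *c, const struct topo_ids *o)
{
        return c->phys_proc_id == o->phys_proc_id;
}

static bool same_die(const struct topo_ids *c, const struct topo_ids *o)
{
        /* A die match implies a package match, never the other way around. */
        return same_pkg(c, o) && c->cpu_die_id == o->cpu_die_id;
}

int main(void)
{
        struct topo_ids a = { 0, 0 }, b = { 0, 1 };

        printf("pkg=%d die=%d\n", same_pkg(&a, &b), same_die(&a, &b));
        return 0; /* prints "pkg=1 die=0": same socket, different dies */
}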
19555 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
19556 index 6bd2f8b830e4..62f795352c02 100644
19557 --- a/arch/x86/kvm/cpuid.c
19558 +++ b/arch/x86/kvm/cpuid.c
19559 @@ -589,7 +589,8 @@ static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
19560         case 7:
19561                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
19562                 entry->eax = 0;
19563 -               entry->ecx = F(RDPID);
19564 +               if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
19565 +                       entry->ecx = F(RDPID);
19566                 ++array->nent;
19567         default:
19568                 break;
19569 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
19570 index f7970ba6219f..8fc71e70857d 100644
19571 --- a/arch/x86/kvm/emulate.c
19572 +++ b/arch/x86/kvm/emulate.c
19573 @@ -4220,7 +4220,7 @@ static bool valid_cr(int nr)
19574         }
19577 -static int check_cr_read(struct x86_emulate_ctxt *ctxt)
19578 +static int check_cr_access(struct x86_emulate_ctxt *ctxt)
19580         if (!valid_cr(ctxt->modrm_reg))
19581                 return emulate_ud(ctxt);
19582 @@ -4228,80 +4228,6 @@ static int check_cr_read(struct x86_emulate_ctxt *ctxt)
19583         return X86EMUL_CONTINUE;
19586 -static int check_cr_write(struct x86_emulate_ctxt *ctxt)
19588 -       u64 new_val = ctxt->src.val64;
19589 -       int cr = ctxt->modrm_reg;
19590 -       u64 efer = 0;
19592 -       static u64 cr_reserved_bits[] = {
19593 -               0xffffffff00000000ULL,
19594 -               0, 0, 0, /* CR3 checked later */
19595 -               CR4_RESERVED_BITS,
19596 -               0, 0, 0,
19597 -               CR8_RESERVED_BITS,
19598 -       };
19600 -       if (!valid_cr(cr))
19601 -               return emulate_ud(ctxt);
19603 -       if (new_val & cr_reserved_bits[cr])
19604 -               return emulate_gp(ctxt, 0);
19606 -       switch (cr) {
19607 -       case 0: {
19608 -               u64 cr4;
19609 -               if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
19610 -                   ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
19611 -                       return emulate_gp(ctxt, 0);
19613 -               cr4 = ctxt->ops->get_cr(ctxt, 4);
19614 -               ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
19616 -               if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
19617 -                   !(cr4 & X86_CR4_PAE))
19618 -                       return emulate_gp(ctxt, 0);
19620 -               break;
19621 -               }
19622 -       case 3: {
19623 -               u64 rsvd = 0;
19625 -               ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
19626 -               if (efer & EFER_LMA) {
19627 -                       u64 maxphyaddr;
19628 -                       u32 eax, ebx, ecx, edx;
19630 -                       eax = 0x80000008;
19631 -                       ecx = 0;
19632 -                       if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
19633 -                                                &edx, true))
19634 -                               maxphyaddr = eax & 0xff;
19635 -                       else
19636 -                               maxphyaddr = 36;
19637 -                       rsvd = rsvd_bits(maxphyaddr, 63);
19638 -                       if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
19639 -                               rsvd &= ~X86_CR3_PCID_NOFLUSH;
19640 -               }
19642 -               if (new_val & rsvd)
19643 -                       return emulate_gp(ctxt, 0);
19645 -               break;
19646 -               }
19647 -       case 4: {
19648 -               ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
19650 -               if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
19651 -                       return emulate_gp(ctxt, 0);
19653 -               break;
19654 -               }
19655 -       }
19657 -       return X86EMUL_CONTINUE;
19660  static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
19662         unsigned long dr7;
19663 @@ -4576,7 +4502,7 @@ static const struct opcode group8[] = {
19664   * from the register case of group9.
19665   */
19666  static const struct gprefix pfx_0f_c7_7 = {
19667 -       N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
19668 +       N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
19669  };
19672 @@ -4841,10 +4767,10 @@ static const struct opcode twobyte_table[256] = {
19673         D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
19674         D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
19675         /* 0x20 - 0x2F */
19676 -       DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
19677 +       DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access),
19678         DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
19679         IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
19680 -                                               check_cr_write),
19681 +                                               check_cr_access),
19682         IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
19683                                                 check_dr_write),
19684         N, N, N, N,
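On the emulate.c side, check_cr_write() disappears because its reserved-bit and mode checks duplicated (and had drifted from) the checks in the common kvm_set_crN() path that MOV-to-CR emulation already goes through; what remains, renamed check_cr_access(), is just the existence test for the CR number. A sketch of that remaining check, mirroring the kernel's valid_cr():

#include <stdbool.h>
#include <stdio.h>

/* Only CR0, CR2, CR3, CR4 and CR8 are architecturally defined;
 * MOV to/from any other CR number raises #UD. */
static bool valid_cr(int nr)
{
        switch (nr) {
        case 0: case 2: case 3: case 4: case 8:
                return true;
        default:
                return false;
        }
}

int main(void)
{
        for (int nr = 0; nr <= 8; nr++)
                printf("cr%d: %s\n", nr, valid_cr(nr) ? "ok" : "#UD");
        return 0;
}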
19685 diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
19686 index 0d359115429a..f016838faedd 100644
19687 --- a/arch/x86/kvm/kvm_emulate.h
19688 +++ b/arch/x86/kvm/kvm_emulate.h
19689 @@ -468,6 +468,7 @@ enum x86_intercept {
19690         x86_intercept_clgi,
19691         x86_intercept_skinit,
19692         x86_intercept_rdtscp,
19693 +       x86_intercept_rdpid,
19694         x86_intercept_icebp,
19695         x86_intercept_wbinvd,
19696         x86_intercept_monitor,
19697 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
19698 index cc369b9ad8f1..fa023f3feb25 100644
19699 --- a/arch/x86/kvm/lapic.c
19700 +++ b/arch/x86/kvm/lapic.c
19701 @@ -296,6 +296,10 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
19703                 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
19704         }
19706 +       /* Check if there are APF page ready requests pending */
19707 +       if (enabled)
19708 +               kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
19711  static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
19712 @@ -1909,8 +1913,8 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
19713         if (!apic->lapic_timer.hv_timer_in_use)
19714                 goto out;
19715         WARN_ON(rcuwait_active(&vcpu->wait));
19716 -       cancel_hv_timer(apic);
19717         apic_timer_expired(apic, false);
19718 +       cancel_hv_timer(apic);
19720         if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
19721                 advance_periodic_target_expiration(apic);
19722 @@ -2261,6 +2265,8 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
19723                 if (value & MSR_IA32_APICBASE_ENABLE) {
19724                         kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
19725                         static_branch_slow_dec_deferred(&apic_hw_disabled);
19726 +                       /* Check if there are APF page ready requests pending */
19727 +                       kvm_make_request(KVM_REQ_APF_READY, vcpu);
19728                 } else {
19729                         static_branch_inc(&apic_hw_disabled.key);
19730                         atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
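Both lapic.c additions post KVM_REQ_APF_READY when the APIC comes (back) online, so an async page-fault "page ready" event that completed while the APIC was disabled is delivered on the next VM entry rather than dropped; this pairs with the kvm_lapic_enabled() check added to kvm_arch_can_dequeue_async_page_present() further down. The request mechanism is a per-vCPU bitmask with atomic test-and-clear semantics; a toy model of the pattern (illustrative names, not KVM's API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define REQ_APF_READY 0UL

static atomic_ulong requests; /* per-vCPU in the real thing */

static void make_request(unsigned long req)
{
        atomic_fetch_or(&requests, 1UL << req);
}

/* Fires exactly once per posted request: atomic test-and-clear. */
static bool check_request(unsigned long req)
{
        unsigned long bit = 1UL << req;

        return atomic_fetch_and(&requests, ~bit) & bit;
}

int main(void)
{
        make_request(REQ_APF_READY);
        printf("%d %d\n", check_request(REQ_APF_READY),
               check_request(REQ_APF_READY)); /* prints "1 0" */
        return 0;
}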
19731 diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
19732 index 951dae4e7175..cd0faa187674 100644
19733 --- a/arch/x86/kvm/mmu/mmu.c
19734 +++ b/arch/x86/kvm/mmu/mmu.c
19735 @@ -3193,14 +3193,14 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
19736                 if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
19737                     (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
19738                         mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list);
19739 -               } else {
19740 +               } else if (mmu->pae_root) {
19741                         for (i = 0; i < 4; ++i)
19742                                 if (mmu->pae_root[i] != 0)
19743                                         mmu_free_root_page(kvm,
19744                                                            &mmu->pae_root[i],
19745                                                            &invalid_list);
19746 -                       mmu->root_hpa = INVALID_PAGE;
19747                 }
19748 +               mmu->root_hpa = INVALID_PAGE;
19749                 mmu->root_pgd = 0;
19750         }
19752 @@ -3312,9 +3312,23 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
19753          * the shadow page table may be a PAE or a long mode page table.
19754          */
19755         pm_mask = PT_PRESENT_MASK;
19756 -       if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL)
19757 +       if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
19758                 pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
19760 +               /*
19761 +                * Allocate the page for the PDPTEs only when needed, i.e. when
19762 +                * shadowing 32-bit NPT with 64-bit NPT.  Unlike 32-bit NPT, it
19763 +                * doesn't need to be in low mem.  See also lm_root below.
19764 +                */
19765 +               if (!vcpu->arch.mmu->pae_root) {
19766 +                       WARN_ON_ONCE(!tdp_enabled);
19768 +                       vcpu->arch.mmu->pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
19769 +                       if (!vcpu->arch.mmu->pae_root)
19770 +                               return -ENOMEM;
19771 +               }
19772 +       }
19774         for (i = 0; i < 4; ++i) {
19775                 MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));
19776                 if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
19777 @@ -3337,21 +3351,19 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
19778         vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
19780         /*
19781 -        * If we shadow a 32 bit page table with a long mode page
19782 -        * table we enter this path.
19783 +        * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
19784 +        * tables are allocated and initialized at MMU creation as there is no
19785 +        * equivalent level in the guest's NPT to shadow.  Allocate the tables
19786 +        * on demand, as running a 32-bit L1 VMM is very rare.  The PDP is
19787 +        * handled above (to share logic with PAE), deal with the PML4 here.
19788          */
19789         if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
19790                 if (vcpu->arch.mmu->lm_root == NULL) {
19791 -                       /*
19792 -                        * The additional page necessary for this is only
19793 -                        * allocated on demand.
19794 -                        */
19796                         u64 *lm_root;
19798                         lm_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
19799 -                       if (lm_root == NULL)
19800 -                               return 1;
19801 +                       if (!lm_root)
19802 +                               return -ENOMEM;
19804                         lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;
19806 @@ -3653,6 +3665,14 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
19807         struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
19808         bool async;
19810 +       /*
19811 +        * Retry the page fault if the gfn hit a memslot that is being deleted
19812 +        * or moved.  This ensures any existing SPTEs for the old memslot will
19813 +        * be zapped before KVM inserts a new MMIO SPTE for the gfn.
19814 +        */
19815 +       if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
19816 +               return true;
19818         /* Don't expose private memslots to L2. */
19819         if (is_guest_mode(vcpu) && !kvm_is_visible_memslot(slot)) {
19820                 *pfn = KVM_PFN_NOSLOT;
19821 @@ -4615,12 +4635,17 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
19822         struct kvm_mmu *context = &vcpu->arch.guest_mmu;
19823         union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);
19825 -       context->shadow_root_level = new_role.base.level;
19827         __kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base, false, false);
19829 -       if (new_role.as_u64 != context->mmu_role.as_u64)
19830 +       if (new_role.as_u64 != context->mmu_role.as_u64) {
19831                 shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
19833 +               /*
19834 +                * Override the level set by the common init helper, nested TDP
19835 +                * always uses the host's TDP configuration.
19836 +                */
19837 +               context->shadow_root_level = new_role.base.level;
19838 +       }
19840  EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
19842 @@ -5240,9 +5265,11 @@ static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
19843          * while the PDP table is a per-vCPU construct that's allocated at MMU
19844          * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
19845          * x86_64.  Therefore we need to allocate the PDP table in the first
19846 -        * 4GB of memory, which happens to fit the DMA32 zone.  Except for
19847 -        * SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can
19848 -        * skip allocating the PDP table.
19849 +        * 4GB of memory, which happens to fit the DMA32 zone.  TDP paging
19850 +        * generally doesn't use PAE paging and can skip allocating the PDP
19851 +        * table.  The main exception, handled here, is SVM's 32-bit NPT.  The
19852 +        * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
19853 +        * KVM; that horror is handled on-demand by mmu_alloc_shadow_roots().
19854          */
19855         if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
19856                 return 0;
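The mmu.c theme is making the PAE root page optional: kvm_mmu_free_roots() now tolerates a missing pae_root, the page is allocated on first use with a proper -ENOMEM (instead of the old bare 1), and the __kvm_mmu_create() comment spells out which configurations still need the low-memory PDP page. Reduced to its essentials, the on-demand allocation looks like the following (userspace sketch; the kernel uses get_zeroed_page(GFP_KERNEL_ACCOUNT) for a real 4K page):

#include <errno.h>
#include <stdlib.h>

struct mmu {
        unsigned long *pae_root; /* 4 PDPTEs when shadowing PAE/32-bit NPT */
};

/* Allocate the PDPTE page only on the first root build that needs it. */
static int ensure_pae_root(struct mmu *mmu)
{
        if (mmu->pae_root)
                return 0;

        mmu->pae_root = calloc(512, sizeof(*mmu->pae_root)); /* 4K on 64-bit */
        if (!mmu->pae_root)
                return -ENOMEM;
        return 0;
}

int main(void)
{
        struct mmu m = { 0 };
        int r = ensure_pae_root(&m);

        free(m.pae_root);
        return r ? 1 : 0;
}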
19857 diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
19858 index 874ea309279f..dbc6214d69de 100644
19859 --- a/arch/x86/kvm/svm/sev.c
19860 +++ b/arch/x86/kvm/svm/sev.c
19861 @@ -87,7 +87,7 @@ static bool __sev_recycle_asids(int min_asid, int max_asid)
19862         return true;
19865 -static int sev_asid_new(struct kvm_sev_info *sev)
19866 +static int sev_asid_new(bool es_active)
19868         int pos, min_asid, max_asid;
19869         bool retry = true;
19870 @@ -98,8 +98,8 @@ static int sev_asid_new(struct kvm_sev_info *sev)
19871          * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
19872          * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
19873          */
19874 -       min_asid = sev->es_active ? 0 : min_sev_asid - 1;
19875 -       max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
19876 +       min_asid = es_active ? 0 : min_sev_asid - 1;
19877 +       max_asid = es_active ? min_sev_asid - 1 : max_sev_asid;
19878  again:
19879         pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_asid);
19880         if (pos >= max_asid) {
19881 @@ -179,13 +179,17 @@ static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
19882  static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
19884         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
19885 +       bool es_active = argp->id == KVM_SEV_ES_INIT;
19886         int asid, ret;
19888 +       if (kvm->created_vcpus)
19889 +               return -EINVAL;
19891         ret = -EBUSY;
19892         if (unlikely(sev->active))
19893                 return ret;
19895 -       asid = sev_asid_new(sev);
19896 +       asid = sev_asid_new(es_active);
19897         if (asid < 0)
19898                 return ret;
19900 @@ -194,6 +198,7 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
19901                 goto e_free;
19903         sev->active = true;
19904 +       sev->es_active = es_active;
19905         sev->asid = asid;
19906         INIT_LIST_HEAD(&sev->regions_list);
19908 @@ -204,16 +209,6 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
19909         return ret;
19912 -static int sev_es_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
19914 -       if (!sev_es)
19915 -               return -ENOTTY;
19917 -       to_kvm_svm(kvm)->sev_info.es_active = true;
19919 -       return sev_guest_init(kvm, argp);
19922  static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
19924         struct sev_data_activate *data;
19925 @@ -564,6 +559,7 @@ static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
19927         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
19928         struct sev_data_launch_update_vmsa *vmsa;
19929 +       struct kvm_vcpu *vcpu;
19930         int i, ret;
19932         if (!sev_es_guest(kvm))
19933 @@ -573,8 +569,8 @@ static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
19934         if (!vmsa)
19935                 return -ENOMEM;
19937 -       for (i = 0; i < kvm->created_vcpus; i++) {
19938 -               struct vcpu_svm *svm = to_svm(kvm->vcpus[i]);
19939 +       kvm_for_each_vcpu(i, vcpu, kvm) {
19940 +               struct vcpu_svm *svm = to_svm(vcpu);
19942                 /* Perform some pre-encryption checks against the VMSA */
19943                 ret = sev_es_sync_vmsa(svm);
19944 @@ -1127,12 +1123,15 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
19945         mutex_lock(&kvm->lock);
19947         switch (sev_cmd.id) {
19948 +       case KVM_SEV_ES_INIT:
19949 +               if (!sev_es) {
19950 +                       r = -ENOTTY;
19951 +                       goto out;
19952 +               }
19953 +               fallthrough;
19954         case KVM_SEV_INIT:
19955                 r = sev_guest_init(kvm, &sev_cmd);
19956                 break;
19957 -       case KVM_SEV_ES_INIT:
19958 -               r = sev_es_guest_init(kvm, &sev_cmd);
19959 -               break;
19960         case KVM_SEV_LAUNCH_START:
19961                 r = sev_launch_start(kvm, &sev_cmd);
19962                 break;
19963 @@ -1349,8 +1348,11 @@ void __init sev_hardware_setup(void)
19964                 goto out;
19966         sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
19967 -       if (!sev_reclaim_asid_bitmap)
19968 +       if (!sev_reclaim_asid_bitmap) {
19969 +               bitmap_free(sev_asid_bitmap);
19970 +               sev_asid_bitmap = NULL;
19971                 goto out;
19972 +       }
19974         pr_info("SEV supported: %u ASIDs\n", max_sev_asid - min_sev_asid + 1);
19975         sev_supported = true;
19976 @@ -1666,7 +1668,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
19977         return -EINVAL;
19980 -static void pre_sev_es_run(struct vcpu_svm *svm)
19981 +void sev_es_unmap_ghcb(struct vcpu_svm *svm)
19983         if (!svm->ghcb)
19984                 return;
19985 @@ -1702,9 +1704,6 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
19986         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
19987         int asid = sev_get_asid(svm->vcpu.kvm);
19989 -       /* Perform any SEV-ES pre-run actions */
19990 -       pre_sev_es_run(svm);
19992         /* Assign the asid allocated with this SEV guest */
19993         svm->asid = asid;
19995 @@ -2104,5 +2103,8 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
19996          * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
19997          * non-zero value.
19998          */
19999 +       if (!svm->ghcb)
20000 +               return;
20002         ghcb_set_sw_exit_info_2(svm->ghcb, 1);
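The sev.c rework derives es_active from the ioctl id (KVM_SEV_ES_INIT) up front, refuses to (re)initialize once vCPUs exist, and only commits sev->es_active after activation succeeds, so a failed init no longer leaves the flag set. Given the split described at sev_asid_new() — SEV-ES draws from [1, min_sev_asid - 1], plain SEV from [min_sev_asid, max_sev_asid] — the range selection is simply (sketch with made-up limits; the real bounds come from CPUID 0x8000001F):

#include <stdbool.h>
#include <stdio.h>

/* Example firmware limits; real values come from CPUID 0x8000001F. */
static const int min_sev_asid = 100, max_sev_asid = 500;

static void asid_range(bool es_active, int *first, int *last)
{
        *first = es_active ? 1 : min_sev_asid;
        *last  = es_active ? min_sev_asid - 1 : max_sev_asid;
}

int main(void)
{
        int lo, hi;

        asid_range(true, &lo, &hi);
        printf("SEV-ES: %d..%d\n", lo, hi); /* 1..99 */
        asid_range(false, &lo, &hi);
        printf("SEV:    %d..%d\n", lo, hi); /* 100..500 */
        return 0;
}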
20004 diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
20005 index 58a45bb139f8..9a6825feaf53 100644
20006 --- a/arch/x86/kvm/svm/svm.c
20007 +++ b/arch/x86/kvm/svm/svm.c
20008 @@ -564,9 +564,8 @@ static int svm_cpu_init(int cpu)
20009         clear_page(page_address(sd->save_area));
20011         if (svm_sev_enabled()) {
20012 -               sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
20013 -                                             sizeof(void *),
20014 -                                             GFP_KERNEL);
20015 +               sd->sev_vmcbs = kcalloc(max_sev_asid + 1, sizeof(void *),
20016 +                                       GFP_KERNEL);
20017                 if (!sd->sev_vmcbs)
20018                         goto free_save_area;
20019         }
20020 @@ -969,21 +968,6 @@ static __init int svm_hardware_setup(void)
20021                 kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
20022         }
20024 -       if (IS_ENABLED(CONFIG_KVM_AMD_SEV) && sev) {
20025 -               sev_hardware_setup();
20026 -       } else {
20027 -               sev = false;
20028 -               sev_es = false;
20029 -       }
20031 -       svm_adjust_mmio_mask();
20033 -       for_each_possible_cpu(cpu) {
20034 -               r = svm_cpu_init(cpu);
20035 -               if (r)
20036 -                       goto err;
20037 -       }
20039         /*
20040          * KVM's MMU doesn't support using 2-level paging for itself, and thus
20041          * NPT isn't supported if the host is using 2-level paging since host
20042 @@ -998,6 +982,21 @@ static __init int svm_hardware_setup(void)
20043         kvm_configure_mmu(npt_enabled, get_max_npt_level(), PG_LEVEL_1G);
20044         pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");
20046 +       if (IS_ENABLED(CONFIG_KVM_AMD_SEV) && sev && npt_enabled) {
20047 +               sev_hardware_setup();
20048 +       } else {
20049 +               sev = false;
20050 +               sev_es = false;
20051 +       }
20053 +       svm_adjust_mmio_mask();
20055 +       for_each_possible_cpu(cpu) {
20056 +               r = svm_cpu_init(cpu);
20057 +               if (r)
20058 +                       goto err;
20059 +       }
20061         if (nrips) {
20062                 if (!boot_cpu_has(X86_FEATURE_NRIPS))
20063                         nrips = false;
20064 @@ -1417,6 +1416,9 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
20065         struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
20066         unsigned int i;
20068 +       if (sev_es_guest(vcpu->kvm))
20069 +               sev_es_unmap_ghcb(svm);
20071         if (svm->guest_state_loaded)
20072                 return;
20074 @@ -1898,7 +1900,7 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
20076  static int pf_interception(struct vcpu_svm *svm)
20078 -       u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
20079 +       u64 fault_address = svm->vmcb->control.exit_info_2;
20080         u64 error_code = svm->vmcb->control.exit_info_1;
20082         return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
20083 @@ -2738,6 +2740,10 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
20084         case MSR_TSC_AUX:
20085                 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
20086                         return 1;
20087 +               if (!msr_info->host_initiated &&
20088 +                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
20089 +                   !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
20090 +                       return 1;
20091                 msr_info->data = svm->tsc_aux;
20092                 break;
20093         /*
20094 @@ -2809,7 +2815,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
20095  static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
20097         struct vcpu_svm *svm = to_svm(vcpu);
20098 -       if (!sev_es_guest(svm->vcpu.kvm) || !err)
20099 +       if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->ghcb))
20100                 return kvm_complete_insn_gp(&svm->vcpu, err);
20102         ghcb_set_sw_exit_info_1(svm->ghcb, 1);
20103 @@ -2946,6 +2952,11 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
20104                 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
20105                         return 1;
20107 +               if (!msr->host_initiated &&
20108 +                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
20109 +                   !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
20110 +                       return 1;
20112                 /*
20113                  * This is rare, so we update the MSR here instead of using
20114                  * direct_access_msrs.  Doing that would require a rdmsr in
20115 @@ -3804,15 +3815,15 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
20116          * have them in state 'on' as recorded before entering guest mode.
20117          * Same as enter_from_user_mode().
20118          *
20119 -        * guest_exit_irqoff() restores host context and reinstates RCU if
20120 -        * enabled and required.
20121 +        * context_tracking_guest_exit() restores host context and reinstates
20122 +        * RCU if enabled and required.
20123          *
20124          * This needs to be done before the below as native_read_msr()
20125          * contains a tracepoint and x86_spec_ctrl_restore_host() calls
20126          * into world and some more.
20127          */
20128         lockdep_hardirqs_off(CALLER_ADDR0);
20129 -       guest_exit_irqoff();
20130 +       context_tracking_guest_exit();
20132         instrumentation_begin();
20133         trace_hardirqs_off_finish();
20134 diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
20135 index 39e071fdab0c..98da0b91f273 100644
20136 --- a/arch/x86/kvm/svm/svm.h
20137 +++ b/arch/x86/kvm/svm/svm.h
20138 @@ -571,6 +571,7 @@ void sev_es_init_vmcb(struct vcpu_svm *svm);
20139  void sev_es_create_vcpu(struct vcpu_svm *svm);
20140  void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
20141  void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);
20142 +void sev_es_unmap_ghcb(struct vcpu_svm *svm);
20144  /* vmenter.S */
20146 diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
20147 index bcca0b80e0d0..4ba2a43e188b 100644
20148 --- a/arch/x86/kvm/vmx/nested.c
20149 +++ b/arch/x86/kvm/vmx/nested.c
20150 @@ -619,6 +619,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
20151         }
20153         /* KVM unconditionally exposes the FS/GS base MSRs to L1. */
20154 +#ifdef CONFIG_X86_64
20155         nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
20156                                              MSR_FS_BASE, MSR_TYPE_RW);
20158 @@ -627,6 +628,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
20160         nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
20161                                              MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
20162 +#endif
20164         /*
20165          * Checking the L0->L1 bitmap is trying to verify two things:
20166 @@ -3098,15 +3100,8 @@ static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
20167                         nested_vmx_handle_enlightened_vmptrld(vcpu, false);
20169                 if (evmptrld_status == EVMPTRLD_VMFAIL ||
20170 -                   evmptrld_status == EVMPTRLD_ERROR) {
20171 -                       pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
20172 -                                            __func__);
20173 -                       vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
20174 -                       vcpu->run->internal.suberror =
20175 -                               KVM_INTERNAL_ERROR_EMULATION;
20176 -                       vcpu->run->internal.ndata = 0;
20177 +                   evmptrld_status == EVMPTRLD_ERROR)
20178                         return false;
20179 -               }
20180         }
20182         return true;
20183 @@ -3194,8 +3189,16 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
20185  static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
20187 -       if (!nested_get_evmcs_page(vcpu))
20188 +       if (!nested_get_evmcs_page(vcpu)) {
20189 +               pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
20190 +                                    __func__);
20191 +               vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
20192 +               vcpu->run->internal.suberror =
20193 +                       KVM_INTERNAL_ERROR_EMULATION;
20194 +               vcpu->run->internal.ndata = 0;
20196                 return false;
20197 +       }
20199         if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
20200                 return false;
20201 @@ -4422,7 +4425,15 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
20202         /* trying to cancel vmlaunch/vmresume is a bug */
20203         WARN_ON_ONCE(vmx->nested.nested_run_pending);
20205 -       kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
20206 +       if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
20207 +               /*
20208 +                * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
20209 +                * Enlightened VMCS after migration and we still need to
20210 +                * do that when something is forcing L2->L1 exit prior to
20211 +                * the first L2 run.
20212 +                */
20213 +               (void)nested_get_evmcs_page(vcpu);
20214 +       }
20216         /* Service the TLB flush request for L2 before switching to L1. */
20217         if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
20218 @@ -4601,9 +4612,9 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
20219         else if (addr_size == 0)
20220                 off = (gva_t)sign_extend64(off, 15);
20221         if (base_is_valid)
20222 -               off += kvm_register_read(vcpu, base_reg);
20223 +               off += kvm_register_readl(vcpu, base_reg);
20224         if (index_is_valid)
20225 -               off += kvm_register_read(vcpu, index_reg) << scaling;
20226 +               off += kvm_register_readl(vcpu, index_reg) << scaling;
20227         vmx_get_segment(vcpu, &s, seg_reg);
20229         /*
20230 @@ -5479,16 +5490,11 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
20231                 if (!nested_vmx_check_eptp(vcpu, new_eptp))
20232                         return 1;
20234 -               kvm_mmu_unload(vcpu);
20235                 mmu->ept_ad = accessed_dirty;
20236                 mmu->mmu_role.base.ad_disabled = !accessed_dirty;
20237                 vmcs12->ept_pointer = new_eptp;
20238 -               /*
20239 -                * TODO: Check what's the correct approach in case
20240 -                * mmu reload fails. Currently, we just let the next
20241 -                * reload potentially fail
20242 -                */
20243 -               kvm_mmu_reload(vcpu);
20245 +               kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
20246         }
20248         return 0;
20249 @@ -5717,7 +5723,7 @@ static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
20251         /* Decode instruction info and find the field to access */
20252         vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
20253 -       field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
20254 +       field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
20256         /* Out-of-range fields always cause a VM exit from L2 to L1 */
20257         if (field >> 15)
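A recurring fix in nested.c (and in vmx.c's handle_cr()/handle_dr() below) is switching kvm_register_read()/kvm_register_write() to the *_readl()/*_writel() variants: outside 64-bit mode a GPR must be read truncated to 32 bits (and written with the high half cleared), otherwise stale bits 63:32 contaminate VM-exit address arithmetic and VMCS field numbers. The behavioral difference, as a sketch (the real helpers derive the mode from the vCPU state, not a flag):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* reg64 models a GPR; is_64bit_mode models the vCPU mode check. */
static uint64_t register_readl(uint64_t reg64, bool is_64bit_mode)
{
        return is_64bit_mode ? reg64 : (uint32_t)reg64;
}

int main(void)
{
        uint64_t rax = 0xdeadbeef00001234ULL;

        printf("64-bit: %#llx\n",
               (unsigned long long)register_readl(rax, true));
        printf("32-bit: %#llx\n",
               (unsigned long long)register_readl(rax, false)); /* 0x1234 */
        return 0;
}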
20258 diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
20259 index 29b40e092d13..ae63d59be38c 100644
20260 --- a/arch/x86/kvm/vmx/vmx.c
20261 +++ b/arch/x86/kvm/vmx/vmx.c
20262 @@ -36,6 +36,7 @@
20263  #include <asm/debugreg.h>
20264  #include <asm/desc.h>
20265  #include <asm/fpu/internal.h>
20266 +#include <asm/idtentry.h>
20267  #include <asm/io.h>
20268  #include <asm/irq_remapping.h>
20269  #include <asm/kexec.h>
20270 @@ -156,9 +157,11 @@ static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
20271         MSR_IA32_SPEC_CTRL,
20272         MSR_IA32_PRED_CMD,
20273         MSR_IA32_TSC,
20274 +#ifdef CONFIG_X86_64
20275         MSR_FS_BASE,
20276         MSR_GS_BASE,
20277         MSR_KERNEL_GS_BASE,
20278 +#endif
20279         MSR_IA32_SYSENTER_CS,
20280         MSR_IA32_SYSENTER_ESP,
20281         MSR_IA32_SYSENTER_EIP,
20282 @@ -1731,7 +1734,8 @@ static void setup_msrs(struct vcpu_vmx *vmx)
20283         if (update_transition_efer(vmx))
20284                 vmx_setup_uret_msr(vmx, MSR_EFER);
20286 -       if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
20287 +       if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP)  ||
20288 +           guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDPID))
20289                 vmx_setup_uret_msr(vmx, MSR_TSC_AUX);
20291         vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL);
20292 @@ -1930,7 +1934,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
20293                 break;
20294         case MSR_TSC_AUX:
20295                 if (!msr_info->host_initiated &&
20296 -                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
20297 +                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
20298 +                   !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
20299                         return 1;
20300                 goto find_uret_msr;
20301         case MSR_IA32_DEBUGCTLMSR:
20302 @@ -2227,7 +2232,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
20303                 break;
20304         case MSR_TSC_AUX:
20305                 if (!msr_info->host_initiated &&
20306 -                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
20307 +                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
20308 +                   !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
20309                         return 1;
20310                 /* Check reserved bit, higher 32 bits should be zero */
20311                 if ((data >> 32) != 0)
20312 @@ -4299,7 +4305,23 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
20313                                                   xsaves_enabled, false);
20314         }
20316 -       vmx_adjust_sec_exec_feature(vmx, &exec_control, rdtscp, RDTSCP);
20317 +       /*
20318 +        * RDPID is also gated by ENABLE_RDTSCP, turn on the control if either
20319 +        * feature is exposed to the guest.  This creates a virtualization hole
20320 +        * if both are supported in hardware but only one is exposed to the
20321 +        * guest, but letting the guest execute RDTSCP or RDPID when either one
20322 +        * is advertised is preferable to emulating the advertised instruction
20323 +        * in KVM on #UD, and obviously better than incorrectly injecting #UD.
20324 +        */
20325 +       if (cpu_has_vmx_rdtscp()) {
20326 +               bool rdpid_or_rdtscp_enabled =
20327 +                       guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) ||
20328 +                       guest_cpuid_has(vcpu, X86_FEATURE_RDPID);
20330 +               vmx_adjust_secondary_exec_control(vmx, &exec_control,
20331 +                                                 SECONDARY_EXEC_ENABLE_RDTSCP,
20332 +                                                 rdpid_or_rdtscp_enabled, false);
20333 +       }
20334         vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID);
20336         vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND);
20337 @@ -5062,12 +5084,12 @@ static int handle_cr(struct kvm_vcpu *vcpu)
20338                 case 3:
20339                         WARN_ON_ONCE(enable_unrestricted_guest);
20340                         val = kvm_read_cr3(vcpu);
20341 -                       kvm_register_write(vcpu, reg, val);
20342 +                       kvm_register_writel(vcpu, reg, val);
20343                         trace_kvm_cr_read(cr, val);
20344                         return kvm_skip_emulated_instruction(vcpu);
20345                 case 8:
20346                         val = kvm_get_cr8(vcpu);
20347 -                       kvm_register_write(vcpu, reg, val);
20348 +                       kvm_register_writel(vcpu, reg, val);
20349                         trace_kvm_cr_read(cr, val);
20350                         return kvm_skip_emulated_instruction(vcpu);
20351                 }
20352 @@ -5140,7 +5162,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
20353                 unsigned long val;
20355                 kvm_get_dr(vcpu, dr, &val);
20356 -               kvm_register_write(vcpu, reg, val);
20357 +               kvm_register_writel(vcpu, reg, val);
20358                 err = 0;
20359         } else {
20360                 err = kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg));
20361 @@ -5792,7 +5814,6 @@ void dump_vmcs(void)
20362         u32 vmentry_ctl, vmexit_ctl;
20363         u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control;
20364         unsigned long cr4;
20365 -       u64 efer;
20367         if (!dump_invalid_vmcs) {
20368                 pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n");
20369 @@ -5804,7 +5825,6 @@ void dump_vmcs(void)
20370         cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
20371         pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
20372         cr4 = vmcs_readl(GUEST_CR4);
20373 -       efer = vmcs_read64(GUEST_IA32_EFER);
20374         secondary_exec_control = 0;
20375         if (cpu_has_secondary_exec_ctrls())
20376                 secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
20377 @@ -5816,9 +5836,7 @@ void dump_vmcs(void)
20378         pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
20379                cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
20380         pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
20381 -       if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
20382 -           (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA))
20383 -       {
20384 +       if (cpu_has_vmx_ept()) {
20385                 pr_err("PDPTR0 = 0x%016llx  PDPTR1 = 0x%016llx\n",
20386                        vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1));
20387                 pr_err("PDPTR2 = 0x%016llx  PDPTR3 = 0x%016llx\n",
20388 @@ -5844,7 +5862,8 @@ void dump_vmcs(void)
20389         if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) ||
20390             (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER)))
20391                 pr_err("EFER =     0x%016llx  PAT = 0x%016llx\n",
20392 -                      efer, vmcs_read64(GUEST_IA32_PAT));
20393 +                      vmcs_read64(GUEST_IA32_EFER),
20394 +                      vmcs_read64(GUEST_IA32_PAT));
20395         pr_err("DebugCtl = 0x%016llx  DebugExceptions = 0x%016lx\n",
20396                vmcs_read64(GUEST_IA32_DEBUGCTL),
20397                vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
20398 @@ -6395,18 +6414,17 @@ static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
20400  void vmx_do_interrupt_nmi_irqoff(unsigned long entry);
20402 -static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu, u32 intr_info)
20403 +static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu,
20404 +                                       unsigned long entry)
20406 -       unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
20407 -       gate_desc *desc = (gate_desc *)host_idt_base + vector;
20409         kvm_before_interrupt(vcpu);
20410 -       vmx_do_interrupt_nmi_irqoff(gate_offset(desc));
20411 +       vmx_do_interrupt_nmi_irqoff(entry);
20412         kvm_after_interrupt(vcpu);
20415  static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
20417 +       const unsigned long nmi_entry = (unsigned long)asm_exc_nmi_noist;
20418         u32 intr_info = vmx_get_intr_info(&vmx->vcpu);
20420         /* if exit due to PF check for async PF */
20421 @@ -6417,18 +6435,20 @@ static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
20422                 kvm_machine_check();
20423         /* We need to handle NMIs before interrupts are enabled */
20424         else if (is_nmi(intr_info))
20425 -               handle_interrupt_nmi_irqoff(&vmx->vcpu, intr_info);
20426 +               handle_interrupt_nmi_irqoff(&vmx->vcpu, nmi_entry);
20429  static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
20431         u32 intr_info = vmx_get_intr_info(vcpu);
20432 +       unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
20433 +       gate_desc *desc = (gate_desc *)host_idt_base + vector;
20435         if (WARN_ONCE(!is_external_intr(intr_info),
20436             "KVM: unexpected VM-Exit interrupt info: 0x%x", intr_info))
20437                 return;
20439 -       handle_interrupt_nmi_irqoff(vcpu, intr_info);
20440 +       handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc));
20443  static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
20444 @@ -6681,15 +6701,15 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
20445          * have them in state 'on' as recorded before entering guest mode.
20446          * Same as enter_from_user_mode().
20447          *
20448 -        * guest_exit_irqoff() restores host context and reinstates RCU if
20449 -        * enabled and required.
20450 +        * context_tracking_guest_exit() restores host context and reinstates
20451 +        * RCU if enabled and required.
20452          *
20453          * This needs to be done before the below as native_read_msr()
20454          * contains a tracepoint and x86_spec_ctrl_restore_host() calls
20455          * into world and some more.
20456          */
20457         lockdep_hardirqs_off(CALLER_ADDR0);
20458 -       guest_exit_irqoff();
20459 +       context_tracking_guest_exit();
20461         instrumentation_begin();
20462         trace_hardirqs_off_finish();
20463 @@ -6894,12 +6914,9 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
20465         for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i) {
20466                 u32 index = vmx_uret_msrs_list[i];
20467 -               u32 data_low, data_high;
20468                 int j = vmx->nr_uret_msrs;
20470 -               if (rdmsr_safe(index, &data_low, &data_high) < 0)
20471 -                       continue;
20472 -               if (wrmsr_safe(index, data_low, data_high) < 0)
20473 +               if (kvm_probe_user_return_msr(index))
20474                         continue;
20476                 vmx->guest_uret_msrs[j].slot = i;
20477 @@ -6938,9 +6955,11 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
20478         bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
20480         vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R);
20481 +#ifdef CONFIG_X86_64
20482         vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
20483         vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
20484         vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
20485 +#endif
20486         vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
20487         vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
20488         vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
20489 @@ -7330,9 +7349,11 @@ static __init void vmx_set_cpu_caps(void)
20490         if (!cpu_has_vmx_xsaves())
20491                 kvm_cpu_cap_clear(X86_FEATURE_XSAVES);
20493 -       /* CPUID 0x80000001 */
20494 -       if (!cpu_has_vmx_rdtscp())
20495 +       /* CPUID 0x80000001 and 0x7 (RDPID) */
20496 +       if (!cpu_has_vmx_rdtscp()) {
20497                 kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
20498 +               kvm_cpu_cap_clear(X86_FEATURE_RDPID);
20499 +       }
20501         if (cpu_has_vmx_waitpkg())
20502                 kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
20503 @@ -7388,8 +7409,9 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
20504         /*
20505          * RDPID causes #UD if disabled through secondary execution controls.
20506          * Because it is marked as EmulateOnUD, we need to intercept it here.
20507 +        * Note, RDPID is hidden behind ENABLE_RDTSCP.
20508          */
20509 -       case x86_intercept_rdtscp:
20510 +       case x86_intercept_rdpid:
20511                 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_RDTSCP)) {
20512                         exception->vector = UD_VECTOR;
20513                         exception->error_code_valid = false;
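The vmx.c RDPID/RDTSCP changes all enforce one rule: RDPID and RDTSCP are separate CPUID bits, but both read IA32_TSC_AUX and both are governed by the single ENABLE_RDTSCP secondary execution control, so the control — and guest access to the MSR — must follow the OR of the two features (the "virtualization hole" the long comment above concedes). Condensed into a sketch (not KVM code):

#include <stdbool.h>
#include <stdio.h>

struct guest_caps { bool rdtscp, rdpid; };

/* ENABLE_RDTSCP must be on if either instruction is advertised. */
static bool want_enable_rdtscp_ctrl(const struct guest_caps *g)
{
        return g->rdtscp || g->rdpid;
}

/* IA32_TSC_AUX is guest-accessible under the same condition;
 * host-initiated (userspace) accesses are always allowed. */
static bool tsc_aux_allowed(const struct guest_caps *g, bool host_initiated)
{
        return host_initiated || want_enable_rdtscp_ctrl(g);
}

int main(void)
{
        struct guest_caps g = { .rdtscp = false, .rdpid = true };

        printf("ctrl=%d msr=%d\n", want_enable_rdtscp_ctrl(&g),
               tsc_aux_allowed(&g, false)); /* ctrl=1 msr=1 */
        return 0;
}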
20514 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
20515 index ee0dc58ac3a5..86678f8b3502 100644
20516 --- a/arch/x86/kvm/x86.c
20517 +++ b/arch/x86/kvm/x86.c
20518 @@ -335,6 +335,22 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
20519         }
20522 +int kvm_probe_user_return_msr(u32 msr)
20524 +       u64 val;
20525 +       int ret;
20527 +       preempt_disable();
20528 +       ret = rdmsrl_safe(msr, &val);
20529 +       if (ret)
20530 +               goto out;
20531 +       ret = wrmsrl_safe(msr, val);
20532 +out:
20533 +       preempt_enable();
20534 +       return ret;
20536 +EXPORT_SYMBOL_GPL(kvm_probe_user_return_msr);
20538  void kvm_define_user_return_msr(unsigned slot, u32 msr)
20540         BUG_ON(slot >= KVM_MAX_NR_USER_RETURN_MSRS);
20541 @@ -1072,10 +1088,15 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
20542                 return 0;
20543         }
20545 -       if (is_long_mode(vcpu) && kvm_vcpu_is_illegal_gpa(vcpu, cr3))
20546 +       /*
20547 +        * Do not condition the GPA check on long mode: this helper is used to
20548 +        * stuff CR3, e.g. for RSM emulation, and there is no guarantee that
20549 +        * the current vCPU mode is accurate.
20550 +        */
20551 +       if (kvm_vcpu_is_illegal_gpa(vcpu, cr3))
20552                 return 1;
20553 -       else if (is_pae_paging(vcpu) &&
20554 -                !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
20556 +       if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
20557                 return 1;
20559         kvm_mmu_new_pgd(vcpu, cr3, skip_tlb_flush, skip_tlb_flush);
20560 @@ -5859,7 +5880,8 @@ static void kvm_init_msr_list(void)
20561                                 continue;
20562                         break;
20563                 case MSR_TSC_AUX:
20564 -                       if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
20565 +                       if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP) &&
20566 +                           !kvm_cpu_cap_has(X86_FEATURE_RDPID))
20567                                 continue;
20568                         break;
20569                 case MSR_IA32_UMWAIT_CONTROL:
20570 @@ -7959,6 +7981,18 @@ static void pvclock_gtod_update_fn(struct work_struct *work)
20572  static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
20575 + * Indirection to move queue_work() out of the tk_core.seq write held
20576 + * region to prevent possible deadlocks against time accessors which
20577 + * are invoked with work related locks held.
20578 + */
20579 +static void pvclock_irq_work_fn(struct irq_work *w)
20581 +       queue_work(system_long_wq, &pvclock_gtod_work);
20584 +static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn);
20586  /*
20587   * Notification about pvclock gtod data update.
20588   */
20589 @@ -7970,13 +8004,14 @@ static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
20591         update_pvclock_gtod(tk);
20593 -       /* disable master clock if host does not trust, or does not
20594 -        * use, TSC based clocksource.
20595 +       /*
20596 +        * Disable master clock if host does not trust, or does not use,
20597 +        * TSC based clocksource. Delegate queue_work() to irq_work as
20598 +        * this is invoked with tk_core.seq write held.
20599          */
20600         if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) &&
20601             atomic_read(&kvm_guest_has_master_clock) != 0)
20602 -               queue_work(system_long_wq, &pvclock_gtod_work);
20604 +               irq_work_queue(&pvclock_irq_work);
20605         return 0;
20608 @@ -8091,6 +8126,8 @@ void kvm_arch_exit(void)
20609         cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
20610  #ifdef CONFIG_X86_64
20611         pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
20612 +       irq_work_sync(&pvclock_irq_work);
20613 +       cancel_work_sync(&pvclock_gtod_work);
20614  #endif
20615         kvm_x86_ops.hardware_enable = NULL;
20616         kvm_mmu_module_exit();
20617 @@ -9199,6 +9236,15 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
20618         local_irq_disable();
20619         kvm_after_interrupt(vcpu);
20621 +       /*
20622 +        * Wait until after servicing IRQs to account guest time so that any
20623 +        * ticks that occurred while running the guest are properly accounted
20624 +        * to the guest.  Waiting until IRQs are enabled degrades the accuracy
20625 +        * of accounting via context tracking, but the loss of accuracy is
20626 +        * acceptable for all known use cases.
20627 +        */
20628 +       vtime_account_guest_exit();
20630         if (lapic_in_kernel(vcpu)) {
20631                 s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta;
20632                 if (delta != S64_MIN) {
20633 @@ -11020,6 +11066,9 @@ bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
20635  bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
20637 +       if (vcpu->arch.guest_state_protected)
20638 +               return true;
20640         return vcpu->arch.preempted_in_kernel;
20643 @@ -11290,7 +11339,7 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
20644         if (!kvm_pv_async_pf_enabled(vcpu))
20645                 return true;
20646         else
20647 -               return apf_pageready_slot_free(vcpu);
20648 +               return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu);
20651  void kvm_arch_start_assignment(struct kvm *kvm)
20652 @@ -11539,7 +11588,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
20654                 fallthrough;
20655         case INVPCID_TYPE_ALL_INCL_GLOBAL:
20656 -               kvm_mmu_unload(vcpu);
20657 +               kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
20658                 return kvm_skip_emulated_instruction(vcpu);
20660         default:
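The pvclock change in x86.c is purely about lock context: pvclock_gtod_notify() runs inside the timekeeper's tk_core.seq write section, where a direct queue_work() can deadlock against time accessors taken under work-related locks, so the queueing is bounced through an irq_work that fires only after the seqcount is released (and both stages are flushed in kvm_arch_exit()). A userspace model of the two-stage kick (plain flags stand in for irq_work_queue() and queue_work()):

#include <stdbool.h>
#include <stdio.h>

static bool irq_work_pending;
static bool work_queued;

/* Seqlock write side "held" here: only raise the irq_work flag. */
static void pvclock_gtod_notify_model(void)
{
        irq_work_pending = true;
}

/* Runs later, with no timekeeper locks held. */
static void irq_exit_model(void)
{
        if (irq_work_pending) {
                irq_work_pending = false;
                work_queued = true; /* stands in for queue_work() */
        }
}

int main(void)
{
        pvclock_gtod_notify_model();
        irq_exit_model();
        printf("queued=%d\n", work_queued); /* 1 */
        return 0;
}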
20661 diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
20662 index ae17250e1efe..7f27bb65a572 100644
20663 --- a/arch/x86/kvm/xen.c
20664 +++ b/arch/x86/kvm/xen.c
20665 @@ -673,7 +673,7 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
20666         bool longmode;
20667         u64 input, params[6];
20669 -       input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);
20670 +       input = (u64)kvm_register_readl(vcpu, VCPU_REGS_RAX);
20672         /* Hyper-V hypercalls get bit 31 set in EAX */
20673         if ((input & 0x80000000) &&
20674 diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c
20675 index 75a0915b0d01..40bbe56bde32 100644
20676 --- a/arch/x86/lib/msr-smp.c
20677 +++ b/arch/x86/lib/msr-smp.c
20678 @@ -252,7 +252,7 @@ static void __wrmsr_safe_regs_on_cpu(void *info)
20679         rv->err = wrmsr_safe_regs(rv->regs);
20682 -int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
20683 +int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
20685         int err;
20686         struct msr_regs_info rv;
20687 @@ -265,7 +265,7 @@ int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
20689  EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
20691 -int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
20692 +int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
20694         int err;
20695         struct msr_regs_info rv;
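The msr-smp.c hunks only change the spelling of the parameter: "u32 regs[8]" still decays to "u32 *", but it documents the expected buffer size and matches the declaration in the header, which is likely aimed at GCC 11's -Warray-parameter mismatch warning. A demonstration that the two spellings are call-compatible:

#include <stdio.h>

/* The [8] in a prototype is documentation only; the parameter is
 * still a pointer, so sizeof(regs) inside would be pointer-sized. */
static unsigned int sum_regs(unsigned int regs[8])
{
        unsigned int s = 0;

        for (int i = 0; i < 8; i++)
                s += regs[i];
        return s;
}

int main(void)
{
        unsigned int regs[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

        printf("%u\n", sum_regs(regs)); /* 36 */
        return 0;
}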
20696 diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
20697 index 6c5eb6f3f14f..a19374d26101 100644
20698 --- a/arch/x86/mm/mem_encrypt_identity.c
20699 +++ b/arch/x86/mm/mem_encrypt_identity.c
20700 @@ -503,14 +503,10 @@ void __init sme_enable(struct boot_params *bp)
20702  #define AMD_SME_BIT    BIT(0)
20703  #define AMD_SEV_BIT    BIT(1)
20704 -       /*
20705 -        * Set the feature mask (SME or SEV) based on whether we are
20706 -        * running under a hypervisor.
20707 -        */
20708 -       eax = 1;
20709 -       ecx = 0;
20710 -       native_cpuid(&eax, &ebx, &ecx, &edx);
20711 -       feature_mask = (ecx & BIT(31)) ? AMD_SEV_BIT : AMD_SME_BIT;
20713 +       /* Check the SEV MSR to see whether SEV or SME is enabled */
20714 +       sev_status   = __rdmsr(MSR_AMD64_SEV);
20715 +       feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
20717         /*
20718          * Check for the SME/SEV feature:
20719 @@ -530,19 +526,26 @@ void __init sme_enable(struct boot_params *bp)
20721         /* Check if memory encryption is enabled */
20722         if (feature_mask == AMD_SME_BIT) {
20723 +               /*
20724 +                * No SME if Hypervisor bit is set. This check is here to
20725 +                * prevent a guest from trying to enable SME. When running as a
20726 +                * KVM guest, checking MSR_K8_SYSCFG would be sufficient, but
20727 +                * other hypervisors might emulate that MSR as non-zero
20728 +                * or even pass it through to the guest.
20729 +                * A malicious hypervisor can still trick a guest into this
20730 +                * path, but there is no way to protect against that.
20731 +                */
20732 +               eax = 1;
20733 +               ecx = 0;
20734 +               native_cpuid(&eax, &ebx, &ecx, &edx);
20735 +               if (ecx & BIT(31))
20736 +                       return;
20738                 /* For SME, check the SYSCFG MSR */
20739                 msr = __rdmsr(MSR_K8_SYSCFG);
20740                 if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
20741                         return;
20742         } else {
20743 -               /* For SEV, check the SEV MSR */
20744 -               msr = __rdmsr(MSR_AMD64_SEV);
20745 -               if (!(msr & MSR_AMD64_SEV_ENABLED))
20746 -                       return;
20748 -               /* Save SEV_STATUS to avoid reading MSR again */
20749 -               sev_status = msr;
20751                 /* SEV state cannot be controlled by a command line option */
20752                 sme_me_mask = me_mask;
20753                 sev_enabled = true;
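The reordering above consults MSR_AMD64_SEV first to choose between SEV and SME, and only performs the CPUID hypervisor test on the SME path, where a guest must not attempt to enable host-side memory encryption. A hedged sketch of that hypervisor-bit check, assuming <asm/processor.h> and <linux/bits.h> context (the wrapper name is invented):

/* CPUID leaf 1 reports the "running under a hypervisor" bit in ECX[31]. */
static bool running_under_hypervisor(void)
{
        unsigned int eax = 1, ebx = 0, ecx = 0, edx = 0;

        native_cpuid(&eax, &ebx, &ecx, &edx);
        return !!(ecx & BIT(31));
}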
20754 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
20755 index f6a9e2e36642..1c27e6f43f80 100644
20756 --- a/arch/x86/mm/pgtable.c
20757 +++ b/arch/x86/mm/pgtable.c
20758 @@ -550,7 +550,7 @@ int ptep_test_and_clear_young(struct vm_area_struct *vma,
20759         return ret;
20762 -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
20763 +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG)
20764  int pmdp_test_and_clear_young(struct vm_area_struct *vma,
20765                               unsigned long addr, pmd_t *pmdp)
20767 @@ -562,6 +562,9 @@ int pmdp_test_and_clear_young(struct vm_area_struct *vma,
20769         return ret;
20771 +#endif
20773 +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
20774  int pudp_test_and_clear_young(struct vm_area_struct *vma,
20775                               unsigned long addr, pud_t *pudp)
20777 diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
20778 index cd3914fc9f3d..e94e0050a583 100644
20779 --- a/arch/x86/power/hibernate.c
20780 +++ b/arch/x86/power/hibernate.c
20781 @@ -13,8 +13,8 @@
20782  #include <linux/kdebug.h>
20783  #include <linux/cpu.h>
20784  #include <linux/pgtable.h>
20786 -#include <crypto/hash.h>
20787 +#include <linux/types.h>
20788 +#include <linux/crc32.h>
20790  #include <asm/e820/api.h>
20791  #include <asm/init.h>
20792 @@ -54,95 +54,33 @@ int pfn_is_nosave(unsigned long pfn)
20793         return pfn >= nosave_begin_pfn && pfn < nosave_end_pfn;
20797 -#define MD5_DIGEST_SIZE 16
20799  struct restore_data_record {
20800         unsigned long jump_address;
20801         unsigned long jump_address_phys;
20802         unsigned long cr3;
20803         unsigned long magic;
20804 -       u8 e820_digest[MD5_DIGEST_SIZE];
20805 +       unsigned long e820_checksum;
20806  };
20808 -#if IS_BUILTIN(CONFIG_CRYPTO_MD5)
20809  /**
20810 - * get_e820_md5 - calculate md5 according to given e820 table
20811 + * compute_e820_crc32 - calculate crc32 of a given e820 table
20812   *
20813   * @table: the e820 table to be calculated
20814 - * @buf: the md5 result to be stored to
20815 + *
20816 + * Return: the resulting checksum
20817   */
20818 -static int get_e820_md5(struct e820_table *table, void *buf)
20819 +static inline u32 compute_e820_crc32(struct e820_table *table)
20821 -       struct crypto_shash *tfm;
20822 -       struct shash_desc *desc;
20823 -       int size;
20824 -       int ret = 0;
20826 -       tfm = crypto_alloc_shash("md5", 0, 0);
20827 -       if (IS_ERR(tfm))
20828 -               return -ENOMEM;
20830 -       desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
20831 -                      GFP_KERNEL);
20832 -       if (!desc) {
20833 -               ret = -ENOMEM;
20834 -               goto free_tfm;
20835 -       }
20837 -       desc->tfm = tfm;
20839 -       size = offsetof(struct e820_table, entries) +
20840 +       int size = offsetof(struct e820_table, entries) +
20841                 sizeof(struct e820_entry) * table->nr_entries;
20843 -       if (crypto_shash_digest(desc, (u8 *)table, size, buf))
20844 -               ret = -EINVAL;
20846 -       kfree_sensitive(desc);
20848 -free_tfm:
20849 -       crypto_free_shash(tfm);
20850 -       return ret;
20853 -static int hibernation_e820_save(void *buf)
20855 -       return get_e820_md5(e820_table_firmware, buf);
20858 -static bool hibernation_e820_mismatch(void *buf)
20860 -       int ret;
20861 -       u8 result[MD5_DIGEST_SIZE];
20863 -       memset(result, 0, MD5_DIGEST_SIZE);
20864 -       /* If there is no digest in suspend kernel, let it go. */
20865 -       if (!memcmp(result, buf, MD5_DIGEST_SIZE))
20866 -               return false;
20868 -       ret = get_e820_md5(e820_table_firmware, result);
20869 -       if (ret)
20870 -               return true;
20872 -       return memcmp(result, buf, MD5_DIGEST_SIZE) ? true : false;
20874 -#else
20875 -static int hibernation_e820_save(void *buf)
20877 -       return 0;
20880 -static bool hibernation_e820_mismatch(void *buf)
20882 -       /* If md5 is not builtin for restore kernel, let it go. */
20883 -       return false;
20884 +       return ~crc32_le(~0, (unsigned char const *)table, size);
20886 -#endif
20888  #ifdef CONFIG_X86_64
20889 -#define RESTORE_MAGIC  0x23456789ABCDEF01UL
20890 +#define RESTORE_MAGIC  0x23456789ABCDEF02UL
20891  #else
20892 -#define RESTORE_MAGIC  0x12345678UL
20893 +#define RESTORE_MAGIC  0x12345679UL
20894  #endif
20896  /**
20897 @@ -179,7 +117,8 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
20898          */
20899         rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK;
20901 -       return hibernation_e820_save(rdr->e820_digest);
20902 +       rdr->e820_checksum = compute_e820_crc32(e820_table_firmware);
20903 +       return 0;
20906  /**
20907 @@ -200,7 +139,7 @@ int arch_hibernation_header_restore(void *addr)
20908         jump_address_phys = rdr->jump_address_phys;
20909         restore_cr3 = rdr->cr3;
20911 -       if (hibernation_e820_mismatch(rdr->e820_digest)) {
20912 +       if (rdr->e820_checksum != compute_e820_crc32(e820_table_firmware)) {
20913                 pr_crit("Hibernate inconsistent memory map detected!\n");
20914                 return -ENODEV;
20915         }
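For reference, the hunk above hashes the e820 table with the standard inverted little-endian CRC-32 idiom instead of an MD5 digest, trading cryptographic strength (which a consistency check never needed) for an always-available library helper. A minimal sketch of the same idiom over an arbitrary buffer (the function name is invented):

#include <linux/crc32.h>
#include <linux/types.h>

/* Seeding with ~0 and inverting the result yields the conventional
 * CRC-32 as produced by zlib; crc32_le() is part of the core kernel.
 */
static u32 buf_checksum(const void *buf, size_t len)
{
        return ~crc32_le(~0, buf, len);
}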
20916 diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
20917 index dc0a337f985b..8183ddb3700c 100644
20918 --- a/arch/x86/xen/enlighten_pv.c
20919 +++ b/arch/x86/xen/enlighten_pv.c
20920 @@ -1276,16 +1276,16 @@ asmlinkage __visible void __init xen_start_kernel(void)
20921         /* Get mfn list */
20922         xen_build_dynamic_phys_to_machine();
20924 +       /* Work out if we support NX */
20925 +       get_cpu_cap(&boot_cpu_data);
20926 +       x86_configure_nx();
20928         /*
20929          * Set up kernel GDT and segment registers, mainly so that
20930          * -fstack-protector code can be executed.
20931          */
20932         xen_setup_gdt(0);
20934 -       /* Work out if we support NX */
20935 -       get_cpu_cap(&boot_cpu_data);
20936 -       x86_configure_nx();
20938         /* Determine virtual and physical address sizes */
20939         get_cpu_address_sizes(&boot_cpu_data);
20941 diff --git a/block/Kconfig b/block/Kconfig
20942 index a2297edfdde8..f688ea5f0dbd 100644
20943 --- a/block/Kconfig
20944 +++ b/block/Kconfig
20945 @@ -83,7 +83,7 @@ config BLK_DEV_INTEGRITY_T10
20947  config BLK_DEV_ZONED
20948         bool "Zoned block device support"
20949 -       select MQ_IOSCHED_DEADLINE
20950 +       select IOSCHED_BFQ
20951         help
20952         Block layer zoned block device support. This option enables
20953         support for ZAC/ZBC/ZNS host-managed and host-aware zoned block
20954 diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
20955 index 2f2158e05a91..e58b2953ac16 100644
20956 --- a/block/Kconfig.iosched
20957 +++ b/block/Kconfig.iosched
20958 @@ -5,13 +5,11 @@ menu "IO Schedulers"
20960  config MQ_IOSCHED_DEADLINE
20961         tristate "MQ deadline I/O scheduler"
20962 -       default y
20963         help
20964           MQ version of the deadline IO scheduler.
20966  config MQ_IOSCHED_KYBER
20967         tristate "Kyber I/O scheduler"
20968 -       default y
20969         help
20970           The Kyber I/O scheduler is a low-overhead scheduler suitable for
20971           multiqueue and other fast devices. Given target latencies for reads and
20972 @@ -20,6 +18,7 @@ config MQ_IOSCHED_KYBER
20974  config IOSCHED_BFQ
20975         tristate "BFQ I/O scheduler"
20976 +       default y
20977         help
20978         BFQ I/O scheduler for BLK-MQ. BFQ distributes the bandwidth of
21979         the device among all processes according to their weights,
20980 diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
20981 index 95586137194e..bc319931d2b3 100644
20982 --- a/block/bfq-iosched.c
20983 +++ b/block/bfq-iosched.c
20984 @@ -1012,7 +1012,7 @@ static void
20985  bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
20986                       struct bfq_io_cq *bic, bool bfq_already_existing)
20988 -       unsigned int old_wr_coeff = bfqq->wr_coeff;
20989 +       unsigned int old_wr_coeff = 1;
20990         bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);
20992         if (bic->saved_has_short_ttime)
20993 @@ -1033,7 +1033,13 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
20994         bfqq->ttime = bic->saved_ttime;
20995         bfqq->io_start_time = bic->saved_io_start_time;
20996         bfqq->tot_idle_time = bic->saved_tot_idle_time;
20997 -       bfqq->wr_coeff = bic->saved_wr_coeff;
20998 +       /*
20999 +        * Restore weight coefficient only if low_latency is on
21000 +        */
21001 +       if (bfqd->low_latency) {
21002 +               old_wr_coeff = bfqq->wr_coeff;
21003 +               bfqq->wr_coeff = bic->saved_wr_coeff;
21004 +       }
21005         bfqq->service_from_wr = bic->saved_service_from_wr;
21006         bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
21007         bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
21008 @@ -2257,10 +2263,9 @@ static void bfq_remove_request(struct request_queue *q,
21012 -static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
21013 +static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
21014                 unsigned int nr_segs)
21016 -       struct request_queue *q = hctx->queue;
21017         struct bfq_data *bfqd = q->elevator->elevator_data;
21018         struct request *free = NULL;
21019         /*
21020 diff --git a/block/blk-iocost.c b/block/blk-iocost.c
21021 index 98d656bdb42b..4fbc875f7cb2 100644
21022 --- a/block/blk-iocost.c
21023 +++ b/block/blk-iocost.c
21024 @@ -1073,7 +1073,17 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
21026         lockdep_assert_held(&ioc->lock);
21028 -       inuse = clamp_t(u32, inuse, 1, active);
21029 +       /*
21030 +        * For an active leaf node, its inuse shouldn't be zero or exceed
21031 +        * @active. An active internal node's inuse is solely determined by the
21032 +        * inuse to active ratio of its children regardless of @inuse.
21033 +        */
21034 +       if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
21035 +               inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
21036 +                                          iocg->child_active_sum);
21037 +       } else {
21038 +               inuse = clamp_t(u32, inuse, 1, active);
21039 +       }
21041         iocg->last_inuse = iocg->inuse;
21042         if (save)
21043 @@ -1090,7 +1100,7 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
21044                 /* update the level sums */
21045                 parent->child_active_sum += (s32)(active - child->active);
21046                 parent->child_inuse_sum += (s32)(inuse - child->inuse);
21047 -               /* apply the udpates */
21048 +               /* apply the updates */
21049                 child->active = active;
21050                 child->inuse = inuse;
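The arithmetic above matters for internal nodes: deriving inuse from the children's aggregate ratio with a round-up division guarantees that any nonzero child share maps to at least 1 instead of truncating to 0. A small sketch of the computation (the function name is invented):

#include <linux/math64.h>

/* e.g. active = 100, child_inuse_sum = 1, child_active_sum = 300:
 * 100 * 1 / 300 truncates to 0, but the round-up form yields 1.
 */
static u32 internal_node_inuse(u32 active, u64 child_inuse_sum,
                               u64 child_active_sum)
{
        return DIV64_U64_ROUND_UP((u64)active * child_inuse_sum,
                                  child_active_sum);
}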
21052 diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
21053 index e1e997af89a0..fdeb9773b55c 100644
21054 --- a/block/blk-mq-sched.c
21055 +++ b/block/blk-mq-sched.c
21056 @@ -348,14 +348,16 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
21057                 unsigned int nr_segs)
21059         struct elevator_queue *e = q->elevator;
21060 -       struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
21061 -       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
21062 +       struct blk_mq_ctx *ctx;
21063 +       struct blk_mq_hw_ctx *hctx;
21064         bool ret = false;
21065         enum hctx_type type;
21067         if (e && e->type->ops.bio_merge)
21068 -               return e->type->ops.bio_merge(hctx, bio, nr_segs);
21069 +               return e->type->ops.bio_merge(q, bio, nr_segs);
21071 +       ctx = blk_mq_get_ctx(q);
21072 +       hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
21073         type = hctx->type;
21074         if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
21075             list_empty_careful(&ctx->rq_lists[type]))
21076 diff --git a/block/blk-mq.c b/block/blk-mq.c
21077 index d4d7c1caa439..0e120547ccb7 100644
21078 --- a/block/blk-mq.c
21079 +++ b/block/blk-mq.c
21080 @@ -2216,8 +2216,9 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
21081                 /* Bypass scheduler for flush requests */
21082                 blk_insert_flush(rq);
21083                 blk_mq_run_hw_queue(data.hctx, true);
21084 -       } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
21085 -                               !blk_queue_nonrot(q))) {
21086 +       } else if (plug && (q->nr_hw_queues == 1 ||
21087 +                  blk_mq_is_sbitmap_shared(rq->mq_hctx->flags) ||
21088 +                  q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
21089                 /*
21090                  * Use plugging if we have a ->commit_rqs() hook as well, as
21091                  * we know the driver uses bd->last in a smart fashion.
21092 @@ -3269,10 +3270,12 @@ EXPORT_SYMBOL(blk_mq_init_allocated_queue);
21093  /* tags can _not_ be used after returning from blk_mq_exit_queue */
21094  void blk_mq_exit_queue(struct request_queue *q)
21096 -       struct blk_mq_tag_set   *set = q->tag_set;
21097 +       struct blk_mq_tag_set *set = q->tag_set;
21099 -       blk_mq_del_queue_tag_set(q);
21100 +       /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
21101         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
21102 +       /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
21103 +       blk_mq_del_queue_tag_set(q);
21106  static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
21107 diff --git a/block/elevator.c b/block/elevator.c
21108 index 293c5c81397a..71111fa80628 100644
21109 --- a/block/elevator.c
21110 +++ b/block/elevator.c
21111 @@ -616,15 +616,15 @@ static inline bool elv_support_iosched(struct request_queue *q)
21114  /*
21115 - * For single queue devices, default to using mq-deadline. If we have multiple
21116 - * queues or mq-deadline is not available, default to "none".
21117 + * For single queue devices, default to using bfq. If we have multiple
21118 + * queues or bfq is not available, default to "none".
21119   */
21120  static struct elevator_type *elevator_get_default(struct request_queue *q)
21122         if (q->nr_hw_queues != 1)
21123                 return NULL;
21125 -       return elevator_get(q, "mq-deadline", false);
21126 +       return elevator_get(q, "bfq", false);
21129  /*
21130 diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
21131 index 33d34d69cade..79b69d7046d6 100644
21132 --- a/block/kyber-iosched.c
21133 +++ b/block/kyber-iosched.c
21134 @@ -560,11 +560,12 @@ static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
21135         }
21138 -static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
21139 +static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
21140                 unsigned int nr_segs)
21142 +       struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
21143 +       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
21144         struct kyber_hctx_data *khd = hctx->sched_data;
21145 -       struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
21146         struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
21147         unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
21148         struct list_head *rq_list = &kcq->rq_list[sched_domain];
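Across bfq, kyber and mq-deadline the ->bio_merge() hook now takes the request queue rather than a hardware context; a scheduler that still needs the software/hardware contexts derives them itself, as kyber does above. A hedged skeleton of the new shape, assuming block-layer-internal headers are visible (example_try_merge() is a hypothetical per-scheduler helper):

/* Hypothetical per-scheduler merge attempt. */
static bool example_try_merge(struct blk_mq_hw_ctx *hctx,
                              struct blk_mq_ctx *ctx,
                              struct bio *bio, unsigned int nr_segs)
{
        return false;
}

static bool example_bio_merge(struct request_queue *q, struct bio *bio,
                              unsigned int nr_segs)
{
        /* Derive the contexts from the queue instead of receiving hctx. */
        struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);

        return example_try_merge(hctx, ctx, bio, nr_segs);
}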
21149 diff --git a/block/mq-deadline.c b/block/mq-deadline.c
21150 index f3631a287466..3aabcd2a7893 100644
21151 --- a/block/mq-deadline.c
21152 +++ b/block/mq-deadline.c
21153 @@ -461,10 +461,9 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
21154         return ELEVATOR_NO_MERGE;
21157 -static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
21158 +static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
21159                 unsigned int nr_segs)
21161 -       struct request_queue *q = hctx->queue;
21162         struct deadline_data *dd = q->elevator->elevator_data;
21163         struct request *free = NULL;
21164         bool ret;
21165 diff --git a/crypto/api.c b/crypto/api.c
21166 index ed08cbd5b9d3..c4eda56cff89 100644
21167 --- a/crypto/api.c
21168 +++ b/crypto/api.c
21169 @@ -562,7 +562,7 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
21171         struct crypto_alg *alg;
21173 -       if (unlikely(!mem))
21174 +       if (IS_ERR_OR_NULL(mem))
21175                 return;
21177         alg = tfm->__crt_alg;
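The one-liner above hardens the destroy path: an ERR_PTR()-encoded failure is non-NULL, so the old "!mem" test would let the function go on to dereference a poisoned pointer. A minimal sketch of the guard (the helper name is invented):

#include <linux/err.h>
#include <linux/slab.h>

static void destroy_object(void *obj)
{
        /* Rejects both NULL and ERR_PTR(-E...) values. */
        if (IS_ERR_OR_NULL(obj))
                return;

        kfree(obj);
}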
21178 diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
21179 index a057ecb1288d..6cd7f7025df4 100644
21180 --- a/crypto/async_tx/async_xor.c
21181 +++ b/crypto/async_tx/async_xor.c
21182 @@ -233,6 +233,7 @@ async_xor_offs(struct page *dest, unsigned int offset,
21183                 if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
21184                         src_cnt--;
21185                         src_list++;
21186 +                       src_offs++;
21187                 }
21189                 /* wait for any prerequisite operations */
21190 diff --git a/crypto/rng.c b/crypto/rng.c
21191 index a888d84b524a..fea082b25fe4 100644
21192 --- a/crypto/rng.c
21193 +++ b/crypto/rng.c
21194 @@ -34,22 +34,18 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
21195         u8 *buf = NULL;
21196         int err;
21198 -       crypto_stats_get(alg);
21199         if (!seed && slen) {
21200                 buf = kmalloc(slen, GFP_KERNEL);
21201 -               if (!buf) {
21202 -                       crypto_alg_put(alg);
21203 +               if (!buf)
21204                         return -ENOMEM;
21205 -               }
21207                 err = get_random_bytes_wait(buf, slen);
21208 -               if (err) {
21209 -                       crypto_alg_put(alg);
21210 +               if (err)
21211                         goto out;
21212 -               }
21213                 seed = buf;
21214         }
21216 +       crypto_stats_get(alg);
21217         err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
21218         crypto_stats_rng_seed(alg, err);
21219  out:
21220 diff --git a/crypto/zstd.c b/crypto/zstd.c
21221 index 1a3309f066f7..154a969c83a8 100644
21222 --- a/crypto/zstd.c
21223 +++ b/crypto/zstd.c
21224 @@ -18,22 +18,22 @@
21225  #define ZSTD_DEF_LEVEL 3
21227  struct zstd_ctx {
21228 -       ZSTD_CCtx *cctx;
21229 -       ZSTD_DCtx *dctx;
21230 +       zstd_cctx *cctx;
21231 +       zstd_dctx *dctx;
21232         void *cwksp;
21233         void *dwksp;
21234  };
21236 -static ZSTD_parameters zstd_params(void)
21237 +static zstd_parameters zstd_params(void)
21239 -       return ZSTD_getParams(ZSTD_DEF_LEVEL, 0, 0);
21240 +       return zstd_get_params(ZSTD_DEF_LEVEL, 0);
21243  static int zstd_comp_init(struct zstd_ctx *ctx)
21245         int ret = 0;
21246 -       const ZSTD_parameters params = zstd_params();
21247 -       const size_t wksp_size = ZSTD_CCtxWorkspaceBound(params.cParams);
21248 +       const zstd_parameters params = zstd_params();
21249 +       const size_t wksp_size = zstd_cctx_workspace_bound(&params.cParams);
21251         ctx->cwksp = vzalloc(wksp_size);
21252         if (!ctx->cwksp) {
21253 @@ -41,7 +41,7 @@ static int zstd_comp_init(struct zstd_ctx *ctx)
21254                 goto out;
21255         }
21257 -       ctx->cctx = ZSTD_initCCtx(ctx->cwksp, wksp_size);
21258 +       ctx->cctx = zstd_init_cctx(ctx->cwksp, wksp_size);
21259         if (!ctx->cctx) {
21260                 ret = -EINVAL;
21261                 goto out_free;
21262 @@ -56,7 +56,7 @@ static int zstd_comp_init(struct zstd_ctx *ctx)
21263  static int zstd_decomp_init(struct zstd_ctx *ctx)
21265         int ret = 0;
21266 -       const size_t wksp_size = ZSTD_DCtxWorkspaceBound();
21267 +       const size_t wksp_size = zstd_dctx_workspace_bound();
21269         ctx->dwksp = vzalloc(wksp_size);
21270         if (!ctx->dwksp) {
21271 @@ -64,7 +64,7 @@ static int zstd_decomp_init(struct zstd_ctx *ctx)
21272                 goto out;
21273         }
21275 -       ctx->dctx = ZSTD_initDCtx(ctx->dwksp, wksp_size);
21276 +       ctx->dctx = zstd_init_dctx(ctx->dwksp, wksp_size);
21277         if (!ctx->dctx) {
21278                 ret = -EINVAL;
21279                 goto out_free;
21280 @@ -152,10 +152,10 @@ static int __zstd_compress(const u8 *src, unsigned int slen,
21282         size_t out_len;
21283         struct zstd_ctx *zctx = ctx;
21284 -       const ZSTD_parameters params = zstd_params();
21285 +       const zstd_parameters params = zstd_params();
21287 -       out_len = ZSTD_compressCCtx(zctx->cctx, dst, *dlen, src, slen, params);
21288 -       if (ZSTD_isError(out_len))
21289 +       out_len = zstd_compress_cctx(zctx->cctx, dst, *dlen, src, slen, &params);
21290 +       if (zstd_is_error(out_len))
21291                 return -EINVAL;
21292         *dlen = out_len;
21293         return 0;
21294 @@ -182,8 +182,8 @@ static int __zstd_decompress(const u8 *src, unsigned int slen,
21295         size_t out_len;
21296         struct zstd_ctx *zctx = ctx;
21298 -       out_len = ZSTD_decompressDCtx(zctx->dctx, dst, *dlen, src, slen);
21299 -       if (ZSTD_isError(out_len))
21300 +       out_len = zstd_decompress_dctx(zctx->dctx, dst, *dlen, src, slen);
21301 +       if (zstd_is_error(out_len))
21302                 return -EINVAL;
21303         *dlen = out_len;
21304         return 0;
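The conversion above targets the kernel-style zstd wrapper API. Assuming exactly the calls this hunk introduces, a one-shot compression helper might look like the following sketch (function name and error handling are illustrative only):

#include <linux/vmalloc.h>
#include <linux/zstd.h>

static int example_zstd_compress(const u8 *src, size_t slen,
                                 u8 *dst, size_t *dlen)
{
        const zstd_parameters params = zstd_get_params(3, 0);
        size_t wksp_size = zstd_cctx_workspace_bound(&params.cParams);
        void *wksp = vzalloc(wksp_size);
        zstd_cctx *cctx;
        size_t out;

        if (!wksp)
                return -ENOMEM;

        cctx = zstd_init_cctx(wksp, wksp_size);
        if (!cctx) {
                vfree(wksp);
                return -EINVAL;
        }

        out = zstd_compress_cctx(cctx, dst, *dlen, src, slen, &params);
        vfree(wksp);
        if (zstd_is_error(out))
                return -EINVAL;

        *dlen = out;
        return 0;
}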
21305 diff --git a/drivers/accessibility/speakup/speakup_acntpc.c b/drivers/accessibility/speakup/speakup_acntpc.c
21306 index c1ec087dca13..b2d0d4266f62 100644
21307 --- a/drivers/accessibility/speakup/speakup_acntpc.c
21308 +++ b/drivers/accessibility/speakup/speakup_acntpc.c
21309 @@ -198,7 +198,7 @@ static void do_catch_up(struct spk_synth *synth)
21310                 full_time_val = full_time->u.n.value;
21311                 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
21312                 if (synth_full()) {
21313 -                       schedule_timeout(msecs_to_jiffies(full_time_val));
21314 +                       schedule_msec_hrtimeout(full_time_val);
21315                         continue;
21316                 }
21317                 set_current_state(TASK_RUNNING);
21318 @@ -226,7 +226,7 @@ static void do_catch_up(struct spk_synth *synth)
21319                         jiffy_delta_val = jiffy_delta->u.n.value;
21320                         delay_time_val = delay_time->u.n.value;
21321                         spin_unlock_irqrestore(&speakup_info.spinlock, flags);
21322 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
21323 +                       schedule_msec_hrtimeout(delay_time_val);
21324                         jiff_max = jiffies + jiffy_delta_val;
21325                 }
21326         }
21327 diff --git a/drivers/accessibility/speakup/speakup_apollo.c b/drivers/accessibility/speakup/speakup_apollo.c
21328 index cd63581b2e99..d636157a2844 100644
21329 --- a/drivers/accessibility/speakup/speakup_apollo.c
21330 +++ b/drivers/accessibility/speakup/speakup_apollo.c
21331 @@ -165,7 +165,7 @@ static void do_catch_up(struct spk_synth *synth)
21332                 if (!synth->io_ops->synth_out(synth, ch)) {
21333                         synth->io_ops->tiocmset(synth, 0, UART_MCR_RTS);
21334                         synth->io_ops->tiocmset(synth, UART_MCR_RTS, 0);
21335 -                       schedule_timeout(msecs_to_jiffies(full_time_val));
21336 +                       schedule_msec_hrtimeout(full_time_val);
21337                         continue;
21338                 }
21339                 if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
21340 diff --git a/drivers/accessibility/speakup/speakup_decext.c b/drivers/accessibility/speakup/speakup_decext.c
21341 index 092cfd08a9e1..e7fc85f8ce5c 100644
21342 --- a/drivers/accessibility/speakup/speakup_decext.c
21343 +++ b/drivers/accessibility/speakup/speakup_decext.c
21344 @@ -180,7 +180,7 @@ static void do_catch_up(struct spk_synth *synth)
21345                 if (ch == '\n')
21346                         ch = 0x0D;
21347                 if (synth_full() || !synth->io_ops->synth_out(synth, ch)) {
21348 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
21349 +                       schedule_msec_hrtimeout(delay_time_val);
21350                         continue;
21351                 }
21352                 set_current_state(TASK_RUNNING);
21353 diff --git a/drivers/accessibility/speakup/speakup_decpc.c b/drivers/accessibility/speakup/speakup_decpc.c
21354 index dec314dee214..2a5deb5256b2 100644
21355 --- a/drivers/accessibility/speakup/speakup_decpc.c
21356 +++ b/drivers/accessibility/speakup/speakup_decpc.c
21357 @@ -398,7 +398,7 @@ static void do_catch_up(struct spk_synth *synth)
21358                 if (ch == '\n')
21359                         ch = 0x0D;
21360                 if (dt_sendchar(ch)) {
21361 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
21362 +                       schedule_msec_hrtimeout(delay_time_val);
21363                         continue;
21364                 }
21365                 set_current_state(TASK_RUNNING);
21366 diff --git a/drivers/accessibility/speakup/speakup_dectlk.c b/drivers/accessibility/speakup/speakup_dectlk.c
21367 index 580ec796816b..67c156b90ddb 100644
21368 --- a/drivers/accessibility/speakup/speakup_dectlk.c
21369 +++ b/drivers/accessibility/speakup/speakup_dectlk.c
21370 @@ -256,7 +256,7 @@ static void do_catch_up(struct spk_synth *synth)
21371                 if (ch == '\n')
21372                         ch = 0x0D;
21373                 if (synth_full_val || !synth->io_ops->synth_out(synth, ch)) {
21374 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
21375 +                       schedule_msec_hrtimeout(delay_time_val);
21376                         continue;
21377                 }
21378                 set_current_state(TASK_RUNNING);
21379 diff --git a/drivers/accessibility/speakup/speakup_dtlk.c b/drivers/accessibility/speakup/speakup_dtlk.c
21380 index 92838d3ae9eb..b687cb4d3268 100644
21381 --- a/drivers/accessibility/speakup/speakup_dtlk.c
21382 +++ b/drivers/accessibility/speakup/speakup_dtlk.c
21383 @@ -211,7 +211,7 @@ static void do_catch_up(struct spk_synth *synth)
21384                 delay_time_val = delay_time->u.n.value;
21385                 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
21386                 if (synth_full()) {
21387 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
21388 +                       schedule_msec_hrtimeout(delay_time_val);
21389                         continue;
21390                 }
21391                 set_current_state(TASK_RUNNING);
21392 @@ -227,7 +227,7 @@ static void do_catch_up(struct spk_synth *synth)
21393                         delay_time_val = delay_time->u.n.value;
21394                         jiffy_delta_val = jiffy_delta->u.n.value;
21395                         spin_unlock_irqrestore(&speakup_info.spinlock, flags);
21396 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
21397 +                       schedule_msec_hrtimeout(delay_time_val);
21398                         jiff_max = jiffies + jiffy_delta_val;
21399                 }
21400         }
21401 diff --git a/drivers/accessibility/speakup/speakup_keypc.c b/drivers/accessibility/speakup/speakup_keypc.c
21402 index 311f4aa0be22..99c523fdcc98 100644
21403 --- a/drivers/accessibility/speakup/speakup_keypc.c
21404 +++ b/drivers/accessibility/speakup/speakup_keypc.c
21405 @@ -199,7 +199,7 @@ static void do_catch_up(struct spk_synth *synth)
21406                 full_time_val = full_time->u.n.value;
21407                 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
21408                 if (synth_full()) {
21409 -                       schedule_timeout(msecs_to_jiffies(full_time_val));
21410 +                       schedule_msec_hrtimeout(full_time_val);
21411                         continue;
21412                 }
21413                 set_current_state(TASK_RUNNING);
21414 @@ -232,7 +232,7 @@ static void do_catch_up(struct spk_synth *synth)
21415                         jiffy_delta_val = jiffy_delta->u.n.value;
21416                         delay_time_val = delay_time->u.n.value;
21417                         spin_unlock_irqrestore(&speakup_info.spinlock, flags);
21418 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
21419 +                       schedule_msec_hrtimeout(delay_time_val);
21420                         jiff_max = jiffies + jiffy_delta_val;
21421                 }
21422         }
21423 diff --git a/drivers/accessibility/speakup/synth.c b/drivers/accessibility/speakup/synth.c
21424 index 2b8699673bac..bf0cbdaf564f 100644
21425 --- a/drivers/accessibility/speakup/synth.c
21426 +++ b/drivers/accessibility/speakup/synth.c
21427 @@ -93,12 +93,8 @@ static void _spk_do_catch_up(struct spk_synth *synth, int unicode)
21428                 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
21429                 if (ch == '\n')
21430                         ch = synth->procspeech;
21431 -               if (unicode)
21432 -                       ret = synth->io_ops->synth_out_unicode(synth, ch);
21433 -               else
21434 -                       ret = synth->io_ops->synth_out(synth, ch);
21435 -               if (!ret) {
21436 -                       schedule_timeout(msecs_to_jiffies(full_time_val));
21437 +               if (!synth->io_ops->synth_out(synth, ch)) {
21438 +                       schedule_msec_hrtimeout(full_time_val);
21439                         continue;
21440                 }
21441                 if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
21442 @@ -108,11 +104,9 @@ static void _spk_do_catch_up(struct spk_synth *synth, int unicode)
21443                         full_time_val = full_time->u.n.value;
21444                         spin_unlock_irqrestore(&speakup_info.spinlock, flags);
21445                         if (synth->io_ops->synth_out(synth, synth->procspeech))
21446 -                               schedule_timeout(
21447 -                                       msecs_to_jiffies(delay_time_val));
21448 +                               schedule_msec_hrtimeout(delay_time_val);
21449                         else
21450 -                               schedule_timeout(
21451 -                                       msecs_to_jiffies(full_time_val));
21452 +                               schedule_msec_hrtimeout(full_time_val);
21453                         jiff_max = jiffies + jiffy_delta_val;
21454                 }
21455                 set_current_state(TASK_RUNNING);
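All of the speakup conversions above follow one pattern: replace schedule_timeout(msecs_to_jiffies(ms)), which rounds short delays up to whole jiffies, with schedule_msec_hrtimeout(ms), a highres-timer-backed sleep added elsewhere in this patch series. Both forms expect the task state to be set beforehand; a minimal sketch under that assumption (the wrapper name is invented):

#include <linux/sched.h>

/* Sleep for roughly delay_ms with millisecond granularity; assumes
 * the schedule_msec_hrtimeout() helper introduced by this patch set.
 */
static void example_sleep_ms(unsigned int delay_ms)
{
        set_current_state(TASK_INTERRUPTIBLE);
        schedule_msec_hrtimeout(delay_ms);
}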
21456 diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
21457 index f2d0e5915dab..0a0a982f9c28 100644
21458 --- a/drivers/acpi/arm64/gtdt.c
21459 +++ b/drivers/acpi/arm64/gtdt.c
21460 @@ -329,7 +329,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
21461                                         int index)
21463         struct platform_device *pdev;
21464 -       int irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags);
21465 +       int irq;
21467         /*
21468          * According to SBSA specification the size of refresh and control
21469 @@ -338,7 +338,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
21470         struct resource res[] = {
21471                 DEFINE_RES_MEM(wd->control_frame_address, SZ_4K),
21472                 DEFINE_RES_MEM(wd->refresh_frame_address, SZ_4K),
21473 -               DEFINE_RES_IRQ(irq),
21474 +               {},
21475         };
21476         int nr_res = ARRAY_SIZE(res);
21478 @@ -348,10 +348,11 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
21480         if (!(wd->refresh_frame_address && wd->control_frame_address)) {
21481                 pr_err(FW_BUG "failed to get the Watchdog base address.\n");
21482 -               acpi_unregister_gsi(wd->timer_interrupt);
21483                 return -EINVAL;
21484         }
21486 +       irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags);
21487 +       res[2] = (struct resource)DEFINE_RES_IRQ(irq);
21488         if (irq <= 0) {
21489                 pr_warn("failed to map the Watchdog interrupt.\n");
21490                 nr_res--;
21491 @@ -364,7 +365,8 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
21492          */
21493         pdev = platform_device_register_simple("sbsa-gwdt", index, res, nr_res);
21494         if (IS_ERR(pdev)) {
21495 -               acpi_unregister_gsi(wd->timer_interrupt);
21496 +               if (irq > 0)
21497 +                       acpi_unregister_gsi(wd->timer_interrupt);
21498                 return PTR_ERR(pdev);
21499         }
21501 diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
21502 index 69057fcd2c04..a5e6fd0bafa1 100644
21503 --- a/drivers/acpi/cppc_acpi.c
21504 +++ b/drivers/acpi/cppc_acpi.c
21505 @@ -119,23 +119,15 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
21506   */
21507  #define NUM_RETRIES 500ULL
21509 -struct cppc_attr {
21510 -       struct attribute attr;
21511 -       ssize_t (*show)(struct kobject *kobj,
21512 -                       struct attribute *attr, char *buf);
21513 -       ssize_t (*store)(struct kobject *kobj,
21514 -                       struct attribute *attr, const char *c, ssize_t count);
21517  #define define_one_cppc_ro(_name)              \
21518 -static struct cppc_attr _name =                        \
21519 +static struct kobj_attribute _name =           \
21520  __ATTR(_name, 0444, show_##_name, NULL)
21522  #define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
21524  #define show_cppc_data(access_fn, struct_name, member_name)            \
21525         static ssize_t show_##member_name(struct kobject *kobj,         \
21526 -                                       struct attribute *attr, char *buf) \
21527 +                               struct kobj_attribute *attr, char *buf) \
21528         {                                                               \
21529                 struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);           \
21530                 struct struct_name st_name = {0};                       \
21531 @@ -161,7 +153,7 @@ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
21532  show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
21534  static ssize_t show_feedback_ctrs(struct kobject *kobj,
21535 -               struct attribute *attr, char *buf)
21536 +               struct kobj_attribute *attr, char *buf)
21538         struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
21539         struct cppc_perf_fb_ctrs fb_ctrs = {0};
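Dropping the private struct cppc_attr in favour of struct kobj_attribute gives the show() callbacks the exact prototype the sysfs core calls through, which keeps Control Flow Integrity checking happy. A minimal read-only attribute under the standard type (all names invented):

#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct kobject *kobj,
                            struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", 42);
}

/* __ATTR_RO(example) wires the attribute to example_show(). */
static struct kobj_attribute example_attr = __ATTR_RO(example);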
21540 diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
21541 index 7b54dc95d36b..4058e0241091 100644
21542 --- a/drivers/acpi/custom_method.c
21543 +++ b/drivers/acpi/custom_method.c
21544 @@ -42,6 +42,8 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
21545                                    sizeof(struct acpi_table_header)))
21546                         return -EFAULT;
21547                 uncopied_bytes = max_size = table.length;
21548 +               /* free any buffer left from a previous write */
21549 +               kfree(buf);
21550                 buf = kzalloc(max_size, GFP_KERNEL);
21551                 if (!buf)
21552                         return -ENOMEM;
21553 @@ -55,6 +57,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
21554             (*ppos + count < count) ||
21555             (count > uncopied_bytes)) {
21556                 kfree(buf);
21557 +               buf = NULL;
21558                 return -EINVAL;
21559         }
21561 @@ -76,7 +79,6 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
21562                 add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
21563         }
21565 -       kfree(buf);
21566         return count;
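The fix above addresses a reuse bug: cm_write() can be invoked repeatedly on the same file, so the buffer from an earlier partial write must be freed before reallocating, and must be NULLed on the error path so a later call cannot double-free it. The pattern in isolation (names invented):

#include <linux/slab.h>

static char *buf;       /* persists across write() calls */

static int example_refill(size_t max_size, bool bad_input)
{
        kfree(buf);             /* no-op when buf is already NULL */
        buf = kzalloc(max_size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (bad_input) {
                kfree(buf);
                buf = NULL;     /* guard the next call against double-free */
                return -EINVAL;
        }
        return 0;
}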
21569 diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
21570 index 096153761ebc..58876248b192 100644
21571 --- a/drivers/acpi/device_pm.c
21572 +++ b/drivers/acpi/device_pm.c
21573 @@ -1310,6 +1310,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
21574                 {"PNP0C0B", }, /* Generic ACPI fan */
21575                 {"INT3404", }, /* Fan */
21576                 {"INTC1044", }, /* Fan for Tiger Lake generation */
21577 +               {"INTC1048", }, /* Fan for Alder Lake generation */
21578                 {}
21579         };
21580         struct acpi_device *adev = ACPI_COMPANION(dev);
21581 diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
21582 index 6efe7edd7b1e..345777bf7af9 100644
21583 --- a/drivers/acpi/scan.c
21584 +++ b/drivers/acpi/scan.c
21585 @@ -701,6 +701,7 @@ int acpi_device_add(struct acpi_device *device,
21587                 result = acpi_device_set_name(device, acpi_device_bus_id);
21588                 if (result) {
21589 +                       kfree_const(acpi_device_bus_id->bus_id);
21590                         kfree(acpi_device_bus_id);
21591                         goto err_unlock;
21592                 }
21593 diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
21594 index 53b22e26266c..2d821ed78453 100644
21595 --- a/drivers/android/Kconfig
21596 +++ b/drivers/android/Kconfig
21597 @@ -9,7 +9,7 @@ config ANDROID
21598  if ANDROID
21600  config ANDROID_BINDER_IPC
21601 -       bool "Android Binder IPC Driver"
21602 +       tristate "Android Binder IPC Driver"
21603         depends on MMU
21604         default n
21605         help
21606 @@ -21,8 +21,8 @@ config ANDROID_BINDER_IPC
21607           between said processes.
21609  config ANDROID_BINDERFS
21610 -       bool "Android Binderfs filesystem"
21611 -       depends on ANDROID_BINDER_IPC
21612 +       tristate "Android Binderfs filesystem"
21613 +       depends on (ANDROID_BINDER_IPC=y) || (ANDROID_BINDER_IPC=m && m)
21614         default n
21615         help
21616           Binderfs is a pseudo-filesystem for the Android Binder IPC driver
21617 diff --git a/drivers/android/Makefile b/drivers/android/Makefile
21618 index c9d3d0c99c25..b9d5ce8deca2 100644
21619 --- a/drivers/android/Makefile
21620 +++ b/drivers/android/Makefile
21621 @@ -1,6 +1,10 @@
21622  # SPDX-License-Identifier: GPL-2.0-only
21623  ccflags-y += -I$(src)                  # needed for trace events
21625 -obj-$(CONFIG_ANDROID_BINDERFS)         += binderfs.o
21626 -obj-$(CONFIG_ANDROID_BINDER_IPC)       += binder.o binder_alloc.o
21627 -obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
21628 +binder_linux-y := binder.o binder_alloc.o
21629 +obj-$(CONFIG_ANDROID_BINDER_IPC) += binder_linux.o
21630 +binder_linux-$(CONFIG_ANDROID_BINDERFS) += binderfs.o
21631 +binder_linux-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
21633 +# binder-$(CONFIG_ANDROID_BINDER_IPC) := binder.o binder_alloc.o
21634 +# binder-$(CONFIG_ANDROID_BINDERFS) += binderfs.o
21635 diff --git a/drivers/android/binder.c b/drivers/android/binder.c
21636 index c119736ca56a..569850551e88 100644
21637 --- a/drivers/android/binder.c
21638 +++ b/drivers/android/binder.c
21639 @@ -5788,9 +5788,20 @@ static int __init binder_init(void)
21640         return ret;
21643 -device_initcall(binder_init);
21644 +module_init(binder_init);
21645 +/*
21646 + * binder will have no exit function since binderfs instances can be mounted
21647 + * multiple times and also in user namespaces; finding and destroying them all
21648 + * is not feasible without introducing insane locking. Just ignoring existing
21649 + * instances on module unload also wouldn't work, since we would lose track of
21650 + * what major number was dynamically allocated and also what minor numbers are
21651 + * already given out. So this would get us into all kinds of issues with device
21652 + * number reuse. So simply don't allow unloading unless we are forced to do so.
21653 + */
21655 +MODULE_AUTHOR("Google, Inc.");
21656 +MODULE_DESCRIPTION("Driver for Android binder device");
21657 +MODULE_LICENSE("GPL v2");
21659  #define CREATE_TRACE_POINTS
21660  #include "binder_trace.h"
21662 -MODULE_LICENSE("GPL v2");
21663 diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
21664 index 7caf74ad2405..07c11e8d6dad 100644
21665 --- a/drivers/android/binder_alloc.c
21666 +++ b/drivers/android/binder_alloc.c
21667 @@ -38,8 +38,7 @@ enum {
21668  };
21669  static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;
21671 -module_param_named(debug_mask, binder_alloc_debug_mask,
21672 -                  uint, 0644);
21673 +module_param_named(alloc_debug_mask, binder_alloc_debug_mask, uint, 0644);
21675  #define binder_alloc_debug(mask, x...) \
21676         do { \
21677 diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
21678 index 6e8e001381af..e4e0678e2781 100644
21679 --- a/drivers/android/binder_alloc.h
21680 +++ b/drivers/android/binder_alloc.h
21681 @@ -6,6 +6,7 @@
21682  #ifndef _LINUX_BINDER_ALLOC_H
21683  #define _LINUX_BINDER_ALLOC_H
21685 +#include <linux/kconfig.h>
21686  #include <linux/rbtree.h>
21687  #include <linux/list.h>
21688  #include <linux/mm.h>
21689 @@ -109,7 +110,7 @@ struct binder_alloc {
21690         size_t pages_high;
21691  };
21693 -#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
21694 +#if IS_ENABLED(CONFIG_ANDROID_BINDER_IPC_SELFTEST)
21695  void binder_selftest_alloc(struct binder_alloc *alloc);
21696  #else
21697  static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
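With binder switched to tristate, a plain #ifdef CONFIG_... would evaluate false for =m builds, because a modular option defines CONFIG_..._MODULE instead. IS_ENABLED() from <linux/kconfig.h> covers both cases, which is why these headers are converted. Schematically (CONFIG_FOO is a placeholder):

#include <linux/kconfig.h>

#if IS_ENABLED(CONFIG_FOO)      /* true for CONFIG_FOO=y or =m */
extern void foo_init(void);
#else
static inline void foo_init(void) {}
#endif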
21698 diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
21699 index 6cd79011e35d..da5bcb3203dc 100644
21700 --- a/drivers/android/binder_internal.h
21701 +++ b/drivers/android/binder_internal.h
21702 @@ -5,6 +5,7 @@
21704  #include <linux/export.h>
21705  #include <linux/fs.h>
21706 +#include <linux/kconfig.h>
21707  #include <linux/list.h>
21708  #include <linux/miscdevice.h>
21709  #include <linux/mutex.h>
21710 @@ -77,7 +78,7 @@ extern const struct file_operations binder_fops;
21712  extern char *binder_devices_param;
21714 -#ifdef CONFIG_ANDROID_BINDERFS
21715 +#if IS_ENABLED(CONFIG_ANDROID_BINDERFS)
21716  extern bool is_binderfs_device(const struct inode *inode);
21717  extern struct dentry *binderfs_create_file(struct dentry *dir, const char *name,
21718                                            const struct file_operations *fops,
21719 @@ -98,7 +99,7 @@ static inline struct dentry *binderfs_create_file(struct dentry *dir,
21720  static inline void binderfs_remove_file(struct dentry *dentry) {}
21721  #endif
21723 -#ifdef CONFIG_ANDROID_BINDERFS
21724 +#if IS_ENABLED(CONFIG_ANDROID_BINDERFS)
21725  extern int __init init_binderfs(void);
21726  #else
21727  static inline int __init init_binderfs(void)
21728 diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
21729 index e80ba93c62a9..1a18e9dbb2a6 100644
21730 --- a/drivers/android/binderfs.c
21731 +++ b/drivers/android/binderfs.c
21732 @@ -113,7 +113,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
21733         struct super_block *sb = ref_inode->i_sb;
21734         struct binderfs_info *info = sb->s_fs_info;
21735  #if defined(CONFIG_IPC_NS)
21736 -       bool use_reserve = (info->ipc_ns == &init_ipc_ns);
21737 +       bool use_reserve = (info->ipc_ns == show_init_ipc_ns());
21738  #else
21739         bool use_reserve = true;
21740  #endif
21741 @@ -402,7 +402,7 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
21742         struct dentry *root = sb->s_root;
21743         struct binderfs_info *info = sb->s_fs_info;
21744  #if defined(CONFIG_IPC_NS)
21745 -       bool use_reserve = (info->ipc_ns == &init_ipc_ns);
21746 +       bool use_reserve = (info->ipc_ns == show_init_ipc_ns());
21747  #else
21748         bool use_reserve = true;
21749  #endif
21750 @@ -682,7 +682,7 @@ static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc)
21751                 return -ENOMEM;
21752         info = sb->s_fs_info;
21754 -       info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
21755 +       info->ipc_ns = get_ipc_ns_exported(current->nsproxy->ipc_ns);
21757         info->root_gid = make_kgid(sb->s_user_ns, 0);
21758         if (!gid_valid(info->root_gid))
21759 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
21760 index 00ba8e5a1ccc..33192a8f687d 100644
21761 --- a/drivers/ata/ahci.c
21762 +++ b/drivers/ata/ahci.c
21763 @@ -1772,6 +1772,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
21764                 hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
21766  #ifdef CONFIG_ARM64
21767 +       if (pdev->vendor == PCI_VENDOR_ID_HUAWEI &&
21768 +           pdev->device == 0xa235 &&
21769 +           pdev->revision < 0x30)
21770 +               hpriv->flags |= AHCI_HFLAG_NO_SXS;
21772         if (pdev->vendor == 0x177d && pdev->device == 0xa01c)
21773                 hpriv->irq_handler = ahci_thunderx_irq_handler;
21774  #endif
21775 diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
21776 index 98b8baa47dc5..d1f284f0c83d 100644
21777 --- a/drivers/ata/ahci.h
21778 +++ b/drivers/ata/ahci.h
21779 @@ -242,6 +242,7 @@ enum {
21780                                                         suspend/resume */
21781         AHCI_HFLAG_IGN_NOTSUPP_POWER_ON = (1 << 27), /* ignore -EOPNOTSUPP
21782                                                         from phy_power_on() */
21783 +       AHCI_HFLAG_NO_SXS               = (1 << 28), /* SXS not supported */
21785         /* ap->flags bits */
21787 diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
21788 index 5b32df5d33ad..6e9c5ade4c2e 100644
21789 --- a/drivers/ata/ahci_brcm.c
21790 +++ b/drivers/ata/ahci_brcm.c
21791 @@ -86,7 +86,8 @@ struct brcm_ahci_priv {
21792         u32 port_mask;
21793         u32 quirks;
21794         enum brcm_ahci_version version;
21795 -       struct reset_control *rcdev;
21796 +       struct reset_control *rcdev_rescal;
21797 +       struct reset_control *rcdev_ahci;
21798  };
21800  static inline u32 brcm_sata_readreg(void __iomem *addr)
21801 @@ -352,8 +353,8 @@ static int brcm_ahci_suspend(struct device *dev)
21802         else
21803                 ret = 0;
21805 -       if (priv->version != BRCM_SATA_BCM7216)
21806 -               reset_control_assert(priv->rcdev);
21807 +       reset_control_assert(priv->rcdev_ahci);
21808 +       reset_control_rearm(priv->rcdev_rescal);
21810         return ret;
21812 @@ -365,10 +366,10 @@ static int __maybe_unused brcm_ahci_resume(struct device *dev)
21813         struct brcm_ahci_priv *priv = hpriv->plat_data;
21814         int ret = 0;
21816 -       if (priv->version == BRCM_SATA_BCM7216)
21817 -               ret = reset_control_reset(priv->rcdev);
21818 -       else
21819 -               ret = reset_control_deassert(priv->rcdev);
21820 +       ret = reset_control_deassert(priv->rcdev_ahci);
21821 +       if (ret)
21822 +               return ret;
21823 +       ret = reset_control_reset(priv->rcdev_rescal);
21824         if (ret)
21825                 return ret;
21827 @@ -434,7 +435,6 @@ static int brcm_ahci_probe(struct platform_device *pdev)
21829         const struct of_device_id *of_id;
21830         struct device *dev = &pdev->dev;
21831 -       const char *reset_name = NULL;
21832         struct brcm_ahci_priv *priv;
21833         struct ahci_host_priv *hpriv;
21834         struct resource *res;
21835 @@ -456,15 +456,15 @@ static int brcm_ahci_probe(struct platform_device *pdev)
21836         if (IS_ERR(priv->top_ctrl))
21837                 return PTR_ERR(priv->top_ctrl);
21839 -       /* Reset is optional depending on platform and named differently */
21840 -       if (priv->version == BRCM_SATA_BCM7216)
21841 -               reset_name = "rescal";
21842 -       else
21843 -               reset_name = "ahci";
21845 -       priv->rcdev = devm_reset_control_get_optional(&pdev->dev, reset_name);
21846 -       if (IS_ERR(priv->rcdev))
21847 -               return PTR_ERR(priv->rcdev);
21848 +       if (priv->version == BRCM_SATA_BCM7216) {
21849 +               priv->rcdev_rescal = devm_reset_control_get_optional_shared(
21850 +                       &pdev->dev, "rescal");
21851 +               if (IS_ERR(priv->rcdev_rescal))
21852 +                       return PTR_ERR(priv->rcdev_rescal);
21853 +       }
21854 +       priv->rcdev_ahci = devm_reset_control_get_optional(&pdev->dev, "ahci");
21855 +       if (IS_ERR(priv->rcdev_ahci))
21856 +               return PTR_ERR(priv->rcdev_ahci);
21858         hpriv = ahci_platform_get_resources(pdev, 0);
21859         if (IS_ERR(hpriv))
21860 @@ -485,10 +485,10 @@ static int brcm_ahci_probe(struct platform_device *pdev)
21861                 break;
21862         }
21864 -       if (priv->version == BRCM_SATA_BCM7216)
21865 -               ret = reset_control_reset(priv->rcdev);
21866 -       else
21867 -               ret = reset_control_deassert(priv->rcdev);
21868 +       ret = reset_control_reset(priv->rcdev_rescal);
21869 +       if (ret)
21870 +               return ret;
21871 +       ret = reset_control_deassert(priv->rcdev_ahci);
21872         if (ret)
21873                 return ret;
21875 @@ -539,8 +539,8 @@ static int brcm_ahci_probe(struct platform_device *pdev)
21876  out_disable_clks:
21877         ahci_platform_disable_clks(hpriv);
21878  out_reset:
21879 -       if (priv->version != BRCM_SATA_BCM7216)
21880 -               reset_control_assert(priv->rcdev);
21881 +       reset_control_assert(priv->rcdev_ahci);
21882 +       reset_control_rearm(priv->rcdev_rescal);
21883         return ret;
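The rework above splits the optional reset lines into separate handles: "rescal" is a shared, self-deasserting line that is fired with reset_control_reset() and must later be rearmed with reset_control_rearm() before it can be triggered again, while "ahci" keeps the usual assert/deassert pairing. A hedged sketch of the shared-pulse lifecycle (the function name is invented):

#include <linux/reset.h>

static int example_pulse_shared_reset(struct reset_control *rescal)
{
        int ret;

        ret = reset_control_reset(rescal);      /* trigger the pulse */
        if (ret)
                return ret;

        /* ... hardware is usable here ... */

        return reset_control_rearm(rescal);     /* allow a later re-trigger */
}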
21886 diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
21887 index ea5bf5f4cbed..fec2e9754aed 100644
21888 --- a/drivers/ata/libahci.c
21889 +++ b/drivers/ata/libahci.c
21890 @@ -493,6 +493,11 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
21891                 cap |= HOST_CAP_ALPM;
21892         }
21894 +       if ((cap & HOST_CAP_SXS) && (hpriv->flags & AHCI_HFLAG_NO_SXS)) {
21895 +               dev_info(dev, "controller does not support SXS, disabling CAP_SXS\n");
21896 +               cap &= ~HOST_CAP_SXS;
21897 +       }
21899         if (hpriv->force_port_map && port_map != hpriv->force_port_map) {
21900                 dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
21901                          port_map, hpriv->force_port_map);
21902 diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
21903 index de638dafce21..b2f552088291 100644
21904 --- a/drivers/ata/libahci_platform.c
21905 +++ b/drivers/ata/libahci_platform.c
21906 @@ -582,11 +582,13 @@ int ahci_platform_init_host(struct platform_device *pdev,
21907         int i, irq, n_ports, rc;
21909         irq = platform_get_irq(pdev, 0);
21910 -       if (irq <= 0) {
21911 +       if (irq < 0) {
21912                 if (irq != -EPROBE_DEFER)
21913                         dev_err(dev, "no irq\n");
21914                 return irq;
21915         }
21916 +       if (!irq)
21917 +               return -EINVAL;
21919         hpriv->irq = irq;
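This and the following ata hunks converge on the canonical platform_get_irq() contract: a negative return is an errno (including -EPROBE_DEFER) to be propagated, and 0 is never a valid Linux IRQ. The pattern in isolation (the function name is invented):

#include <linux/platform_device.h>

static int example_get_irq(struct platform_device *pdev)
{
        int irq = platform_get_irq(pdev, 0);

        if (irq < 0)
                return irq;     /* errno, e.g. -EPROBE_DEFER */
        if (!irq)
                return -EINVAL; /* IRQ0 is not a valid interrupt */

        return irq;
}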
21921 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
21922 index e9cf31f38450..63f39440a9b4 100644
21923 --- a/drivers/ata/pata_arasan_cf.c
21924 +++ b/drivers/ata/pata_arasan_cf.c
21925 @@ -818,12 +818,19 @@ static int arasan_cf_probe(struct platform_device *pdev)
21926         else
21927                 quirk = CF_BROKEN_UDMA; /* as it is on spear1340 */
21929 -       /* if irq is 0, support only PIO */
21930 -       acdev->irq = platform_get_irq(pdev, 0);
21931 -       if (acdev->irq)
21932 +       /*
21933 +        * If there's an error getting IRQ (or we do get IRQ0),
21934 +        * support only PIO
21935 +        */
21936 +       ret = platform_get_irq(pdev, 0);
21937 +       if (ret > 0) {
21938 +               acdev->irq = ret;
21939                 irq_handler = arasan_cf_interrupt;
21940 -       else
21941 +       } else if (ret == -EPROBE_DEFER) {
21942 +               return ret;
21943 +       } else {
21944                 quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;
21945 +       }
21947         acdev->pbase = res->start;
21948         acdev->vbase = devm_ioremap(&pdev->dev, res->start,
21949 diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
21950 index d1644a8ef9fa..abc0e87ca1a8 100644
21951 --- a/drivers/ata/pata_ixp4xx_cf.c
21952 +++ b/drivers/ata/pata_ixp4xx_cf.c
21953 @@ -165,8 +165,12 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
21954                 return -ENOMEM;
21956         irq = platform_get_irq(pdev, 0);
21957 -       if (irq)
21958 +       if (irq > 0)
21959                 irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
21960 +       else if (irq < 0)
21961 +               return irq;
21962 +       else
21963 +               return -EINVAL;
21965         /* Setup expansion bus chip selects */
21966         *data->cs0_cfg = data->cs0_bits;
21967 diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
21968 index 664ef658a955..b62446ea5f40 100644
21969 --- a/drivers/ata/sata_mv.c
21970 +++ b/drivers/ata/sata_mv.c
21971 @@ -4097,6 +4097,10 @@ static int mv_platform_probe(struct platform_device *pdev)
21972                 n_ports = mv_platform_data->n_ports;
21973                 irq = platform_get_irq(pdev, 0);
21974         }
21975 +       if (irq < 0)
21976 +               return irq;
21977 +       if (!irq)
21978 +               return -EINVAL;
21980         host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
21981         hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
21982 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
21983 index 653c8c6ac7a7..aedeb2dc1a18 100644
21984 --- a/drivers/base/devtmpfs.c
21985 +++ b/drivers/base/devtmpfs.c
21986 @@ -419,7 +419,6 @@ static int __init devtmpfs_setup(void *p)
21987         init_chroot(".");
21988  out:
21989         *(int *)p = err;
21990 -       complete(&setup_done);
21991         return err;
21994 @@ -432,6 +431,7 @@ static int __ref devtmpfsd(void *p)
21996         int err = devtmpfs_setup(p);
21998 +       complete(&setup_done);
21999         if (err)
22000                 return err;
22001         devtmpfs_work_loop();
22002 diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
22003 index 78355095e00d..7e2c79e2a88b 100644
22004 --- a/drivers/base/firmware_loader/main.c
22005 +++ b/drivers/base/firmware_loader/main.c
22006 @@ -465,6 +465,8 @@ static int fw_decompress_xz(struct device *dev, struct fw_priv *fw_priv,
22007  static char fw_path_para[256];
22008  static const char * const fw_path[] = {
22009         fw_path_para,
22010 +       "/etc/firmware/" UTS_RELEASE,
22011 +       "/etc/firmware",
22012         "/lib/firmware/updates/" UTS_RELEASE,
22013         "/lib/firmware/updates",
22014         "/lib/firmware/" UTS_RELEASE,
22015 diff --git a/drivers/base/node.c b/drivers/base/node.c
22016 index f449dbb2c746..2c36f61d30bc 100644
22017 --- a/drivers/base/node.c
22018 +++ b/drivers/base/node.c
22019 @@ -268,21 +268,20 @@ static void node_init_cache_dev(struct node *node)
22020         if (!dev)
22021                 return;
22023 +       device_initialize(dev);
22024         dev->parent = &node->dev;
22025         dev->release = node_cache_release;
22026         if (dev_set_name(dev, "memory_side_cache"))
22027 -               goto free_dev;
22028 +               goto put_device;
22030 -       if (device_register(dev))
22031 -               goto free_name;
22032 +       if (device_add(dev))
22033 +               goto put_device;
22035         pm_runtime_no_callbacks(dev);
22036         node->cache_dev = dev;
22037         return;
22038 -free_name:
22039 -       kfree_const(dev->kobj.name);
22040 -free_dev:
22041 -       kfree(dev);
22042 +put_device:
22043 +       put_device(dev);
22046  /**
22047 @@ -319,25 +318,24 @@ void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
22048                 return;
22050         dev = &info->dev;
22051 +       device_initialize(dev);
22052         dev->parent = node->cache_dev;
22053         dev->release = node_cacheinfo_release;
22054         dev->groups = cache_groups;
22055         if (dev_set_name(dev, "index%d", cache_attrs->level))
22056 -               goto free_cache;
22057 +               goto put_device;
22059         info->cache_attrs = *cache_attrs;
22060 -       if (device_register(dev)) {
22061 +       if (device_add(dev)) {
22062                 dev_warn(&node->dev, "failed to add cache level:%d\n",
22063                          cache_attrs->level);
22064 -               goto free_name;
22065 +               goto put_device;
22066         }
22067         pm_runtime_no_callbacks(dev);
22068         list_add_tail(&info->node, &node->cache_attrs);
22069         return;
22070 -free_name:
22071 -       kfree_const(dev->kobj.name);
22072 -free_cache:
22073 -       kfree(info);
22074 +put_device:
22075 +       put_device(dev);
22078  static void node_remove_caches(struct node *node)
22079 diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
22080 index fe1dad68aee4..ae011f2bc537 100644
22081 --- a/drivers/base/power/runtime.c
22082 +++ b/drivers/base/power/runtime.c
22083 @@ -1637,6 +1637,7 @@ void pm_runtime_init(struct device *dev)
22084         dev->power.request_pending = false;
22085         dev->power.request = RPM_REQ_NONE;
22086         dev->power.deferred_resume = false;
22087 +       dev->power.needs_force_resume = 0;
22088         INIT_WORK(&dev->power.work, pm_runtime_work);
22090         dev->power.timer_expires = 0;
22091 @@ -1804,10 +1805,12 @@ int pm_runtime_force_suspend(struct device *dev)
22092          * its parent, but set its status to RPM_SUSPENDED anyway in case this
22093          * function will be called again for it in the meantime.
22094          */
22095 -       if (pm_runtime_need_not_resume(dev))
22096 +       if (pm_runtime_need_not_resume(dev)) {
22097                 pm_runtime_set_suspended(dev);
22098 -       else
22099 +       } else {
22100                 __update_runtime_status(dev, RPM_SUSPENDED);
22101 +               dev->power.needs_force_resume = 1;
22102 +       }
22104         return 0;
22106 @@ -1834,7 +1837,7 @@ int pm_runtime_force_resume(struct device *dev)
22107         int (*callback)(struct device *);
22108         int ret = 0;
22110 -       if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
22111 +       if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
22112                 goto out;
22114         /*
22115 @@ -1853,6 +1856,7 @@ int pm_runtime_force_resume(struct device *dev)
22117         pm_runtime_mark_last_busy(dev);
22118  out:
22119 +       dev->power.needs_force_resume = 0;
22120         pm_runtime_enable(dev);
22121         return ret;
22123 diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
22124 index ff2ee87987c7..211a335a608d 100644
22125 --- a/drivers/base/regmap/regmap-debugfs.c
22126 +++ b/drivers/base/regmap/regmap-debugfs.c
22127 @@ -660,6 +660,7 @@ void regmap_debugfs_exit(struct regmap *map)
22128                 regmap_debugfs_free_dump_cache(map);
22129                 mutex_unlock(&map->cache_lock);
22130                 kfree(map->debugfs_name);
22131 +               map->debugfs_name = NULL;
22132         } else {
22133                 struct regmap_debugfs_node *node, *tmp;
22135 diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
22136 index fa3719ef80e4..88310ac9ce90 100644
22137 --- a/drivers/base/swnode.c
22138 +++ b/drivers/base/swnode.c
22139 @@ -1032,6 +1032,7 @@ int device_add_software_node(struct device *dev, const struct software_node *nod
22140         }
22142         set_secondary_fwnode(dev, &swnode->fwnode);
22143 +       software_node_notify(dev, KOBJ_ADD);
22145         return 0;
22147 @@ -1105,8 +1106,8 @@ int software_node_notify(struct device *dev, unsigned long action)
22149         switch (action) {
22150         case KOBJ_ADD:
22151 -               ret = sysfs_create_link(&dev->kobj, &swnode->kobj,
22152 -                                       "software_node");
22153 +               ret = sysfs_create_link_nowarn(&dev->kobj, &swnode->kobj,
22154 +                                              "software_node");
22155                 if (ret)
22156                         break;
22158 diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
22159 index 104b713f4055..d601e49f80e0 100644
22160 --- a/drivers/block/ataflop.c
22161 +++ b/drivers/block/ataflop.c
22162 @@ -729,8 +729,12 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
22163         unsigned long   flags;
22164         int ret;
22166 -       if (type)
22167 +       if (type) {
22168                 type--;
22169 +               if (type >= NUM_DISK_MINORS ||
22170 +                   minor2disktype[type].drive_types > DriveType)
22171 +                       return -EINVAL;
22172 +       }
22174         q = unit[drive].disk[type]->queue;
22175         blk_mq_freeze_queue(q);
22176 @@ -742,11 +746,6 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
22177         local_irq_restore(flags);
22179         if (type) {
22180 -               if (type >= NUM_DISK_MINORS ||
22181 -                   minor2disktype[type].drive_types > DriveType) {
22182 -                       ret = -EINVAL;
22183 -                       goto out;
22184 -               }
22185                 type = minor2disktype[type].index;
22186                 UDT = &atari_disk_type[type];
22187         }
22188 @@ -2002,7 +2001,10 @@ static void ataflop_probe(dev_t dev)
22189         int drive = MINOR(dev) & 3;
22190         int type  = MINOR(dev) >> 2;
22192 -       if (drive >= FD_MAX_UNITS || type > NUM_DISK_MINORS)
22193 +       if (type)
22194 +               type--;
22196 +       if (drive >= FD_MAX_UNITS || type >= NUM_DISK_MINORS)
22197                 return;
22198         mutex_lock(&ataflop_probe_lock);
22199         if (!unit[drive].disk[type]) {
22200 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
22201 index 4ff71b579cfc..974da561b8e5 100644
22202 --- a/drivers/block/nbd.c
22203 +++ b/drivers/block/nbd.c
22204 @@ -1980,7 +1980,8 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
22205          * config ref and try to destroy the workqueue from inside the work
22206          * queue.
22207          */
22208 -       flush_workqueue(nbd->recv_workq);
22209 +       if (nbd->recv_workq)
22210 +               flush_workqueue(nbd->recv_workq);
22211         if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
22212                                &nbd->config->runtime_flags))
22213                 nbd_config_put(nbd);
22214 diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
22215 index bfcab1c782b5..dae54dd1aeac 100644
22216 --- a/drivers/block/null_blk/zoned.c
22217 +++ b/drivers/block/null_blk/zoned.c
22218 @@ -180,6 +180,7 @@ int null_register_zoned_dev(struct nullb *nullb)
22219  void null_free_zoned_dev(struct nullb_device *dev)
22221         kvfree(dev->zones);
22222 +       dev->zones = NULL;
22225  int null_report_zones(struct gendisk *disk, sector_t sector,
22226 diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c
22227 index d4aa6bfc9555..49ad400a5225 100644
22228 --- a/drivers/block/rnbd/rnbd-clt-sysfs.c
22229 +++ b/drivers/block/rnbd/rnbd-clt-sysfs.c
22230 @@ -432,10 +432,14 @@ void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev)
22231          * i.e. rnbd_clt_unmap_dev_store() leading to a sysfs warning because
22232          * the sysfs link was already removed.
22233          */
22234 -       if (dev->blk_symlink_name && try_module_get(THIS_MODULE)) {
22235 -               sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name);
22236 +       if (dev->blk_symlink_name) {
22237 +               if (try_module_get(THIS_MODULE)) {
22238 +                       sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name);
22239 +                       module_put(THIS_MODULE);
22240 +               }
22241 +               /* The symlink name must always be freed. */
22242                 kfree(dev->blk_symlink_name);
22243 -               module_put(THIS_MODULE);
22244 +               dev->blk_symlink_name = NULL;
22245         }
22248 @@ -479,11 +483,7 @@ static int rnbd_clt_get_path_name(struct rnbd_clt_dev *dev, char *buf,
22249         while ((s = strchr(pathname, '/')))
22250                 s[0] = '!';
22252 -       ret = snprintf(buf, len, "%s", pathname);
22253 -       if (ret >= len)
22254 -               return -ENAMETOOLONG;
22256 -       ret = snprintf(buf, len, "%s@%s", buf, dev->sess->sessname);
22257 +       ret = snprintf(buf, len, "%s@%s", pathname, dev->sess->sessname);
22258         if (ret >= len)
22259                 return -ENAMETOOLONG;
22261 diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
22262 index 45a470076652..5ab7319ff2ea 100644
22263 --- a/drivers/block/rnbd/rnbd-clt.c
22264 +++ b/drivers/block/rnbd/rnbd-clt.c
22265 @@ -693,7 +693,11 @@ static void remap_devs(struct rnbd_clt_session *sess)
22266                 return;
22267         }
22269 -       rtrs_clt_query(sess->rtrs, &attrs);
22270 +       err = rtrs_clt_query(sess->rtrs, &attrs);
22271 +       if (err) {
22272 +               pr_err("rtrs_clt_query(\"%s\"): %d\n", sess->sessname, err);
22273 +               return;
22274 +       }
22275         mutex_lock(&sess->lock);
22276         sess->max_io_size = attrs.max_io_size;
22278 @@ -1234,7 +1238,11 @@ find_and_get_or_create_sess(const char *sessname,
22279                 err = PTR_ERR(sess->rtrs);
22280                 goto wake_up_and_put;
22281         }
22282 -       rtrs_clt_query(sess->rtrs, &attrs);
22284 +       err = rtrs_clt_query(sess->rtrs, &attrs);
22285 +       if (err)
22286 +               goto close_rtrs;
22288         sess->max_io_size = attrs.max_io_size;
22289         sess->queue_depth = attrs.queue_depth;
22291 diff --git a/drivers/block/rnbd/rnbd-clt.h b/drivers/block/rnbd/rnbd-clt.h
22292 index 537d499dad3b..73d980840531 100644
22293 --- a/drivers/block/rnbd/rnbd-clt.h
22294 +++ b/drivers/block/rnbd/rnbd-clt.h
22295 @@ -87,7 +87,7 @@ struct rnbd_clt_session {
22296         DECLARE_BITMAP(cpu_queues_bm, NR_CPUS);
22297         int     __percpu        *cpu_rr; /* per-cpu var for CPU round-robin */
22298         atomic_t                busy;
22299 -       int                     queue_depth;
22300 +       size_t                  queue_depth;
22301         u32                     max_io_size;
22302         struct blk_mq_tag_set   tag_set;
22303         struct mutex            lock; /* protects state and devs_list */
22304 diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
22305 index a6a68d44f517..677770f32843 100644
22306 --- a/drivers/block/rnbd/rnbd-srv.c
22307 +++ b/drivers/block/rnbd/rnbd-srv.c
22308 @@ -341,7 +341,9 @@ void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev)
22309         struct rnbd_srv_session *sess = sess_dev->sess;
22311         sess_dev->keep_id = true;
22312 -       mutex_lock(&sess->lock);
22313 +       /* Closing has already been started by the client's close message. */
22314 +       if (!mutex_trylock(&sess->lock))
22315 +               return;
22316         rnbd_srv_destroy_dev_session_sysfs(sess_dev);
22317         mutex_unlock(&sess->lock);
22319 diff --git a/drivers/block/swim.c b/drivers/block/swim.c
22320 index cc6a0bc6c005..ac5c170d76e8 100644
22321 --- a/drivers/block/swim.c
22322 +++ b/drivers/block/swim.c
22323 @@ -328,7 +328,7 @@ static inline void swim_motor(struct swim __iomem *base,
22324                         if (swim_readbit(base, MOTOR_ON))
22325                                 break;
22326                         set_current_state(TASK_INTERRUPTIBLE);
22327 -                       schedule_timeout(1);
22328 +                       schedule_min_hrtimeout();
22329                 }
22330         } else if (action == OFF) {
22331                 swim_action(base, MOTOR_OFF);
22332 @@ -347,7 +347,7 @@ static inline void swim_eject(struct swim __iomem *base)
22333                 if (!swim_readbit(base, DISK_IN))
22334                         break;
22335                 set_current_state(TASK_INTERRUPTIBLE);
22336 -               schedule_timeout(1);
22337 +               schedule_min_hrtimeout();
22338         }
22339         swim_select(base, RELAX);
22341 @@ -372,6 +372,6 @@ static inline int swim_step(struct swim __iomem *base)
22343                 set_current_state(TASK_INTERRUPTIBLE);
22344 -               schedule_timeout(1);
22345 +               schedule_min_hrtimeout();
22347                 swim_select(base, RELAX);
22348                 if (!swim_readbit(base, STEP))
22349 diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
22350 index b0c71d3a81a0..bda5c815e441 100644
22351 --- a/drivers/block/xen-blkback/common.h
22352 +++ b/drivers/block/xen-blkback/common.h
22353 @@ -313,6 +313,7 @@ struct xen_blkif {
22355         struct work_struct      free_work;
22356         unsigned int            nr_ring_pages;
22357 +       bool                    multi_ref;
22358         /* All rings for this device. */
22359         struct xen_blkif_ring   *rings;
22360         unsigned int            nr_rings;
22361 diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
22362 index c2aaf690352c..125b22205d38 100644
22363 --- a/drivers/block/xen-blkback/xenbus.c
22364 +++ b/drivers/block/xen-blkback/xenbus.c
22365 @@ -998,14 +998,17 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
22366         for (i = 0; i < nr_grefs; i++) {
22367                 char ring_ref_name[RINGREF_NAME_LEN];
22369 -               snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
22370 +               if (blkif->multi_ref)
22371 +                       snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
22372 +               else {
22373 +                       WARN_ON(i != 0);
22374 +                       snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref");
22375 +               }
22377                 err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
22378                                    "%u", &ring_ref[i]);
22380                 if (err != 1) {
22381 -                       if (nr_grefs == 1)
22382 -                               break;
22384                         err = -EINVAL;
22385                         xenbus_dev_fatal(dev, err, "reading %s/%s",
22386                                          dir, ring_ref_name);
22387 @@ -1013,18 +1016,6 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
22388                 }
22389         }
22391 -       if (err != 1) {
22392 -               WARN_ON(nr_grefs != 1);
22394 -               err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u",
22395 -                                  &ring_ref[0]);
22396 -               if (err != 1) {
22397 -                       err = -EINVAL;
22398 -                       xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
22399 -                       return err;
22400 -               }
22401 -       }
22403         err = -ENOMEM;
22404         for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
22405                 req = kzalloc(sizeof(*req), GFP_KERNEL);
22406 @@ -1129,10 +1120,15 @@ static int connect_ring(struct backend_info *be)
22407                  blkif->nr_rings, blkif->blk_protocol, protocol,
22408                  blkif->vbd.feature_gnt_persistent ? "persistent grants" : "");
22410 -       ring_page_order = xenbus_read_unsigned(dev->otherend,
22411 -                                              "ring-page-order", 0);
22413 -       if (ring_page_order > xen_blkif_max_ring_order) {
22414 +       err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
22415 +                          &ring_page_order);
22416 +       if (err != 1) {
22417 +               blkif->nr_ring_pages = 1;
22418 +               blkif->multi_ref = false;
22419 +       } else if (ring_page_order <= xen_blkif_max_ring_order) {
22420 +               blkif->nr_ring_pages = 1 << ring_page_order;
22421 +               blkif->multi_ref = true;
22422 +       } else {
22423                 err = -EINVAL;
22424                 xenbus_dev_fatal(dev, err,
22425                                  "requested ring page order %d exceed max:%d",
22426 @@ -1141,8 +1137,6 @@ static int connect_ring(struct backend_info *be)
22427                 return err;
22428         }
22430 -       blkif->nr_ring_pages = 1 << ring_page_order;
22432         if (blkif->nr_rings == 1)
22433                 return read_per_ring_refs(&blkif->rings[0], dev->otherend);
22434         else {
22435 diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
22436 index 5cbfbd948f67..4a901508e48e 100644
22437 --- a/drivers/bluetooth/btusb.c
22438 +++ b/drivers/bluetooth/btusb.c
22439 @@ -399,7 +399,9 @@ static const struct usb_device_id blacklist_table[] = {
22441         /* MediaTek Bluetooth devices */
22442         { USB_VENDOR_AND_INTERFACE_INFO(0x0e8d, 0xe0, 0x01, 0x01),
22443 -         .driver_info = BTUSB_MEDIATEK },
22444 +         .driver_info = BTUSB_MEDIATEK |
22445 +                        BTUSB_WIDEBAND_SPEECH |
22446 +                        BTUSB_VALID_LE_STATES },
22448         /* Additional MediaTek MT7615E Bluetooth devices */
22449         { USB_DEVICE(0x13d3, 0x3560), .driver_info = BTUSB_MEDIATEK},
22450 diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c
22451 index c2546bf229fb..08c28740dc4e 100644
22452 --- a/drivers/bus/mhi/core/boot.c
22453 +++ b/drivers/bus/mhi/core/boot.c
22454 @@ -389,7 +389,6 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
22455  void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
22457         const struct firmware *firmware = NULL;
22458 -       struct image_info *image_info;
22459         struct device *dev = &mhi_cntrl->mhi_dev->dev;
22460         const char *fw_name;
22461         void *buf;
22462 @@ -491,44 +490,42 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
22463  fw_load_ee_pthru:
22464         /* Transitioning into MHI RESET->READY state */
22465         ret = mhi_ready_state_transition(mhi_cntrl);
22467 -       if (!mhi_cntrl->fbc_download)
22468 -               return;
22470         if (ret) {
22471                 dev_err(dev, "MHI did not enter READY state\n");
22472                 goto error_ready_state;
22473         }
22475 -       /* Wait for the SBL event */
22476 -       ret = wait_event_timeout(mhi_cntrl->state_event,
22477 -                                mhi_cntrl->ee == MHI_EE_SBL ||
22478 -                                MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
22479 -                                msecs_to_jiffies(mhi_cntrl->timeout_ms));
22480 +       dev_info(dev, "Wait for device to enter SBL or Mission mode\n");
22481 +       return;
22483 -       if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
22484 -               dev_err(dev, "MHI did not enter SBL\n");
22485 -               goto error_ready_state;
22486 +error_ready_state:
22487 +       if (mhi_cntrl->fbc_download) {
22488 +               mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
22489 +               mhi_cntrl->fbc_image = NULL;
22490         }
22492 -       /* Start full firmware image download */
22493 -       image_info = mhi_cntrl->fbc_image;
22494 +error_fw_load:
22495 +       mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
22496 +       wake_up_all(&mhi_cntrl->state_event);
22499 +int mhi_download_amss_image(struct mhi_controller *mhi_cntrl)
22501 +       struct image_info *image_info = mhi_cntrl->fbc_image;
22502 +       struct device *dev = &mhi_cntrl->mhi_dev->dev;
22503 +       int ret;
22505 +       if (!image_info)
22506 +               return -EIO;
22508         ret = mhi_fw_load_bhie(mhi_cntrl,
22509                                /* Vector table is the last entry */
22510                                &image_info->mhi_buf[image_info->entries - 1]);
22511         if (ret) {
22512 -               dev_err(dev, "MHI did not load image over BHIe, ret: %d\n",
22513 -                       ret);
22514 -               goto error_fw_load;
22515 +               dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret);
22516 +               mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
22517 +               wake_up_all(&mhi_cntrl->state_event);
22518         }
22520 -       return;
22522 -error_ready_state:
22523 -       mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
22524 -       mhi_cntrl->fbc_image = NULL;
22526 -error_fw_load:
22527 -       mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
22528 -       wake_up_all(&mhi_cntrl->state_event);
22529 +       return ret;
22531 diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
22532 index be4eebb0971b..08b7f4a06bfc 100644
22533 --- a/drivers/bus/mhi/core/init.c
22534 +++ b/drivers/bus/mhi/core/init.c
22535 @@ -508,8 +508,6 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
22537         /* Setup wake db */
22538         mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
22539 -       mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
22540 -       mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
22541         mhi_cntrl->wake_set = false;
22543         /* Setup channel db address for each channel in tre_ring */
22544 @@ -552,6 +550,7 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
22545         struct mhi_ring *buf_ring;
22546         struct mhi_ring *tre_ring;
22547         struct mhi_chan_ctxt *chan_ctxt;
22548 +       u32 tmp;
22550         buf_ring = &mhi_chan->buf_ring;
22551         tre_ring = &mhi_chan->tre_ring;
22552 @@ -565,7 +564,19 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
22553         vfree(buf_ring->base);
22555         buf_ring->base = tre_ring->base = NULL;
22556 +       tre_ring->ctxt_wp = NULL;
22557         chan_ctxt->rbase = 0;
22558 +       chan_ctxt->rlen = 0;
22559 +       chan_ctxt->rp = 0;
22560 +       chan_ctxt->wp = 0;
22562 +       tmp = chan_ctxt->chcfg;
22563 +       tmp &= ~CHAN_CTX_CHSTATE_MASK;
22564 +       tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
22565 +       chan_ctxt->chcfg = tmp;
22567 +       /* Update to all cores */
22568 +       smp_wmb();
22571  int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
22572 @@ -863,12 +874,10 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
22573         u32 soc_info;
22574         int ret, i;
22576 -       if (!mhi_cntrl)
22577 -               return -EINVAL;
22579 -       if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
22580 +       if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
22581 +           !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
22582             !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
22583 -           !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs)
22584 +           !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs || !mhi_cntrl->irq)
22585                 return -EINVAL;
22587         ret = parse_config(mhi_cntrl, config);
22588 @@ -890,8 +899,7 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
22589         INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
22590         init_waitqueue_head(&mhi_cntrl->state_event);
22592 -       mhi_cntrl->hiprio_wq = alloc_ordered_workqueue
22593 -                               ("mhi_hiprio_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI);
22594 +       mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
22595         if (!mhi_cntrl->hiprio_wq) {
22596                 dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
22597                 ret = -ENOMEM;
22598 @@ -1296,7 +1304,8 @@ static int mhi_driver_remove(struct device *dev)
22600                 mutex_lock(&mhi_chan->mutex);
22602 -               if (ch_state[dir] == MHI_CH_STATE_ENABLED &&
22603 +               if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
22604 +                    ch_state[dir] == MHI_CH_STATE_STOP) &&
22605                     !mhi_chan->offload_ch)
22606                         mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
22608 diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h
22609 index 6f80ec30c0cd..6f37439e5247 100644
22610 --- a/drivers/bus/mhi/core/internal.h
22611 +++ b/drivers/bus/mhi/core/internal.h
22612 @@ -619,6 +619,7 @@ int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl);
22613  int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl);
22614  int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
22615                  enum mhi_cmd_type cmd);
22616 +int mhi_download_amss_image(struct mhi_controller *mhi_cntrl);
22617  static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl)
22619         return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
22620 diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
22621 index 4e0131b94056..61c37b23dd71 100644
22622 --- a/drivers/bus/mhi/core/main.c
22623 +++ b/drivers/bus/mhi/core/main.c
22624 @@ -242,10 +242,17 @@ static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
22625         smp_wmb();
22628 +static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
22630 +       return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
22633  int mhi_destroy_device(struct device *dev, void *data)
22635 +       struct mhi_chan *ul_chan, *dl_chan;
22636         struct mhi_device *mhi_dev;
22637         struct mhi_controller *mhi_cntrl;
22638 +       enum mhi_ee_type ee = MHI_EE_MAX;
22640         if (dev->bus != &mhi_bus_type)
22641                 return 0;
22642 @@ -257,6 +264,17 @@ int mhi_destroy_device(struct device *dev, void *data)
22643         if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
22644                 return 0;
22646 +       ul_chan = mhi_dev->ul_chan;
22647 +       dl_chan = mhi_dev->dl_chan;
22649 +       /*
22650 +        * If an execution environment is specified, remove only the devices
22651 +        * whose channel ee_mask matches it, as we move on to a
22652 +        * different execution environment.
22653 +        */
22654 +       if (data)
22655 +               ee = *(enum mhi_ee_type *)data;
22657         /*
22658          * For the suspend and resume case, this function will get called
22659          * without mhi_unregister_controller(). Hence, we need to drop the
22660 @@ -264,11 +282,19 @@ int mhi_destroy_device(struct device *dev, void *data)
22661          * be sure that there will be no instances of mhi_dev left after
22662          * this.
22663          */
22664 -       if (mhi_dev->ul_chan)
22665 -               put_device(&mhi_dev->ul_chan->mhi_dev->dev);
22666 +       if (ul_chan) {
22667 +               if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
22668 +                       return 0;
22670 -       if (mhi_dev->dl_chan)
22671 -               put_device(&mhi_dev->dl_chan->mhi_dev->dev);
22672 +               put_device(&ul_chan->mhi_dev->dev);
22673 +       }
22675 +       if (dl_chan) {
22676 +               if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
22677 +                       return 0;
22679 +               put_device(&dl_chan->mhi_dev->dev);
22680 +       }
22682         dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
22683                  mhi_dev->name);
22684 @@ -383,7 +409,16 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev)
22685         struct mhi_event_ctxt *er_ctxt =
22686                 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
22687         struct mhi_ring *ev_ring = &mhi_event->ring;
22688 -       void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
22689 +       dma_addr_t ptr = er_ctxt->rp;
22690 +       void *dev_rp;
22692 +       if (!is_valid_ring_ptr(ev_ring, ptr)) {
22693 +               dev_err(&mhi_cntrl->mhi_dev->dev,
22694 +                       "Event ring rp points outside of the event ring\n");
22695 +               return IRQ_HANDLED;
22696 +       }
22698 +       dev_rp = mhi_to_virtual(ev_ring, ptr);
22700         /* Only proceed if event ring has pending events */
22701         if (ev_ring->rp == dev_rp)
22702 @@ -409,7 +444,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
22703         struct device *dev = &mhi_cntrl->mhi_dev->dev;
22704         enum mhi_state state = MHI_STATE_MAX;
22705         enum mhi_pm_state pm_state = 0;
22706 -       enum mhi_ee_type ee = 0;
22707 +       enum mhi_ee_type ee = MHI_EE_MAX;
22709         write_lock_irq(&mhi_cntrl->pm_lock);
22710         if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
22711 @@ -418,8 +453,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
22712         }
22714         state = mhi_get_mhi_state(mhi_cntrl);
22715 -       ee = mhi_cntrl->ee;
22716 -       mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
22717 +       ee = mhi_get_exec_env(mhi_cntrl);
22718         dev_dbg(dev, "local ee:%s device ee:%s dev_state:%s\n",
22719                 TO_MHI_EXEC_STR(mhi_cntrl->ee), TO_MHI_EXEC_STR(ee),
22720                 TO_MHI_STATE_STR(state));
22721 @@ -431,27 +465,30 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
22722         }
22723         write_unlock_irq(&mhi_cntrl->pm_lock);
22725 -        /* If device supports RDDM don't bother processing SYS error */
22726 -       if (mhi_cntrl->rddm_image) {
22727 -               /* host may be performing a device power down already */
22728 -               if (!mhi_is_active(mhi_cntrl))
22729 -                       goto exit_intvec;
22730 +       if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee)
22731 +               goto exit_intvec;
22733 -               if (mhi_cntrl->ee == MHI_EE_RDDM && mhi_cntrl->ee != ee) {
22734 +       switch (ee) {
22735 +       case MHI_EE_RDDM:
22736 +               /* proceed if power down is not already in progress */
22737 +               if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
22738                         mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
22739 +                       mhi_cntrl->ee = ee;
22740                         wake_up_all(&mhi_cntrl->state_event);
22741                 }
22742 -               goto exit_intvec;
22743 -       }
22745 -       if (pm_state == MHI_PM_SYS_ERR_DETECT) {
22746 +               break;
22747 +       case MHI_EE_PBL:
22748 +       case MHI_EE_EDL:
22749 +       case MHI_EE_PTHRU:
22750 +               mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
22751 +               mhi_cntrl->ee = ee;
22752                 wake_up_all(&mhi_cntrl->state_event);
22754 -               /* For fatal errors, we let controller decide next step */
22755 -               if (MHI_IN_PBL(ee))
22756 -                       mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
22757 -               else
22758 -                       mhi_pm_sys_err_handler(mhi_cntrl);
22759 +               mhi_pm_sys_err_handler(mhi_cntrl);
22760 +               break;
22761 +       default:
22762 +               wake_up_all(&mhi_cntrl->state_event);
22763 +               mhi_pm_sys_err_handler(mhi_cntrl);
22764 +               break;
22765         }
22767  exit_intvec:
22768 @@ -536,6 +573,11 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
22769                 struct mhi_buf_info *buf_info;
22770                 u16 xfer_len;
22772 +               if (!is_valid_ring_ptr(tre_ring, ptr)) {
22773 +                       dev_err(&mhi_cntrl->mhi_dev->dev,
22774 +                               "Event element points outside of the tre ring\n");
22775 +                       break;
22776 +               }
22777                 /* Get the TRB this event points to */
22778                 ev_tre = mhi_to_virtual(tre_ring, ptr);
22780 @@ -570,8 +612,11 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
22781                         /* notify client */
22782                         mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
22784 -                       if (mhi_chan->dir == DMA_TO_DEVICE)
22785 +                       if (mhi_chan->dir == DMA_TO_DEVICE) {
22786                                 atomic_dec(&mhi_cntrl->pending_pkts);
22787 +                               /* Release the reference got from mhi_queue() */
22788 +                               mhi_cntrl->runtime_put(mhi_cntrl);
22789 +                       }
22791                         /*
22792                          * Recycle the buffer if buffer is pre-allocated,
22793 @@ -695,6 +740,12 @@ static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
22794         struct mhi_chan *mhi_chan;
22795         u32 chan;
22797 +       if (!is_valid_ring_ptr(mhi_ring, ptr)) {
22798 +               dev_err(&mhi_cntrl->mhi_dev->dev,
22799 +                       "Event element points outside of the cmd ring\n");
22800 +               return;
22801 +       }
22803         cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
22805         chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
22806 @@ -719,6 +770,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
22807         struct device *dev = &mhi_cntrl->mhi_dev->dev;
22808         u32 chan;
22809         int count = 0;
22810 +       dma_addr_t ptr = er_ctxt->rp;
22812         /*
22813          * This is a quick check to avoid unnecessary event processing
22814 @@ -728,7 +780,13 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
22815         if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
22816                 return -EIO;
22818 -       dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
22819 +       if (!is_valid_ring_ptr(ev_ring, ptr)) {
22820 +               dev_err(&mhi_cntrl->mhi_dev->dev,
22821 +                       "Event ring rp points outside of the event ring\n");
22822 +               return -EIO;
22823 +       }
22825 +       dev_rp = mhi_to_virtual(ev_ring, ptr);
22826         local_rp = ev_ring->rp;
22828         while (dev_rp != local_rp) {
22829 @@ -834,6 +892,8 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
22830                          */
22831                         if (chan < mhi_cntrl->max_chan) {
22832                                 mhi_chan = &mhi_cntrl->mhi_chan[chan];
22833 +                               if (!mhi_chan->configured)
22834 +                                       break;
22835                                 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
22836                                 event_quota--;
22837                         }
22838 @@ -845,7 +905,15 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
22840                 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
22841                 local_rp = ev_ring->rp;
22842 -               dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
22844 +               ptr = er_ctxt->rp;
22845 +               if (!is_valid_ring_ptr(ev_ring, ptr)) {
22846 +                       dev_err(&mhi_cntrl->mhi_dev->dev,
22847 +                               "Event ring rp points outside of the event ring\n");
22848 +                       return -EIO;
22849 +               }
22851 +               dev_rp = mhi_to_virtual(ev_ring, ptr);
22852                 count++;
22853         }
22855 @@ -868,11 +936,18 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
22856         int count = 0;
22857         u32 chan;
22858         struct mhi_chan *mhi_chan;
22859 +       dma_addr_t ptr = er_ctxt->rp;
22861         if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
22862                 return -EIO;
22864 -       dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
22865 +       if (!is_valid_ring_ptr(ev_ring, ptr)) {
22866 +               dev_err(&mhi_cntrl->mhi_dev->dev,
22867 +                       "Event ring rp points outside of the event ring\n");
22868 +               return -EIO;
22869 +       }
22871 +       dev_rp = mhi_to_virtual(ev_ring, ptr);
22872         local_rp = ev_ring->rp;
22874         while (dev_rp != local_rp && event_quota > 0) {
22875 @@ -886,7 +961,8 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
22876                  * Only process the event ring elements whose channel
22877                  * ID is within the maximum supported range.
22878                  */
22879 -               if (chan < mhi_cntrl->max_chan) {
22880 +               if (chan < mhi_cntrl->max_chan &&
22881 +                   mhi_cntrl->mhi_chan[chan].configured) {
22882                         mhi_chan = &mhi_cntrl->mhi_chan[chan];
22884                         if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
22885 @@ -900,7 +976,15 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
22887                 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
22888                 local_rp = ev_ring->rp;
22889 -               dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
22891 +               ptr = er_ctxt->rp;
22892 +               if (!is_valid_ring_ptr(ev_ring, ptr)) {
22893 +                       dev_err(&mhi_cntrl->mhi_dev->dev,
22894 +                               "Event ring rp points outside of the event ring\n");
22895 +                       return -EIO;
22896 +               }
22898 +               dev_rp = mhi_to_virtual(ev_ring, ptr);
22899                 count++;
22900         }
22901         read_lock_bh(&mhi_cntrl->pm_lock);
22902 @@ -1004,9 +1088,11 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
22903         if (unlikely(ret))
22904                 goto exit_unlock;
22906 -       /* trigger M3 exit if necessary */
22907 -       if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
22908 -               mhi_trigger_resume(mhi_cntrl);
22909 +       /* The packet is queued; take a usage ref to exit M3 if necessary.
22910 +        * For a host->device buffer, the balanced put is done on completion;
22911 +        * for a device->host buffer, it is done after ringing the DB.
22912 +        */
22913 +       mhi_cntrl->runtime_get(mhi_cntrl);
22915         /* Assert dev_wake (to exit/prevent M1/M2)*/
22916         mhi_cntrl->wake_toggle(mhi_cntrl);
22917 @@ -1014,12 +1100,11 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
22918         if (mhi_chan->dir == DMA_TO_DEVICE)
22919                 atomic_inc(&mhi_cntrl->pending_pkts);
22921 -       if (unlikely(!MHI_DB_ACCESS_VALID(mhi_cntrl))) {
22922 -               ret = -EIO;
22923 -               goto exit_unlock;
22924 -       }
22925 +       if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
22926 +               mhi_ring_chan_db(mhi_cntrl, mhi_chan);
22928 -       mhi_ring_chan_db(mhi_cntrl, mhi_chan);
22929 +       if (dir == DMA_FROM_DEVICE)
22930 +               mhi_cntrl->runtime_put(mhi_cntrl);
22932  exit_unlock:
22933         read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
22934 @@ -1365,6 +1450,7 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
22935         struct mhi_ring *ev_ring;
22936         struct device *dev = &mhi_cntrl->mhi_dev->dev;
22937         unsigned long flags;
22938 +       dma_addr_t ptr;
22940         dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
22942 @@ -1372,7 +1458,15 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
22944         /* mark all stale events related to channel as STALE event */
22945         spin_lock_irqsave(&mhi_event->lock, flags);
22946 -       dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
22948 +       ptr = er_ctxt->rp;
22949 +       if (!is_valid_ring_ptr(ev_ring, ptr)) {
22950 +               dev_err(&mhi_cntrl->mhi_dev->dev,
22951 +                       "Event ring rp points outside of the event ring\n");
22952 +               dev_rp = ev_ring->rp;
22953 +       } else {
22954 +               dev_rp = mhi_to_virtual(ev_ring, ptr);
22955 +       }
22957         local_rp = ev_ring->rp;
22958         while (dev_rp != local_rp) {
22959 @@ -1403,8 +1497,11 @@ static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
22960         while (tre_ring->rp != tre_ring->wp) {
22961                 struct mhi_buf_info *buf_info = buf_ring->rp;
22963 -               if (mhi_chan->dir == DMA_TO_DEVICE)
22964 +               if (mhi_chan->dir == DMA_TO_DEVICE) {
22965                         atomic_dec(&mhi_cntrl->pending_pkts);
22966 +                       /* Release the reference taken in mhi_queue() */
22967 +                       mhi_cntrl->runtime_put(mhi_cntrl);
22968 +               }
22970                 if (!buf_info->pre_mapped)
22971                         mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
22972 diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
22973 index 681960c72d2a..87d3b73bcade 100644
22974 --- a/drivers/bus/mhi/core/pm.c
22975 +++ b/drivers/bus/mhi/core/pm.c
22976 @@ -377,24 +377,28 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
22978         struct mhi_event *mhi_event;
22979         struct device *dev = &mhi_cntrl->mhi_dev->dev;
22980 +       enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
22981         int i, ret;
22983         dev_dbg(dev, "Processing Mission Mode transition\n");
22985         write_lock_irq(&mhi_cntrl->pm_lock);
22986         if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
22987 -               mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
22988 +               ee = mhi_get_exec_env(mhi_cntrl);
22990 -       if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
22991 +       if (!MHI_IN_MISSION_MODE(ee)) {
22992                 mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
22993                 write_unlock_irq(&mhi_cntrl->pm_lock);
22994                 wake_up_all(&mhi_cntrl->state_event);
22995                 return -EIO;
22996         }
22997 +       mhi_cntrl->ee = ee;
22998         write_unlock_irq(&mhi_cntrl->pm_lock);
23000         wake_up_all(&mhi_cntrl->state_event);
23002 +       device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
23003 +                             mhi_destroy_device);
23004         mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);
23006         /* Force MHI to be in M0 state before continuing */
23007 @@ -755,6 +759,8 @@ void mhi_pm_st_worker(struct work_struct *work)
23008                          * either SBL or AMSS states
23009                          */
23010                         mhi_create_devices(mhi_cntrl);
23011 +                       if (mhi_cntrl->fbc_download)
23012 +                               mhi_download_amss_image(mhi_cntrl);
23013                         break;
23014                 case DEV_ST_TRANSITION_MISSION_MODE:
23015                         mhi_pm_mission_mode_transition(mhi_cntrl);
23016 @@ -1092,7 +1098,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
23017                                                            &val) ||
23018                                         !val,
23019                                 msecs_to_jiffies(mhi_cntrl->timeout_ms));
23020 -               if (ret) {
23021 +               if (!ret) {
23022                         ret = -EIO;
23023                         dev_info(dev, "Failed to reset MHI due to syserr state\n");
23024                         goto error_bhi_offset;
23025 diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
23026 index 20673a4b4a3c..ef549c695b55 100644
23027 --- a/drivers/bus/mhi/pci_generic.c
23028 +++ b/drivers/bus/mhi/pci_generic.c
23029 @@ -230,6 +230,21 @@ static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
23030         }
23033 +static void mhi_pci_wake_get_nop(struct mhi_controller *mhi_cntrl, bool force)
23035 +       /* no-op */
23038 +static void mhi_pci_wake_put_nop(struct mhi_controller *mhi_cntrl, bool override)
23040 +       /* no-op */
23043 +static void mhi_pci_wake_toggle_nop(struct mhi_controller *mhi_cntrl)
23045 +       /* no-op */
23048  static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
23050         struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
23051 @@ -433,6 +448,9 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
23052         mhi_cntrl->status_cb = mhi_pci_status_cb;
23053         mhi_cntrl->runtime_get = mhi_pci_runtime_get;
23054         mhi_cntrl->runtime_put = mhi_pci_runtime_put;
23055 +       mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
23056 +       mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
23057 +       mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
23059         err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
23060         if (err)
23061 @@ -498,6 +516,12 @@ static void mhi_pci_remove(struct pci_dev *pdev)
23062         mhi_unregister_controller(mhi_cntrl);
23065 +static void mhi_pci_shutdown(struct pci_dev *pdev)
23067 +       mhi_pci_remove(pdev);
23068 +       pci_set_power_state(pdev, PCI_D3hot);
23071  static void mhi_pci_reset_prepare(struct pci_dev *pdev)
23073         struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
23074 @@ -668,6 +692,7 @@ static struct pci_driver mhi_pci_driver = {
23075         .id_table       = mhi_pci_id_table,
23076         .probe          = mhi_pci_probe,
23077         .remove         = mhi_pci_remove,
23078 +       .shutdown       = mhi_pci_shutdown,
23079         .err_handler    = &mhi_pci_err_handler,
23080         .driver.pm      = &mhi_pci_pm_ops
23081  };
23082 diff --git a/drivers/bus/qcom-ebi2.c b/drivers/bus/qcom-ebi2.c
23083 index 03ddcf426887..0b8f53a688b8 100644
23084 --- a/drivers/bus/qcom-ebi2.c
23085 +++ b/drivers/bus/qcom-ebi2.c
23086 @@ -353,8 +353,10 @@ static int qcom_ebi2_probe(struct platform_device *pdev)
23088                 /* Figure out the chipselect */
23089                 ret = of_property_read_u32(child, "reg", &csindex);
23090 -               if (ret)
23091 +               if (ret) {
23092 +                       of_node_put(child);
23093                         return ret;
23094 +               }
23096                 if (csindex > 5) {
23097                         dev_err(dev,
23098 diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
23099 index 3d74f237f005..68145e326eb9 100644
23100 --- a/drivers/bus/ti-sysc.c
23101 +++ b/drivers/bus/ti-sysc.c
23102 @@ -635,6 +635,51 @@ static int sysc_parse_and_check_child_range(struct sysc *ddata)
23103         return 0;
23106 +/* Interconnect instances to probe before l4_per instances */
23107 +static struct resource early_bus_ranges[] = {
23108 +       /* am3/4 l4_wkup */
23109 +       { .start = 0x44c00000, .end = 0x44c00000 + 0x300000, },
23110 +       /* omap4/5 and dra7 l4_cfg */
23111 +       { .start = 0x4a000000, .end = 0x4a000000 + 0x300000, },
23112 +       /* omap4 l4_wkup */
23113 +       { .start = 0x4a300000, .end = 0x4a300000 + 0x30000,  },
23114 +       /* omap5 and dra7 l4_wkup without dra7 dcan segment */
23115 +       { .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000,  },
23118 +static atomic_t sysc_defer = ATOMIC_INIT(10);
23120 +/**
23121 + * sysc_defer_non_critical - defer non-critical interconnect probing
23122 + * @ddata: device driver data
23123 + *
23124 + * We want to probe l4_cfg and l4_wkup interconnect instances before any
23125 + * l4_per instances as l4_per instances depend on resources on l4_cfg and
23126 + * l4_wkup interconnects.
23127 + */
23128 +static int sysc_defer_non_critical(struct sysc *ddata)
23130 +       struct resource *res;
23131 +       int i;
23133 +       if (!atomic_read(&sysc_defer))
23134 +               return 0;
23136 +       for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) {
23137 +               res = &early_bus_ranges[i];
23138 +               if (ddata->module_pa >= res->start &&
23139 +                   ddata->module_pa <= res->end) {
23140 +                       atomic_set(&sysc_defer, 0);
23142 +                       return 0;
23143 +               }
23144 +       }
23146 +       atomic_dec_if_positive(&sysc_defer);
23148 +       return -EPROBE_DEFER;
23151  static struct device_node *stdout_path;
23153  static void sysc_init_stdout_path(struct sysc *ddata)
23154 @@ -856,15 +901,19 @@ static int sysc_map_and_check_registers(struct sysc *ddata)
23155         struct device_node *np = ddata->dev->of_node;
23156         int error;
23158 -       if (!of_get_property(np, "reg", NULL))
23159 -               return 0;
23161         error = sysc_parse_and_check_child_range(ddata);
23162         if (error)
23163                 return error;
23165 +       error = sysc_defer_non_critical(ddata);
23166 +       if (error)
23167 +               return error;
23169         sysc_check_children(ddata);
23171 +       if (!of_get_property(np, "reg", NULL))
23172 +               return 0;
23174         error = sysc_parse_registers(ddata);
23175         if (error)
23176                 return error;
23177 diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
23178 index 9874fc1c815b..1831099306aa 100644
23179 --- a/drivers/cdrom/gdrom.c
23180 +++ b/drivers/cdrom/gdrom.c
23181 @@ -743,6 +743,13 @@ static const struct blk_mq_ops gdrom_mq_ops = {
23182  static int probe_gdrom(struct platform_device *devptr)
23184         int err;
23186 +       /*
23187 +        * Ensure our "one" device is properly initialized in case it was
23188 +        * used before
23189 +        */
23190 +       memset(&gd, 0, sizeof(gd));
23192         /* Start the device */
23193         if (gdrom_execute_diagnostic() != 1) {
23194                 pr_warn("ATA Probe for GDROM failed\n");
23195 @@ -831,6 +838,8 @@ static int remove_gdrom(struct platform_device *devptr)
23196         if (gdrom_major)
23197                 unregister_blkdev(gdrom_major, GDROM_DEV_NAME);
23198         unregister_cdrom(gd.cd_info);
23199 +       kfree(gd.cd_info);
23200 +       kfree(gd.toc);
23202         return 0;
23204 @@ -846,7 +855,7 @@ static struct platform_driver gdrom_driver = {
23205  static int __init init_gdrom(void)
23207         int rc;
23208 -       gd.toc = NULL;
23210         rc = platform_driver_register(&gdrom_driver);
23211         if (rc)
23212                 return rc;
23213 @@ -862,8 +871,6 @@ static void __exit exit_gdrom(void)
23215         platform_device_unregister(pd);
23216         platform_driver_unregister(&gdrom_driver);
23217 -       kfree(gd.toc);
23218 -       kfree(gd.cd_info);
23221  module_init(init_gdrom);
23222 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
23223 index c44ad18464f1..ca87178200e0 100644
23224 --- a/drivers/char/ipmi/ipmi_msghandler.c
23225 +++ b/drivers/char/ipmi/ipmi_msghandler.c
23226 @@ -3563,7 +3563,7 @@ static void cleanup_smi_msgs(struct ipmi_smi *intf)
23227         /* Current message first, to preserve order */
23228         while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
23229                 /* Wait for the message to clear out. */
23230 -               schedule_timeout(1);
23231 +               schedule_min_hrtimeout();
23232         }
23234         /* No need for locks, the interface is down. */
23235 diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
23236 index 0416b9c9d410..9ce5fae0f1cf 100644
23237 --- a/drivers/char/ipmi/ipmi_ssif.c
23238 +++ b/drivers/char/ipmi/ipmi_ssif.c
23239 @@ -1288,7 +1288,7 @@ static void shutdown_ssif(void *send_info)
23241         /* make sure the driver is not looking for flags any more. */
23242         while (ssif_info->ssif_state != SSIF_NORMAL)
23243 -               schedule_timeout(1);
23244 +               schedule_min_hrtimeout();
23246         ssif_info->stopping = true;
23247         del_timer_sync(&ssif_info->watch_timer);
23248 diff --git a/drivers/char/random.c b/drivers/char/random.c
23249 index 0fe9e200e4c8..5d6acfecd919 100644
23250 --- a/drivers/char/random.c
23251 +++ b/drivers/char/random.c
23252 @@ -819,7 +819,7 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng)
23254  static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
23256 -       memcpy(&crng->state[0], "expand 32-byte k", 16);
23257 +       chacha_init_consts(crng->state);
23258         _get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
23259         crng_init_try_arch(crng);
23260         crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
23261 @@ -827,7 +827,7 @@ static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
23263  static void __init crng_initialize_primary(struct crng_state *crng)
23265 -       memcpy(&crng->state[0], "expand 32-byte k", 16);
23266 +       chacha_init_consts(crng->state);
23267         _extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
23268         if (crng_init_try_arch_early(crng) && trust_cpu) {
23269                 invalidate_batched_entropy();
23270 diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
23271 index 3633ed70f48f..1b18ce5ebab1 100644
23272 --- a/drivers/char/tpm/eventlog/acpi.c
23273 +++ b/drivers/char/tpm/eventlog/acpi.c
23274 @@ -41,6 +41,27 @@ struct acpi_tcpa {
23275         };
23276  };
23278 +/* Check that the given log is indeed a TPM2 log. */
23279 +static bool tpm_is_tpm2_log(void *bios_event_log, u64 len)
23281 +       struct tcg_efi_specid_event_head *efispecid;
23282 +       struct tcg_pcr_event *event_header;
23283 +       int n;
23285 +       if (len < sizeof(*event_header))
23286 +               return false;
23287 +       len -= sizeof(*event_header);
23288 +       event_header = bios_event_log;
23290 +       if (len < sizeof(*efispecid))
23291 +               return false;
23292 +       efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
23294 +       n = memcmp(efispecid->signature, TCG_SPECID_SIG,
23295 +                  sizeof(TCG_SPECID_SIG));
23296 +       return n == 0;
23299  /* read binary bios log */
23300  int tpm_read_log_acpi(struct tpm_chip *chip)
23302 @@ -52,6 +73,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
23303         struct acpi_table_tpm2 *tbl;
23304         struct acpi_tpm2_phy *tpm2_phy;
23305         int format;
23306 +       int ret;
23308         log = &chip->log;
23310 @@ -112,6 +134,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
23312         log->bios_event_log_end = log->bios_event_log + len;
23314 +       ret = -EIO;
23315         virt = acpi_os_map_iomem(start, len);
23316         if (!virt)
23317                 goto err;
23318 @@ -119,11 +142,19 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
23319         memcpy_fromio(log->bios_event_log, virt, len);
23321         acpi_os_unmap_iomem(virt, len);
23323 +       if (chip->flags & TPM_CHIP_FLAG_TPM2 &&
23324 +           !tpm_is_tpm2_log(log->bios_event_log, len)) {
23325 +               /* try EFI log next */
23326 +               ret = -ENODEV;
23327 +               goto err;
23328 +       }
23330         return format;
23332  err:
23333         kfree(log->bios_event_log);
23334         log->bios_event_log = NULL;
23335 -       return -EIO;
23336 +       return ret;
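
For readers following along: the new check only sanity-tests the first entry of the log. A standalone sketch of the same test, assuming (from the TCG EFI spec and mainline headers, not from this hunk) that TCG_SPECID_SIG is the 16-byte string "Spec ID Event03" and that the event body starts right after a SHA1-format tcg_pcr_event header:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    #define SPECID_SIG "Spec ID Event03"   /* assumed TCG_SPECID_SIG value */

    /*
     * Sketch of what tpm_is_tpm2_log() checks above: the event body of the
     * first (SHA1-format) entry must begin with the Spec ID signature.
     * header_size stands in for the real struct tcg_pcr_event layout.
     */
    static bool looks_like_tpm2_log(const unsigned char *log, size_t len,
                                    size_t header_size)
    {
            if (len < header_size + sizeof(SPECID_SIG))
                    return false;
            return memcmp(log + header_size, SPECID_SIG,
                          sizeof(SPECID_SIG)) == 0;
    }

    int main(void)
    {
            unsigned char log[64] = { 0 };

            memcpy(log + 32, SPECID_SIG, sizeof(SPECID_SIG));
            printf("tpm2 log? %d\n", looks_like_tpm2_log(log, sizeof(log), 32));
            return 0;
    }
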
23339 diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c
23340 index 7460f230bae4..8512ec76d526 100644
23341 --- a/drivers/char/tpm/eventlog/common.c
23342 +++ b/drivers/char/tpm/eventlog/common.c
23343 @@ -107,6 +107,9 @@ void tpm_bios_log_setup(struct tpm_chip *chip)
23344         int log_version;
23345         int rc = 0;
23347 +       if (chip->flags & TPM_CHIP_FLAG_VIRTUAL)
23348 +               return;
23350         rc = tpm_read_log(chip);
23351         if (rc < 0)
23352                 return;
23353 diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c
23354 index 35229e5143ca..e6cb9d525e30 100644
23355 --- a/drivers/char/tpm/eventlog/efi.c
23356 +++ b/drivers/char/tpm/eventlog/efi.c
23357 @@ -17,6 +17,7 @@ int tpm_read_log_efi(struct tpm_chip *chip)
23360         struct efi_tcg2_final_events_table *final_tbl = NULL;
23361 +       int final_events_log_size = efi_tpm_final_log_size;
23362         struct linux_efi_tpm_eventlog *log_tbl;
23363         struct tpm_bios_log *log;
23364         u32 log_size;
23365 @@ -66,12 +67,12 @@ int tpm_read_log_efi(struct tpm_chip *chip)
23366         ret = tpm_log_version;
23368         if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
23369 -           efi_tpm_final_log_size == 0 ||
23370 +           final_events_log_size == 0 ||
23371             tpm_log_version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
23372                 goto out;
23374         final_tbl = memremap(efi.tpm_final_log,
23375 -                            sizeof(*final_tbl) + efi_tpm_final_log_size,
23376 +                            sizeof(*final_tbl) + final_events_log_size,
23377                              MEMREMAP_WB);
23378         if (!final_tbl) {
23379                 pr_err("Could not map UEFI TPM final log\n");
23380 @@ -80,10 +81,18 @@ int tpm_read_log_efi(struct tpm_chip *chip)
23381                 goto out;
23382         }
23384 -       efi_tpm_final_log_size -= log_tbl->final_events_preboot_size;
23385 +       /*
23386 +        * The 'final events log' size excludes the 'final events preboot log'
23387 +        * at its beginning.
23388 +        */
23389 +       final_events_log_size -= log_tbl->final_events_preboot_size;
23391 +       /*
23392 +        * Allocate memory for the 'combined log', to which we will append
23393 +        * the 'final events log'.
23394 +        */
23395         tmp = krealloc(log->bios_event_log,
23396 -                      log_size + efi_tpm_final_log_size,
23397 +                      log_size + final_events_log_size,
23398                        GFP_KERNEL);
23399         if (!tmp) {
23400                 kfree(log->bios_event_log);
23401 @@ -94,15 +103,19 @@ int tpm_read_log_efi(struct tpm_chip *chip)
23402         log->bios_event_log = tmp;
23404         /*
23405 -        * Copy any of the final events log that didn't also end up in the
23406 -        * main log. Events can be logged in both if events are generated
23407 +        * Append any of the 'final events log' that didn't also end up in the
23408 +        * 'main log'. Events can be logged in both if events are generated
23409          * between GetEventLog() and ExitBootServices().
23410          */
23411         memcpy((void *)log->bios_event_log + log_size,
23412                final_tbl->events + log_tbl->final_events_preboot_size,
23413 -              efi_tpm_final_log_size);
23414 +              final_events_log_size);
23415 +       /*
23416 +        * The size of the 'combined log' is the size of the 'main log' plus
23417 +        * the size of the 'final events log'.
23418 +        */
23419         log->bios_event_log_end = log->bios_event_log +
23420 -               log_size + efi_tpm_final_log_size;
23421 +               log_size + final_events_log_size;
23423  out:
23424         memunmap(final_tbl);
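
The size bookkeeping in this hunk reduces to: bytes appended = final events table size minus the preboot prefix already present in the main log, and combined size = main log size plus that remainder. A tiny arithmetic sketch with made-up sizes:

    #include <stdio.h>

    int main(void)
    {
            /* Illustrative numbers only; real sizes come from the EFI tables. */
            unsigned long log_size = 4096;        /* main log */
            unsigned long final_log_size = 512;   /* final events table */
            unsigned long preboot_size = 128;     /* already in the main log */

            unsigned long append = final_log_size - preboot_size;
            unsigned long combined = log_size + append;

            printf("append %lu bytes, combined log %lu bytes\n",
                   append, combined);
            return 0;
    }
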
23425 diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
23426 index eff1f12d981a..c84d23951219 100644
23427 --- a/drivers/char/tpm/tpm2-cmd.c
23428 +++ b/drivers/char/tpm/tpm2-cmd.c
23429 @@ -656,6 +656,7 @@ int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
23431         if (nr_commands !=
23432             be32_to_cpup((__be32 *)&buf.data[TPM_HEADER_SIZE + 5])) {
23433 +               rc = -EFAULT;
23434                 tpm_buf_destroy(&buf);
23435                 goto out;
23436         }
23437 diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
23438 index a2e0395cbe61..55b9d3965ae1 100644
23439 --- a/drivers/char/tpm/tpm_tis_core.c
23440 +++ b/drivers/char/tpm/tpm_tis_core.c
23441 @@ -709,16 +709,14 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
23442         cap_t cap;
23443         int ret;
23445 -       /* TPM 2.0 */
23446 -       if (chip->flags & TPM_CHIP_FLAG_TPM2)
23447 -               return tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
23449 -       /* TPM 1.2 */
23450         ret = request_locality(chip, 0);
23451         if (ret < 0)
23452                 return ret;
23454 -       ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
23455 +       if (chip->flags & TPM_CHIP_FLAG_TPM2)
23456 +               ret = tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
23457 +       else
23458 +               ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
23460         release_locality(chip, 0);
23462 @@ -1127,12 +1125,20 @@ int tpm_tis_resume(struct device *dev)
23463         if (ret)
23464                 return ret;
23466 -       /* TPM 1.2 requires self-test on resume. This function actually returns
23467 +       /*
23468 +        * TPM 1.2 requires self-test on resume. This function actually returns
23469          * an error code but for unknown reason it isn't handled.
23470          */
23471 -       if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
23472 +       if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
23473 +               ret = request_locality(chip, 0);
23474 +               if (ret < 0)
23475 +                       return ret;
23477                 tpm1_do_selftest(chip);
23479 +               release_locality(chip, 0);
23480 +       }
23482         return 0;
23484  EXPORT_SYMBOL_GPL(tpm_tis_resume);
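
Both tpm_tis_core hunks follow the same shape: take locality 0, run the TPM 1.2 or 2.0 operation, then release the locality on every path. A minimal userspace-style sketch of that bracketing, with stubbed helpers standing in for the real TIS functions:

    #include <stdio.h>

    struct tpm_chip { int id; };

    /* Hypothetical stubs standing in for the real TIS helpers. */
    static int request_locality(struct tpm_chip *chip, int l) { (void)chip; (void)l; return 0; }
    static void release_locality(struct tpm_chip *chip, int l) { (void)chip; (void)l; }
    static int tpm_do_operation(struct tpm_chip *chip) { (void)chip; return 0; }

    static int tpm_with_locality0(struct tpm_chip *chip)
    {
            int ret = request_locality(chip, 0);
            if (ret < 0)
                    return ret;
            ret = tpm_do_operation(chip);  /* TPM 1.2 or 2.0 call */
            release_locality(chip, 0);     /* released on every path */
            return ret;
    }

    int main(void)
    {
            struct tpm_chip chip = { 0 };
            printf("ret=%d\n", tpm_with_locality0(&chip));
            return 0;
    }
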
23485 diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c
23486 index ec9a65e7887d..f19c227d20f4 100644
23487 --- a/drivers/char/tpm/tpm_tis_i2c_cr50.c
23488 +++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c
23489 @@ -483,6 +483,7 @@ static int tpm_cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len)
23490         expected = be32_to_cpup((__be32 *)(buf + 2));
23491         if (expected > buf_len) {
23492                 dev_err(&chip->dev, "Buffer too small to receive i2c data\n");
23493 +               rc = -E2BIG;
23494                 goto out_err;
23495         }
23497 diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
23498 index 6a0059e508e3..93f5d11c830b 100644
23499 --- a/drivers/char/ttyprintk.c
23500 +++ b/drivers/char/ttyprintk.c
23501 @@ -158,12 +158,23 @@ static int tpk_ioctl(struct tty_struct *tty,
23502         return 0;
23506 + * TTY operations hangup function.
23507 + */
23508 +static void tpk_hangup(struct tty_struct *tty)
23510 +       struct ttyprintk_port *tpkp = tty->driver_data;
23512 +       tty_port_hangup(&tpkp->port);
23515  static const struct tty_operations ttyprintk_ops = {
23516         .open = tpk_open,
23517         .close = tpk_close,
23518         .write = tpk_write,
23519         .write_room = tpk_write_room,
23520         .ioctl = tpk_ioctl,
23521 +       .hangup = tpk_hangup,
23522  };
23524  static const struct tty_port_operations null_ops = { };
23525 diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
23526 index a55b37fc2c8b..bc3be5f3eae1 100644
23527 --- a/drivers/clk/clk-ast2600.c
23528 +++ b/drivers/clk/clk-ast2600.c
23529 @@ -61,10 +61,10 @@ static void __iomem *scu_g6_base;
23530  static const struct aspeed_gate_data aspeed_g6_gates[] = {
23531         /*                                  clk rst  name               parent   flags */
23532         [ASPEED_CLK_GATE_MCLK]          = {  0, -1, "mclk-gate",        "mpll",  CLK_IS_CRITICAL }, /* SDRAM */
23533 -       [ASPEED_CLK_GATE_ECLK]          = {  1, -1, "eclk-gate",        "eclk",  0 },   /* Video Engine */
23534 +       [ASPEED_CLK_GATE_ECLK]          = {  1,  6, "eclk-gate",        "eclk",  0 },   /* Video Engine */
23535         [ASPEED_CLK_GATE_GCLK]          = {  2,  7, "gclk-gate",        NULL,    0 },   /* 2D engine */
23536         /* vclk parent - dclk/d1clk/hclk/mclk */
23537 -       [ASPEED_CLK_GATE_VCLK]          = {  3,  6, "vclk-gate",        NULL,    0 },   /* Video Capture */
23538 +       [ASPEED_CLK_GATE_VCLK]          = {  3, -1, "vclk-gate",        NULL,    0 },   /* Video Capture */
23539         [ASPEED_CLK_GATE_BCLK]          = {  4,  8, "bclk-gate",        "bclk",  0 }, /* PCIe/PCI */
23540         /* From dpll */
23541         [ASPEED_CLK_GATE_DCLK]          = {  5, -1, "dclk-gate",        NULL,    CLK_IS_CRITICAL }, /* DAC */
23542 diff --git a/drivers/clk/imx/clk-imx25.c b/drivers/clk/imx/clk-imx25.c
23543 index a66cabfbf94f..66192fe0a898 100644
23544 --- a/drivers/clk/imx/clk-imx25.c
23545 +++ b/drivers/clk/imx/clk-imx25.c
23546 @@ -73,16 +73,6 @@ enum mx25_clks {
23548  static struct clk *clk[clk_max];
23550 -static struct clk ** const uart_clks[] __initconst = {
23551 -       &clk[uart_ipg_per],
23552 -       &clk[uart1_ipg],
23553 -       &clk[uart2_ipg],
23554 -       &clk[uart3_ipg],
23555 -       &clk[uart4_ipg],
23556 -       &clk[uart5_ipg],
23557 -       NULL
23560  static int __init __mx25_clocks_init(void __iomem *ccm_base)
23562         BUG_ON(!ccm_base);
23563 @@ -228,7 +218,7 @@ static int __init __mx25_clocks_init(void __iomem *ccm_base)
23564          */
23565         clk_set_parent(clk[cko_sel], clk[ipg]);
23567 -       imx_register_uart_clocks(uart_clks);
23568 +       imx_register_uart_clocks(6);
23570         return 0;
23572 diff --git a/drivers/clk/imx/clk-imx27.c b/drivers/clk/imx/clk-imx27.c
23573 index 5585ded8b8c6..56a5fc402b10 100644
23574 --- a/drivers/clk/imx/clk-imx27.c
23575 +++ b/drivers/clk/imx/clk-imx27.c
23576 @@ -49,17 +49,6 @@ static const char *ssi_sel_clks[] = { "spll_gate", "mpll", };
23577  static struct clk *clk[IMX27_CLK_MAX];
23578  static struct clk_onecell_data clk_data;
23580 -static struct clk ** const uart_clks[] __initconst = {
23581 -       &clk[IMX27_CLK_PER1_GATE],
23582 -       &clk[IMX27_CLK_UART1_IPG_GATE],
23583 -       &clk[IMX27_CLK_UART2_IPG_GATE],
23584 -       &clk[IMX27_CLK_UART3_IPG_GATE],
23585 -       &clk[IMX27_CLK_UART4_IPG_GATE],
23586 -       &clk[IMX27_CLK_UART5_IPG_GATE],
23587 -       &clk[IMX27_CLK_UART6_IPG_GATE],
23588 -       NULL
23591  static void __init _mx27_clocks_init(unsigned long fref)
23593         BUG_ON(!ccm);
23594 @@ -176,7 +165,7 @@ static void __init _mx27_clocks_init(unsigned long fref)
23596         clk_prepare_enable(clk[IMX27_CLK_EMI_AHB_GATE]);
23598 -       imx_register_uart_clocks(uart_clks);
23599 +       imx_register_uart_clocks(7);
23601         imx_print_silicon_rev("i.MX27", mx27_revision());
23603 diff --git a/drivers/clk/imx/clk-imx35.c b/drivers/clk/imx/clk-imx35.c
23604 index c1df03665c09..0fe5ac210156 100644
23605 --- a/drivers/clk/imx/clk-imx35.c
23606 +++ b/drivers/clk/imx/clk-imx35.c
23607 @@ -82,14 +82,6 @@ enum mx35_clks {
23609  static struct clk *clk[clk_max];
23611 -static struct clk ** const uart_clks[] __initconst = {
23612 -       &clk[ipg],
23613 -       &clk[uart1_gate],
23614 -       &clk[uart2_gate],
23615 -       &clk[uart3_gate],
23616 -       NULL
23619  static void __init _mx35_clocks_init(void)
23621         void __iomem *base;
23622 @@ -243,7 +235,7 @@ static void __init _mx35_clocks_init(void)
23623          */
23624         clk_prepare_enable(clk[scc_gate]);
23626 -       imx_register_uart_clocks(uart_clks);
23627 +       imx_register_uart_clocks(4);
23629         imx_print_silicon_rev("i.MX35", mx35_revision());
23631 diff --git a/drivers/clk/imx/clk-imx5.c b/drivers/clk/imx/clk-imx5.c
23632 index 01e079b81026..e4493846454d 100644
23633 --- a/drivers/clk/imx/clk-imx5.c
23634 +++ b/drivers/clk/imx/clk-imx5.c
23635 @@ -128,30 +128,6 @@ static const char *ieee1588_sels[] = { "pll3_sw", "pll4_sw", "dummy" /* usbphy2_
23636  static struct clk *clk[IMX5_CLK_END];
23637  static struct clk_onecell_data clk_data;
23639 -static struct clk ** const uart_clks_mx51[] __initconst = {
23640 -       &clk[IMX5_CLK_UART1_IPG_GATE],
23641 -       &clk[IMX5_CLK_UART1_PER_GATE],
23642 -       &clk[IMX5_CLK_UART2_IPG_GATE],
23643 -       &clk[IMX5_CLK_UART2_PER_GATE],
23644 -       &clk[IMX5_CLK_UART3_IPG_GATE],
23645 -       &clk[IMX5_CLK_UART3_PER_GATE],
23646 -       NULL
23649 -static struct clk ** const uart_clks_mx50_mx53[] __initconst = {
23650 -       &clk[IMX5_CLK_UART1_IPG_GATE],
23651 -       &clk[IMX5_CLK_UART1_PER_GATE],
23652 -       &clk[IMX5_CLK_UART2_IPG_GATE],
23653 -       &clk[IMX5_CLK_UART2_PER_GATE],
23654 -       &clk[IMX5_CLK_UART3_IPG_GATE],
23655 -       &clk[IMX5_CLK_UART3_PER_GATE],
23656 -       &clk[IMX5_CLK_UART4_IPG_GATE],
23657 -       &clk[IMX5_CLK_UART4_PER_GATE],
23658 -       &clk[IMX5_CLK_UART5_IPG_GATE],
23659 -       &clk[IMX5_CLK_UART5_PER_GATE],
23660 -       NULL
23663  static void __init mx5_clocks_common_init(void __iomem *ccm_base)
23665         clk[IMX5_CLK_DUMMY]             = imx_clk_fixed("dummy", 0);
23666 @@ -382,7 +358,7 @@ static void __init mx50_clocks_init(struct device_node *np)
23667         r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);
23668         clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r);
23670 -       imx_register_uart_clocks(uart_clks_mx50_mx53);
23671 +       imx_register_uart_clocks(5);
23673  CLK_OF_DECLARE(imx50_ccm, "fsl,imx50-ccm", mx50_clocks_init);
23675 @@ -488,7 +464,7 @@ static void __init mx51_clocks_init(struct device_node *np)
23676         val |= 1 << 23;
23677         writel(val, MXC_CCM_CLPCR);
23679 -       imx_register_uart_clocks(uart_clks_mx51);
23680 +       imx_register_uart_clocks(3);
23682  CLK_OF_DECLARE(imx51_ccm, "fsl,imx51-ccm", mx51_clocks_init);
23684 @@ -633,6 +609,6 @@ static void __init mx53_clocks_init(struct device_node *np)
23685         r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);
23686         clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r);
23688 -       imx_register_uart_clocks(uart_clks_mx50_mx53);
23689 +       imx_register_uart_clocks(5);
23691  CLK_OF_DECLARE(imx53_ccm, "fsl,imx53-ccm", mx53_clocks_init);
23692 diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
23693 index 521d6136d22c..496900de0b0b 100644
23694 --- a/drivers/clk/imx/clk-imx6q.c
23695 +++ b/drivers/clk/imx/clk-imx6q.c
23696 @@ -140,13 +140,6 @@ static inline int clk_on_imx6dl(void)
23697         return of_machine_is_compatible("fsl,imx6dl");
23700 -static const int uart_clk_ids[] __initconst = {
23701 -       IMX6QDL_CLK_UART_IPG,
23702 -       IMX6QDL_CLK_UART_SERIAL,
23705 -static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
23707  static int ldb_di_sel_by_clock_id(int clock_id)
23709         switch (clock_id) {
23710 @@ -440,7 +433,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
23711         struct device_node *np;
23712         void __iomem *anatop_base, *base;
23713         int ret;
23714 -       int i;
23716         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
23717                                           IMX6QDL_CLK_END), GFP_KERNEL);
23718 @@ -982,12 +974,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
23719                                hws[IMX6QDL_CLK_PLL3_USB_OTG]->clk);
23720         }
23722 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
23723 -               int index = uart_clk_ids[i];
23725 -               uart_clks[i] = &hws[index]->clk;
23726 -       }
23728 -       imx_register_uart_clocks(uart_clks);
23729 +       imx_register_uart_clocks(1);
23731  CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init);
23732 diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
23733 index 29eab05c9068..277365970320 100644
23734 --- a/drivers/clk/imx/clk-imx6sl.c
23735 +++ b/drivers/clk/imx/clk-imx6sl.c
23736 @@ -179,19 +179,11 @@ void imx6sl_set_wait_clk(bool enter)
23737                 imx6sl_enable_pll_arm(false);
23740 -static const int uart_clk_ids[] __initconst = {
23741 -       IMX6SL_CLK_UART,
23742 -       IMX6SL_CLK_UART_SERIAL,
23745 -static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
23747  static void __init imx6sl_clocks_init(struct device_node *ccm_node)
23749         struct device_node *np;
23750         void __iomem *base;
23751         int ret;
23752 -       int i;
23754         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
23755                                           IMX6SL_CLK_END), GFP_KERNEL);
23756 @@ -448,12 +440,6 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
23757         clk_set_parent(hws[IMX6SL_CLK_LCDIF_AXI_SEL]->clk,
23758                        hws[IMX6SL_CLK_PLL2_PFD2]->clk);
23760 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
23761 -               int index = uart_clk_ids[i];
23763 -               uart_clks[i] = &hws[index]->clk;
23764 -       }
23766 -       imx_register_uart_clocks(uart_clks);
23767 +       imx_register_uart_clocks(2);
23769  CLK_OF_DECLARE(imx6sl, "fsl,imx6sl-ccm", imx6sl_clocks_init);
23770 diff --git a/drivers/clk/imx/clk-imx6sll.c b/drivers/clk/imx/clk-imx6sll.c
23771 index 8e8288bda4d0..31d777f30039 100644
23772 --- a/drivers/clk/imx/clk-imx6sll.c
23773 +++ b/drivers/clk/imx/clk-imx6sll.c
23774 @@ -76,26 +76,10 @@ static u32 share_count_ssi1;
23775  static u32 share_count_ssi2;
23776  static u32 share_count_ssi3;
23778 -static const int uart_clk_ids[] __initconst = {
23779 -       IMX6SLL_CLK_UART1_IPG,
23780 -       IMX6SLL_CLK_UART1_SERIAL,
23781 -       IMX6SLL_CLK_UART2_IPG,
23782 -       IMX6SLL_CLK_UART2_SERIAL,
23783 -       IMX6SLL_CLK_UART3_IPG,
23784 -       IMX6SLL_CLK_UART3_SERIAL,
23785 -       IMX6SLL_CLK_UART4_IPG,
23786 -       IMX6SLL_CLK_UART4_SERIAL,
23787 -       IMX6SLL_CLK_UART5_IPG,
23788 -       IMX6SLL_CLK_UART5_SERIAL,
23791 -static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
23793  static void __init imx6sll_clocks_init(struct device_node *ccm_node)
23795         struct device_node *np;
23796         void __iomem *base;
23797 -       int i;
23799         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
23800                                           IMX6SLL_CLK_END), GFP_KERNEL);
23801 @@ -356,13 +340,7 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
23803         of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
23805 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
23806 -               int index = uart_clk_ids[i];
23808 -               uart_clks[i] = &hws[index]->clk;
23809 -       }
23811 -       imx_register_uart_clocks(uart_clks);
23812 +       imx_register_uart_clocks(5);
23814         /* Lower the AHB clock rate before changing the clock source. */
23815         clk_set_rate(hws[IMX6SLL_CLK_AHB]->clk, 99000000);
23816 diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
23817 index 20dcce526d07..fc1bd23d4583 100644
23818 --- a/drivers/clk/imx/clk-imx6sx.c
23819 +++ b/drivers/clk/imx/clk-imx6sx.c
23820 @@ -117,18 +117,10 @@ static u32 share_count_ssi3;
23821  static u32 share_count_sai1;
23822  static u32 share_count_sai2;
23824 -static const int uart_clk_ids[] __initconst = {
23825 -       IMX6SX_CLK_UART_IPG,
23826 -       IMX6SX_CLK_UART_SERIAL,
23829 -static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
23831  static void __init imx6sx_clocks_init(struct device_node *ccm_node)
23833         struct device_node *np;
23834         void __iomem *base;
23835 -       int i;
23837         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
23838                                           IMX6SX_CLK_CLK_END), GFP_KERNEL);
23839 @@ -556,12 +548,6 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
23840         clk_set_parent(hws[IMX6SX_CLK_QSPI1_SEL]->clk, hws[IMX6SX_CLK_PLL2_BUS]->clk);
23841         clk_set_parent(hws[IMX6SX_CLK_QSPI2_SEL]->clk, hws[IMX6SX_CLK_PLL2_BUS]->clk);
23843 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
23844 -               int index = uart_clk_ids[i];
23846 -               uart_clks[i] = &hws[index]->clk;
23847 -       }
23849 -       imx_register_uart_clocks(uart_clks);
23850 +       imx_register_uart_clocks(2);
23852  CLK_OF_DECLARE(imx6sx, "fsl,imx6sx-ccm", imx6sx_clocks_init);
23853 diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
23854 index 22d24a6a05e7..c4e0f1c07192 100644
23855 --- a/drivers/clk/imx/clk-imx7d.c
23856 +++ b/drivers/clk/imx/clk-imx7d.c
23857 @@ -377,23 +377,10 @@ static const char *pll_video_bypass_sel[] = { "pll_video_main", "pll_video_main_
23858  static struct clk_hw **hws;
23859  static struct clk_hw_onecell_data *clk_hw_data;
23861 -static const int uart_clk_ids[] __initconst = {
23862 -       IMX7D_UART1_ROOT_CLK,
23863 -       IMX7D_UART2_ROOT_CLK,
23864 -       IMX7D_UART3_ROOT_CLK,
23865 -       IMX7D_UART4_ROOT_CLK,
23866 -       IMX7D_UART5_ROOT_CLK,
23867 -       IMX7D_UART6_ROOT_CLK,
23868 -       IMX7D_UART7_ROOT_CLK,
23871 -static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
23873  static void __init imx7d_clocks_init(struct device_node *ccm_node)
23875         struct device_node *np;
23876         void __iomem *base;
23877 -       int i;
23879         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
23880                                           IMX7D_CLK_END), GFP_KERNEL);
23881 @@ -897,14 +884,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
23882         hws[IMX7D_USB1_MAIN_480M_CLK] = imx_clk_hw_fixed_factor("pll_usb1_main_clk", "osc", 20, 1);
23883         hws[IMX7D_USB_MAIN_480M_CLK] = imx_clk_hw_fixed_factor("pll_usb_main_clk", "osc", 20, 1);
23885 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
23886 -               int index = uart_clk_ids[i];
23888 -               uart_clks[i] = &hws[index]->clk;
23889 -       }
23892 -       imx_register_uart_clocks(uart_clks);
23893 +       imx_register_uart_clocks(7);
23896  CLK_OF_DECLARE(imx7d, "fsl,imx7d-ccm", imx7d_clocks_init);
23897 diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c
23898 index 634c0b6636b0..779e09105da7 100644
23899 --- a/drivers/clk/imx/clk-imx7ulp.c
23900 +++ b/drivers/clk/imx/clk-imx7ulp.c
23901 @@ -43,19 +43,6 @@ static const struct clk_div_table ulp_div_table[] = {
23902         { /* sentinel */ },
23903  };
23905 -static const int pcc2_uart_clk_ids[] __initconst = {
23906 -       IMX7ULP_CLK_LPUART4,
23907 -       IMX7ULP_CLK_LPUART5,
23910 -static const int pcc3_uart_clk_ids[] __initconst = {
23911 -       IMX7ULP_CLK_LPUART6,
23912 -       IMX7ULP_CLK_LPUART7,
23915 -static struct clk **pcc2_uart_clks[ARRAY_SIZE(pcc2_uart_clk_ids) + 1] __initdata;
23916 -static struct clk **pcc3_uart_clks[ARRAY_SIZE(pcc3_uart_clk_ids) + 1] __initdata;
23918  static void __init imx7ulp_clk_scg1_init(struct device_node *np)
23920         struct clk_hw_onecell_data *clk_data;
23921 @@ -150,7 +137,6 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
23922         struct clk_hw_onecell_data *clk_data;
23923         struct clk_hw **hws;
23924         void __iomem *base;
23925 -       int i;
23927         clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_PCC2_END),
23928                            GFP_KERNEL);
23929 @@ -190,13 +176,7 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
23931         of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
23933 -       for (i = 0; i < ARRAY_SIZE(pcc2_uart_clk_ids); i++) {
23934 -               int index = pcc2_uart_clk_ids[i];
23936 -               pcc2_uart_clks[i] = &hws[index]->clk;
23937 -       }
23939 -       imx_register_uart_clocks(pcc2_uart_clks);
23940 +       imx_register_uart_clocks(2);
23942  CLK_OF_DECLARE(imx7ulp_clk_pcc2, "fsl,imx7ulp-pcc2", imx7ulp_clk_pcc2_init);
23944 @@ -205,7 +185,6 @@ static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
23945         struct clk_hw_onecell_data *clk_data;
23946         struct clk_hw **hws;
23947         void __iomem *base;
23948 -       int i;
23950         clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_PCC3_END),
23951                            GFP_KERNEL);
23952 @@ -244,13 +223,7 @@ static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
23954         of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
23956 -       for (i = 0; i < ARRAY_SIZE(pcc3_uart_clk_ids); i++) {
23957 -               int index = pcc3_uart_clk_ids[i];
23959 -               pcc3_uart_clks[i] = &hws[index]->clk;
23960 -       }
23962 -       imx_register_uart_clocks(pcc3_uart_clks);
23963 +       imx_register_uart_clocks(7);
23965  CLK_OF_DECLARE(imx7ulp_clk_pcc3, "fsl,imx7ulp-pcc3", imx7ulp_clk_pcc3_init);
23967 diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
23968 index 6a01eec36dd0..f1919fafb124 100644
23969 --- a/drivers/clk/imx/clk-imx8mm.c
23970 +++ b/drivers/clk/imx/clk-imx8mm.c
23971 @@ -296,20 +296,12 @@ static const char * const clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "
23972  static struct clk_hw_onecell_data *clk_hw_data;
23973  static struct clk_hw **hws;
23975 -static const int uart_clk_ids[] = {
23976 -       IMX8MM_CLK_UART1_ROOT,
23977 -       IMX8MM_CLK_UART2_ROOT,
23978 -       IMX8MM_CLK_UART3_ROOT,
23979 -       IMX8MM_CLK_UART4_ROOT,
23981 -static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
23983  static int imx8mm_clocks_probe(struct platform_device *pdev)
23985         struct device *dev = &pdev->dev;
23986         struct device_node *np = dev->of_node;
23987         void __iomem *base;
23988 -       int ret, i;
23989 +       int ret;
23991         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
23992                                           IMX8MM_CLK_END), GFP_KERNEL);
23993 @@ -634,13 +626,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
23994                 goto unregister_hws;
23995         }
23997 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
23998 -               int index = uart_clk_ids[i];
24000 -               uart_hws[i] = &hws[index]->clk;
24001 -       }
24003 -       imx_register_uart_clocks(uart_hws);
24004 +       imx_register_uart_clocks(4);
24006         return 0;
24008 diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
24009 index 324c5fd0aa04..88f6630cd472 100644
24010 --- a/drivers/clk/imx/clk-imx8mn.c
24011 +++ b/drivers/clk/imx/clk-imx8mn.c
24012 @@ -289,20 +289,12 @@ static const char * const clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "
24013  static struct clk_hw_onecell_data *clk_hw_data;
24014  static struct clk_hw **hws;
24016 -static const int uart_clk_ids[] = {
24017 -       IMX8MN_CLK_UART1_ROOT,
24018 -       IMX8MN_CLK_UART2_ROOT,
24019 -       IMX8MN_CLK_UART3_ROOT,
24020 -       IMX8MN_CLK_UART4_ROOT,
24022 -static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
24024  static int imx8mn_clocks_probe(struct platform_device *pdev)
24026         struct device *dev = &pdev->dev;
24027         struct device_node *np = dev->of_node;
24028         void __iomem *base;
24029 -       int ret, i;
24030 +       int ret;
24032         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
24033                                           IMX8MN_CLK_END), GFP_KERNEL);
24034 @@ -585,13 +577,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
24035                 goto unregister_hws;
24036         }
24038 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
24039 -               int index = uart_clk_ids[i];
24041 -               uart_hws[i] = &hws[index]->clk;
24042 -       }
24044 -       imx_register_uart_clocks(uart_hws);
24045 +       imx_register_uart_clocks(4);
24047         return 0;
24049 diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
24050 index 2f4e1d674e1c..3e6557e7d559 100644
24051 --- a/drivers/clk/imx/clk-imx8mp.c
24052 +++ b/drivers/clk/imx/clk-imx8mp.c
24053 @@ -414,20 +414,11 @@ static const char * const imx8mp_dram_core_sels[] = {"dram_pll_out", "dram_alt_r
24054  static struct clk_hw **hws;
24055  static struct clk_hw_onecell_data *clk_hw_data;
24057 -static const int uart_clk_ids[] = {
24058 -       IMX8MP_CLK_UART1_ROOT,
24059 -       IMX8MP_CLK_UART2_ROOT,
24060 -       IMX8MP_CLK_UART3_ROOT,
24061 -       IMX8MP_CLK_UART4_ROOT,
24063 -static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1];
24065  static int imx8mp_clocks_probe(struct platform_device *pdev)
24067         struct device *dev = &pdev->dev;
24068         struct device_node *np;
24069         void __iomem *anatop_base, *ccm_base;
24070 -       int i;
24072         np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop");
24073         anatop_base = of_iomap(np, 0);
24074 @@ -737,13 +728,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
24076         of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
24078 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
24079 -               int index = uart_clk_ids[i];
24081 -               uart_clks[i] = &hws[index]->clk;
24082 -       }
24084 -       imx_register_uart_clocks(uart_clks);
24085 +       imx_register_uart_clocks(4);
24087         return 0;
24089 diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
24090 index 4dd4ae9d022b..3e1a10d3f55c 100644
24091 --- a/drivers/clk/imx/clk-imx8mq.c
24092 +++ b/drivers/clk/imx/clk-imx8mq.c
24093 @@ -281,20 +281,12 @@ static const char * const pllout_monitor_sels[] = {"osc_25m", "osc_27m", "dummy"
24094  static struct clk_hw_onecell_data *clk_hw_data;
24095  static struct clk_hw **hws;
24097 -static const int uart_clk_ids[] = {
24098 -       IMX8MQ_CLK_UART1_ROOT,
24099 -       IMX8MQ_CLK_UART2_ROOT,
24100 -       IMX8MQ_CLK_UART3_ROOT,
24101 -       IMX8MQ_CLK_UART4_ROOT,
24103 -static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
24105  static int imx8mq_clocks_probe(struct platform_device *pdev)
24107         struct device *dev = &pdev->dev;
24108         struct device_node *np = dev->of_node;
24109         void __iomem *base;
24110 -       int err, i;
24111 +       int err;
24113         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
24114                                           IMX8MQ_CLK_END), GFP_KERNEL);
24115 @@ -629,13 +621,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
24116                 goto unregister_hws;
24117         }
24119 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
24120 -               int index = uart_clk_ids[i];
24122 -               uart_hws[i] = &hws[index]->clk;
24123 -       }
24125 -       imx_register_uart_clocks(uart_hws);
24126 +       imx_register_uart_clocks(4);
24128         return 0;
24130 diff --git a/drivers/clk/imx/clk.c b/drivers/clk/imx/clk.c
24131 index 47882c51cb85..7cc669934253 100644
24132 --- a/drivers/clk/imx/clk.c
24133 +++ b/drivers/clk/imx/clk.c
24134 @@ -147,8 +147,10 @@ void imx_cscmr1_fixup(u32 *val)
24137  #ifndef MODULE
24138 -static int imx_keep_uart_clocks;
24139 -static struct clk ** const *imx_uart_clocks;
24141 +static bool imx_keep_uart_clocks;
24142 +static int imx_enabled_uart_clocks;
24143 +static struct clk **imx_uart_clocks;
24145  static int __init imx_keep_uart_clocks_param(char *str)
24147 @@ -161,24 +163,45 @@ __setup_param("earlycon", imx_keep_uart_earlycon,
24148  __setup_param("earlyprintk", imx_keep_uart_earlyprintk,
24149               imx_keep_uart_clocks_param, 0);
24151 -void imx_register_uart_clocks(struct clk ** const clks[])
24152 +void imx_register_uart_clocks(unsigned int clk_count)
24154 +       imx_enabled_uart_clocks = 0;
24156 +/* i.MX boards use device trees now.  For build tests without CONFIG_OF, do nothing */
24157 +#ifdef CONFIG_OF
24158         if (imx_keep_uart_clocks) {
24159                 int i;
24161 -               imx_uart_clocks = clks;
24162 -               for (i = 0; imx_uart_clocks[i]; i++)
24163 -                       clk_prepare_enable(*imx_uart_clocks[i]);
24164 +               imx_uart_clocks = kcalloc(clk_count, sizeof(struct clk *), GFP_KERNEL);
24166 +               if (!of_stdout)
24167 +                       return;
24169 +               for (i = 0; i < clk_count; i++) {
24170 +                       imx_uart_clocks[imx_enabled_uart_clocks] = of_clk_get(of_stdout, i);
24172 +                       /* Stop if there are no more of_stdout references */
24173 +                       if (IS_ERR(imx_uart_clocks[imx_enabled_uart_clocks]))
24174 +                               return;
24176 +                       /* Only enable the clock if it's not NULL */
24177 +                       if (imx_uart_clocks[imx_enabled_uart_clocks])
24178 +                               clk_prepare_enable(imx_uart_clocks[imx_enabled_uart_clocks++]);
24179 +               }
24180         }
24181 +#endif
24184  static int __init imx_clk_disable_uart(void)
24186 -       if (imx_keep_uart_clocks && imx_uart_clocks) {
24187 +       if (imx_keep_uart_clocks && imx_enabled_uart_clocks) {
24188                 int i;
24190 -               for (i = 0; imx_uart_clocks[i]; i++)
24191 -                       clk_disable_unprepare(*imx_uart_clocks[i]);
24192 +               for (i = 0; i < imx_enabled_uart_clocks; i++) {
24193 +                       clk_disable_unprepare(imx_uart_clocks[i]);
24194 +                       clk_put(imx_uart_clocks[i]);
24195 +               }
24196 +               kfree(imx_uart_clocks);
24197         }
24199         return 0;
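
The clk.c rewrite above is what lets every per-SoC file drop its clock-pointer array: imx_register_uart_clocks() now walks the clock references of the of_stdout node itself, stopping at the first failed lookup, and the call sites merely pass an upper bound. A userspace-flavoured sketch of that loop, with get_clock(i) as a hypothetical stand-in for of_clk_get(of_stdout, i):

    #include <stdio.h>

    #define MAX_STDOUT_CLOCKS 4   /* illustrative clk_count argument */

    /* Hypothetical stand-in for of_clk_get(of_stdout, i): returns an id,
     * or -1 once the node has no more clock references. */
    static int get_clock(int i)
    {
            return i < 2 ? 100 + i : -1;  /* pretend the node lists 2 clocks */
    }

    int main(void)
    {
            int enabled = 0;

            for (int i = 0; i < MAX_STDOUT_CLOCKS; i++) {
                    int clk = get_clock(i);
                    if (clk < 0)
                            break;        /* no more references: stop early */
                    printf("enable clock %d\n", clk);
                    enabled++;
            }
            printf("%d clocks kept on for earlycon\n", enabled);
            return 0;
    }
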
24200 diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
24201 index 4f04c8287286..7571603bee23 100644
24202 --- a/drivers/clk/imx/clk.h
24203 +++ b/drivers/clk/imx/clk.h
24204 @@ -11,9 +11,9 @@ extern spinlock_t imx_ccm_lock;
24205  void imx_check_clocks(struct clk *clks[], unsigned int count);
24206  void imx_check_clk_hws(struct clk_hw *clks[], unsigned int count);
24207  #ifndef MODULE
24208 -void imx_register_uart_clocks(struct clk ** const clks[]);
24209 +void imx_register_uart_clocks(unsigned int clk_count);
24210  #else
24211 -static inline void imx_register_uart_clocks(struct clk ** const clks[])
24212 +static inline void imx_register_uart_clocks(unsigned int clk_count)
24215  #endif
24216 diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
24217 index f5746f9ea929..32ac6b6b7530 100644
24218 --- a/drivers/clk/mvebu/armada-37xx-periph.c
24219 +++ b/drivers/clk/mvebu/armada-37xx-periph.c
24220 @@ -84,6 +84,7 @@ struct clk_pm_cpu {
24221         void __iomem *reg_div;
24222         u8 shift_div;
24223         struct regmap *nb_pm_base;
24224 +       unsigned long l1_expiration;
24225  };
24227  #define to_clk_double_div(_hw) container_of(_hw, struct clk_double_div, hw)
24228 @@ -440,33 +441,6 @@ static u8 clk_pm_cpu_get_parent(struct clk_hw *hw)
24229         return val;
24232 -static int clk_pm_cpu_set_parent(struct clk_hw *hw, u8 index)
24234 -       struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
24235 -       struct regmap *base = pm_cpu->nb_pm_base;
24236 -       int load_level;
24238 -       /*
24239 -        * We set the clock parent only if the DVFS is available but
24240 -        * not enabled.
24241 -        */
24242 -       if (IS_ERR(base) || armada_3700_pm_dvfs_is_enabled(base))
24243 -               return -EINVAL;
24245 -       /* Set the parent clock for all the load level */
24246 -       for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
24247 -               unsigned int reg, mask,  val,
24248 -                       offset = ARMADA_37XX_NB_TBG_SEL_OFF;
24250 -               armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);
24252 -               val = index << offset;
24253 -               mask = ARMADA_37XX_NB_TBG_SEL_MASK << offset;
24254 -               regmap_update_bits(base, reg, mask, val);
24255 -       }
24256 -       return 0;
24259  static unsigned long clk_pm_cpu_recalc_rate(struct clk_hw *hw,
24260                                             unsigned long parent_rate)
24262 @@ -514,8 +488,10 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
24265  /*
24266 - * Switching the CPU from the L2 or L3 frequencies (300 and 200 Mhz
24267 - * respectively) to L0 frequency (1.2 Ghz) requires a significant
24268 + * Workaround when base CPU frequency is 1000 or 1200 MHz
24269 + *
24270 + * Switching the CPU from the L2 or L3 frequencies (250/300 or 200 MHz
24271 + * respectively) to L0 frequency (1/1.2 GHz) requires a significant
24272   * amount of time to let VDD stabilize to the appropriate
24273   * voltage. This amount of time is large enough that it cannot be
24274   * covered by the hardware countdown register. Due to this, the CPU
24275 @@ -525,26 +501,56 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
24276   * To work around this problem, we prevent switching directly from the
24277   * L2/L3 frequencies to the L0 frequency, and instead switch to the L1
24278   * frequency in-between. The sequence therefore becomes:
24279 - * 1. First switch from L2/L3(200/300MHz) to L1(600MHZ)
24280 + * 1. First switch from L2/L3 (200/250/300 MHz) to L1 (500/600 MHz)
24281   * 2. Sleep 20ms for stabling VDD voltage
24282 - * 3. Then switch from L1(600MHZ) to L0(1200Mhz).
24283 + * 3. Then switch from L1 (500/600 MHz) to L0 (1000/1200 MHz).
24284   */
24285 -static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base)
24286 +static void clk_pm_cpu_set_rate_wa(struct clk_pm_cpu *pm_cpu,
24287 +                                  unsigned int new_level, unsigned long rate,
24288 +                                  struct regmap *base)
24290         unsigned int cur_level;
24292 -       if (rate != 1200 * 1000 * 1000)
24293 -               return;
24295         regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
24296         cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
24297 -       if (cur_level <= ARMADA_37XX_DVFS_LOAD_1)
24299 +       if (cur_level == new_level)
24300 +               return;
24302 +       /*
24303 +        * System wants to go to L1 on its own. If we are going from L2/L3,
24304 +        * remember when the 20 ms settling time will expire. If from L0, set
24305 +        * the value so that the next switch to L0 won't have to wait.
24306 +        */
24307 +       if (new_level == ARMADA_37XX_DVFS_LOAD_1) {
24308 +               if (cur_level == ARMADA_37XX_DVFS_LOAD_0)
24309 +                       pm_cpu->l1_expiration = jiffies;
24310 +               else
24311 +                       pm_cpu->l1_expiration = jiffies + msecs_to_jiffies(20);
24312                 return;
24313 +       }
24315 +       /*
24316 +        * If we are switching to L2/L3, just invalidate the L1 expiration
24317 +        * time; sleeping is not needed.
24318 +        */
24319 +       if (rate < 1000*1000*1000)
24320 +               goto invalidate_l1_exp;
24322 +       /*
24323 +        * We are going to L0 with a rate >= 1 GHz. Check whether we have been
24324 +        * at L1 long enough. If not, go to L1 for 20 ms.
24325 +        */
24326 +       if (pm_cpu->l1_expiration && jiffies >= pm_cpu->l1_expiration)
24327 +               goto invalidate_l1_exp;
24329         regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
24330                            ARMADA_37XX_NB_CPU_LOAD_MASK,
24331                            ARMADA_37XX_DVFS_LOAD_1);
24332         msleep(20);
24334 +invalidate_l1_exp:
24335 +       pm_cpu->l1_expiration = 0;
24338  static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
24339 @@ -578,7 +584,9 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
24340                         reg = ARMADA_37XX_NB_CPU_LOAD;
24341                         mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
24343 -                       clk_pm_cpu_set_rate_wa(rate, base);
24344 +                       /* Apply workaround when base CPU frequency is 1000 or 1200 MHz */
24345 +                       if (parent_rate >= 1000*1000*1000)
24346 +                               clk_pm_cpu_set_rate_wa(pm_cpu, load_level, rate, base);
24348                         regmap_update_bits(base, reg, mask, load_level);
24350 @@ -592,7 +600,6 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
24352  static const struct clk_ops clk_pm_cpu_ops = {
24353         .get_parent = clk_pm_cpu_get_parent,
24354 -       .set_parent = clk_pm_cpu_set_parent,
24355         .round_rate = clk_pm_cpu_round_rate,
24356         .set_rate = clk_pm_cpu_set_rate,
24357         .recalc_rate = clk_pm_cpu_recalc_rate,
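
The l1_expiration field turns the old unconditional 20 ms sleep into a deadline check: entering L1 from L2/L3 stamps now + 20 ms, entering it from L0 stamps now (already stable), and a later switch to L0 only detours through L1 when that deadline has not yet passed. A millisecond-based sketch of the bookkeeping, with plain integers standing in for jiffies:

    #include <stdio.h>

    static unsigned long now_ms;          /* stand-in for jiffies */
    static unsigned long l1_expiration;   /* 0 means "must sleep before L0" */

    static void enter_l1(int coming_from_l0)
    {
            /* From L0, VDD is already settled; from L2/L3, wait 20 ms. */
            l1_expiration = coming_from_l0 ? now_ms : now_ms + 20;
    }

    static void enter_l0(void)
    {
            if (!(l1_expiration && now_ms >= l1_expiration)) {
                    printf("t=%lums: detour via L1, sleep 20ms\n", now_ms);
                    now_ms += 20;
            }
            l1_expiration = 0;            /* invalidate after use */
            printf("t=%lums: now at L0\n", now_ms);
    }

    int main(void)
    {
            enter_l1(0);     /* L2 -> L1 at t=0, stable at t=20 */
            now_ms = 25;
            enter_l0();      /* deadline passed: no extra sleep */
            return 0;
    }
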
24358 diff --git a/drivers/clk/qcom/a53-pll.c b/drivers/clk/qcom/a53-pll.c
24359 index 45cfc57bff92..af6ac17c7dae 100644
24360 --- a/drivers/clk/qcom/a53-pll.c
24361 +++ b/drivers/clk/qcom/a53-pll.c
24362 @@ -93,6 +93,7 @@ static const struct of_device_id qcom_a53pll_match_table[] = {
24363         { .compatible = "qcom,msm8916-a53pll" },
24364         { }
24365  };
24366 +MODULE_DEVICE_TABLE(of, qcom_a53pll_match_table);
24368  static struct platform_driver qcom_a53pll_driver = {
24369         .probe = qcom_a53pll_probe,
24370 diff --git a/drivers/clk/qcom/a7-pll.c b/drivers/clk/qcom/a7-pll.c
24371 index e171d3caf2cf..c4a53e5db229 100644
24372 --- a/drivers/clk/qcom/a7-pll.c
24373 +++ b/drivers/clk/qcom/a7-pll.c
24374 @@ -86,6 +86,7 @@ static const struct of_device_id qcom_a7pll_match_table[] = {
24375         { .compatible = "qcom,sdx55-a7pll" },
24376         { }
24377  };
24378 +MODULE_DEVICE_TABLE(of, qcom_a7pll_match_table);
24380  static struct platform_driver qcom_a7pll_driver = {
24381         .probe = qcom_a7pll_probe,
24382 diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
24383 index 30be87fb222a..bef7899ad0d6 100644
24384 --- a/drivers/clk/qcom/apss-ipq-pll.c
24385 +++ b/drivers/clk/qcom/apss-ipq-pll.c
24386 @@ -81,6 +81,7 @@ static const struct of_device_id apss_ipq_pll_match_table[] = {
24387         { .compatible = "qcom,ipq6018-a53pll" },
24388         { }
24389  };
24390 +MODULE_DEVICE_TABLE(of, apss_ipq_pll_match_table);
24392  static struct platform_driver apss_ipq_pll_driver = {
24393         .probe = apss_ipq_pll_probe,
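
All three Qualcomm PLL hunks above are the same one-liner: without MODULE_DEVICE_TABLE(of, ...) the match table never reaches the module's alias information, so the driver cannot be autoloaded when built as a module. A minimal sketch of the idiom, using a made-up compatible string:

    #include <linux/module.h>
    #include <linux/of.h>
    #include <linux/platform_device.h>

    /* "vendor,example-pll" is a made-up compatible, for illustration only. */
    static const struct of_device_id example_pll_match_table[] = {
            { .compatible = "vendor,example-pll" },
            { }
    };
    /* Exports the table into modinfo so userspace can autoload the module. */
    MODULE_DEVICE_TABLE(of, example_pll_match_table);

    static int example_pll_probe(struct platform_device *pdev)
    {
            return 0;
    }

    static struct platform_driver example_pll_driver = {
            .probe = example_pll_probe,
            .driver = {
                    .name = "example-pll",
                    .of_match_table = example_pll_match_table,
            },
    };
    module_platform_driver(example_pll_driver);
    MODULE_LICENSE("GPL");
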
24394 diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
24395 index 87ee1bad9a9a..4a5d2a914bd6 100644
24396 --- a/drivers/clk/samsung/clk-exynos7.c
24397 +++ b/drivers/clk/samsung/clk-exynos7.c
24398 @@ -537,8 +537,13 @@ static const struct samsung_gate_clock top1_gate_clks[] __initconst = {
24399         GATE(CLK_ACLK_FSYS0_200, "aclk_fsys0_200", "dout_aclk_fsys0_200",
24400                 ENABLE_ACLK_TOP13, 28, CLK_SET_RATE_PARENT |
24401                 CLK_IS_CRITICAL, 0),
24402 +       /*
24403 +        * This clock is required for access to the CMU_FSYS1 registers; keep
24404 +        * it enabled permanently until proper runtime PM support is added.
24405 +        */
24406         GATE(CLK_ACLK_FSYS1_200, "aclk_fsys1_200", "dout_aclk_fsys1_200",
24407 -               ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT, 0),
24408 +               ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT |
24409 +               CLK_IS_CRITICAL, 0),
24411         GATE(CLK_SCLK_PHY_FSYS1_26M, "sclk_phy_fsys1_26m",
24412                 "dout_sclk_phy_fsys1_26m", ENABLE_SCLK_TOP1_FSYS11,
24413 diff --git a/drivers/clk/socfpga/clk-gate-a10.c b/drivers/clk/socfpga/clk-gate-a10.c
24414 index cd5df9103614..d62778884208 100644
24415 --- a/drivers/clk/socfpga/clk-gate-a10.c
24416 +++ b/drivers/clk/socfpga/clk-gate-a10.c
24417 @@ -146,6 +146,7 @@ static void __init __socfpga_gate_init(struct device_node *node,
24418                 if (IS_ERR(socfpga_clk->sys_mgr_base_addr)) {
24419                         pr_err("%s: failed to find altr,sys-mgr regmap!\n",
24420                                         __func__);
24421 +                       kfree(socfpga_clk);
24422                         return;
24423                 }
24424         }
24425 diff --git a/drivers/clk/uniphier/clk-uniphier-mux.c b/drivers/clk/uniphier/clk-uniphier-mux.c
24426 index 462c84321b2d..1998e9d4cfc0 100644
24427 --- a/drivers/clk/uniphier/clk-uniphier-mux.c
24428 +++ b/drivers/clk/uniphier/clk-uniphier-mux.c
24429 @@ -31,10 +31,10 @@ static int uniphier_clk_mux_set_parent(struct clk_hw *hw, u8 index)
24430  static u8 uniphier_clk_mux_get_parent(struct clk_hw *hw)
24432         struct uniphier_clk_mux *mux = to_uniphier_clk_mux(hw);
24433 -       int num_parents = clk_hw_get_num_parents(hw);
24434 +       unsigned int num_parents = clk_hw_get_num_parents(hw);
24435         int ret;
24436         unsigned int val;
24437 -       u8 i;
24438 +       unsigned int i;
24440         ret = regmap_read(mux->regmap, mux->reg, &val);
24441         if (ret)
24442 diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c
24443 index 92f449ed38e5..abe6afbf3407 100644
24444 --- a/drivers/clk/zynqmp/pll.c
24445 +++ b/drivers/clk/zynqmp/pll.c
24446 @@ -14,10 +14,12 @@
24447   * struct zynqmp_pll - PLL clock
24448   * @hw:                Handle between common and hardware-specific interfaces
24449   * @clk_id:    PLL clock ID
24450 + * @set_pll_mode:      Whether an IOCTL_SET_PLL_FRAC_MODE request has been sent to ATF
24451   */
24452  struct zynqmp_pll {
24453         struct clk_hw hw;
24454         u32 clk_id;
24455 +       bool set_pll_mode;
24456  };
24458  #define to_zynqmp_pll(_hw)     container_of(_hw, struct zynqmp_pll, hw)
24459 @@ -81,6 +83,8 @@ static inline void zynqmp_pll_set_mode(struct clk_hw *hw, bool on)
24460         if (ret)
24461                 pr_warn_once("%s() PLL set frac mode failed for %s, ret = %d\n",
24462                              __func__, clk_name, ret);
24463 +       else
24464 +               clk->set_pll_mode = true;
24467  /**
24468 @@ -100,9 +104,7 @@ static long zynqmp_pll_round_rate(struct clk_hw *hw, unsigned long rate,
24469         /* Enable the fractional mode if needed */
24470         rate_div = (rate * FRAC_DIV) / *prate;
24471         f = rate_div % FRAC_DIV;
24472 -       zynqmp_pll_set_mode(hw, !!f);
24474 -       if (zynqmp_pll_get_mode(hw) == PLL_MODE_FRAC) {
24475 +       if (f) {
24476                 if (rate > PS_PLL_VCO_MAX) {
24477                         fbdiv = rate / PS_PLL_VCO_MAX;
24478                         rate = rate / (fbdiv + 1);
24479 @@ -173,10 +175,12 @@ static int zynqmp_pll_set_rate(struct clk_hw *hw, unsigned long rate,
24480         long rate_div, frac, m, f;
24481         int ret;
24483 -       if (zynqmp_pll_get_mode(hw) == PLL_MODE_FRAC) {
24484 -               rate_div = (rate * FRAC_DIV) / parent_rate;
24485 +       rate_div = (rate * FRAC_DIV) / parent_rate;
24486 +       f = rate_div % FRAC_DIV;
24487 +       zynqmp_pll_set_mode(hw, !!f);
24489 +       if (f) {
24490                 m = rate_div / FRAC_DIV;
24491 -               f = rate_div % FRAC_DIV;
24492                 m = clamp_t(u32, m, (PLL_FBDIV_MIN), (PLL_FBDIV_MAX));
24493                 rate = parent_rate * m;
24494                 frac = (parent_rate * f) / FRAC_DIV;
24495 @@ -240,9 +244,15 @@ static int zynqmp_pll_enable(struct clk_hw *hw)
24496         u32 clk_id = clk->clk_id;
24497         int ret;
24499 -       if (zynqmp_pll_is_enabled(hw))
24500 +       /*
24501 +        * Don't skip enabling the clock if an IOCTL_SET_PLL_FRAC_MODE request
24502 +        * has been sent to ATF.
24503 +        */
24504 +       if (zynqmp_pll_is_enabled(hw) && (!clk->set_pll_mode))
24505                 return 0;
24507 +       clk->set_pll_mode = false;
24509         ret = zynqmp_pm_clock_enable(clk_id);
24510         if (ret)
24511                 pr_warn_once("%s() clock enable failed for %s, ret = %d\n",
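
The restructured round_rate/set_rate logic hinges on one fixed-point computation: rate_div = rate * FRAC_DIV / parent_rate, whose integer part becomes the feedback divider and whose remainder, if non-zero, forces fractional mode. A worked sketch, assuming FRAC_DIV is 2^16 as in the mainline ZynqMP PLL code:

    #include <stdio.h>

    #define FRAC_DIV (1UL << 16)   /* assumed from mainline zynqmp pll code */

    int main(void)
    {
            unsigned long parent_rate = 100000000;  /* 100 MHz, illustrative */
            unsigned long rate = 1250000000;        /* requested VCO rate */

            unsigned long rate_div =
                    (unsigned long)(((unsigned long long)rate * FRAC_DIV) /
                                    parent_rate);
            unsigned long m = rate_div / FRAC_DIV;  /* integer feedback divider */
            unsigned long f = rate_div % FRAC_DIV;  /* fractional remainder */

            /* Here the ratio is 12.5, so m=12, f=32768 -> fractional mode. */
            printf("m=%lu f=%lu -> %s mode\n", m, f,
                   f ? "fractional" : "integer");
            return 0;
    }
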
24512 diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
24513 index 42e7e43b8fcd..b1e2b697b21b 100644
24514 --- a/drivers/clocksource/dw_apb_timer_of.c
24515 +++ b/drivers/clocksource/dw_apb_timer_of.c
24516 @@ -52,18 +52,34 @@ static int __init timer_get_base_and_rate(struct device_node *np,
24517                 return 0;
24519         timer_clk = of_clk_get_by_name(np, "timer");
24520 -       if (IS_ERR(timer_clk))
24521 -               return PTR_ERR(timer_clk);
24522 +       if (IS_ERR(timer_clk)) {
24523 +               ret = PTR_ERR(timer_clk);
24524 +               goto out_pclk_disable;
24525 +       }
24527         ret = clk_prepare_enable(timer_clk);
24528         if (ret)
24529 -               return ret;
24530 +               goto out_timer_clk_put;
24532         *rate = clk_get_rate(timer_clk);
24533 -       if (!(*rate))
24534 -               return -EINVAL;
24535 +       if (!(*rate)) {
24536 +               ret = -EINVAL;
24537 +               goto out_timer_clk_disable;
24538 +       }
24540         return 0;
24542 +out_timer_clk_disable:
24543 +       clk_disable_unprepare(timer_clk);
24544 +out_timer_clk_put:
24545 +       clk_put(timer_clk);
24546 +out_pclk_disable:
24547 +       if (!IS_ERR(pclk)) {
24548 +               clk_disable_unprepare(pclk);
24549 +               clk_put(pclk);
24550 +       }
24551 +       iounmap(*base);
24552 +       return ret;
24555  static int __init add_clockevent(struct device_node *event_timer)
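
The timer_get_base_and_rate() fix converts bare early returns into the usual goto-unwind ladder, so the pclk, the timer clock, and the mapping are released in reverse order of acquisition on failure. A generic sketch of the ladder with hypothetical acquire/release steps:

    #include <stdio.h>

    /* Hypothetical steps standing in for clk_get/clk_prepare_enable etc. */
    static int acquire_a(void) { return 0; }
    static void release_a(void) { puts("release a"); }
    static int acquire_b(void) { return -1; }  /* pretend this step fails */

    static int setup(void)
    {
            int ret;

            ret = acquire_a();
            if (ret)
                    return ret;

            ret = acquire_b();
            if (ret)
                    goto out_release_a;     /* unwind in reverse order */

            return 0;

    out_release_a:
            release_a();
            return ret;
    }

    int main(void)
    {
            printf("setup -> %d\n", setup());
            return 0;
    }
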
24556 diff --git a/drivers/clocksource/ingenic-ost.c b/drivers/clocksource/ingenic-ost.c
24557 index 029efc2731b4..6af2470136bd 100644
24558 --- a/drivers/clocksource/ingenic-ost.c
24559 +++ b/drivers/clocksource/ingenic-ost.c
24560 @@ -88,9 +88,9 @@ static int __init ingenic_ost_probe(struct platform_device *pdev)
24561                 return PTR_ERR(ost->regs);
24563         map = device_node_to_regmap(dev->parent->of_node);
24564 -       if (!map) {
24565 +       if (IS_ERR(map)) {
24566                 dev_err(dev, "regmap not found");
24567 -               return -EINVAL;
24568 +               return PTR_ERR(map);
24569         }
24571         ost->clk = devm_clk_get(dev, "ost");
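
The ingenic-ost fix matters because device_node_to_regmap() reports failure through an ERR_PTR-encoded pointer, never NULL, so the old !map test could not fire. A sketch of the ERR_PTR convention with simplified stand-ins for the kernel macros:

    #include <stdio.h>

    /* Simplified versions of the kernel's ERR_PTR helpers, for illustration. */
    #define MAX_ERRNO 4095
    static void *ERR_PTR(long err) { return (void *)err; }
    static long PTR_ERR(const void *p) { return (long)p; }
    static int IS_ERR(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static void *get_regmap(int fail)
    {
            static int dummy;
            return fail ? ERR_PTR(-22 /* -EINVAL */) : &dummy;
    }

    int main(void)
    {
            void *map = get_regmap(1);

            if (IS_ERR(map))                 /* a NULL check would miss this */
                    printf("error %ld\n", PTR_ERR(map));
            return 0;
    }
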
24572 diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
24573 index 33b3e8aa2cc5..b6f97960d8ee 100644
24574 --- a/drivers/clocksource/timer-ti-dm-systimer.c
24575 +++ b/drivers/clocksource/timer-ti-dm-systimer.c
24576 @@ -2,6 +2,7 @@
24577  #include <linux/clk.h>
24578  #include <linux/clocksource.h>
24579  #include <linux/clockchips.h>
24580 +#include <linux/cpuhotplug.h>
24581  #include <linux/interrupt.h>
24582  #include <linux/io.h>
24583  #include <linux/iopoll.h>
24584 @@ -449,13 +450,13 @@ static int dmtimer_set_next_event(unsigned long cycles,
24585         struct dmtimer_systimer *t = &clkevt->t;
24586         void __iomem *pend = t->base + t->pend;
24588 -       writel_relaxed(0xffffffff - cycles, t->base + t->counter);
24589         while (readl_relaxed(pend) & WP_TCRR)
24590                 cpu_relax();
24591 +       writel_relaxed(0xffffffff - cycles, t->base + t->counter);
24593 -       writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
24594         while (readl_relaxed(pend) & WP_TCLR)
24595                 cpu_relax();
24596 +       writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
24598         return 0;
24600 @@ -490,18 +491,18 @@ static int dmtimer_set_periodic(struct clock_event_device *evt)
24601         dmtimer_clockevent_shutdown(evt);
24603         /* Looks like we need to first set the load value separately */
24604 -       writel_relaxed(clkevt->period, t->base + t->load);
24605         while (readl_relaxed(pend) & WP_TLDR)
24606                 cpu_relax();
24607 +       writel_relaxed(clkevt->period, t->base + t->load);
24609 -       writel_relaxed(clkevt->period, t->base + t->counter);
24610         while (readl_relaxed(pend) & WP_TCRR)
24611                 cpu_relax();
24612 +       writel_relaxed(clkevt->period, t->base + t->counter);
24614 -       writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
24615 -                      t->base + t->ctrl);
24616         while (readl_relaxed(pend) & WP_TCLR)
24617                 cpu_relax();
24618 +       writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
24619 +                      t->base + t->ctrl);
24621         return 0;
24623 @@ -530,17 +531,17 @@ static void omap_clockevent_unidle(struct clock_event_device *evt)
24624         writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
24627 -static int __init dmtimer_clockevent_init(struct device_node *np)
24628 +static int __init dmtimer_clkevt_init_common(struct dmtimer_clockevent *clkevt,
24629 +                                            struct device_node *np,
24630 +                                            unsigned int features,
24631 +                                            const struct cpumask *cpumask,
24632 +                                            const char *name,
24633 +                                            int rating)
24635 -       struct dmtimer_clockevent *clkevt;
24636         struct clock_event_device *dev;
24637         struct dmtimer_systimer *t;
24638         int error;
24640 -       clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
24641 -       if (!clkevt)
24642 -               return -ENOMEM;
24644         t = &clkevt->t;
24645         dev = &clkevt->dev;
24647 @@ -548,24 +549,23 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
24648          * We mostly use cpuidle_coupled with ARM local timers for runtime,
24649          * so there's probably no use for CLOCK_EVT_FEAT_DYNIRQ here.
24650          */
24651 -       dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
24652 -       dev->rating = 300;
24653 +       dev->features = features;
24654 +       dev->rating = rating;
24655         dev->set_next_event = dmtimer_set_next_event;
24656         dev->set_state_shutdown = dmtimer_clockevent_shutdown;
24657         dev->set_state_periodic = dmtimer_set_periodic;
24658         dev->set_state_oneshot = dmtimer_clockevent_shutdown;
24659 +       dev->set_state_oneshot_stopped = dmtimer_clockevent_shutdown;
24660         dev->tick_resume = dmtimer_clockevent_shutdown;
24661 -       dev->cpumask = cpu_possible_mask;
24662 +       dev->cpumask = cpumask;
24664         dev->irq = irq_of_parse_and_map(np, 0);
24665 -       if (!dev->irq) {
24666 -               error = -ENXIO;
24667 -               goto err_out_free;
24668 -       }
24669 +       if (!dev->irq)
24670 +               return -ENXIO;
24672         error = dmtimer_systimer_setup(np, &clkevt->t);
24673         if (error)
24674 -               goto err_out_free;
24675 +               return error;
24677         clkevt->period = 0xffffffff - DIV_ROUND_CLOSEST(t->rate, HZ);
24679 @@ -577,38 +577,132 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
24680         writel_relaxed(OMAP_TIMER_CTRL_POSTED, t->base + t->ifctrl);
24682         error = request_irq(dev->irq, dmtimer_clockevent_interrupt,
24683 -                           IRQF_TIMER, "clockevent", clkevt);
24684 +                           IRQF_TIMER, name, clkevt);
24685         if (error)
24686                 goto err_out_unmap;
24688         writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
24689         writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
24691 -       pr_info("TI gptimer clockevent: %s%lu Hz at %pOF\n",
24692 -               of_find_property(np, "ti,timer-alwon", NULL) ?
24693 +       pr_info("TI gptimer %s: %s%lu Hz at %pOF\n",
24694 +               name, of_find_property(np, "ti,timer-alwon", NULL) ?
24695                 "always-on " : "", t->rate, np->parent);
24697 -       clockevents_config_and_register(dev, t->rate,
24698 -                                       3, /* Timer internal resynch latency */
24699 +       return 0;
24701 +err_out_unmap:
24702 +       iounmap(t->base);
24704 +       return error;
24707 +static int __init dmtimer_clockevent_init(struct device_node *np)
24709 +       struct dmtimer_clockevent *clkevt;
24710 +       int error;
24712 +       clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
24713 +       if (!clkevt)
24714 +               return -ENOMEM;
24716 +       error = dmtimer_clkevt_init_common(clkevt, np,
24717 +                                          CLOCK_EVT_FEAT_PERIODIC |
24718 +                                          CLOCK_EVT_FEAT_ONESHOT,
24719 +                                          cpu_possible_mask, "clockevent",
24720 +                                          300);
24721 +       if (error)
24722 +               goto err_out_free;
24724 +       clockevents_config_and_register(&clkevt->dev, clkevt->t.rate,
24725 +                                       3, /* Timer internal resync latency */
24726                                         0xffffffff);
24728         if (of_machine_is_compatible("ti,am33xx") ||
24729             of_machine_is_compatible("ti,am43")) {
24730 -               dev->suspend = omap_clockevent_idle;
24731 -               dev->resume = omap_clockevent_unidle;
24732 +               clkevt->dev.suspend = omap_clockevent_idle;
24733 +               clkevt->dev.resume = omap_clockevent_unidle;
24734         }
24736         return 0;
24738 -err_out_unmap:
24739 -       iounmap(t->base);
24741  err_out_free:
24742         kfree(clkevt);
24744         return error;
24747 +/* Dmtimer as percpu timer. See dra7 ARM architected timer wrap erratum i940 */
24748 +static DEFINE_PER_CPU(struct dmtimer_clockevent, dmtimer_percpu_timer);
24750 +static int __init dmtimer_percpu_timer_init(struct device_node *np, int cpu)
24752 +       struct dmtimer_clockevent *clkevt;
24753 +       int error;
24755 +       if (!cpu_possible(cpu))
24756 +               return -EINVAL;
24758 +       if (!of_property_read_bool(np->parent, "ti,no-reset-on-init") ||
24759 +           !of_property_read_bool(np->parent, "ti,no-idle"))
24760 +               pr_warn("Incomplete dtb for percpu dmtimer %pOF\n", np->parent);
24762 +       clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
24764 +       error = dmtimer_clkevt_init_common(clkevt, np, CLOCK_EVT_FEAT_ONESHOT,
24765 +                                          cpumask_of(cpu), "percpu-dmtimer",
24766 +                                          500);
24767 +       if (error)
24768 +               return error;
24770 +       return 0;
24773 +/* See TRM for timer internal resync latency */
24774 +static int omap_dmtimer_starting_cpu(unsigned int cpu)
24776 +       struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
24777 +       struct clock_event_device *dev = &clkevt->dev;
24778 +       struct dmtimer_systimer *t = &clkevt->t;
24780 +       clockevents_config_and_register(dev, t->rate, 3, ULONG_MAX);
24781 +       irq_force_affinity(dev->irq, cpumask_of(cpu));
24783 +       return 0;
24786 +static int __init dmtimer_percpu_timer_startup(void)
24788 +       struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, 0);
24789 +       struct dmtimer_systimer *t = &clkevt->t;
24791 +       if (t->sysc) {
24792 +               cpuhp_setup_state(CPUHP_AP_TI_GP_TIMER_STARTING,
24793 +                                 "clockevents/omap/gptimer:starting",
24794 +                                 omap_dmtimer_starting_cpu, NULL);
24795 +       }
24797 +       return 0;
24799 +subsys_initcall(dmtimer_percpu_timer_startup);
24801 +static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa)
24803 +       struct device_node *arm_timer;
24805 +       arm_timer = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
24806 +       if (of_device_is_available(arm_timer)) {
24807 +               pr_warn_once("ARM architected timer wrap issue i940 detected\n");
24808 +               return 0;
24809 +       }
24811 +       if (pa == 0x48034000)           /* dra7 dmtimer3 */
24812 +               return dmtimer_percpu_timer_init(np, 0);
24813 +       else if (pa == 0x48036000)      /* dra7 dmtimer4 */
24814 +               return dmtimer_percpu_timer_init(np, 1);
24816 +       return 0;
24819  /* Clocksource */
24820  static struct dmtimer_clocksource *
24821  to_dmtimer_clocksource(struct clocksource *cs)
24822 @@ -742,6 +836,9 @@ static int __init dmtimer_systimer_init(struct device_node *np)
24823         if (clockevent == pa)
24824                 return dmtimer_clockevent_init(np);
24826 +       if (of_machine_is_compatible("ti,dra7"))
24827 +               return dmtimer_percpu_quirk_init(np, pa);
24829         return 0;
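
The per-CPU branch above registers nothing at probe time; clockevents_config_and_register() runs from the CPU hotplug "starting" callback so each dmtimer is configured on the CPU that owns it, with the IRQ affinity forced to match. A minimal sketch of that registration pattern in kernel C, assuming a dynamic hotplug state, an assumed 32768 Hz rate, and a device whose callbacks and irq were filled in beforehand as in the hunks above:

#include <linux/cpuhotplug.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct clock_event_device, demo_evt);

static int demo_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *dev = per_cpu_ptr(&demo_evt, cpu);

	/* Runs on the CPU coming online: bind and register it there */
	dev->cpumask = cpumask_of(cpu);
	clockevents_config_and_register(dev, 32768, 3, ULONG_MAX);
	irq_force_affinity(dev->irq, cpumask_of(cpu));

	return 0;
}

static int __init demo_init(void)
{
	int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				    "clockevents/demo:starting",
				    demo_starting_cpu, NULL);

	return ret < 0 ? ret : 0;	/* dynamic states return a state id */
}
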
24832 diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
24833 index d1bbc16fba4b..7e7450453714 100644
24834 --- a/drivers/cpufreq/acpi-cpufreq.c
24835 +++ b/drivers/cpufreq/acpi-cpufreq.c
24836 @@ -646,7 +646,11 @@ static u64 get_max_boost_ratio(unsigned int cpu)
24837                 return 0;
24838         }
24840 -       highest_perf = perf_caps.highest_perf;
24841 +       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
24842 +               highest_perf = amd_get_highest_perf();
24843 +       else
24844 +               highest_perf = perf_caps.highest_perf;
24846         nominal_perf = perf_caps.nominal_perf;
24848         if (!highest_perf || !nominal_perf) {
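
get_max_boost_ratio() feeds scheduler frequency invariance, and on AMD the CPPC highest_perf field can be skewed by preferred-core ranking, hence the switch to amd_get_highest_perf(). The caller derives a fixed-point boost ratio from the two perf values; a hedged sketch of that arithmetic, the 10-bit shift being an assumption borrowed from SCHED_CAPACITY_SHIFT in mainline:

#include <linux/math64.h>

static u64 demo_boost_ratio(u64 highest_perf, u64 nominal_perf)
{
	/* fixed-point: 1024 means "no boost headroom" */
	return div_u64(highest_perf << 10, nominal_perf);
}

/* e.g. highest_perf = 166, nominal_perf = 137:
 * (166 << 10) / 137 ~= 1240, i.e. about 1.21x boost headroom. */
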
24849 diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
24850 index b4af4094309b..e4782f562e7a 100644
24851 --- a/drivers/cpufreq/armada-37xx-cpufreq.c
24852 +++ b/drivers/cpufreq/armada-37xx-cpufreq.c
24853 @@ -25,6 +25,10 @@
24855  #include "cpufreq-dt.h"
24857 +/* Clk register set */
24858 +#define ARMADA_37XX_CLK_TBG_SEL                0
24859 +#define ARMADA_37XX_CLK_TBG_SEL_CPU_OFF        22
24861  /* Power management in North Bridge register set */
24862  #define ARMADA_37XX_NB_L0L1    0x18
24863  #define ARMADA_37XX_NB_L2L3    0x1C
24864 @@ -69,6 +73,8 @@
24865  #define LOAD_LEVEL_NR  4
24867  #define MIN_VOLT_MV 1000
24868 +#define MIN_VOLT_MV_FOR_L1_1000MHZ 1108
24869 +#define MIN_VOLT_MV_FOR_L1_1200MHZ 1155
24871  /*  AVS value for the corresponding voltage (in mV) */
24872  static int avs_map[] = {
24873 @@ -120,10 +126,15 @@ static struct armada_37xx_dvfs *armada_37xx_cpu_freq_info_get(u32 freq)
24874   * will be configured then the DVFS will be enabled.
24875   */
24876  static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
24877 -                                                struct clk *clk, u8 *divider)
24878 +                                                struct regmap *clk_base, u8 *divider)
24880 +       u32 cpu_tbg_sel;
24881         int load_lvl;
24882 -       struct clk *parent;
24884 +       /* Determine which TBG clock the CPU is connected to */
24885 +       regmap_read(clk_base, ARMADA_37XX_CLK_TBG_SEL, &cpu_tbg_sel);
24886 +       cpu_tbg_sel >>= ARMADA_37XX_CLK_TBG_SEL_CPU_OFF;
24887 +       cpu_tbg_sel &= ARMADA_37XX_NB_TBG_SEL_MASK;
24889         for (load_lvl = 0; load_lvl < LOAD_LEVEL_NR; load_lvl++) {
24890                 unsigned int reg, mask, val, offset = 0;
24891 @@ -142,6 +153,11 @@ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
24892                 mask = (ARMADA_37XX_NB_CLK_SEL_MASK
24893                         << ARMADA_37XX_NB_CLK_SEL_OFF);
24895 +               /* Set the TBG index; for all levels we use the same TBG */
24896 +               val = cpu_tbg_sel << ARMADA_37XX_NB_TBG_SEL_OFF;
24897 +               mask = (ARMADA_37XX_NB_TBG_SEL_MASK
24898 +                       << ARMADA_37XX_NB_TBG_SEL_OFF);
24900                 /*
24901                  * Set cpu divider based on the pre-computed array in
24902                  * order to have balanced step.
24903 @@ -160,14 +176,6 @@ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
24905                 regmap_update_bits(base, reg, mask, val);
24906         }
24908 -       /*
24909 -        * Set cpu clock source, for all the level we keep the same
24910 -        * clock source that the one already configured. For this one
24911 -        * we need to use the clock framework
24912 -        */
24913 -       parent = clk_get_parent(clk);
24914 -       clk_set_parent(clk, parent);
24917  /*
24918 @@ -202,6 +210,8 @@ static u32 armada_37xx_avs_val_match(int target_vm)
24919   * - L2 & L3 voltage should be about 150mv smaller than L0 voltage.
24920   * This function calculates L1 & L2 & L3 AVS values dynamically based
24921   * on L0 voltage and fill all AVS values to the AVS value table.
24922 + * When the base CPU frequency is 1000 or 1200 MHz there is an additional
24923 + * minimum avs value for load L1.
24924   */
24925  static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
24926                                                 struct armada_37xx_dvfs *dvfs)
24927 @@ -233,6 +243,19 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
24928                 for (load_level = 1; load_level < LOAD_LEVEL_NR; load_level++)
24929                         dvfs->avs[load_level] = avs_min;
24931 +               /*
24932 +                * Set the avs values for load L0 and L1 when base CPU frequency
24933 +                * is 1000/1200 MHz to their typical initial values according to
24934 +                * the Armada 3700 Hardware Specifications.
24935 +                */
24936 +               if (dvfs->cpu_freq_max >= 1000*1000*1000) {
24937 +                       if (dvfs->cpu_freq_max >= 1200*1000*1000)
24938 +                               avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ);
24939 +                       else
24940 +                               avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ);
24941 +                       dvfs->avs[0] = dvfs->avs[1] = avs_min;
24942 +               }
24944                 return;
24945         }
24947 @@ -252,6 +275,26 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
24948         target_vm = avs_map[l0_vdd_min] - 150;
24949         target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
24950         dvfs->avs[2] = dvfs->avs[3] = armada_37xx_avs_val_match(target_vm);
24952 +       /*
24953 +        * Fix the avs value for load L1 when base CPU frequency is 1000/1200 MHz,
24954 +        * otherwise the CPU gets stuck when switching from load L1 to load L0.
24955 +        * Also ensure that the avs value for load L1 is not higher than for L0.
24956 +        */
24957 +       if (dvfs->cpu_freq_max >= 1000*1000*1000) {
24958 +               u32 avs_min_l1;
24960 +               if (dvfs->cpu_freq_max >= 1200*1000*1000)
24961 +                       avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ);
24962 +               else
24963 +                       avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ);
24965 +               if (avs_min_l1 > dvfs->avs[0])
24966 +                       avs_min_l1 = dvfs->avs[0];
24968 +               if (dvfs->avs[1] < avs_min_l1)
24969 +                       dvfs->avs[1] = avs_min_l1;
24970 +       }
24973  static void __init armada37xx_cpufreq_avs_setup(struct regmap *base,
24974 @@ -358,11 +401,16 @@ static int __init armada37xx_cpufreq_driver_init(void)
24975         struct platform_device *pdev;
24976         unsigned long freq;
24977         unsigned int cur_frequency, base_frequency;
24978 -       struct regmap *nb_pm_base, *avs_base;
24979 +       struct regmap *nb_clk_base, *nb_pm_base, *avs_base;
24980         struct device *cpu_dev;
24981         int load_lvl, ret;
24982         struct clk *clk, *parent;
24984 +       nb_clk_base =
24985 +               syscon_regmap_lookup_by_compatible("marvell,armada-3700-periph-clock-nb");
24986 +       if (IS_ERR(nb_clk_base))
24987 +               return -ENODEV;
24989         nb_pm_base =
24990                 syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm");
24992 @@ -421,7 +469,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
24993                 return -EINVAL;
24994         }
24996 -       dvfs = armada_37xx_cpu_freq_info_get(cur_frequency);
24997 +       dvfs = armada_37xx_cpu_freq_info_get(base_frequency);
24998         if (!dvfs) {
24999                 clk_put(clk);
25000                 return -EINVAL;
25001 @@ -439,7 +487,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
25002         armada37xx_cpufreq_avs_configure(avs_base, dvfs);
25003         armada37xx_cpufreq_avs_setup(avs_base, dvfs);
25005 -       armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider);
25006 +       armada37xx_cpufreq_dvfs_setup(nb_pm_base, nb_clk_base, dvfs->divider);
25007         clk_put(clk);
25009         for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
25010 @@ -473,7 +521,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
25011  remove_opp:
25012         /* clean-up the already added opp before leaving */
25013         while (load_lvl-- > ARMADA_37XX_DVFS_LOAD_0) {
25014 -               freq = cur_frequency / dvfs->divider[load_lvl];
25015 +               freq = base_frequency / dvfs->divider[load_lvl];
25016                 dev_pm_opp_remove(cpu_dev, freq);
25017         }
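
Two independent armada-37xx fixes end here. First, the DVFS setup now reads which TBG feeds the CPU directly from the North Bridge peripheral clock registers and writes that selector into every load level; the removed clk_set_parent(clk, clk_get_parent(clk)) round-trip no longer had the intended effect. Second, the AVS table gains a voltage floor for load L1 at 1000/1200 MHz base frequencies, clamped so L1 never exceeds L0. A sketch of the selector read, with the register offset and a 2-bit mask treated as assumptions mirroring the defines above:

#include <linux/regmap.h>

static int demo_cpu_tbg_sel(struct regmap *nb_clk_base, u32 *sel)
{
	u32 val;
	int ret;

	ret = regmap_read(nb_clk_base, 0x0, &val);	/* CLK_TBG_SEL */
	if (ret)
		return ret;

	*sel = (val >> 22) & 0x3;	/* CPU selector at bit 22 */

	return 0;
}
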
25019 diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
25020 index aa39ff31ec9f..b3eae5ec17b2 100644
25021 --- a/drivers/cpufreq/cpufreq_conservative.c
25022 +++ b/drivers/cpufreq/cpufreq_conservative.c
25023 @@ -28,8 +28,8 @@ struct cs_dbs_tuners {
25024  };
25026  /* Conservative governor macros */
25027 -#define DEF_FREQUENCY_UP_THRESHOLD             (80)
25028 -#define DEF_FREQUENCY_DOWN_THRESHOLD           (20)
25029 +#define DEF_FREQUENCY_UP_THRESHOLD             (63)
25030 +#define DEF_FREQUENCY_DOWN_THRESHOLD           (26)
25031  #define DEF_FREQUENCY_STEP                     (5)
25032  #define DEF_SAMPLING_DOWN_FACTOR               (1)
25033  #define MAX_SAMPLING_DOWN_FACTOR               (10)
25034 @@ -47,9 +47,9 @@ static inline unsigned int get_freq_step(struct cs_dbs_tuners *cs_tuners,
25037  /*
25038 - * Every sampling_rate, we check, if current idle time is less than 20%
25039 + * Every sampling_rate, we check, if current idle time is less than 37%
25040   * (default), then we try to increase frequency. Every sampling_rate *
25041 - * sampling_down_factor, we check, if current idle time is more than 80%
25042 + * sampling_down_factor, we check, if current idle time is more than 74%
25043   * (default), then we try to decrease frequency
25044   *
25045   * Frequency updates happen at minimum steps of 5% (default) of maximum
25046 diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
25047 index ac361a8b1d3b..611d80122336 100644
25048 --- a/drivers/cpufreq/cpufreq_ondemand.c
25049 +++ b/drivers/cpufreq/cpufreq_ondemand.c
25050 @@ -18,10 +18,10 @@
25051  #include "cpufreq_ondemand.h"
25053  /* On-demand governor macros */
25054 -#define DEF_FREQUENCY_UP_THRESHOLD             (80)
25055 -#define DEF_SAMPLING_DOWN_FACTOR               (1)
25056 +#define DEF_FREQUENCY_UP_THRESHOLD             (63)
25057 +#define DEF_SAMPLING_DOWN_FACTOR               (100)
25058  #define MAX_SAMPLING_DOWN_FACTOR               (100000)
25059 -#define MICRO_FREQUENCY_UP_THRESHOLD           (95)
25060 +#define MICRO_FREQUENCY_UP_THRESHOLD           (70)
25061  #define MICRO_FREQUENCY_MIN_SAMPLE_RATE                (10000)
25062  #define MIN_FREQUENCY_UP_THRESHOLD             (1)
25063  #define MAX_FREQUENCY_UP_THRESHOLD             (100)
25064 @@ -127,7 +127,7 @@ static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
25067  /*
25068 - * Every sampling_rate, we check, if current idle time is less than 20%
25069 + * Every sampling_rate, we check, if current idle time is less than 37%
25070   * (default), then we try to increase frequency. Else, we adjust the frequency
25071   * proportional to load.
25072   */
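
The comment rewrites in both governors are plain arithmetic on the new defaults: raising the frequency when load exceeds the 63% up threshold is the same as "idle time below 100 - 63 = 37%", and conservative's 26% down threshold corresponds to "idle above 100 - 26 = 74%". The checks reduce to:

/* illustrative only; the governors act on load, the comments on idle */
static bool demo_should_raise(unsigned int load)
{
	return load > 63;	/* i.e. idle < 37% */
}

static bool demo_should_lower(unsigned int load)
{
	return load < 26;	/* i.e. idle > 74% */
}
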
25073 diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
25074 index 5175ae3cac44..34196c107de6 100644
25075 --- a/drivers/cpufreq/intel_pstate.c
25076 +++ b/drivers/cpufreq/intel_pstate.c
25077 @@ -3054,6 +3054,14 @@ static const struct x86_cpu_id hwp_support_ids[] __initconst = {
25078         {}
25079  };
25081 +static bool intel_pstate_hwp_is_enabled(void)
25083 +       u64 value;
25085 +       rdmsrl(MSR_PM_ENABLE, value);
25086 +       return !!(value & 0x1);
25089  static int __init intel_pstate_init(void)
25091         const struct x86_cpu_id *id;
25092 @@ -3072,8 +3080,12 @@ static int __init intel_pstate_init(void)
25093                  * Avoid enabling HWP for processors without EPP support,
25094                  * because that means incomplete HWP implementation which is a
25095                  * corner case and supporting it is generally problematic.
25096 +                *
25097 +                * If HWP is enabled already, though, there is no choice but to
25098 +                * deal with it.
25099                  */
25100 -               if (!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) {
25101 +               if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) ||
25102 +                   intel_pstate_hwp_is_enabled()) {
25103                         hwp_active++;
25104                         hwp_mode_bdw = id->driver_data;
25105                         intel_pstate.attr = hwp_cpufreq_attrs;
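
Bit 0 of MSR_PM_ENABLE (IA32_PM_ENABLE) reports whether HWP has been enabled; once set it stays set until reset, so if firmware or an earlier boot stage enabled HWP the driver has no choice but to run in HWP mode even without EPP. The probe the hunk adds is self-contained:

#include <asm/msr.h>

/* same check as the hunk above, shown standalone; assumes the CPU
 * advertises HWP at all */
static bool demo_hwp_enabled(void)
{
	u64 value;

	rdmsrl(MSR_PM_ENABLE, value);

	return value & 0x1;	/* bit 0: HWP enable */
}
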
25106 diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
25107 index 0844fadc4be8..334f83e56120 100644
25108 --- a/drivers/cpuidle/Kconfig.arm
25109 +++ b/drivers/cpuidle/Kconfig.arm
25110 @@ -107,7 +107,7 @@ config ARM_TEGRA_CPUIDLE
25112  config ARM_QCOM_SPM_CPUIDLE
25113         bool "CPU Idle Driver for Qualcomm Subsystem Power Manager (SPM)"
25114 -       depends on (ARCH_QCOM || COMPILE_TEST) && !ARM64
25115 +       depends on (ARCH_QCOM || COMPILE_TEST) && !ARM64 && MMU
25116         select ARM_CPU_SUSPEND
25117         select CPU_IDLE_MULTIPLE_DRIVERS
25118         select DT_IDLE_STATES
25119 diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
25120 index 191966dc8d02..29c5e83500d3 100644
25121 --- a/drivers/cpuidle/cpuidle-tegra.c
25122 +++ b/drivers/cpuidle/cpuidle-tegra.c
25123 @@ -135,13 +135,13 @@ static int tegra_cpuidle_c7_enter(void)
25125         int err;
25127 -       if (tegra_cpuidle_using_firmware()) {
25128 -               err = call_firmware_op(prepare_idle, TF_PM_MODE_LP2_NOFLUSH_L2);
25129 -               if (err)
25130 -                       return err;
25131 +       err = call_firmware_op(prepare_idle, TF_PM_MODE_LP2_NOFLUSH_L2);
25132 +       if (err && err != -ENOSYS)
25133 +               return err;
25135 -               return call_firmware_op(do_idle, 0);
25136 -       }
25137 +       err = call_firmware_op(do_idle, 0);
25138 +       if (err != -ENOSYS)
25139 +               return err;
25141         return cpu_suspend(0, tegra30_pm_secondary_cpu_suspend);
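
The rewritten C7 path folds the old "is firmware present" test into the calls themselves: call_firmware_op() evaluates to -ENOSYS when no such operation is registered, so the code can attempt the secure-firmware route and quietly fall through to the bare-metal suspend. The general shape, with demo_bare_metal() standing in for the cpu_suspend() call above and TF_PM_MODE_* taken from the Tegra firmware headers:

#include <linux/errno.h>
#include <asm/firmware.h>

static int demo_bare_metal(void)
{
	return 0;	/* placeholder for the non-firmware idle path */
}

static int demo_enter_state(void)
{
	int err;

	err = call_firmware_op(prepare_idle, TF_PM_MODE_LP2_NOFLUSH_L2);
	if (err && err != -ENOSYS)
		return err;		/* real firmware failure */

	err = call_firmware_op(do_idle, 0);
	if (err != -ENOSYS)
		return err;		/* firmware handled it, or failed */

	return demo_bare_metal();
}
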
25143 diff --git a/drivers/crypto/allwinner/Kconfig b/drivers/crypto/allwinner/Kconfig
25144 index 856fb2045656..b8e75210a0e3 100644
25145 --- a/drivers/crypto/allwinner/Kconfig
25146 +++ b/drivers/crypto/allwinner/Kconfig
25147 @@ -71,10 +71,10 @@ config CRYPTO_DEV_SUN8I_CE_DEBUG
25148  config CRYPTO_DEV_SUN8I_CE_HASH
25149         bool "Enable support for hash on sun8i-ce"
25150         depends on CRYPTO_DEV_SUN8I_CE
25151 -       select MD5
25152 -       select SHA1
25153 -       select SHA256
25154 -       select SHA512
25155 +       select CRYPTO_MD5
25156 +       select CRYPTO_SHA1
25157 +       select CRYPTO_SHA256
25158 +       select CRYPTO_SHA512
25159         help
25160           Say y to enable support for hash algorithms.
25162 @@ -132,8 +132,8 @@ config CRYPTO_DEV_SUN8I_SS_PRNG
25163  config CRYPTO_DEV_SUN8I_SS_HASH
25164         bool "Enable support for hash on sun8i-ss"
25165         depends on CRYPTO_DEV_SUN8I_SS
25166 -       select MD5
25167 -       select SHA1
25168 -       select SHA256
25169 +       select CRYPTO_MD5
25170 +       select CRYPTO_SHA1
25171 +       select CRYPTO_SHA256
25172         help
25173           Say y to enable support for hash algorithms.
25174 diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
25175 index c2e6f5ed1d79..dec79fa3ebaf 100644
25176 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
25177 +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
25178 @@ -561,7 +561,7 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
25179                                     sizeof(struct sun4i_cipher_req_ctx) +
25180                                     crypto_skcipher_reqsize(op->fallback_tfm));
25182 -       err = pm_runtime_get_sync(op->ss->dev);
25183 +       err = pm_runtime_resume_and_get(op->ss->dev);
25184         if (err < 0)
25185                 goto error_pm;
25187 diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
25188 index 709905ec4680..02a2d34845f2 100644
25189 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
25190 +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
25191 @@ -459,7 +459,7 @@ static int sun4i_ss_probe(struct platform_device *pdev)
25192          * this info could be useful
25193          */
25195 -       err = pm_runtime_get_sync(ss->dev);
25196 +       err = pm_runtime_resume_and_get(ss->dev);
25197         if (err < 0)
25198                 goto error_pm;
25200 diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
25201 index c1b4585e9bbc..d28292762b32 100644
25202 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
25203 +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
25204 @@ -27,7 +27,7 @@ int sun4i_hash_crainit(struct crypto_tfm *tfm)
25205         algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
25206         op->ss = algt->ss;
25208 -       err = pm_runtime_get_sync(op->ss->dev);
25209 +       err = pm_runtime_resume_and_get(op->ss->dev);
25210         if (err < 0)
25211                 return err;
25213 diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
25214 index 443160a114bb..491fcb7b81b4 100644
25215 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
25216 +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
25217 @@ -29,7 +29,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
25218         algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
25219         ss = algt->ss;
25221 -       err = pm_runtime_get_sync(ss->dev);
25222 +       err = pm_runtime_resume_and_get(ss->dev);
25223         if (err < 0)
25224                 return err;
25226 diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
25227 index 158422ff5695..00194d1d9ae6 100644
25228 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
25229 +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
25230 @@ -932,7 +932,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
25231         if (err)
25232                 goto error_alg;
25234 -       err = pm_runtime_get_sync(ce->dev);
25235 +       err = pm_runtime_resume_and_get(ce->dev);
25236         if (err < 0)
25237                 goto error_alg;
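
This pm_runtime_get_sync() -> pm_runtime_resume_and_get() conversion recurs through the sun4i/sun8i hunks here and the omap-aes, sa2ul and stm32 hunks further down. The motivation is error-path hygiene: get_sync() bumps the usage counter even when the resume fails, so every failing caller had to remember pm_runtime_put_noidle() (the omap-aes hunk deletes exactly such a line). The newer helper is roughly:

#include <linux/pm_runtime.h>

static inline int demo_resume_and_get(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* drop the ref taken on failure */
		return ret;
	}

	return 0;
}
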
25239 diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
25240 index ed2a69f82e1c..7c355bc2fb06 100644
25241 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
25242 +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
25243 @@ -351,7 +351,7 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
25244         op->enginectx.op.prepare_request = NULL;
25245         op->enginectx.op.unprepare_request = NULL;
25247 -       err = pm_runtime_get_sync(op->ss->dev);
25248 +       err = pm_runtime_resume_and_get(op->ss->dev);
25249         if (err < 0) {
25250                 dev_err(op->ss->dev, "pm error %d\n", err);
25251                 goto error_pm;
25252 diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
25253 index e0ddc684798d..80e89066dbd1 100644
25254 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
25255 +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
25256 @@ -753,7 +753,7 @@ static int sun8i_ss_probe(struct platform_device *pdev)
25257         if (err)
25258                 goto error_alg;
25260 -       err = pm_runtime_get_sync(ss->dev);
25261 +       err = pm_runtime_resume_and_get(ss->dev);
25262         if (err < 0)
25263                 goto error_alg;
25265 diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
25266 index 11cbcbc83a7b..64446b86c927 100644
25267 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
25268 +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
25269 @@ -348,8 +348,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
25270         bf = (__le32 *)pad;
25272         result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
25273 -       if (!result)
25274 +       if (!result) {
25275 +               kfree(pad);
25276                 return -ENOMEM;
25277 +       }
25279         for (i = 0; i < MAX_SG; i++) {
25280                 rctx->t_dst[i].addr = 0;
25281 @@ -435,11 +437,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
25282         dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
25283         dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);
25285 -       kfree(pad);
25287         memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
25288 -       kfree(result);
25289  theend:
25290 +       kfree(pad);
25291 +       kfree(result);
25292         crypto_finalize_hash_request(engine, breq, err);
25293         return 0;
25295 diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
25296 index 08a1473b2145..3191527928e4 100644
25297 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
25298 +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
25299 @@ -103,7 +103,8 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
25300         dma_iv = dma_map_single(ss->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
25301         if (dma_mapping_error(ss->dev, dma_iv)) {
25302                 dev_err(ss->dev, "Cannot DMA MAP IV\n");
25303 -               return -EFAULT;
25304 +               err = -EFAULT;
25305 +               goto err_free;
25306         }
25308         dma_dst = dma_map_single(ss->dev, d, todo, DMA_FROM_DEVICE);
25309 @@ -167,6 +168,7 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
25310                 memcpy(ctx->seed, d + dlen, ctx->slen);
25311         }
25312         memzero_explicit(d, todo);
25313 +err_free:
25314         kfree(d);
25316         return err;
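
Both sun8i-ss fixes above land on the same discipline: everything allocated on entry is released on every exit, which is simplest with a single cleanup label (kfree(NULL) is a no-op, so unconditional frees are safe). Generic sketch:

#include <linux/slab.h>

static int demo(void)
{
	void *pad = kzalloc(64, GFP_KERNEL);
	void *result = NULL;
	int err = 0;

	if (!pad)
		return -ENOMEM;

	result = kzalloc(32, GFP_KERNEL);
	if (!result) {
		err = -ENOMEM;
		goto theend;
	}

	/* ... real work; failures set err and jump to theend ... */

theend:
	kfree(result);
	kfree(pad);

	return err;
}
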
25317 diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
25318 index cb9b4c4e371e..3e0d1d6922ba 100644
25319 --- a/drivers/crypto/ccp/sev-dev.c
25320 +++ b/drivers/crypto/ccp/sev-dev.c
25321 @@ -150,6 +150,9 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
25323         sev = psp->sev_data;
25325 +       if (data && WARN_ON_ONCE(!virt_addr_valid(data)))
25326 +               return -EINVAL;
25328         /* Get the physical address of the command buffer */
25329         phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
25330         phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
25331 @@ -987,7 +990,7 @@ int sev_dev_init(struct psp_device *psp)
25332         if (!sev->vdata) {
25333                 ret = -ENODEV;
25334                 dev_err(dev, "sev: missing driver data\n");
25335 -               goto e_err;
25336 +               goto e_sev;
25337         }
25339         psp_set_sev_irq_handler(psp, sev_irq_handler, sev);
25340 @@ -1002,6 +1005,8 @@ int sev_dev_init(struct psp_device *psp)
25342  e_irq:
25343         psp_clear_sev_irq_handler(psp);
25344 +e_sev:
25345 +       devm_kfree(dev, sev);
25346  e_err:
25347         psp->sev_data = NULL;
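
Two separate sev-dev fixes: the devm_kfree() on the new e_sev label stops leaking the sev context when driver data is missing, and the virt_addr_valid() guard rejects command buffers outside the kernel linear map, since __psp_pa() translates through that map and a vmalloc()ed buffer would hand the PSP a bogus physical address. What the guard distinguishes:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void demo(void)
{
	void *lin = kmalloc(128, GFP_KERNEL);	/* linear map: passes */
	void *vm  = vmalloc(128);		/* vmalloc area: rejected */

	if (lin)
		WARN_ON(!virt_addr_valid(lin));
	if (vm)
		WARN_ON(virt_addr_valid(vm));	/* guard returns -EINVAL */

	kfree(lin);
	vfree(vm);
}
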
25349 diff --git a/drivers/crypto/ccp/tee-dev.c b/drivers/crypto/ccp/tee-dev.c
25350 index 5e697a90ea7f..bcb81fef4211 100644
25351 --- a/drivers/crypto/ccp/tee-dev.c
25352 +++ b/drivers/crypto/ccp/tee-dev.c
25353 @@ -36,6 +36,7 @@ static int tee_alloc_ring(struct psp_tee_device *tee, int ring_size)
25354         if (!start_addr)
25355                 return -ENOMEM;
25357 +       memset(start_addr, 0x0, ring_size);
25358         rb_mgr->ring_start = start_addr;
25359         rb_mgr->ring_size = ring_size;
25360         rb_mgr->ring_pa = __psp_pa(start_addr);
25361 @@ -244,41 +245,54 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
25362                           void *buf, size_t len, struct tee_ring_cmd **resp)
25364         struct tee_ring_cmd *cmd;
25365 -       u32 rptr, wptr;
25366         int nloop = 1000, ret = 0;
25367 +       u32 rptr;
25369         *resp = NULL;
25371         mutex_lock(&tee->rb_mgr.mutex);
25373 -       wptr = tee->rb_mgr.wptr;
25375 -       /* Check if ring buffer is full */
25376 +       /* Loop until an empty entry is found in the ring buffer */
25377         do {
25378 +               /* Get pointer to ring buffer command entry */
25379 +               cmd = (struct tee_ring_cmd *)
25380 +                       (tee->rb_mgr.ring_start + tee->rb_mgr.wptr);
25382                 rptr = ioread32(tee->io_regs + tee->vdata->ring_rptr_reg);
25384 -               if (!(wptr + sizeof(struct tee_ring_cmd) == rptr))
25385 +               /* Check if ring buffer is full or command entry is waiting
25386 +                * for response from TEE
25387 +                */
25388 +               if (!(tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
25389 +                     cmd->flag == CMD_WAITING_FOR_RESPONSE))
25390                         break;
25392 -               dev_info(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
25393 -                        rptr, wptr);
25394 +               dev_dbg(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
25395 +                       rptr, tee->rb_mgr.wptr);
25397 -               /* Wait if ring buffer is full */
25398 +               /* Wait if ring buffer is full or TEE is processing data */
25399                 mutex_unlock(&tee->rb_mgr.mutex);
25400                 schedule_timeout_interruptible(msecs_to_jiffies(10));
25401                 mutex_lock(&tee->rb_mgr.mutex);
25403         } while (--nloop);
25405 -       if (!nloop && (wptr + sizeof(struct tee_ring_cmd) == rptr)) {
25406 -               dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
25407 -                       rptr, wptr);
25408 +       if (!nloop &&
25409 +           (tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
25410 +            cmd->flag == CMD_WAITING_FOR_RESPONSE)) {
25411 +               dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u response flag %u\n",
25412 +                       rptr, tee->rb_mgr.wptr, cmd->flag);
25413                 ret = -EBUSY;
25414                 goto unlock;
25415         }
25417 -       /* Pointer to empty data entry in ring buffer */
25418 -       cmd = (struct tee_ring_cmd *)(tee->rb_mgr.ring_start + wptr);
25419 +       /* Do not submit command if PSP got disabled while processing any
25420 +        * command in another thread
25421 +        */
25422 +       if (psp_dead) {
25423 +               ret = -EBUSY;
25424 +               goto unlock;
25425 +       }
25427         /* Write command data into ring buffer */
25428         cmd->cmd_id = cmd_id;
25429 @@ -286,6 +300,9 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
25430         memset(&cmd->buf[0], 0, sizeof(cmd->buf));
25431         memcpy(&cmd->buf[0], buf, len);
25433 +       /* Indicate driver is waiting for response */
25434 +       cmd->flag = CMD_WAITING_FOR_RESPONSE;
25436         /* Update local copy of write pointer */
25437         tee->rb_mgr.wptr += sizeof(struct tee_ring_cmd);
25438         if (tee->rb_mgr.wptr >= tee->rb_mgr.ring_size)
25439 @@ -353,12 +370,16 @@ int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len,
25440                 return ret;
25442         ret = tee_wait_cmd_completion(tee, resp, TEE_DEFAULT_TIMEOUT);
25443 -       if (ret)
25444 +       if (ret) {
25445 +               resp->flag = CMD_RESPONSE_TIMEDOUT;
25446                 return ret;
25447 +       }
25449         memcpy(buf, &resp->buf[0], len);
25450         *status = resp->status;
25452 +       resp->flag = CMD_RESPONSE_COPIED;
25454         return 0;
25456  EXPORT_SYMBOL(psp_tee_process_cmd);
25457 diff --git a/drivers/crypto/ccp/tee-dev.h b/drivers/crypto/ccp/tee-dev.h
25458 index f09960112115..49d26158b71e 100644
25459 --- a/drivers/crypto/ccp/tee-dev.h
25460 +++ b/drivers/crypto/ccp/tee-dev.h
25461 @@ -1,6 +1,6 @@
25462  /* SPDX-License-Identifier: MIT */
25463  /*
25464 - * Copyright 2019 Advanced Micro Devices, Inc.
25465 + * Copyright (C) 2019,2021 Advanced Micro Devices, Inc.
25466   *
25467   * Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
25468   * Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
25469 @@ -18,7 +18,7 @@
25470  #include <linux/mutex.h>
25472  #define TEE_DEFAULT_TIMEOUT            10
25473 -#define MAX_BUFFER_SIZE                        992
25474 +#define MAX_BUFFER_SIZE                        988
25476  /**
25477   * enum tee_ring_cmd_id - TEE interface commands for ring buffer configuration
25478 @@ -81,6 +81,20 @@ enum tee_cmd_state {
25479         TEE_CMD_STATE_COMPLETED,
25480  };
25482 +/**
25483 + * enum cmd_resp_state - TEE command's response status maintained by driver
25484 + * @CMD_RESPONSE_INVALID:      initial state when no command is written to ring
25485 + * @CMD_WAITING_FOR_RESPONSE:  driver waiting for response from TEE
25486 + * @CMD_RESPONSE_TIMEDOUT:     failed to get response from TEE
25487 + * @CMD_RESPONSE_COPIED:       driver has copied response from TEE
25488 + */
25489 +enum cmd_resp_state {
25490 +       CMD_RESPONSE_INVALID,
25491 +       CMD_WAITING_FOR_RESPONSE,
25492 +       CMD_RESPONSE_TIMEDOUT,
25493 +       CMD_RESPONSE_COPIED,
25496  /**
25497   * struct tee_ring_cmd - Structure of the command buffer in TEE ring
25498   * @cmd_id:      refers to &enum tee_cmd_id. Command id for the ring buffer
25499 @@ -91,6 +105,7 @@ enum tee_cmd_state {
25500   * @pdata:       private data (currently unused)
25501   * @res1:        reserved region
25502   * @buf:         TEE command specific buffer
25503 + * @flag:       refers to &enum cmd_resp_state
25504   */
25505  struct tee_ring_cmd {
25506         u32 cmd_id;
25507 @@ -100,6 +115,7 @@ struct tee_ring_cmd {
25508         u64 pdata;
25509         u32 res1[2];
25510         u8 buf[MAX_BUFFER_SIZE];
25511 +       u32 flag;
25513         /* Total size: 1024 bytes */
25514  } __packed;
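
The buffer shrink and the new field are one change: the old 992-byte buf is split into 988 payload bytes plus the trailing u32 flag, so the entry stays at the 1024 bytes the ring layout assumes (988 + 4 = 992, unchanged total). A compile-time guard one could add, assuming the fields elided from the hunk keep their sizes:

#include <linux/bug.h>

static void demo_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct tee_ring_cmd) != 1024);
}
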
25515 diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
25516 index f5a336634daa..405ff957b837 100644
25517 --- a/drivers/crypto/chelsio/chcr_algo.c
25518 +++ b/drivers/crypto/chelsio/chcr_algo.c
25519 @@ -769,13 +769,14 @@ static inline void create_wreq(struct chcr_context *ctx,
25520         struct uld_ctx *u_ctx = ULD_CTX(ctx);
25521         unsigned int tx_channel_id, rx_channel_id;
25522         unsigned int txqidx = 0, rxqidx = 0;
25523 -       unsigned int qid, fid;
25524 +       unsigned int qid, fid, portno;
25526         get_qidxs(req, &txqidx, &rxqidx);
25527         qid = u_ctx->lldi.rxq_ids[rxqidx];
25528         fid = u_ctx->lldi.rxq_ids[0];
25529 +       portno = rxqidx / ctx->rxq_perchan;
25530         tx_channel_id = txqidx / ctx->txq_perchan;
25531 -       rx_channel_id = rxqidx / ctx->rxq_perchan;
25532 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);
25535         chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
25536 @@ -806,6 +807,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
25538         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
25539         struct chcr_context *ctx = c_ctx(tfm);
25540 +       struct uld_ctx *u_ctx = ULD_CTX(ctx);
25541         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
25542         struct sk_buff *skb = NULL;
25543         struct chcr_wr *chcr_req;
25544 @@ -822,6 +824,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
25545         struct adapter *adap = padap(ctx->dev);
25546         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
25548 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
25549         nents = sg_nents_xlen(reqctx->dstsg,  wrparam->bytes, CHCR_DST_SG_SIZE,
25550                               reqctx->dst_ofst);
25551         dst_size = get_space_for_phys_dsgl(nents);
25552 @@ -1580,6 +1583,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
25553         int error = 0;
25554         unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
25556 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
25557         transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
25558         req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
25559                                 param->sg_len) <= SGE_MAX_WR_LEN;
25560 @@ -2438,6 +2442,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
25562         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
25563         struct chcr_context *ctx = a_ctx(tfm);
25564 +       struct uld_ctx *u_ctx = ULD_CTX(ctx);
25565         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
25566         struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
25567         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
25568 @@ -2457,6 +2462,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
25569         struct adapter *adap = padap(ctx->dev);
25570         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
25572 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
25573         if (req->cryptlen == 0)
25574                 return NULL;
25576 @@ -2710,9 +2716,11 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
25577         struct dsgl_walk dsgl_walk;
25578         unsigned int authsize = crypto_aead_authsize(tfm);
25579         struct chcr_context *ctx = a_ctx(tfm);
25580 +       struct uld_ctx *u_ctx = ULD_CTX(ctx);
25581         u32 temp;
25582         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
25584 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
25585         dsgl_walk_init(&dsgl_walk, phys_cpl);
25586         dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
25587         temp = req->assoclen + req->cryptlen +
25588 @@ -2752,9 +2760,11 @@ void chcr_add_cipher_dst_ent(struct skcipher_request *req,
25589         struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
25590         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
25591         struct chcr_context *ctx = c_ctx(tfm);
25592 +       struct uld_ctx *u_ctx = ULD_CTX(ctx);
25593         struct dsgl_walk dsgl_walk;
25594         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
25596 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
25597         dsgl_walk_init(&dsgl_walk, phys_cpl);
25598         dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
25599                          reqctx->dst_ofst);
25600 @@ -2958,6 +2968,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
25602         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
25603         struct chcr_context *ctx = a_ctx(tfm);
25604 +       struct uld_ctx *u_ctx = ULD_CTX(ctx);
25605         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
25606         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
25607         unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
25608 @@ -2967,6 +2978,8 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
25609         unsigned int tag_offset = 0, auth_offset = 0;
25610         unsigned int assoclen;
25612 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
25614         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
25615                 assoclen = req->assoclen - 8;
25616         else
25617 @@ -3127,6 +3140,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
25619         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
25620         struct chcr_context *ctx = a_ctx(tfm);
25621 +       struct uld_ctx *u_ctx = ULD_CTX(ctx);
25622         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
25623         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
25624         struct sk_buff *skb = NULL;
25625 @@ -3143,6 +3157,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
25626         struct adapter *adap = padap(ctx->dev);
25627         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
25629 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
25630         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
25631                 assoclen = req->assoclen - 8;
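
Every chcr hunk in this file is the same one-line correction: rxqidx / rxq_perchan yields a port number, not a hardware channel, and the two differ on adapters where ports and channels are not identity-mapped. The channel therefore has to be looked up through the LLD's port table, as create_wreq() now does:

	/* names as in the hunks above */
	portno = rxqidx / ctx->rxq_perchan;
	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);
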
25633 diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
25634 index 2eaa516b3231..8adcbb327126 100644
25635 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
25636 +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
25637 @@ -546,7 +546,7 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm)
25638         crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
25639         ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
25640         if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
25641 -               dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n");
25642 +               pr_err("get error skcipher iv size!\n");
25643                 return -EINVAL;
25644         }
25646 diff --git a/drivers/crypto/keembay/keembay-ocs-aes-core.c b/drivers/crypto/keembay/keembay-ocs-aes-core.c
25647 index b6b25d994af3..2ef312866338 100644
25648 --- a/drivers/crypto/keembay/keembay-ocs-aes-core.c
25649 +++ b/drivers/crypto/keembay/keembay-ocs-aes-core.c
25650 @@ -1649,8 +1649,10 @@ static int kmb_ocs_aes_probe(struct platform_device *pdev)
25652         /* Initialize crypto engine */
25653         aes_dev->engine = crypto_engine_alloc_init(dev, true);
25654 -       if (!aes_dev->engine)
25655 +       if (!aes_dev->engine) {
25656 +               rc = -ENOMEM;
25657                 goto list_del;
25658 +       }
25660         rc = crypto_engine_start(aes_dev->engine);
25661         if (rc) {
25662 diff --git a/drivers/crypto/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
25663 index c4b97b4160e9..322c51a6936f 100644
25664 --- a/drivers/crypto/keembay/keembay-ocs-hcu-core.c
25665 +++ b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
25666 @@ -1220,8 +1220,10 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev)
25668         /* Initialize crypto engine */
25669         hcu_dev->engine = crypto_engine_alloc_init(dev, 1);
25670 -       if (!hcu_dev->engine)
25671 +       if (!hcu_dev->engine) {
25672 +               rc = -ENOMEM;
25673                 goto list_del;
25674 +       }
25676         rc = crypto_engine_start(hcu_dev->engine);
25677         if (rc) {
25678 diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
25679 index a45bdcf3026d..0dd4c6b157de 100644
25680 --- a/drivers/crypto/omap-aes.c
25681 +++ b/drivers/crypto/omap-aes.c
25682 @@ -103,9 +103,8 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd)
25683                 dd->err = 0;
25684         }
25686 -       err = pm_runtime_get_sync(dd->dev);
25687 +       err = pm_runtime_resume_and_get(dd->dev);
25688         if (err < 0) {
25689 -               pm_runtime_put_noidle(dd->dev);
25690                 dev_err(dd->dev, "failed to get sync: %d\n", err);
25691                 return err;
25692         }
25693 @@ -1134,7 +1133,7 @@ static int omap_aes_probe(struct platform_device *pdev)
25694         pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
25696         pm_runtime_enable(dev);
25697 -       err = pm_runtime_get_sync(dev);
25698 +       err = pm_runtime_resume_and_get(dev);
25699         if (err < 0) {
25700                 dev_err(dev, "%s: failed to get_sync(%d)\n",
25701                         __func__, err);
25702 @@ -1303,7 +1302,7 @@ static int omap_aes_suspend(struct device *dev)
25704  static int omap_aes_resume(struct device *dev)
25706 -       pm_runtime_get_sync(dev);
25707 +       pm_runtime_resume_and_get(dev);
25708         return 0;
25710  #endif
25711 diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
25712 index 1d1532e8fb6d..067ca5e17d38 100644
25713 --- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
25714 +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
25715 @@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
25716         if (ret)
25717                 goto out_err_free_reg;
25719 -       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
25721         ret = adf_dev_init(accel_dev);
25722         if (ret)
25723                 goto out_err_dev_shutdown;
25725 +       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
25727         ret = adf_dev_start(accel_dev);
25728         if (ret)
25729                 goto out_err_dev_stop;
25730 diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
25731 index 04742a6d91ca..51ea88c0b17d 100644
25732 --- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
25733 +++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
25734 @@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
25735         if (ret)
25736                 goto out_err_free_reg;
25738 -       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
25740         ret = adf_dev_init(accel_dev);
25741         if (ret)
25742                 goto out_err_dev_shutdown;
25744 +       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
25746         ret = adf_dev_start(accel_dev);
25747         if (ret)
25748                 goto out_err_dev_stop;
25749 diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
25750 index c45853463530..e3ad5587be49 100644
25751 --- a/drivers/crypto/qat/qat_common/adf_isr.c
25752 +++ b/drivers/crypto/qat/qat_common/adf_isr.c
25753 @@ -291,19 +291,32 @@ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
25755         ret = adf_isr_alloc_msix_entry_table(accel_dev);
25756         if (ret)
25757 -               return ret;
25758 -       if (adf_enable_msix(accel_dev))
25759                 goto err_out;
25761 -       if (adf_setup_bh(accel_dev))
25762 -               goto err_out;
25763 +       ret = adf_enable_msix(accel_dev);
25764 +       if (ret)
25765 +               goto err_free_msix_table;
25767 -       if (adf_request_irqs(accel_dev))
25768 -               goto err_out;
25769 +       ret = adf_setup_bh(accel_dev);
25770 +       if (ret)
25771 +               goto err_disable_msix;
25773 +       ret = adf_request_irqs(accel_dev);
25774 +       if (ret)
25775 +               goto err_cleanup_bh;
25777         return 0;
25779 +err_cleanup_bh:
25780 +       adf_cleanup_bh(accel_dev);
25782 +err_disable_msix:
25783 +       adf_disable_msix(&accel_dev->accel_pci_dev);
25785 +err_free_msix_table:
25786 +       adf_isr_free_msix_entry_table(accel_dev);
25788  err_out:
25789 -       adf_isr_resource_free(accel_dev);
25790 -       return -EFAULT;
25791 +       return ret;
25793  EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
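
This rework (repeated for the VF flavour in adf_vf_isr.c below) replaces "on any failure, free everything and return -EFAULT" with the canonical unwind ladder: each step's failure releases only what earlier steps acquired, in reverse order, and the real errno propagates. Generic shape, all step/undo names hypothetical:

static int demo_step_a(void), demo_step_b(void), demo_step_c(void);
static void demo_undo_a(void), demo_undo_b(void);

static int demo_resource_alloc(void)
{
	int ret;

	ret = demo_step_a();
	if (ret)
		goto err_out;

	ret = demo_step_b();
	if (ret)
		goto err_undo_a;

	ret = demo_step_c();
	if (ret)
		goto err_undo_b;

	return 0;

err_undo_b:
	demo_undo_b();
err_undo_a:
	demo_undo_a();
err_out:
	return ret;
}
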
25794 diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
25795 index 888c1e047295..8ba28409fb74 100644
25796 --- a/drivers/crypto/qat/qat_common/adf_transport.c
25797 +++ b/drivers/crypto/qat/qat_common/adf_transport.c
25798 @@ -172,6 +172,7 @@ static int adf_init_ring(struct adf_etr_ring_data *ring)
25799                 dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
25800                 dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
25801                                   ring->base_addr, ring->dma_addr);
25802 +               ring->base_addr = NULL;
25803                 return -EFAULT;
25804         }
25806 diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
25807 index 38d316a42ba6..888388acb6bd 100644
25808 --- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
25809 +++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
25810 @@ -261,17 +261,26 @@ int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
25811                 goto err_out;
25813         if (adf_setup_pf2vf_bh(accel_dev))
25814 -               goto err_out;
25815 +               goto err_disable_msi;
25817         if (adf_setup_bh(accel_dev))
25818 -               goto err_out;
25819 +               goto err_cleanup_pf2vf_bh;
25821         if (adf_request_msi_irq(accel_dev))
25822 -               goto err_out;
25823 +               goto err_cleanup_bh;
25825         return 0;
25827 +err_cleanup_bh:
25828 +       adf_cleanup_bh(accel_dev);
25830 +err_cleanup_pf2vf_bh:
25831 +       adf_cleanup_pf2vf_bh(accel_dev);
25833 +err_disable_msi:
25834 +       adf_disable_msi(accel_dev);
25836  err_out:
25837 -       adf_vf_isr_resource_free(accel_dev);
25838         return -EFAULT;
25840  EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
25841 diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
25842 index ff78c73c47e3..ea1c6899290d 100644
25843 --- a/drivers/crypto/qat/qat_common/qat_algs.c
25844 +++ b/drivers/crypto/qat/qat_common/qat_algs.c
25845 @@ -719,7 +719,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
25846         struct qat_alg_buf_list *bufl;
25847         struct qat_alg_buf_list *buflout = NULL;
25848         dma_addr_t blp;
25849 -       dma_addr_t bloutp = 0;
25850 +       dma_addr_t bloutp;
25851         struct scatterlist *sg;
25852         size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
25854 @@ -731,6 +731,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
25855         if (unlikely(!bufl))
25856                 return -ENOMEM;
25858 +       for_each_sg(sgl, sg, n, i)
25859 +               bufl->bufers[i].addr = DMA_MAPPING_ERROR;
25861         blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
25862         if (unlikely(dma_mapping_error(dev, blp)))
25863                 goto err_in;
25864 @@ -764,10 +767,14 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
25865                                        dev_to_node(&GET_DEV(inst->accel_dev)));
25866                 if (unlikely(!buflout))
25867                         goto err_in;
25869 +               bufers = buflout->bufers;
25870 +               for_each_sg(sglout, sg, n, i)
25871 +                       bufers[i].addr = DMA_MAPPING_ERROR;
25873                 bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
25874                 if (unlikely(dma_mapping_error(dev, bloutp)))
25875                         goto err_out;
25876 -               bufers = buflout->bufers;
25877                 for_each_sg(sglout, sg, n, i) {
25878                         int y = sg_nctr;
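
Seeding every descriptor with DMA_MAPPING_ERROR before mapping is what lets the unwind code distinguish "mapped" from "never touched", and moving the bufers assignment ahead of the dma_map_single() of buflout closes the same hole for the out list: dma_mapping_error() is true for the sentinel, so a partially built list can be torn down without unmapping garbage. The pattern, with names and direction mirroring the hunk as assumptions:

#include <linux/dma-mapping.h>

/* build phase: mark everything unmapped first */
for (i = 0; i < n; i++)
	bufl->bufers[i].addr = DMA_MAPPING_ERROR;

/* unwind phase: only undo real mappings */
for (i = 0; i < n; i++)
	if (!dma_mapping_error(dev, bufl->bufers[i].addr))
		dma_unmap_single(dev, bufl->bufers[i].addr,
				 bufl->bufers[i].len, DMA_BIDIRECTIONAL);
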
25880 diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
25881 index c972554a755e..29999da716cc 100644
25882 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
25883 +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
25884 @@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
25885         if (ret)
25886                 goto out_err_free_reg;
25888 -       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
25890         ret = adf_dev_init(accel_dev);
25891         if (ret)
25892                 goto out_err_dev_shutdown;
25894 +       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
25896         ret = adf_dev_start(accel_dev);
25897         if (ret)
25898                 goto out_err_dev_stop;
25899 diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
25900 index f300b0a5958a..b0f0502a5bb0 100644
25901 --- a/drivers/crypto/sa2ul.c
25902 +++ b/drivers/crypto/sa2ul.c
25903 @@ -1146,8 +1146,10 @@ static int sa_run(struct sa_req *req)
25904                 mapped_sg->sgt.sgl = src;
25905                 mapped_sg->sgt.orig_nents = src_nents;
25906                 ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
25907 -               if (ret)
25908 +               if (ret) {
25909 +                       kfree(rxd);
25910                         return ret;
25911 +               }
25913                 mapped_sg->dir = dir_src;
25914                 mapped_sg->mapped = true;
25915 @@ -1155,8 +1157,10 @@ static int sa_run(struct sa_req *req)
25916                 mapped_sg->sgt.sgl = req->src;
25917                 mapped_sg->sgt.orig_nents = sg_nents;
25918                 ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
25919 -               if (ret)
25920 +               if (ret) {
25921 +                       kfree(rxd);
25922                         return ret;
25923 +               }
25925                 mapped_sg->dir = dir_src;
25926                 mapped_sg->mapped = true;
25927 @@ -2350,7 +2354,7 @@ static int sa_ul_probe(struct platform_device *pdev)
25928         dev_set_drvdata(sa_k3_dev, dev_data);
25930         pm_runtime_enable(dev);
25931 -       ret = pm_runtime_get_sync(dev);
25932 +       ret = pm_runtime_resume_and_get(dev);
25933         if (ret < 0) {
25934                 dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
25935                         ret);
25936 diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
25937 index 2a4793176c71..7389a0536ff0 100644
25938 --- a/drivers/crypto/stm32/stm32-cryp.c
25939 +++ b/drivers/crypto/stm32/stm32-cryp.c
25940 @@ -542,7 +542,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
25941         int ret;
25942         u32 cfg, hw_mode;
25944 -       pm_runtime_get_sync(cryp->dev);
25945 +       pm_runtime_resume_and_get(cryp->dev);
25947         /* Disable interrupt */
25948         stm32_cryp_write(cryp, CRYP_IMSCR, 0);
25949 @@ -2043,7 +2043,7 @@ static int stm32_cryp_remove(struct platform_device *pdev)
25950         if (!cryp)
25951                 return -ENODEV;
25953 -       ret = pm_runtime_get_sync(cryp->dev);
25954 +       ret = pm_runtime_resume_and_get(cryp->dev);
25955         if (ret < 0)
25956                 return ret;
25958 diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
25959 index 7ac0573ef663..389de9e3302d 100644
25960 --- a/drivers/crypto/stm32/stm32-hash.c
25961 +++ b/drivers/crypto/stm32/stm32-hash.c
25962 @@ -813,7 +813,7 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err)
25963  static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
25964                               struct stm32_hash_request_ctx *rctx)
25966 -       pm_runtime_get_sync(hdev->dev);
25967 +       pm_runtime_resume_and_get(hdev->dev);
25969         if (!(HASH_FLAGS_INIT & hdev->flags)) {
25970                 stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
25971 @@ -962,7 +962,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
25972         u32 *preg;
25973         unsigned int i;
25975 -       pm_runtime_get_sync(hdev->dev);
25976 +       pm_runtime_resume_and_get(hdev->dev);
25978         while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
25979                 cpu_relax();
25980 @@ -1000,7 +1000,7 @@ static int stm32_hash_import(struct ahash_request *req, const void *in)
25982         preg = rctx->hw_context;
25984 -       pm_runtime_get_sync(hdev->dev);
25985 +       pm_runtime_resume_and_get(hdev->dev);
25987         stm32_hash_write(hdev, HASH_IMR, *preg++);
25988         stm32_hash_write(hdev, HASH_STR, *preg++);
25989 @@ -1566,7 +1566,7 @@ static int stm32_hash_remove(struct platform_device *pdev)
25990         if (!hdev)
25991                 return -ENODEV;
25993 -       ret = pm_runtime_get_sync(hdev->dev);
25994 +       ret = pm_runtime_resume_and_get(hdev->dev);
25995         if (ret < 0)
25996                 return ret;
25998 diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
25999 index bf3047896e41..59ba59bea0f5 100644
26000 --- a/drivers/devfreq/devfreq.c
26001 +++ b/drivers/devfreq/devfreq.c
26002 @@ -387,7 +387,7 @@ static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq,
26003         devfreq->previous_freq = new_freq;
26005         if (devfreq->suspend_freq)
26006 -               devfreq->resume_freq = cur_freq;
26007 +               devfreq->resume_freq = new_freq;
26009         return err;
26011 @@ -821,7 +821,8 @@ struct devfreq *devfreq_add_device(struct device *dev,
26013         if (devfreq->profile->timer < 0
26014                 || devfreq->profile->timer >= DEVFREQ_TIMER_NUM) {
26015 -               goto err_out;
26016 +               mutex_unlock(&devfreq->lock);
26017 +               goto err_dev;
26018         }
26020         if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
26021 diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
26022 index f264b70c383e..eadd1eaa2fb5 100644
26023 --- a/drivers/dma-buf/dma-buf.c
26024 +++ b/drivers/dma-buf/dma-buf.c
26025 @@ -760,7 +760,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
26027                 if (dma_buf_is_dynamic(attach->dmabuf)) {
26028                         dma_resv_lock(attach->dmabuf->resv, NULL);
26029 -                       ret = dma_buf_pin(attach);
26030 +                       ret = dmabuf->ops->pin(attach);
26031                         if (ret)
26032                                 goto err_unlock;
26033                 }
26034 @@ -786,7 +786,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
26036  err_unpin:
26037         if (dma_buf_is_dynamic(attach->dmabuf))
26038 -               dma_buf_unpin(attach);
26039 +               dmabuf->ops->unpin(attach);
26041  err_unlock:
26042         if (dma_buf_is_dynamic(attach->dmabuf))
26043 @@ -843,7 +843,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
26044                 __unmap_dma_buf(attach, attach->sgt, attach->dir);
26046                 if (dma_buf_is_dynamic(attach->dmabuf)) {
26047 -                       dma_buf_unpin(attach);
26048 +                       dmabuf->ops->unpin(attach);
26049                         dma_resv_unlock(attach->dmabuf->resv);
26050                 }
26051         }
26052 @@ -956,7 +956,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
26053         if (dma_buf_is_dynamic(attach->dmabuf)) {
26054                 dma_resv_assert_held(attach->dmabuf->resv);
26055                 if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
26056 -                       r = dma_buf_pin(attach);
26057 +                       r = attach->dmabuf->ops->pin(attach);
26058                         if (r)
26059                                 return ERR_PTR(r);
26060                 }
26061 @@ -968,7 +968,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
26063         if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
26064              !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
26065 -               dma_buf_unpin(attach);
26066 +               attach->dmabuf->ops->unpin(attach);
26068         if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
26069                 attach->sgt = sg_table;
26070 diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
26071 index 08d71dafa001..58c8cc8fe0e1 100644
26072 --- a/drivers/dma/dw-edma/dw-edma-core.c
26073 +++ b/drivers/dma/dw-edma/dw-edma-core.c
26074 @@ -937,22 +937,21 @@ int dw_edma_remove(struct dw_edma_chip *chip)
26075         /* Power management */
26076         pm_runtime_disable(dev);
26078 +       /* Deregister eDMA device */
26079 +       dma_async_device_unregister(&dw->wr_edma);
26080         list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
26081                                  vc.chan.device_node) {
26082 -               list_del(&chan->vc.chan.device_node);
26083                 tasklet_kill(&chan->vc.task);
26084 +               list_del(&chan->vc.chan.device_node);
26085         }
26087 +       dma_async_device_unregister(&dw->rd_edma);
26088         list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
26089                                  vc.chan.device_node) {
26090 -               list_del(&chan->vc.chan.device_node);
26091                 tasklet_kill(&chan->vc.task);
26092 +               list_del(&chan->vc.chan.device_node);
26093         }
26095 -       /* Deregister eDMA device */
26096 -       dma_async_device_unregister(&dw->wr_edma);
26097 -       dma_async_device_unregister(&dw->rd_edma);
26099         /* Turn debugfs off */
26100         dw_edma_v0_core_debugfs_off();
26102 diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
26103 index 0db9b82ed8cf..1d8a3876b745 100644
26104 --- a/drivers/dma/idxd/cdev.c
26105 +++ b/drivers/dma/idxd/cdev.c
26106 @@ -39,15 +39,15 @@ struct idxd_user_context {
26107         struct iommu_sva *sva;
26108  };
26110 -enum idxd_cdev_cleanup {
26111 -       CDEV_NORMAL = 0,
26112 -       CDEV_FAILED,
26113 -};
26114 -
26115  static void idxd_cdev_dev_release(struct device *dev)
26116  {
26117 -       dev_dbg(dev, "releasing cdev device\n");
26118 -       kfree(dev);
26119 +       struct idxd_cdev *idxd_cdev = container_of(dev, struct idxd_cdev, dev);
26120 +       struct idxd_cdev_context *cdev_ctx;
26121 +       struct idxd_wq *wq = idxd_cdev->wq;
26123 +       cdev_ctx = &ictx[wq->idxd->type];
26124 +       ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
26125 +       kfree(idxd_cdev);
26128  static struct device_type idxd_cdev_device_type = {
26129 @@ -62,14 +62,11 @@ static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode)
26130         return container_of(cdev, struct idxd_cdev, cdev);
26133 -static inline struct idxd_wq *idxd_cdev_wq(struct idxd_cdev *idxd_cdev)
26134 -{
26135 -       return container_of(idxd_cdev, struct idxd_wq, idxd_cdev);
26136 -}
26137 -
26138  static inline struct idxd_wq *inode_wq(struct inode *inode)
26139  {
26140 -       return idxd_cdev_wq(inode_idxd_cdev(inode));
26141 +       struct idxd_cdev *idxd_cdev = inode_idxd_cdev(inode);
26142 +
26143 +       return idxd_cdev->wq;
26146  static int idxd_cdev_open(struct inode *inode, struct file *filp)
26147 @@ -220,11 +217,10 @@ static __poll_t idxd_cdev_poll(struct file *filp,
26148         struct idxd_user_context *ctx = filp->private_data;
26149         struct idxd_wq *wq = ctx->wq;
26150         struct idxd_device *idxd = wq->idxd;
26151 -       struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
26152         unsigned long flags;
26153         __poll_t out = 0;
26155 -       poll_wait(filp, &idxd_cdev->err_queue, wait);
26156 +       poll_wait(filp, &wq->err_queue, wait);
26157         spin_lock_irqsave(&idxd->dev_lock, flags);
26158         if (idxd->sw_err.valid)
26159                 out = EPOLLIN | EPOLLRDNORM;
26160 @@ -246,98 +242,67 @@ int idxd_cdev_get_major(struct idxd_device *idxd)
26161         return MAJOR(ictx[idxd->type].devt);
26164 -static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
26165 +int idxd_wq_add_cdev(struct idxd_wq *wq)
26167         struct idxd_device *idxd = wq->idxd;
26168 -       struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
26169 -       struct idxd_cdev_context *cdev_ctx;
26170 +       struct idxd_cdev *idxd_cdev;
26171 +       struct cdev *cdev;
26172         struct device *dev;
26173 -       int minor, rc;
26174 +       struct idxd_cdev_context *cdev_ctx;
26175 +       int rc, minor;
26177 -       idxd_cdev->dev = kzalloc(sizeof(*idxd_cdev->dev), GFP_KERNEL);
26178 -       if (!idxd_cdev->dev)
26179 +       idxd_cdev = kzalloc(sizeof(*idxd_cdev), GFP_KERNEL);
26180 +       if (!idxd_cdev)
26181                 return -ENOMEM;
26183 -       dev = idxd_cdev->dev;
26184 -       dev->parent = &idxd->pdev->dev;
26185 -       dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
26186 -                    idxd->id, wq->id);
26187 -       dev->bus = idxd_get_bus_type(idxd);
26189 +       idxd_cdev->wq = wq;
26190 +       cdev = &idxd_cdev->cdev;
26191 +       dev = &idxd_cdev->dev;
26192         cdev_ctx = &ictx[wq->idxd->type];
26193         minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
26194         if (minor < 0) {
26195 -               rc = minor;
26196 -               kfree(dev);
26197 -               goto ida_err;
26198 -       }
26200 -       dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
26201 -       dev->type = &idxd_cdev_device_type;
26202 -       rc = device_register(dev);
26203 -       if (rc < 0) {
26204 -               dev_err(&idxd->pdev->dev, "device register failed\n");
26205 -               goto dev_reg_err;
26206 +               kfree(idxd_cdev);
26207 +               return minor;
26208         }
26209         idxd_cdev->minor = minor;
26211 -       return 0;
26213 - dev_reg_err:
26214 -       ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt));
26215 -       put_device(dev);
26216 - ida_err:
26217 -       idxd_cdev->dev = NULL;
26218 -       return rc;
26221 -static void idxd_wq_cdev_cleanup(struct idxd_wq *wq,
26222 -                                enum idxd_cdev_cleanup cdev_state)
26224 -       struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
26225 -       struct idxd_cdev_context *cdev_ctx;
26227 -       cdev_ctx = &ictx[wq->idxd->type];
26228 -       if (cdev_state == CDEV_NORMAL)
26229 -               cdev_del(&idxd_cdev->cdev);
26230 -       device_unregister(idxd_cdev->dev);
26231 -       /*
26232 -        * The device_type->release() will be called on the device and free
26233 -        * the allocated struct device. We can just forget it.
26234 -        */
26235 -       ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
26236 -       idxd_cdev->dev = NULL;
26237 -       idxd_cdev->minor = -1;
26240 -int idxd_wq_add_cdev(struct idxd_wq *wq)
26242 -       struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
26243 -       struct cdev *cdev = &idxd_cdev->cdev;
26244 -       struct device *dev;
26245 -       int rc;
26246 +       device_initialize(dev);
26247 +       dev->parent = &wq->conf_dev;
26248 +       dev->bus = idxd_get_bus_type(idxd);
26249 +       dev->type = &idxd_cdev_device_type;
26250 +       dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
26252 -       rc = idxd_wq_cdev_dev_setup(wq);
26253 +       rc = dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
26254 +                         idxd->id, wq->id);
26255         if (rc < 0)
26256 -               return rc;
26257 +               goto err;
26259 -       dev = idxd_cdev->dev;
26260 +       wq->idxd_cdev = idxd_cdev;
26261         cdev_init(cdev, &idxd_cdev_fops);
26262 -       cdev_set_parent(cdev, &dev->kobj);
26263 -       rc = cdev_add(cdev, dev->devt, 1);
26264 +       rc = cdev_device_add(cdev, dev);
26265         if (rc) {
26266                 dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
26267 -               idxd_wq_cdev_cleanup(wq, CDEV_FAILED);
26268 -               return rc;
26269 +               goto err;
26270         }
26272 -       init_waitqueue_head(&idxd_cdev->err_queue);
26273         return 0;
26275 + err:
26276 +       put_device(dev);
26277 +       wq->idxd_cdev = NULL;
26278 +       return rc;
26281  void idxd_wq_del_cdev(struct idxd_wq *wq)
26283 -       idxd_wq_cdev_cleanup(wq, CDEV_NORMAL);
26284 +       struct idxd_cdev *idxd_cdev;
26285 +       struct idxd_cdev_context *cdev_ctx;
26287 +       cdev_ctx = &ictx[wq->idxd->type];
26288 +       idxd_cdev = wq->idxd_cdev;
26289 +       wq->idxd_cdev = NULL;
26290 +       cdev_device_del(&idxd_cdev->cdev, &idxd_cdev->dev);
26291 +       put_device(&idxd_cdev->dev);
26294  int idxd_cdev_register(void)
26295 diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
26296 index 31c819544a22..4fef57717049 100644
26297 --- a/drivers/dma/idxd/device.c
26298 +++ b/drivers/dma/idxd/device.c
26299 @@ -19,7 +19,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
26300  /* Interrupt control bits */
26301  void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
26303 -       struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
26304 +       struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
26306         pci_msi_mask_irq(data);
26308 @@ -36,7 +36,7 @@ void idxd_mask_msix_vectors(struct idxd_device *idxd)
26310  void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
26312 -       struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
26313 +       struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
26315         pci_msi_unmask_irq(data);
26317 @@ -186,8 +186,6 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
26318                 desc->id = i;
26319                 desc->wq = wq;
26320                 desc->cpu = -1;
26321 -               dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan);
26322 -               desc->txd.tx_submit = idxd_dma_tx_submit;
26323         }
26325         return 0;
26326 @@ -451,7 +449,8 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
26328         if (idxd_device_is_halted(idxd)) {
26329                 dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
26330 -               *status = IDXD_CMDSTS_HW_ERR;
26331 +               if (status)
26332 +                       *status = IDXD_CMDSTS_HW_ERR;
26333                 return;
26334         }
26336 @@ -521,7 +520,7 @@ void idxd_device_wqs_clear_state(struct idxd_device *idxd)
26337         lockdep_assert_held(&idxd->dev_lock);
26339         for (i = 0; i < idxd->max_wqs; i++) {
26340 -               struct idxd_wq *wq = &idxd->wqs[i];
26341 +               struct idxd_wq *wq = idxd->wqs[i];
26343                 if (wq->state == IDXD_WQ_ENABLED) {
26344                         idxd_wq_disable_cleanup(wq);
26345 @@ -660,7 +659,7 @@ static int idxd_groups_config_write(struct idxd_device *idxd)
26346                 ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));
26348         for (i = 0; i < idxd->max_groups; i++) {
26349 -               struct idxd_group *group = &idxd->groups[i];
26350 +               struct idxd_group *group = idxd->groups[i];
26352                 idxd_group_config_write(group);
26353         }
26354 @@ -739,7 +738,7 @@ static int idxd_wqs_config_write(struct idxd_device *idxd)
26355         int i, rc;
26357         for (i = 0; i < idxd->max_wqs; i++) {
26358 -               struct idxd_wq *wq = &idxd->wqs[i];
26359 +               struct idxd_wq *wq = idxd->wqs[i];
26361                 rc = idxd_wq_config_write(wq);
26362                 if (rc < 0)
26363 @@ -755,7 +754,7 @@ static void idxd_group_flags_setup(struct idxd_device *idxd)
26365         /* TC-A 0 and TC-B 1 should be defaults */
26366         for (i = 0; i < idxd->max_groups; i++) {
26367 -               struct idxd_group *group = &idxd->groups[i];
26368 +               struct idxd_group *group = idxd->groups[i];
26370                 if (group->tc_a == -1)
26371                         group->tc_a = group->grpcfg.flags.tc_a = 0;
26372 @@ -782,12 +781,12 @@ static int idxd_engines_setup(struct idxd_device *idxd)
26373         struct idxd_group *group;
26375         for (i = 0; i < idxd->max_groups; i++) {
26376 -               group = &idxd->groups[i];
26377 +               group = idxd->groups[i];
26378                 group->grpcfg.engines = 0;
26379         }
26381         for (i = 0; i < idxd->max_engines; i++) {
26382 -               eng = &idxd->engines[i];
26383 +               eng = idxd->engines[i];
26384                 group = eng->group;
26386                 if (!group)
26387 @@ -811,13 +810,13 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
26388         struct device *dev = &idxd->pdev->dev;
26390         for (i = 0; i < idxd->max_groups; i++) {
26391 -               group = &idxd->groups[i];
26392 +               group = idxd->groups[i];
26393                 for (j = 0; j < 4; j++)
26394                         group->grpcfg.wqs[j] = 0;
26395         }
26397         for (i = 0; i < idxd->max_wqs; i++) {
26398 -               wq = &idxd->wqs[i];
26399 +               wq = idxd->wqs[i];
26400                 group = wq->group;
26402                 if (!wq->group)
26403 diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
26404 index a15e50126434..77439b645044 100644
26405 --- a/drivers/dma/idxd/dma.c
26406 +++ b/drivers/dma/idxd/dma.c
26407 @@ -14,7 +14,10 @@
26409  static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
26411 -       return container_of(c, struct idxd_wq, dma_chan);
26412 +       struct idxd_dma_chan *idxd_chan;
26414 +       idxd_chan = container_of(c, struct idxd_dma_chan, chan);
26415 +       return idxd_chan->wq;
26418  void idxd_dma_complete_txd(struct idxd_desc *desc,
26419 @@ -135,7 +138,7 @@ static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
26423 -dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
26424 +static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
26426         struct dma_chan *c = tx->chan;
26427         struct idxd_wq *wq = to_idxd_wq(c);
26428 @@ -156,14 +159,25 @@ dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
26430  static void idxd_dma_release(struct dma_device *device)
26432 +       struct idxd_dma_dev *idxd_dma = container_of(device, struct idxd_dma_dev, dma);
26434 +       kfree(idxd_dma);
26437  int idxd_register_dma_device(struct idxd_device *idxd)
26439 -       struct dma_device *dma = &idxd->dma_dev;
26440 +       struct idxd_dma_dev *idxd_dma;
26441 +       struct dma_device *dma;
26442 +       struct device *dev = &idxd->pdev->dev;
26443 +       int rc;
26445 +       idxd_dma = kzalloc_node(sizeof(*idxd_dma), GFP_KERNEL, dev_to_node(dev));
26446 +       if (!idxd_dma)
26447 +               return -ENOMEM;
26449 +       dma = &idxd_dma->dma;
26450         INIT_LIST_HEAD(&dma->channels);
26451 -       dma->dev = &idxd->pdev->dev;
26452 +       dma->dev = dev;
26454         dma_cap_set(DMA_PRIVATE, dma->cap_mask);
26455         dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
26456 @@ -179,35 +193,72 @@ int idxd_register_dma_device(struct idxd_device *idxd)
26457         dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
26458         dma->device_free_chan_resources = idxd_dma_free_chan_resources;
26460 -       return dma_async_device_register(&idxd->dma_dev);
26461 +       rc = dma_async_device_register(dma);
26462 +       if (rc < 0) {
26463 +               kfree(idxd_dma);
26464 +               return rc;
26465 +       }
26467 +       idxd_dma->idxd = idxd;
26468 +       /*
26469 +        * This pointer is protected by the refs taken by the dma_chan. It will remain valid
26470 +        * as long as there are outstanding channels.
26471 +        */
26472 +       idxd->idxd_dma = idxd_dma;
26473 +       return 0;
26476  void idxd_unregister_dma_device(struct idxd_device *idxd)
26478 -       dma_async_device_unregister(&idxd->dma_dev);
26479 +       dma_async_device_unregister(&idxd->idxd_dma->dma);
26482  int idxd_register_dma_channel(struct idxd_wq *wq)
26484         struct idxd_device *idxd = wq->idxd;
26485 -       struct dma_device *dma = &idxd->dma_dev;
26486 -       struct dma_chan *chan = &wq->dma_chan;
26487 -       int rc;
26488 +       struct dma_device *dma = &idxd->idxd_dma->dma;
26489 +       struct device *dev = &idxd->pdev->dev;
26490 +       struct idxd_dma_chan *idxd_chan;
26491 +       struct dma_chan *chan;
26492 +       int rc, i;
26494 +       idxd_chan = kzalloc_node(sizeof(*idxd_chan), GFP_KERNEL, dev_to_node(dev));
26495 +       if (!idxd_chan)
26496 +               return -ENOMEM;
26498 -       memset(&wq->dma_chan, 0, sizeof(struct dma_chan));
26499 +       chan = &idxd_chan->chan;
26500         chan->device = dma;
26501         list_add_tail(&chan->device_node, &dma->channels);
26503 +       for (i = 0; i < wq->num_descs; i++) {
26504 +               struct idxd_desc *desc = wq->descs[i];
26506 +               dma_async_tx_descriptor_init(&desc->txd, chan);
26507 +               desc->txd.tx_submit = idxd_dma_tx_submit;
26508 +       }
26510         rc = dma_async_device_channel_register(dma, chan);
26511 -       if (rc < 0)
26512 +       if (rc < 0) {
26513 +               kfree(idxd_chan);
26514                 return rc;
26515 +       }
26517 +       wq->idxd_chan = idxd_chan;
26518 +       idxd_chan->wq = wq;
26519 +       get_device(&wq->conf_dev);
26521         return 0;
26524  void idxd_unregister_dma_channel(struct idxd_wq *wq)
26526 -       struct dma_chan *chan = &wq->dma_chan;
26527 +       struct idxd_dma_chan *idxd_chan = wq->idxd_chan;
26528 +       struct dma_chan *chan = &idxd_chan->chan;
26529 +       struct idxd_dma_dev *idxd_dma = wq->idxd->idxd_dma;
26531 -       dma_async_device_channel_unregister(&wq->idxd->dma_dev, chan);
26532 +       dma_async_device_channel_unregister(&idxd_dma->dma, chan);
26533         list_del(&chan->device_node);
26534 +       kfree(wq->idxd_chan);
26535 +       wq->idxd_chan = NULL;
26536 +       put_device(&wq->conf_dev);
26538 diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
26539 index 76014c14f473..89daf746d121 100644
26540 --- a/drivers/dma/idxd/idxd.h
26541 +++ b/drivers/dma/idxd/idxd.h
26542 @@ -8,12 +8,16 @@
26543  #include <linux/percpu-rwsem.h>
26544  #include <linux/wait.h>
26545  #include <linux/cdev.h>
26546 +#include <linux/idr.h>
26547  #include "registers.h"
26549  #define IDXD_DRIVER_VERSION    "1.00"
26551  extern struct kmem_cache *idxd_desc_pool;
26553 +struct idxd_device;
26554 +struct idxd_wq;
26556  #define IDXD_REG_TIMEOUT       50
26557  #define IDXD_DRAIN_TIMEOUT     5000
26559 @@ -33,6 +37,7 @@ struct idxd_device_driver {
26560  struct idxd_irq_entry {
26561         struct idxd_device *idxd;
26562         int id;
26563 +       int vector;
26564         struct llist_head pending_llist;
26565         struct list_head work_list;
26566         /*
26567 @@ -75,10 +80,10 @@ enum idxd_wq_type {
26568  };
26570  struct idxd_cdev {
26571 +       struct idxd_wq *wq;
26572         struct cdev cdev;
26573 -       struct device *dev;
26574 +       struct device dev;
26575         int minor;
26576 -       struct wait_queue_head err_queue;
26577  };
26579  #define IDXD_ALLOCATED_BATCH_SIZE      128U
26580 @@ -96,10 +101,16 @@ enum idxd_complete_type {
26581         IDXD_COMPLETE_DEV_FAIL,
26582  };
26584 +struct idxd_dma_chan {
26585 +       struct dma_chan chan;
26586 +       struct idxd_wq *wq;
26587 +};
26588 +
26589  struct idxd_wq {
26590         void __iomem *portal;
26591         struct device conf_dev;
26592 -       struct idxd_cdev idxd_cdev;
26593 +       struct idxd_cdev *idxd_cdev;
26594 +       struct wait_queue_head err_queue;
26595         struct idxd_device *idxd;
26596         int id;
26597         enum idxd_wq_type type;
26598 @@ -125,7 +136,7 @@ struct idxd_wq {
26599         int compls_size;
26600         struct idxd_desc **descs;
26601         struct sbitmap_queue sbq;
26602 -       struct dma_chan dma_chan;
26603 +       struct idxd_dma_chan *idxd_chan;
26604         char name[WQ_NAME_SIZE + 1];
26605         u64 max_xfer_bytes;
26606         u32 max_batch_size;
26607 @@ -162,6 +173,11 @@ enum idxd_device_flag {
26608         IDXD_FLAG_PASID_ENABLED,
26609  };
26611 +struct idxd_dma_dev {
26612 +       struct idxd_device *idxd;
26613 +       struct dma_device dma;
26614 +};
26615 +
26616  struct idxd_device {
26617         enum idxd_type type;
26618         struct device conf_dev;
26619 @@ -178,9 +194,9 @@ struct idxd_device {
26621         spinlock_t dev_lock;    /* spinlock for device */
26622         struct completion *cmd_done;
26623 -       struct idxd_group *groups;
26624 -       struct idxd_wq *wqs;
26625 -       struct idxd_engine *engines;
26626 +       struct idxd_group **groups;
26627 +       struct idxd_wq **wqs;
26628 +       struct idxd_engine **engines;
26630         struct iommu_sva *sva;
26631         unsigned int pasid;
26632 @@ -206,11 +222,10 @@ struct idxd_device {
26634         union sw_err_reg sw_err;
26635         wait_queue_head_t cmd_waitq;
26636 -       struct msix_entry *msix_entries;
26637         int num_wq_irqs;
26638         struct idxd_irq_entry *irq_entries;
26640 -       struct dma_device dma_dev;
26641 +       struct idxd_dma_dev *idxd_dma;
26642         struct workqueue_struct *wq;
26643         struct work_struct work;
26644  };
26645 @@ -242,6 +257,43 @@ extern struct bus_type dsa_bus_type;
26646  extern struct bus_type iax_bus_type;
26648  extern bool support_enqcmd;
26649 +extern struct device_type dsa_device_type;
26650 +extern struct device_type iax_device_type;
26651 +extern struct device_type idxd_wq_device_type;
26652 +extern struct device_type idxd_engine_device_type;
26653 +extern struct device_type idxd_group_device_type;
26655 +static inline bool is_dsa_dev(struct device *dev)
26656 +{
26657 +       return dev->type == &dsa_device_type;
26658 +}
26659 +
26660 +static inline bool is_iax_dev(struct device *dev)
26661 +{
26662 +       return dev->type == &iax_device_type;
26663 +}
26664 +
26665 +static inline bool is_idxd_dev(struct device *dev)
26666 +{
26667 +       return is_dsa_dev(dev) || is_iax_dev(dev);
26668 +}
26669 +
26670 +static inline bool is_idxd_wq_dev(struct device *dev)
26671 +{
26672 +       return dev->type == &idxd_wq_device_type;
26673 +}
26674 +
26675 +static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
26676 +{
26677 +       if (wq->type == IDXD_WQT_KERNEL && strcmp(wq->name, "dmaengine") == 0)
26678 +               return true;
26679 +       return false;
26680 +}
26681 +
26682 +static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
26683 +{
26684 +       return wq->type == IDXD_WQT_USER;
26685 +}
26687  static inline bool wq_dedicated(struct idxd_wq *wq)
26689 @@ -279,18 +331,6 @@ static inline int idxd_get_wq_portal_full_offset(int wq_id,
26690         return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
26693 -static inline void idxd_set_type(struct idxd_device *idxd)
26694 -{
26695 -       struct pci_dev *pdev = idxd->pdev;
26696 -
26697 -       if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
26698 -               idxd->type = IDXD_TYPE_DSA;
26699 -       else if (pdev->device == PCI_DEVICE_ID_INTEL_IAX_SPR0)
26700 -               idxd->type = IDXD_TYPE_IAX;
26701 -       else
26702 -               idxd->type = IDXD_TYPE_UNKNOWN;
26703 -}
26704 -
26705  static inline void idxd_wq_get(struct idxd_wq *wq)
26707         wq->client_count++;
26708 @@ -306,14 +346,16 @@ static inline int idxd_wq_refcount(struct idxd_wq *wq)
26709         return wq->client_count;
26710  };
26712 +struct ida *idxd_ida(struct idxd_device *idxd);
26713  const char *idxd_get_dev_name(struct idxd_device *idxd);
26714  int idxd_register_bus_type(void);
26715  void idxd_unregister_bus_type(void);
26716 -int idxd_setup_sysfs(struct idxd_device *idxd);
26717 -void idxd_cleanup_sysfs(struct idxd_device *idxd);
26718 +int idxd_register_devices(struct idxd_device *idxd);
26719 +void idxd_unregister_devices(struct idxd_device *idxd);
26720  int idxd_register_driver(void);
26721  void idxd_unregister_driver(void);
26722  struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);
26723 +struct device_type *idxd_get_device_type(struct idxd_device *idxd);
26725  /* device interrupt control */
26726  void idxd_msix_perm_setup(struct idxd_device *idxd);
26727 @@ -363,7 +405,6 @@ void idxd_unregister_dma_channel(struct idxd_wq *wq);
26728  void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
26729  void idxd_dma_complete_txd(struct idxd_desc *desc,
26730                            enum idxd_complete_type comp_type);
26731 -dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx);
26733  /* cdev */
26734  int idxd_cdev_register(void);
26735 diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
26736 index 6584b0ec07d5..07cf7977a045 100644
26737 --- a/drivers/dma/idxd/init.c
26738 +++ b/drivers/dma/idxd/init.c
26739 @@ -34,8 +34,7 @@ MODULE_PARM_DESC(sva, "Toggle SVA support on/off");
26741  bool support_enqcmd;
26743 -static struct idr idxd_idrs[IDXD_TYPE_MAX];
26744 -static DEFINE_MUTEX(idxd_idr_lock);
26745 +static struct ida idxd_idas[IDXD_TYPE_MAX];
26747  static struct pci_device_id idxd_pci_tbl[] = {
26748         /* DSA ver 1.0 platforms */
26749 @@ -52,6 +51,11 @@ static char *idxd_name[] = {
26750         "iax"
26751  };
26753 +struct ida *idxd_ida(struct idxd_device *idxd)
26754 +{
26755 +       return &idxd_idas[idxd->type];
26756 +}
26757 +
26758  const char *idxd_get_dev_name(struct idxd_device *idxd)
26760         return idxd_name[idxd->type];
26761 @@ -61,7 +65,6 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
26763         struct pci_dev *pdev = idxd->pdev;
26764         struct device *dev = &pdev->dev;
26765 -       struct msix_entry *msix;
26766         struct idxd_irq_entry *irq_entry;
26767         int i, msixcnt;
26768         int rc = 0;
26769 @@ -69,23 +72,13 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
26770         msixcnt = pci_msix_vec_count(pdev);
26771         if (msixcnt < 0) {
26772                 dev_err(dev, "Not MSI-X interrupt capable.\n");
26773 -               goto err_no_irq;
26774 -       }
26776 -       idxd->msix_entries = devm_kzalloc(dev, sizeof(struct msix_entry) *
26777 -                       msixcnt, GFP_KERNEL);
26778 -       if (!idxd->msix_entries) {
26779 -               rc = -ENOMEM;
26780 -               goto err_no_irq;
26781 +               return -ENOSPC;
26782         }
26784 -       for (i = 0; i < msixcnt; i++)
26785 -               idxd->msix_entries[i].entry = i;
26787 -       rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
26788 -       if (rc) {
26789 -               dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt);
26790 -               goto err_no_irq;
26791 +       rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
26792 +       if (rc != msixcnt) {
26793 +               dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
26794 +               return -ENOSPC;
26795         }
26796         dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
26798 @@ -93,119 +86,236 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
26799          * We implement 1 completion list per MSI-X entry except for
26800          * entry 0, which is for errors and others.
26801          */
26802 -       idxd->irq_entries = devm_kcalloc(dev, msixcnt,
26803 -                                        sizeof(struct idxd_irq_entry),
26804 -                                        GFP_KERNEL);
26805 +       idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry),
26806 +                                        GFP_KERNEL, dev_to_node(dev));
26807         if (!idxd->irq_entries) {
26808                 rc = -ENOMEM;
26809 -               goto err_no_irq;
26810 +               goto err_irq_entries;
26811         }
26813         for (i = 0; i < msixcnt; i++) {
26814                 idxd->irq_entries[i].id = i;
26815                 idxd->irq_entries[i].idxd = idxd;
26816 +               idxd->irq_entries[i].vector = pci_irq_vector(pdev, i);
26817                 spin_lock_init(&idxd->irq_entries[i].list_lock);
26818         }
26820 -       msix = &idxd->msix_entries[0];
26821         irq_entry = &idxd->irq_entries[0];
26822 -       rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
26823 -                                      idxd_misc_thread, 0, "idxd-misc",
26824 -                                      irq_entry);
26825 +       rc = request_threaded_irq(irq_entry->vector, idxd_irq_handler, idxd_misc_thread,
26826 +                                 0, "idxd-misc", irq_entry);
26827         if (rc < 0) {
26828                 dev_err(dev, "Failed to allocate misc interrupt.\n");
26829 -               goto err_no_irq;
26830 +               goto err_misc_irq;
26831         }
26833 -       dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
26834 -               msix->vector);
26835 +       dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector);
26837         /* first MSI-X entry is not for wq interrupts */
26838         idxd->num_wq_irqs = msixcnt - 1;
26840         for (i = 1; i < msixcnt; i++) {
26841 -               msix = &idxd->msix_entries[i];
26842                 irq_entry = &idxd->irq_entries[i];
26844                 init_llist_head(&idxd->irq_entries[i].pending_llist);
26845                 INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
26846 -               rc = devm_request_threaded_irq(dev, msix->vector,
26847 -                                              idxd_irq_handler,
26848 -                                              idxd_wq_thread, 0,
26849 -                                              "idxd-portal", irq_entry);
26850 +               rc = request_threaded_irq(irq_entry->vector, idxd_irq_handler,
26851 +                                         idxd_wq_thread, 0, "idxd-portal", irq_entry);
26852                 if (rc < 0) {
26853 -                       dev_err(dev, "Failed to allocate irq %d.\n",
26854 -                               msix->vector);
26855 -                       goto err_no_irq;
26856 +                       dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector);
26857 +                       goto err_wq_irqs;
26858                 }
26859 -               dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
26860 -                       i, msix->vector);
26861 +               dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector);
26862         }
26864         idxd_unmask_error_interrupts(idxd);
26865         idxd_msix_perm_setup(idxd);
26866         return 0;
26868 - err_no_irq:
26869 + err_wq_irqs:
26870 +       while (--i >= 0) {
26871 +               irq_entry = &idxd->irq_entries[i];
26872 +               free_irq(irq_entry->vector, irq_entry);
26873 +       }
26874 + err_misc_irq:
26875         /* Disable error interrupt generation */
26876         idxd_mask_error_interrupts(idxd);
26877 -       pci_disable_msix(pdev);
26878 + err_irq_entries:
26879 +       pci_free_irq_vectors(pdev);
26880         dev_err(dev, "No usable interrupts\n");
26881         return rc;
26884 -static int idxd_setup_internals(struct idxd_device *idxd)
26885 +static int idxd_setup_wqs(struct idxd_device *idxd)
26887         struct device *dev = &idxd->pdev->dev;
26888 -       int i;
26890 -       init_waitqueue_head(&idxd->cmd_waitq);
26891 -       idxd->groups = devm_kcalloc(dev, idxd->max_groups,
26892 -                                   sizeof(struct idxd_group), GFP_KERNEL);
26893 -       if (!idxd->groups)
26894 -               return -ENOMEM;
26896 -       for (i = 0; i < idxd->max_groups; i++) {
26897 -               idxd->groups[i].idxd = idxd;
26898 -               idxd->groups[i].id = i;
26899 -               idxd->groups[i].tc_a = -1;
26900 -               idxd->groups[i].tc_b = -1;
26901 -       }
26902 +       struct idxd_wq *wq;
26903 +       int i, rc;
26905 -       idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
26906 -                                GFP_KERNEL);
26907 +       idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
26908 +                                GFP_KERNEL, dev_to_node(dev));
26909         if (!idxd->wqs)
26910                 return -ENOMEM;
26912 -       idxd->engines = devm_kcalloc(dev, idxd->max_engines,
26913 -                                    sizeof(struct idxd_engine), GFP_KERNEL);
26914 -       if (!idxd->engines)
26915 -               return -ENOMEM;
26917         for (i = 0; i < idxd->max_wqs; i++) {
26918 -               struct idxd_wq *wq = &idxd->wqs[i];
26919 +               wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
26920 +               if (!wq) {
26921 +                       rc = -ENOMEM;
26922 +                       goto err;
26923 +               }
26925                 wq->id = i;
26926                 wq->idxd = idxd;
26927 +               device_initialize(&wq->conf_dev);
26928 +               wq->conf_dev.parent = &idxd->conf_dev;
26929 +               wq->conf_dev.bus = idxd_get_bus_type(idxd);
26930 +               wq->conf_dev.type = &idxd_wq_device_type;
26931 +               rc = dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
26932 +               if (rc < 0) {
26933 +                       put_device(&wq->conf_dev);
26934 +                       goto err;
26935 +               }
26937                 mutex_init(&wq->wq_lock);
26938 -               wq->idxd_cdev.minor = -1;
26939 +               init_waitqueue_head(&wq->err_queue);
26940                 wq->max_xfer_bytes = idxd->max_xfer_bytes;
26941                 wq->max_batch_size = idxd->max_batch_size;
26942 -               wq->wqcfg = devm_kzalloc(dev, idxd->wqcfg_size, GFP_KERNEL);
26943 -               if (!wq->wqcfg)
26944 -                       return -ENOMEM;
26945 +               wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
26946 +               if (!wq->wqcfg) {
26947 +                       put_device(&wq->conf_dev);
26948 +                       rc = -ENOMEM;
26949 +                       goto err;
26950 +               }
26951 +               idxd->wqs[i] = wq;
26952         }
26954 +       return 0;
26956 + err:
26957 +       while (--i >= 0)
26958 +               put_device(&idxd->wqs[i]->conf_dev);
26959 +       return rc;
26962 +static int idxd_setup_engines(struct idxd_device *idxd)
26964 +       struct idxd_engine *engine;
26965 +       struct device *dev = &idxd->pdev->dev;
26966 +       int i, rc;
26968 +       idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
26969 +                                    GFP_KERNEL, dev_to_node(dev));
26970 +       if (!idxd->engines)
26971 +               return -ENOMEM;
26973         for (i = 0; i < idxd->max_engines; i++) {
26974 -               idxd->engines[i].idxd = idxd;
26975 -               idxd->engines[i].id = i;
26976 +               engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
26977 +               if (!engine) {
26978 +                       rc = -ENOMEM;
26979 +                       goto err;
26980 +               }
26982 +               engine->id = i;
26983 +               engine->idxd = idxd;
26984 +               device_initialize(&engine->conf_dev);
26985 +               engine->conf_dev.parent = &idxd->conf_dev;
26986 +               engine->conf_dev.type = &idxd_engine_device_type;
26987 +               rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
26988 +               if (rc < 0) {
26989 +                       put_device(&engine->conf_dev);
26990 +                       goto err;
26991 +               }
26993 +               idxd->engines[i] = engine;
26994         }
26996 -       idxd->wq = create_workqueue(dev_name(dev));
26997 -       if (!idxd->wq)
26998 +       return 0;
27000 + err:
27001 +       while (--i >= 0)
27002 +               put_device(&idxd->engines[i]->conf_dev);
27003 +       return rc;
27006 +static int idxd_setup_groups(struct idxd_device *idxd)
27008 +       struct device *dev = &idxd->pdev->dev;
27009 +       struct idxd_group *group;
27010 +       int i, rc;
27012 +       idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
27013 +                                   GFP_KERNEL, dev_to_node(dev));
27014 +       if (!idxd->groups)
27015                 return -ENOMEM;
27017 +       for (i = 0; i < idxd->max_groups; i++) {
27018 +               group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
27019 +               if (!group) {
27020 +                       rc = -ENOMEM;
27021 +                       goto err;
27022 +               }
27024 +               group->id = i;
27025 +               group->idxd = idxd;
27026 +               device_initialize(&group->conf_dev);
27027 +               group->conf_dev.parent = &idxd->conf_dev;
27028 +               group->conf_dev.bus = idxd_get_bus_type(idxd);
27029 +               group->conf_dev.type = &idxd_group_device_type;
27030 +               rc = dev_set_name(&group->conf_dev, "group%d.%d", idxd->id, group->id);
27031 +               if (rc < 0) {
27032 +                       put_device(&group->conf_dev);
27033 +                       goto err;
27034 +               }
27036 +               idxd->groups[i] = group;
27037 +               group->tc_a = -1;
27038 +               group->tc_b = -1;
27039 +       }
27041 +       return 0;
27043 + err:
27044 +       while (--i >= 0)
27045 +               put_device(&idxd->groups[i]->conf_dev);
27046 +       return rc;
27049 +static int idxd_setup_internals(struct idxd_device *idxd)
27051 +       struct device *dev = &idxd->pdev->dev;
27052 +       int rc, i;
27054 +       init_waitqueue_head(&idxd->cmd_waitq);
27056 +       rc = idxd_setup_wqs(idxd);
27057 +       if (rc < 0)
27058 +               return rc;
27060 +       rc = idxd_setup_engines(idxd);
27061 +       if (rc < 0)
27062 +               goto err_engine;
27064 +       rc = idxd_setup_groups(idxd);
27065 +       if (rc < 0)
27066 +               goto err_group;
27068 +       idxd->wq = create_workqueue(dev_name(dev));
27069 +       if (!idxd->wq) {
27070 +               rc = -ENOMEM;
27071 +               goto err_wkq_create;
27072 +       }
27074         return 0;
27076 + err_wkq_create:
27077 +       for (i = 0; i < idxd->max_groups; i++)
27078 +               put_device(&idxd->groups[i]->conf_dev);
27079 + err_group:
27080 +       for (i = 0; i < idxd->max_engines; i++)
27081 +               put_device(&idxd->engines[i]->conf_dev);
27082 + err_engine:
27083 +       for (i = 0; i < idxd->max_wqs; i++)
27084 +               put_device(&idxd->wqs[i]->conf_dev);
27085 +       return rc;
27088  static void idxd_read_table_offsets(struct idxd_device *idxd)
27089 @@ -275,16 +385,44 @@ static void idxd_read_caps(struct idxd_device *idxd)
27090         }
27093 +static inline void idxd_set_type(struct idxd_device *idxd)
27094 +{
27095 +       struct pci_dev *pdev = idxd->pdev;
27096 +
27097 +       if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
27098 +               idxd->type = IDXD_TYPE_DSA;
27099 +       else if (pdev->device == PCI_DEVICE_ID_INTEL_IAX_SPR0)
27100 +               idxd->type = IDXD_TYPE_IAX;
27101 +       else
27102 +               idxd->type = IDXD_TYPE_UNKNOWN;
27103 +}
27105  static struct idxd_device *idxd_alloc(struct pci_dev *pdev)
27107         struct device *dev = &pdev->dev;
27108         struct idxd_device *idxd;
27109 +       int rc;
27111 -       idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL);
27112 +       idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
27113         if (!idxd)
27114                 return NULL;
27116         idxd->pdev = pdev;
27117 +       idxd_set_type(idxd);
27118 +       idxd->id = ida_alloc(idxd_ida(idxd), GFP_KERNEL);
27119 +       if (idxd->id < 0)
27120 +               return NULL;
27122 +       device_initialize(&idxd->conf_dev);
27123 +       idxd->conf_dev.parent = dev;
27124 +       idxd->conf_dev.bus = idxd_get_bus_type(idxd);
27125 +       idxd->conf_dev.type = idxd_get_device_type(idxd);
27126 +       rc = dev_set_name(&idxd->conf_dev, "%s%d", idxd_get_dev_name(idxd), idxd->id);
27127 +       if (rc < 0) {
27128 +               put_device(&idxd->conf_dev);
27129 +               return NULL;
27130 +       }
27132         spin_lock_init(&idxd->dev_lock);
27134         return idxd;
27135 @@ -352,31 +490,20 @@ static int idxd_probe(struct idxd_device *idxd)
27137         rc = idxd_setup_internals(idxd);
27138         if (rc)
27139 -               goto err_setup;
27140 +               goto err;
27142         rc = idxd_setup_interrupts(idxd);
27143         if (rc)
27144 -               goto err_setup;
27145 +               goto err;
27147         dev_dbg(dev, "IDXD interrupt setup complete.\n");
27149 -       mutex_lock(&idxd_idr_lock);
27150 -       idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL);
27151 -       mutex_unlock(&idxd_idr_lock);
27152 -       if (idxd->id < 0) {
27153 -               rc = -ENOMEM;
27154 -               goto err_idr_fail;
27155 -       }
27157         idxd->major = idxd_cdev_get_major(idxd);
27159         dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
27160         return 0;
27162 - err_idr_fail:
27163 -       idxd_mask_error_interrupts(idxd);
27164 -       idxd_mask_msix_vectors(idxd);
27165 - err_setup:
27166 + err:
27167         if (device_pasid_enabled(idxd))
27168                 idxd_disable_system_pasid(idxd);
27169         return rc;
27170 @@ -396,34 +523,37 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
27171         struct idxd_device *idxd;
27172         int rc;
27174 -       rc = pcim_enable_device(pdev);
27175 +       rc = pci_enable_device(pdev);
27176         if (rc)
27177                 return rc;
27179         dev_dbg(dev, "Alloc IDXD context\n");
27180         idxd = idxd_alloc(pdev);
27181 -       if (!idxd)
27182 -               return -ENOMEM;
27183 +       if (!idxd) {
27184 +               rc = -ENOMEM;
27185 +               goto err_idxd_alloc;
27186 +       }
27188         dev_dbg(dev, "Mapping BARs\n");
27189 -       idxd->reg_base = pcim_iomap(pdev, IDXD_MMIO_BAR, 0);
27190 -       if (!idxd->reg_base)
27191 -               return -ENOMEM;
27192 +       idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
27193 +       if (!idxd->reg_base) {
27194 +               rc = -ENOMEM;
27195 +               goto err_iomap;
27196 +       }
27198         dev_dbg(dev, "Set DMA masks\n");
27199         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
27200         if (rc)
27201                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
27202         if (rc)
27203 -               return rc;
27204 +               goto err;
27206         rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
27207         if (rc)
27208                 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
27209         if (rc)
27210 -               return rc;
27211 +               goto err;
27213 -       idxd_set_type(idxd);
27215         idxd_type_init(idxd);
27217 @@ -435,13 +565,13 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
27218         rc = idxd_probe(idxd);
27219         if (rc) {
27220                 dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
27221 -               return -ENODEV;
27222 +               goto err;
27223         }
27225 -       rc = idxd_setup_sysfs(idxd);
27226 +       rc = idxd_register_devices(idxd);
27227         if (rc) {
27228                 dev_err(dev, "IDXD sysfs setup failed\n");
27229 -               return -ENODEV;
27230 +               goto err;
27231         }
27233         idxd->state = IDXD_DEV_CONF_READY;
27234 @@ -450,6 +580,14 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
27235                  idxd->hw.version);
27237         return 0;
27239 + err:
27240 +       pci_iounmap(pdev, idxd->reg_base);
27241 + err_iomap:
27242 +       put_device(&idxd->conf_dev);
27243 + err_idxd_alloc:
27244 +       pci_disable_device(pdev);
27245 +       return rc;
27248  static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
27249 @@ -495,7 +633,8 @@ static void idxd_shutdown(struct pci_dev *pdev)
27251         for (i = 0; i < msixcnt; i++) {
27252                 irq_entry = &idxd->irq_entries[i];
27253 -               synchronize_irq(idxd->msix_entries[i].vector);
27254 +               synchronize_irq(irq_entry->vector);
27255 +               free_irq(irq_entry->vector, irq_entry);
27256                 if (i == 0)
27257                         continue;
27258                 idxd_flush_pending_llist(irq_entry);
27259 @@ -503,6 +642,9 @@ static void idxd_shutdown(struct pci_dev *pdev)
27260         }
27262         idxd_msix_perm_clear(idxd);
27263 +       pci_free_irq_vectors(pdev);
27264 +       pci_iounmap(pdev, idxd->reg_base);
27265 +       pci_disable_device(pdev);
27266         destroy_workqueue(idxd->wq);
27269 @@ -511,13 +653,10 @@ static void idxd_remove(struct pci_dev *pdev)
27270         struct idxd_device *idxd = pci_get_drvdata(pdev);
27272         dev_dbg(&pdev->dev, "%s called\n", __func__);
27273 -       idxd_cleanup_sysfs(idxd);
27274         idxd_shutdown(pdev);
27275         if (device_pasid_enabled(idxd))
27276                 idxd_disable_system_pasid(idxd);
27277 -       mutex_lock(&idxd_idr_lock);
27278 -       idr_remove(&idxd_idrs[idxd->type], idxd->id);
27279 -       mutex_unlock(&idxd_idr_lock);
27280 +       idxd_unregister_devices(idxd);
27283  static struct pci_driver idxd_pci_driver = {
27284 @@ -547,7 +686,7 @@ static int __init idxd_init_module(void)
27285                 support_enqcmd = true;
27287         for (i = 0; i < IDXD_TYPE_MAX; i++)
27288 -               idr_init(&idxd_idrs[i]);
27289 +               ida_init(&idxd_idas[i]);
27291         err = idxd_register_bus_type();
27292         if (err < 0)
27293 diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
27294 index f1463fc58112..fc0781e3f36d 100644
27295 --- a/drivers/dma/idxd/irq.c
27296 +++ b/drivers/dma/idxd/irq.c
27297 @@ -45,7 +45,7 @@ static void idxd_device_reinit(struct work_struct *work)
27298                 goto out;
27300         for (i = 0; i < idxd->max_wqs; i++) {
27301 -               struct idxd_wq *wq = &idxd->wqs[i];
27302 +               struct idxd_wq *wq = idxd->wqs[i];
27304                 if (wq->state == IDXD_WQ_ENABLED) {
27305                         rc = idxd_wq_enable(wq);
27306 @@ -130,18 +130,18 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
27308                 if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
27309                         int id = idxd->sw_err.wq_idx;
27310 -                       struct idxd_wq *wq = &idxd->wqs[id];
27311 +                       struct idxd_wq *wq = idxd->wqs[id];
27313                         if (wq->type == IDXD_WQT_USER)
27314 -                               wake_up_interruptible(&wq->idxd_cdev.err_queue);
27315 +                               wake_up_interruptible(&wq->err_queue);
27316                 } else {
27317                         int i;
27319                         for (i = 0; i < idxd->max_wqs; i++) {
27320 -                               struct idxd_wq *wq = &idxd->wqs[i];
27321 +                               struct idxd_wq *wq = idxd->wqs[i];
27323                                 if (wq->type == IDXD_WQT_USER)
27324 -                                       wake_up_interruptible(&wq->idxd_cdev.err_queue);
27325 +                                       wake_up_interruptible(&wq->err_queue);
27326                         }
27327                 }
27329 diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
27330 index 18bf4d148989..9586b55abce5 100644
27331 --- a/drivers/dma/idxd/sysfs.c
27332 +++ b/drivers/dma/idxd/sysfs.c
27333 @@ -16,69 +16,6 @@ static char *idxd_wq_type_names[] = {
27334         [IDXD_WQT_USER]         = "user",
27335  };
27337 -static void idxd_conf_device_release(struct device *dev)
27338 -{
27339 -       dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
27340 -}
27341 -
27342 -static struct device_type idxd_group_device_type = {
27343 -       .name = "group",
27344 -       .release = idxd_conf_device_release,
27345 -};
27346 -
27347 -static struct device_type idxd_wq_device_type = {
27348 -       .name = "wq",
27349 -       .release = idxd_conf_device_release,
27350 -};
27351 -
27352 -static struct device_type idxd_engine_device_type = {
27353 -       .name = "engine",
27354 -       .release = idxd_conf_device_release,
27355 -};
27356 -
27357 -static struct device_type dsa_device_type = {
27358 -       .name = "dsa",
27359 -       .release = idxd_conf_device_release,
27360 -};
27361 -
27362 -static struct device_type iax_device_type = {
27363 -       .name = "iax",
27364 -       .release = idxd_conf_device_release,
27365 -};
27366 -
27367 -static inline bool is_dsa_dev(struct device *dev)
27368 -{
27369 -       return dev ? dev->type == &dsa_device_type : false;
27370 -}
27371 -
27372 -static inline bool is_iax_dev(struct device *dev)
27373 -{
27374 -       return dev ? dev->type == &iax_device_type : false;
27375 -}
27376 -
27377 -static inline bool is_idxd_dev(struct device *dev)
27378 -{
27379 -       return is_dsa_dev(dev) || is_iax_dev(dev);
27380 -}
27381 -
27382 -static inline bool is_idxd_wq_dev(struct device *dev)
27383 -{
27384 -       return dev ? dev->type == &idxd_wq_device_type : false;
27385 -}
27386 -
27387 -static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
27388 -{
27389 -       if (wq->type == IDXD_WQT_KERNEL &&
27390 -           strcmp(wq->name, "dmaengine") == 0)
27391 -               return true;
27392 -       return false;
27393 -}
27394 -
27395 -static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
27396 -{
27397 -       return wq->type == IDXD_WQT_USER;
27398 -}
27399 -
27400  static int idxd_config_bus_match(struct device *dev,
27401                                  struct device_driver *drv)
27403 @@ -322,7 +259,7 @@ static int idxd_config_bus_remove(struct device *dev)
27404                 dev_dbg(dev, "%s removing dev %s\n", __func__,
27405                         dev_name(&idxd->conf_dev));
27406                 for (i = 0; i < idxd->max_wqs; i++) {
27407 -                       struct idxd_wq *wq = &idxd->wqs[i];
27408 +                       struct idxd_wq *wq = idxd->wqs[i];
27410                         if (wq->state == IDXD_WQ_DISABLED)
27411                                 continue;
27412 @@ -334,7 +271,7 @@ static int idxd_config_bus_remove(struct device *dev)
27413                 idxd_unregister_dma_device(idxd);
27414                 rc = idxd_device_disable(idxd);
27415                 for (i = 0; i < idxd->max_wqs; i++) {
27416 -                       struct idxd_wq *wq = &idxd->wqs[i];
27417 +                       struct idxd_wq *wq = idxd->wqs[i];
27419                         mutex_lock(&wq->wq_lock);
27420                         idxd_wq_disable_cleanup(wq);
27421 @@ -405,7 +342,7 @@ struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
27422         return idxd_bus_types[idxd->type];
27425 -static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
27426 +struct device_type *idxd_get_device_type(struct idxd_device *idxd)
27428         if (idxd->type == IDXD_TYPE_DSA)
27429                 return &dsa_device_type;
27430 @@ -488,7 +425,7 @@ static ssize_t engine_group_id_store(struct device *dev,
27432         if (prevg)
27433                 prevg->num_engines--;
27434 -       engine->group = &idxd->groups[id];
27435 +       engine->group = idxd->groups[id];
27436         engine->group->num_engines++;
27438         return count;
27439 @@ -512,6 +449,19 @@ static const struct attribute_group *idxd_engine_attribute_groups[] = {
27440         NULL,
27441  };
27443 +static void idxd_conf_engine_release(struct device *dev)
27444 +{
27445 +       struct idxd_engine *engine = container_of(dev, struct idxd_engine, conf_dev);
27446 +
27447 +       kfree(engine);
27448 +}
27449 +
27450 +struct device_type idxd_engine_device_type = {
27451 +       .name = "engine",
27452 +       .release = idxd_conf_engine_release,
27453 +       .groups = idxd_engine_attribute_groups,
27454 +};
27455 +
27456  /* Group attributes */
27458  static void idxd_set_free_tokens(struct idxd_device *idxd)
27459 @@ -519,7 +469,7 @@ static void idxd_set_free_tokens(struct idxd_device *idxd)
27460         int i, tokens;
27462         for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
27463 -               struct idxd_group *g = &idxd->groups[i];
27464 +               struct idxd_group *g = idxd->groups[i];
27466                 tokens += g->tokens_reserved;
27467         }
27468 @@ -674,7 +624,7 @@ static ssize_t group_engines_show(struct device *dev,
27469         struct idxd_device *idxd = group->idxd;
27471         for (i = 0; i < idxd->max_engines; i++) {
27472 -               struct idxd_engine *engine = &idxd->engines[i];
27473 +               struct idxd_engine *engine = idxd->engines[i];
27475                 if (!engine->group)
27476                         continue;
27477 @@ -703,7 +653,7 @@ static ssize_t group_work_queues_show(struct device *dev,
27478         struct idxd_device *idxd = group->idxd;
27480         for (i = 0; i < idxd->max_wqs; i++) {
27481 -               struct idxd_wq *wq = &idxd->wqs[i];
27482 +               struct idxd_wq *wq = idxd->wqs[i];
27484                 if (!wq->group)
27485                         continue;
27486 @@ -824,6 +774,19 @@ static const struct attribute_group *idxd_group_attribute_groups[] = {
27487         NULL,
27488  };
27490 +static void idxd_conf_group_release(struct device *dev)
27491 +{
27492 +       struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev);
27493 +
27494 +       kfree(group);
27495 +}
27496 +
27497 +struct device_type idxd_group_device_type = {
27498 +       .name = "group",
27499 +       .release = idxd_conf_group_release,
27500 +       .groups = idxd_group_attribute_groups,
27501 +};
27502 +
27503  /* IDXD work queue attribs */
27504  static ssize_t wq_clients_show(struct device *dev,
27505                                struct device_attribute *attr, char *buf)
27506 @@ -896,7 +859,7 @@ static ssize_t wq_group_id_store(struct device *dev,
27507                 return count;
27508         }
27510 -       group = &idxd->groups[id];
27511 +       group = idxd->groups[id];
27512         prevg = wq->group;
27514         if (prevg)
27515 @@ -960,7 +923,7 @@ static int total_claimed_wq_size(struct idxd_device *idxd)
27516         int wq_size = 0;
27518         for (i = 0; i < idxd->max_wqs; i++) {
27519 -               struct idxd_wq *wq = &idxd->wqs[i];
27520 +               struct idxd_wq *wq = idxd->wqs[i];
27522                 wq_size += wq->size;
27523         }
27524 @@ -1206,8 +1169,16 @@ static ssize_t wq_cdev_minor_show(struct device *dev,
27525                                   struct device_attribute *attr, char *buf)
27527         struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
27528 +       int minor = -1;
27530 -       return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
27531 +       mutex_lock(&wq->wq_lock);
27532 +       if (wq->idxd_cdev)
27533 +               minor = wq->idxd_cdev->minor;
27534 +       mutex_unlock(&wq->wq_lock);
27536 +       if (minor == -1)
27537 +               return -ENXIO;
27538 +       return sysfs_emit(buf, "%d\n", minor);
27541  static struct device_attribute dev_attr_wq_cdev_minor =
27542 @@ -1356,6 +1327,20 @@ static const struct attribute_group *idxd_wq_attribute_groups[] = {
27543         NULL,
27544  };
27545  
27546 +static void idxd_conf_wq_release(struct device *dev)
27547 +{
27548 +       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
27549 +
27550 +       kfree(wq->wqcfg);
27551 +       kfree(wq);
27552 +}
27553 +
27554 +struct device_type idxd_wq_device_type = {
27555 +       .name = "wq",
27556 +       .release = idxd_conf_wq_release,
27557 +       .groups = idxd_wq_attribute_groups,
27558 +};
27559 +
27560  /* IDXD device attribs */
27561  static ssize_t version_show(struct device *dev, struct device_attribute *attr,
27562                             char *buf)
27563 @@ -1486,7 +1471,7 @@ static ssize_t clients_show(struct device *dev,
27565         spin_lock_irqsave(&idxd->dev_lock, flags);
27566         for (i = 0; i < idxd->max_wqs; i++) {
27567 -               struct idxd_wq *wq = &idxd->wqs[i];
27568 +               struct idxd_wq *wq = idxd->wqs[i];
27570                 count += wq->client_count;
27571         }
27572 @@ -1644,183 +1629,160 @@ static const struct attribute_group *idxd_attribute_groups[] = {
27573         NULL,
27574  };
27576 -static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
27577 +static void idxd_conf_device_release(struct device *dev)
27579 -       struct device *dev = &idxd->pdev->dev;
27580 -       int i, rc;
27581 +       struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
27583 +       kfree(idxd->groups);
27584 +       kfree(idxd->wqs);
27585 +       kfree(idxd->engines);
27586 +       kfree(idxd->irq_entries);
27587 +       ida_free(idxd_ida(idxd), idxd->id);
27588 +       kfree(idxd);
27591 +struct device_type dsa_device_type = {
27592 +       .name = "dsa",
27593 +       .release = idxd_conf_device_release,
27594 +       .groups = idxd_attribute_groups,
27597 +struct device_type iax_device_type = {
27598 +       .name = "iax",
27599 +       .release = idxd_conf_device_release,
27600 +       .groups = idxd_attribute_groups,
27603 +static int idxd_register_engine_devices(struct idxd_device *idxd)
27605 +       int i, j, rc;
27607         for (i = 0; i < idxd->max_engines; i++) {
27608 -               struct idxd_engine *engine = &idxd->engines[i];
27610 -               engine->conf_dev.parent = &idxd->conf_dev;
27611 -               dev_set_name(&engine->conf_dev, "engine%d.%d",
27612 -                            idxd->id, engine->id);
27613 -               engine->conf_dev.bus = idxd_get_bus_type(idxd);
27614 -               engine->conf_dev.groups = idxd_engine_attribute_groups;
27615 -               engine->conf_dev.type = &idxd_engine_device_type;
27616 -               dev_dbg(dev, "Engine device register: %s\n",
27617 -                       dev_name(&engine->conf_dev));
27618 -               rc = device_register(&engine->conf_dev);
27619 -               if (rc < 0) {
27620 -                       put_device(&engine->conf_dev);
27621 +               struct idxd_engine *engine = idxd->engines[i];
27623 +               rc = device_add(&engine->conf_dev);
27624 +               if (rc < 0)
27625                         goto cleanup;
27626 -               }
27627         }
27629         return 0;
27631  cleanup:
27632 -       while (i--) {
27633 -               struct idxd_engine *engine = &idxd->engines[i];
27634 +       j = i - 1;
27635 +       for (; i < idxd->max_engines; i++)
27636 +               put_device(&idxd->engines[i]->conf_dev);
27638 -               device_unregister(&engine->conf_dev);
27639 -       }
27640 +       while (j--)
27641 +               device_unregister(&idxd->engines[j]->conf_dev);
27642         return rc;
27645 -static int idxd_setup_group_sysfs(struct idxd_device *idxd)
27646 +static int idxd_register_group_devices(struct idxd_device *idxd)
27648 -       struct device *dev = &idxd->pdev->dev;
27649 -       int i, rc;
27650 +       int i, j, rc;
27652         for (i = 0; i < idxd->max_groups; i++) {
27653 -               struct idxd_group *group = &idxd->groups[i];
27655 -               group->conf_dev.parent = &idxd->conf_dev;
27656 -               dev_set_name(&group->conf_dev, "group%d.%d",
27657 -                            idxd->id, group->id);
27658 -               group->conf_dev.bus = idxd_get_bus_type(idxd);
27659 -               group->conf_dev.groups = idxd_group_attribute_groups;
27660 -               group->conf_dev.type = &idxd_group_device_type;
27661 -               dev_dbg(dev, "Group device register: %s\n",
27662 -                       dev_name(&group->conf_dev));
27663 -               rc = device_register(&group->conf_dev);
27664 -               if (rc < 0) {
27665 -                       put_device(&group->conf_dev);
27666 +               struct idxd_group *group = idxd->groups[i];
27668 +               rc = device_add(&group->conf_dev);
27669 +               if (rc < 0)
27670                         goto cleanup;
27671 -               }
27672         }
27674         return 0;
27676  cleanup:
27677 -       while (i--) {
27678 -               struct idxd_group *group = &idxd->groups[i];
27679 +       j = i - 1;
27680 +       for (; i < idxd->max_groups; i++)
27681 +               put_device(&idxd->groups[i]->conf_dev);
27683 -               device_unregister(&group->conf_dev);
27684 -       }
27685 +       while (j--)
27686 +               device_unregister(&idxd->groups[j]->conf_dev);
27687         return rc;
27690 -static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
27691 +static int idxd_register_wq_devices(struct idxd_device *idxd)
27693 -       struct device *dev = &idxd->pdev->dev;
27694 -       int i, rc;
27695 +       int i, rc, j;
27697         for (i = 0; i < idxd->max_wqs; i++) {
27698 -               struct idxd_wq *wq = &idxd->wqs[i];
27700 -               wq->conf_dev.parent = &idxd->conf_dev;
27701 -               dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
27702 -               wq->conf_dev.bus = idxd_get_bus_type(idxd);
27703 -               wq->conf_dev.groups = idxd_wq_attribute_groups;
27704 -               wq->conf_dev.type = &idxd_wq_device_type;
27705 -               dev_dbg(dev, "WQ device register: %s\n",
27706 -                       dev_name(&wq->conf_dev));
27707 -               rc = device_register(&wq->conf_dev);
27708 -               if (rc < 0) {
27709 -                       put_device(&wq->conf_dev);
27710 +               struct idxd_wq *wq = idxd->wqs[i];
27712 +               rc = device_add(&wq->conf_dev);
27713 +               if (rc < 0)
27714                         goto cleanup;
27715 -               }
27716         }
27718         return 0;
27720  cleanup:
27721 -       while (i--) {
27722 -               struct idxd_wq *wq = &idxd->wqs[i];
27723 +       j = i - 1;
27724 +       for (; i < idxd->max_wqs; i++)
27725 +               put_device(&idxd->wqs[i]->conf_dev);
27727 -               device_unregister(&wq->conf_dev);
27728 -       }
27729 +       while (j--)
27730 +               device_unregister(&idxd->wqs[j]->conf_dev);
27731         return rc;
27734 -static int idxd_setup_device_sysfs(struct idxd_device *idxd)
27735 +int idxd_register_devices(struct idxd_device *idxd)
27737         struct device *dev = &idxd->pdev->dev;
27738 -       int rc;
27739 -       char devname[IDXD_NAME_SIZE];
27741 -       sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
27742 -       idxd->conf_dev.parent = dev;
27743 -       dev_set_name(&idxd->conf_dev, "%s", devname);
27744 -       idxd->conf_dev.bus = idxd_get_bus_type(idxd);
27745 -       idxd->conf_dev.groups = idxd_attribute_groups;
27746 -       idxd->conf_dev.type = idxd_get_device_type(idxd);
27748 -       dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
27749 -       rc = device_register(&idxd->conf_dev);
27750 -       if (rc < 0) {
27751 -               put_device(&idxd->conf_dev);
27752 -               return rc;
27753 -       }
27754 +       int rc, i;
27756 -       return 0;
27759 -int idxd_setup_sysfs(struct idxd_device *idxd)
27761 -       struct device *dev = &idxd->pdev->dev;
27762 -       int rc;
27764 -       rc = idxd_setup_device_sysfs(idxd);
27765 -       if (rc < 0) {
27766 -               dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
27767 +       rc = device_add(&idxd->conf_dev);
27768 +       if (rc < 0)
27769                 return rc;
27770 -       }
27772 -       rc = idxd_setup_wq_sysfs(idxd);
27773 +       rc = idxd_register_wq_devices(idxd);
27774         if (rc < 0) {
27775 -               /* unregister conf dev */
27776 -               dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
27777 -               return rc;
27778 +               dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
27779 +               goto err_wq;
27780         }
27782 -       rc = idxd_setup_group_sysfs(idxd);
27783 +       rc = idxd_register_engine_devices(idxd);
27784         if (rc < 0) {
27785 -               /* unregister conf dev */
27786 -               dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
27787 -               return rc;
27788 +               dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
27789 +               goto err_engine;
27790         }
27792 -       rc = idxd_setup_engine_sysfs(idxd);
27793 +       rc = idxd_register_group_devices(idxd);
27794         if (rc < 0) {
27795 -               /* unregister conf dev */
27796 -               dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
27797 -               return rc;
27798 +               dev_dbg(dev, "Group device registering failed: %d\n", rc);
27799 +               goto err_group;
27800         }
27802         return 0;
27804 + err_group:
27805 +       for (i = 0; i < idxd->max_engines; i++)
27806 +               device_unregister(&idxd->engines[i]->conf_dev);
27807 + err_engine:
27808 +       for (i = 0; i < idxd->max_wqs; i++)
27809 +               device_unregister(&idxd->wqs[i]->conf_dev);
27810 + err_wq:
27811 +       device_del(&idxd->conf_dev);
27812 +       return rc;
27815 -void idxd_cleanup_sysfs(struct idxd_device *idxd)
27816 +void idxd_unregister_devices(struct idxd_device *idxd)
27818         int i;
27820         for (i = 0; i < idxd->max_wqs; i++) {
27821 -               struct idxd_wq *wq = &idxd->wqs[i];
27822 +               struct idxd_wq *wq = idxd->wqs[i];
27824                 device_unregister(&wq->conf_dev);
27825         }
27827         for (i = 0; i < idxd->max_engines; i++) {
27828 -               struct idxd_engine *engine = &idxd->engines[i];
27829 +               struct idxd_engine *engine = idxd->engines[i];
27831                 device_unregister(&engine->conf_dev);
27832         }
27834         for (i = 0; i < idxd->max_groups; i++) {
27835 -               struct idxd_group *group = &idxd->groups[i];
27836 +               struct idxd_group *group = idxd->groups[i];
27838                 device_unregister(&group->conf_dev);
27839         }
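The idxd sysfs.c hunks above all follow one theme: workqueues, engines and groups move from arrays embedded in struct idxd_device to individually allocated objects, so each object's storage is tied to the driver-core refcount through a device_type release callback. A minimal user-space sketch of that container_of() release idiom (struct names here are stand-ins, not the driver's):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-ins for the kernel types; illustrative only. */
struct device { const char *name; };

struct idxd_wq_like {
        int id;
        struct device conf_dev;   /* embedded, as in struct idxd_wq */
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Release callback: recover the outer object and free it. */
static void wq_release(struct device *dev)
{
        struct idxd_wq_like *wq = container_of(dev, struct idxd_wq_like, conf_dev);

        printf("releasing wq %d\n", wq->id);
        free(wq);
}

int main(void)
{
        struct idxd_wq_like *wq = malloc(sizeof(*wq));

        wq->id = 0;
        wq->conf_dev.name = "wq0.0";
        wq_release(&wq->conf_dev);   /* the driver core would call this at the last put */
        return 0;
}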
27840 diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
27841 index aae82db542a5..76aacbac5869 100644
27842 --- a/drivers/extcon/extcon-arizona.c
27843 +++ b/drivers/extcon/extcon-arizona.c
27844 @@ -601,7 +601,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
27845         struct arizona *arizona = info->arizona;
27846         int id_gpio = arizona->pdata.hpdet_id_gpio;
27847         unsigned int report = EXTCON_JACK_HEADPHONE;
27848 -       int ret, reading;
27849 +       int ret, reading, state;
27850         bool mic = false;
27852         mutex_lock(&info->lock);
27853 @@ -614,12 +614,11 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
27854         }
27856         /* If the cable was removed while measuring ignore the result */
27857 -       ret = extcon_get_state(info->edev, EXTCON_MECHANICAL);
27858 -       if (ret < 0) {
27859 -               dev_err(arizona->dev, "Failed to check cable state: %d\n",
27860 -                       ret);
27861 +       state = extcon_get_state(info->edev, EXTCON_MECHANICAL);
27862 +       if (state < 0) {
27863 +               dev_err(arizona->dev, "Failed to check cable state: %d\n", state);
27864                 goto out;
27865 -       } else if (!ret) {
27866 +       } else if (!state) {
27867                 dev_dbg(arizona->dev, "Ignoring HPDET for removed cable\n");
27868                 goto done;
27869         }
27870 @@ -667,7 +666,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
27871                 gpio_set_value_cansleep(id_gpio, 0);
27873         /* If we have a mic then reenable MICDET */
27874 -       if (mic || info->mic)
27875 +       if (state && (mic || info->mic))
27876                 arizona_start_mic(info);
27878         if (info->hpdet_active) {
27879 @@ -675,7 +674,9 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
27880                 info->hpdet_active = false;
27881         }
27883 -       info->hpdet_done = true;
27884 +       /* Do not set hpdet_done when the cable has been unplugged */
27885 +       if (state)
27886 +               info->hpdet_done = true;
27888  out:
27889         mutex_unlock(&info->lock);
27890 @@ -1759,25 +1760,6 @@ static int arizona_extcon_remove(struct platform_device *pdev)
27891         bool change;
27892         int ret;
27894 -       ret = regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
27895 -                                      ARIZONA_MICD_ENA, 0,
27896 -                                      &change);
27897 -       if (ret < 0) {
27898 -               dev_err(&pdev->dev, "Failed to disable micd on remove: %d\n",
27899 -                       ret);
27900 -       } else if (change) {
27901 -               regulator_disable(info->micvdd);
27902 -               pm_runtime_put(info->dev);
27903 -       }
27905 -       gpiod_put(info->micd_pol_gpio);
27907 -       pm_runtime_disable(&pdev->dev);
27909 -       regmap_update_bits(arizona->regmap,
27910 -                          ARIZONA_MICD_CLAMP_CONTROL,
27911 -                          ARIZONA_MICD_CLAMP_MODE_MASK, 0);
27913         if (info->micd_clamp) {
27914                 jack_irq_rise = ARIZONA_IRQ_MICD_CLAMP_RISE;
27915                 jack_irq_fall = ARIZONA_IRQ_MICD_CLAMP_FALL;
27916 @@ -1793,10 +1775,31 @@ static int arizona_extcon_remove(struct platform_device *pdev)
27917         arizona_free_irq(arizona, jack_irq_rise, info);
27918         arizona_free_irq(arizona, jack_irq_fall, info);
27919         cancel_delayed_work_sync(&info->hpdet_work);
27920 +       cancel_delayed_work_sync(&info->micd_detect_work);
27921 +       cancel_delayed_work_sync(&info->micd_timeout_work);
27923 +       ret = regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
27924 +                                      ARIZONA_MICD_ENA, 0,
27925 +                                      &change);
27926 +       if (ret < 0) {
27927 +               dev_err(&pdev->dev, "Failed to disable micd on remove: %d\n",
27928 +                       ret);
27929 +       } else if (change) {
27930 +               regulator_disable(info->micvdd);
27931 +               pm_runtime_put(info->dev);
27932 +       }
27934 +       regmap_update_bits(arizona->regmap,
27935 +                          ARIZONA_MICD_CLAMP_CONTROL,
27936 +                          ARIZONA_MICD_CLAMP_MODE_MASK, 0);
27937         regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_ANALOGUE,
27938                            ARIZONA_JD1_ENA, 0);
27939         arizona_clk32k_disable(arizona);
27941 +       gpiod_put(info->micd_pol_gpio);
27943 +       pm_runtime_disable(&pdev->dev);
27945         return 0;
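The extcon-arizona remove path is reordered so that interrupt sources and delayed work are quiesced before the regulators, clamp and GPIO they may touch are released. The principle, as a compilable sketch with hypothetical helpers standing in for the driver's steps:

#include <stdio.h>

/* Hypothetical stand-ins for the driver's teardown steps. */
static void stop_async_work(void)   { puts("1: no new callbacks can run"); }
static void disable_hardware(void)  { puts("2: no new events can fire"); }
static void release_resources(void) { puts("3: now safe to free"); }

/* Quiesce first, then release: a late-running work item can no
 * longer dereference what step 3 frees.
 */
int main(void)
{
        stop_async_work();
        disable_hardware();
        release_resources();
        return 0;
}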
27948 diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
27949 index 3f14dffb9669..5dd19dbd67a3 100644
27950 --- a/drivers/firmware/Kconfig
27951 +++ b/drivers/firmware/Kconfig
27952 @@ -237,6 +237,7 @@ config INTEL_STRATIX10_RSU
27953  config QCOM_SCM
27954         bool
27955         depends on ARM || ARM64
27956 +       depends on HAVE_ARM_SMCCC
27957         select RESET_CONTROLLER
27959  config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
27960 diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
27961 index d0dee37ad522..4ceba5ef7895 100644
27962 --- a/drivers/firmware/arm_scpi.c
27963 +++ b/drivers/firmware/arm_scpi.c
27964 @@ -552,8 +552,10 @@ static unsigned long scpi_clk_get_val(u16 clk_id)
27966         ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id,
27967                                 sizeof(le_clk_id), &rate, sizeof(rate));
27968 +       if (ret)
27969 +               return 0;
27971 -       return ret ? ret : le32_to_cpu(rate);
27972 +       return le32_to_cpu(rate);
27975  static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
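The arm_scpi fix matters because scpi_clk_get_val() returns unsigned long: a negative errno smuggled through that return type becomes an absurdly large clock rate instead of an error. A standalone demonstration (values illustrative):

#include <stdio.h>

/* Buggy shape: the errno is converted through the unsigned return
 * type, so -22 comes out as 4294967274 (~4.29 GHz).
 */
static unsigned long get_rate_buggy(int err, unsigned int rate)
{
        return err ? err : rate;
}

/* Fixed shape, as in the hunk: report 0 on failure. */
static unsigned long get_rate_fixed(int err, unsigned int rate)
{
        if (err)
                return 0;
        return rate;
}

int main(void)
{
        printf("buggy: %lu\n", get_rate_buggy(-22, 1000000));
        printf("fixed: %lu\n", get_rate_fixed(-22, 1000000));
        return 0;
}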
27976 diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
27977 index c23466e05e60..d0537573501e 100644
27978 --- a/drivers/firmware/efi/libstub/Makefile
27979 +++ b/drivers/firmware/efi/libstub/Makefile
27980 @@ -13,7 +13,8 @@ cflags-$(CONFIG_X86)          += -m$(BITS) -D__KERNEL__ \
27981                                    -Wno-pointer-sign \
27982                                    $(call cc-disable-warning, address-of-packed-member) \
27983                                    $(call cc-disable-warning, gnu) \
27984 -                                  -fno-asynchronous-unwind-tables
27985 +                                  -fno-asynchronous-unwind-tables \
27986 +                                  $(CLANG_FLAGS)
27988  # arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
27989  # disable the stackleak plugin
27990 diff --git a/drivers/firmware/qcom_scm-smc.c b/drivers/firmware/qcom_scm-smc.c
27991 index 497c13ba98d6..d111833364ba 100644
27992 --- a/drivers/firmware/qcom_scm-smc.c
27993 +++ b/drivers/firmware/qcom_scm-smc.c
27994 @@ -77,8 +77,10 @@ static void __scm_smc_do(const struct arm_smccc_args *smc,
27995         }  while (res->a0 == QCOM_SCM_V2_EBUSY);
27998 -int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
27999 -                struct qcom_scm_res *res, bool atomic)
28001 +int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
28002 +                  enum qcom_scm_convention qcom_convention,
28003 +                  struct qcom_scm_res *res, bool atomic)
28005         int arglen = desc->arginfo & 0xf;
28006         int i;
28007 @@ -87,9 +89,8 @@ int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
28008         size_t alloc_len;
28009         gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
28010         u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL;
28011 -       u32 qcom_smccc_convention =
28012 -                       (qcom_scm_convention == SMC_CONVENTION_ARM_32) ?
28013 -                       ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
28014 +       u32 qcom_smccc_convention = (qcom_convention == SMC_CONVENTION_ARM_32) ?
28015 +                                   ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
28016         struct arm_smccc_res smc_res;
28017         struct arm_smccc_args smc = {0};
28019 @@ -148,4 +149,5 @@ int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
28020         }
28022         return (long)smc_res.a0 ? qcom_scm_remap_error(smc_res.a0) : 0;
28025 diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
28026 index f57779fc7ee9..9ac84b5d6ce0 100644
28027 --- a/drivers/firmware/qcom_scm.c
28028 +++ b/drivers/firmware/qcom_scm.c
28029 @@ -113,14 +113,10 @@ static void qcom_scm_clk_disable(void)
28030         clk_disable_unprepare(__scm->bus_clk);
28033 -static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
28034 -                                       u32 cmd_id);
28035 +enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
28036 +static DEFINE_SPINLOCK(scm_query_lock);
28038 -enum qcom_scm_convention qcom_scm_convention;
28039 -static bool has_queried __read_mostly;
28040 -static DEFINE_SPINLOCK(query_lock);
28042 -static void __query_convention(void)
28043 +static enum qcom_scm_convention __get_convention(void)
28045         unsigned long flags;
28046         struct qcom_scm_desc desc = {
28047 @@ -133,36 +129,50 @@ static void __query_convention(void)
28048                 .owner = ARM_SMCCC_OWNER_SIP,
28049         };
28050         struct qcom_scm_res res;
28051 +       enum qcom_scm_convention probed_convention;
28052         int ret;
28053 +       bool forced = false;
28055 -       spin_lock_irqsave(&query_lock, flags);
28056 -       if (has_queried)
28057 -               goto out;
28058 +       if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
28059 +               return qcom_scm_convention;
28061 -       qcom_scm_convention = SMC_CONVENTION_ARM_64;
28062 -       // Device isn't required as there is only one argument - no device
28063 -       // needed to dma_map_single to secure world
28064 -       ret = scm_smc_call(NULL, &desc, &res, true);
28065 +       /*
28066 +        * Device isn't required as there is only one argument - no device
28067 +        * needed to dma_map_single to secure world
28068 +        */
28069 +       probed_convention = SMC_CONVENTION_ARM_64;
28070 +       ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
28071         if (!ret && res.result[0] == 1)
28072 -               goto out;
28073 +               goto found;
28075 +       /*
28076 +        * Some SC7180 firmwares didn't implement the
28077 +        * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing ARM_64
28078 +        * calling conventions on these firmwares. Luckily we don't make any
28079 +        * early calls into the firmware on these SoCs so the device pointer
28080 +        * will be valid here to check if the compatible matches.
28081 +        */
28082 +       if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
28083 +               forced = true;
28084 +               goto found;
28085 +       }
28087 -       qcom_scm_convention = SMC_CONVENTION_ARM_32;
28088 -       ret = scm_smc_call(NULL, &desc, &res, true);
28089 +       probed_convention = SMC_CONVENTION_ARM_32;
28090 +       ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
28091         if (!ret && res.result[0] == 1)
28092 -               goto out;
28094 -       qcom_scm_convention = SMC_CONVENTION_LEGACY;
28095 -out:
28096 -       has_queried = true;
28097 -       spin_unlock_irqrestore(&query_lock, flags);
28098 -       pr_info("qcom_scm: convention: %s\n",
28099 -               qcom_scm_convention_names[qcom_scm_convention]);
28101 +               goto found;
28103 +       probed_convention = SMC_CONVENTION_LEGACY;
28104 +found:
28105 +       spin_lock_irqsave(&scm_query_lock, flags);
28106 +       if (probed_convention != qcom_scm_convention) {
28107 +               qcom_scm_convention = probed_convention;
28108 +               pr_info("qcom_scm: convention: %s%s\n",
28109 +                       qcom_scm_convention_names[qcom_scm_convention],
28110 +                       forced ? " (forced)" : "");
28111 +       }
28112 +       spin_unlock_irqrestore(&scm_query_lock, flags);
28114 -static inline enum qcom_scm_convention __get_convention(void)
28116 -       if (unlikely(!has_queried))
28117 -               __query_convention();
28118         return qcom_scm_convention;
28121 @@ -219,8 +229,8 @@ static int qcom_scm_call_atomic(struct device *dev,
28122         }
28125 -static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
28126 -                                       u32 cmd_id)
28127 +static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
28128 +                                        u32 cmd_id)
28130         int ret;
28131         struct qcom_scm_desc desc = {
28132 @@ -247,7 +257,7 @@ static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
28134         ret = qcom_scm_call(dev, &desc, &res);
28136 -       return ret ? : res.result[0];
28137 +       return ret ? false : !!res.result[0];
28140  /**
28141 @@ -585,9 +595,8 @@ bool qcom_scm_pas_supported(u32 peripheral)
28142         };
28143         struct qcom_scm_res res;
28145 -       ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
28146 -                                          QCOM_SCM_PIL_PAS_IS_SUPPORTED);
28147 -       if (ret <= 0)
28148 +       if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
28149 +                                         QCOM_SCM_PIL_PAS_IS_SUPPORTED))
28150                 return false;
28152         ret = qcom_scm_call(__scm->dev, &desc, &res);
28153 @@ -1060,17 +1069,18 @@ EXPORT_SYMBOL(qcom_scm_ice_set_key);
28154   */
28155  bool qcom_scm_hdcp_available(void)
28157 +       bool avail;
28158         int ret = qcom_scm_clk_enable();
28160         if (ret)
28161                 return ret;
28163 -       ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
28164 +       avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
28165                                                 QCOM_SCM_HDCP_INVOKE);
28167         qcom_scm_clk_disable();
28169 -       return ret > 0;
28170 +       return avail;
28172  EXPORT_SYMBOL(qcom_scm_hdcp_available);
28174 @@ -1242,7 +1252,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
28175         __scm = scm;
28176         __scm->dev = &pdev->dev;
28178 -       __query_convention();
28179 +       __get_convention();
28181         /*
28182          * If requested enable "download mode", from this point on warmboot
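The rewritten __get_convention() is probe-once caching: the first caller detects the SMC calling convention, publishes it, and every later call takes the unlocked fast path. A user-space sketch of the shape (the lock and the real SMC probe are elided; names are illustrative):

#include <stdio.h>

enum convention { UNKNOWN, ARM_64, ARM_32, LEGACY };

static enum convention cached = UNKNOWN;

/* Hypothetical probe: in the driver this is an SMC call tried per
 * convention, falling back 64 -> 32 -> legacy.
 */
static enum convention probe(void)
{
        return ARM_64;
}

static enum convention get_convention(void)
{
        if (cached != UNKNOWN)      /* fast path after the first call */
                return cached;
        cached = probe();           /* the driver publishes under a spinlock */
        return cached;
}

int main(void)
{
        printf("first: %d, second: %d\n", get_convention(), get_convention());
        return 0;
}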
28183 diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
28184 index 95cd1ac30ab0..632fe3142462 100644
28185 --- a/drivers/firmware/qcom_scm.h
28186 +++ b/drivers/firmware/qcom_scm.h
28187 @@ -61,8 +61,11 @@ struct qcom_scm_res {
28188  };
28190  #define SCM_SMC_FNID(s, c)     ((((s) & 0xFF) << 8) | ((c) & 0xFF))
28191 -extern int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
28192 -                       struct qcom_scm_res *res, bool atomic);
28193 +extern int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
28194 +                         enum qcom_scm_convention qcom_convention,
28195 +                         struct qcom_scm_res *res, bool atomic);
28196 +#define scm_smc_call(dev, desc, res, atomic) \
28197 +       __scm_smc_call((dev), (desc), qcom_scm_convention, (res), (atomic))
28199  #define SCM_LEGACY_FNID(s, c)  (((s) << 10) | ((c) & 0x3ff))
28200  extern int scm_legacy_call_atomic(struct device *dev,
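The header change keeps every existing scm_smc_call() site source-compatible by turning it into a macro that forwards the globally detected convention, while the probe path calls __scm_smc_call() with an explicit one. The same trick in miniature (names hypothetical):

#include <stdio.h>

static int current_mode = 64;   /* plays the role of the detected convention */

static int do_call(int mode, int arg)
{
        printf("call in %d-bit mode, arg=%d\n", mode, arg);
        return 0;
}

/* Old entry point becomes a macro supplying the global mode. */
#define call(arg) do_call(current_mode, (arg))

int main(void)
{
        call(1);            /* unchanged call sites use the detected mode */
        do_call(32, 2);     /* probe path: explicit mode */
        return 0;
}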
28201 diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
28202 index 7eb9958662dd..83082e2f2e44 100644
28203 --- a/drivers/firmware/xilinx/zynqmp.c
28204 +++ b/drivers/firmware/xilinx/zynqmp.c
28205 @@ -2,7 +2,7 @@
28206  /*
28207   * Xilinx Zynq MPSoC Firmware layer
28208   *
28209 - *  Copyright (C) 2014-2020 Xilinx, Inc.
28210 + *  Copyright (C) 2014-2021 Xilinx, Inc.
28211   *
28212   *  Michal Simek <michal.simek@xilinx.com>
28213   *  Davorin Mista <davorin.mista@aggios.com>
28214 @@ -1280,12 +1280,13 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
28215  static int zynqmp_firmware_remove(struct platform_device *pdev)
28217         struct pm_api_feature_data *feature_data;
28218 +       struct hlist_node *tmp;
28219         int i;
28221         mfd_remove_devices(&pdev->dev);
28222         zynqmp_pm_api_debugfs_exit();
28224 -       hash_for_each(pm_api_features_map, i, feature_data, hentry) {
28225 +       hash_for_each_safe(pm_api_features_map, i, tmp, feature_data, hentry) {
28226                 hash_del(&feature_data->hentry);
28227                 kfree(feature_data);
28228         }
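The zynqmp fix swaps hash_for_each() for hash_for_each_safe() because the loop body frees the entry it is standing on; the _safe variant caches the successor before the body runs. The same idea on a plain list, runnable stand-alone:

#include <stdio.h>
#include <stdlib.h>

struct node { int v; struct node *next; };

/* Save the successor before freeing the current node, exactly what
 * the _safe iterators do with their extra cursor argument.
 */
static void free_all(struct node *head)
{
        struct node *n = head, *tmp;

        while (n) {
                tmp = n->next;   /* grab the successor first */
                free(n);         /* now freeing n cannot corrupt the walk */
                n = tmp;
        }
}

int main(void)
{
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));
                n->v = i;
                n->next = head;
                head = n;
        }
        free_all(head);
        puts("done");
        return 0;
}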
28229 diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
28230 index 04e47e266f26..b44523ea8c91 100644
28231 --- a/drivers/fpga/dfl-pci.c
28232 +++ b/drivers/fpga/dfl-pci.c
28233 @@ -69,14 +69,16 @@ static void cci_pci_free_irq(struct pci_dev *pcidev)
28236  /* PCI Device ID */
28237 -#define PCIE_DEVICE_ID_PF_INT_5_X      0xBCBD
28238 -#define PCIE_DEVICE_ID_PF_INT_6_X      0xBCC0
28239 -#define PCIE_DEVICE_ID_PF_DSC_1_X      0x09C4
28240 -#define PCIE_DEVICE_ID_INTEL_PAC_N3000 0x0B30
28241 +#define PCIE_DEVICE_ID_PF_INT_5_X              0xBCBD
28242 +#define PCIE_DEVICE_ID_PF_INT_6_X              0xBCC0
28243 +#define PCIE_DEVICE_ID_PF_DSC_1_X              0x09C4
28244 +#define PCIE_DEVICE_ID_INTEL_PAC_N3000         0x0B30
28245 +#define PCIE_DEVICE_ID_INTEL_PAC_D5005         0x0B2B
28246  /* VF Device */
28247 -#define PCIE_DEVICE_ID_VF_INT_5_X      0xBCBF
28248 -#define PCIE_DEVICE_ID_VF_INT_6_X      0xBCC1
28249 -#define PCIE_DEVICE_ID_VF_DSC_1_X      0x09C5
28250 +#define PCIE_DEVICE_ID_VF_INT_5_X              0xBCBF
28251 +#define PCIE_DEVICE_ID_VF_INT_6_X              0xBCC1
28252 +#define PCIE_DEVICE_ID_VF_DSC_1_X              0x09C5
28253 +#define PCIE_DEVICE_ID_INTEL_PAC_D5005_VF      0x0B2C
28255  static struct pci_device_id cci_pcie_id_tbl[] = {
28256         {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X),},
28257 @@ -86,6 +88,8 @@ static struct pci_device_id cci_pcie_id_tbl[] = {
28258         {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X),},
28259         {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X),},
28260         {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_N3000),},
28261 +       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005),},
28262 +       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005_VF),},
28263         {0,}
28264  };
28265  MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl);
28266 diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c
28267 index 27defa98092d..fee4d0abf6bf 100644
28268 --- a/drivers/fpga/xilinx-spi.c
28269 +++ b/drivers/fpga/xilinx-spi.c
28270 @@ -233,25 +233,19 @@ static int xilinx_spi_probe(struct spi_device *spi)
28272         /* PROGRAM_B is active low */
28273         conf->prog_b = devm_gpiod_get(&spi->dev, "prog_b", GPIOD_OUT_LOW);
28274 -       if (IS_ERR(conf->prog_b)) {
28275 -               dev_err(&spi->dev, "Failed to get PROGRAM_B gpio: %ld\n",
28276 -                       PTR_ERR(conf->prog_b));
28277 -               return PTR_ERR(conf->prog_b);
28278 -       }
28279 +       if (IS_ERR(conf->prog_b))
28280 +               return dev_err_probe(&spi->dev, PTR_ERR(conf->prog_b),
28281 +                                    "Failed to get PROGRAM_B gpio\n");
28283         conf->init_b = devm_gpiod_get_optional(&spi->dev, "init-b", GPIOD_IN);
28284 -       if (IS_ERR(conf->init_b)) {
28285 -               dev_err(&spi->dev, "Failed to get INIT_B gpio: %ld\n",
28286 -                       PTR_ERR(conf->init_b));
28287 -               return PTR_ERR(conf->init_b);
28288 -       }
28289 +       if (IS_ERR(conf->init_b))
28290 +               return dev_err_probe(&spi->dev, PTR_ERR(conf->init_b),
28291 +                                    "Failed to get INIT_B gpio\n");
28293         conf->done = devm_gpiod_get(&spi->dev, "done", GPIOD_IN);
28294 -       if (IS_ERR(conf->done)) {
28295 -               dev_err(&spi->dev, "Failed to get DONE gpio: %ld\n",
28296 -                       PTR_ERR(conf->done));
28297 -               return PTR_ERR(conf->done);
28298 -       }
28299 +       if (IS_ERR(conf->done))
28300 +               return dev_err_probe(&spi->dev, PTR_ERR(conf->done),
28301 +                                    "Failed to get DONE gpio\n");
28303         mgr = devm_fpga_mgr_create(&spi->dev,
28304                                    "Xilinx Slave Serial FPGA Manager",
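dev_err_probe() collapses the log-then-return-PTR_ERR boilerplate into one statement and stays quiet for -EPROBE_DEFER. A user-space model of the behaviour (the real helper takes a struct device * and a printf-style format, and records a deferral reason rather than logging):

#include <stdio.h>

#define EPROBE_DEFER 517

/* Model of what dev_err_probe() buys: one call that both logs
 * (quietly for probe deferral) and returns the error.
 */
static int my_err_probe(const char *dev, int err, const char *msg)
{
        if (err != -EPROBE_DEFER)
                fprintf(stderr, "%s: %s: error %d\n", dev, msg, err);
        return err;
}

int main(void)
{
        int ret = my_err_probe("spi0.0", -2, "Failed to get DONE gpio");

        return ret ? 1 : 0;
}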
28305 diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
28306 index 1bd9e44df718..05974b760796 100644
28307 --- a/drivers/gpio/gpio-tegra186.c
28308 +++ b/drivers/gpio/gpio-tegra186.c
28309 @@ -444,16 +444,6 @@ static int tegra186_irq_set_wake(struct irq_data *data, unsigned int on)
28310         return 0;
28313 -static int tegra186_irq_set_affinity(struct irq_data *data,
28314 -                                    const struct cpumask *dest,
28315 -                                    bool force)
28317 -       if (data->parent_data)
28318 -               return irq_chip_set_affinity_parent(data, dest, force);
28320 -       return -EINVAL;
28323  static void tegra186_gpio_irq(struct irq_desc *desc)
28325         struct tegra_gpio *gpio = irq_desc_get_handler_data(desc);
28326 @@ -700,7 +690,6 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
28327         gpio->intc.irq_unmask = tegra186_irq_unmask;
28328         gpio->intc.irq_set_type = tegra186_irq_set_type;
28329         gpio->intc.irq_set_wake = tegra186_irq_set_wake;
28330 -       gpio->intc.irq_set_affinity = tegra186_irq_set_affinity;
28332         irq = &gpio->gpio.irq;
28333         irq->chip = &gpio->intc;
28334 diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
28335 index 1aacd2a5a1fd..174839f3772f 100644
28336 --- a/drivers/gpio/gpiolib-acpi.c
28337 +++ b/drivers/gpio/gpiolib-acpi.c
28338 @@ -1438,6 +1438,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
28339                         .no_edge_events_on_boot = true,
28340                 },
28341         },
28342 +       {
28343 +               /*
28344 +                * The Dell Venue 10 Pro 5055, with Bay Trail SoC + TI PMIC, uses an
28345 +                * external embedded controller connected via I2C + an ACPI GPIO
28346 +                * event handler on INT33FC:02 pin 12, causing spurious wakeups.
28347 +                */
28348 +               .matches = {
28349 +                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
28350 +                       DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"),
28351 +               },
28352 +               .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
28353 +                       .ignore_wake = "INT33FC:02@12",
28354 +               },
28355 +       },
28356         {
28357                 /*
28358                  * HP X2 10 models with Cherry Trail SoC + TI PMIC use an
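The new entry shows the quirk-table pattern: match the firmware's DMI strings, return machine-specific driver_data (here, which GPIO wake event to ignore). A reduced, compilable version of the lookup (entries and fields illustrative):

#include <stdio.h>
#include <string.h>

struct quirk { const char *vendor, *product, *ignore_wake; };

static const struct quirk quirks[] = {
        { "Dell Inc.", "Venue 10 Pro 5055", "INT33FC:02@12" },
        { NULL, NULL, NULL },
};

/* Walk the table until both DMI strings match. */
static const struct quirk *match(const char *vendor, const char *product)
{
        const struct quirk *q;

        for (q = quirks; q->vendor; q++)
                if (!strcmp(q->vendor, vendor) && !strcmp(q->product, product))
                        return q;
        return NULL;
}

int main(void)
{
        const struct quirk *q = match("Dell Inc.", "Venue 10 Pro 5055");

        if (q)
                printf("ignoring wake on %s\n", q->ignore_wake);
        return 0;
}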
28359 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
28360 index 8a5a8ff5d362..5eee251e3335 100644
28361 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
28362 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
28363 @@ -3613,6 +3613,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
28365         dev_info(adev->dev, "amdgpu: finishing device.\n");
28366         flush_delayed_work(&adev->delayed_init_work);
28367 +       ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
28368         adev->shutdown = true;
28370         kfree(adev->pci_state);
28371 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
28372 index f753e04fee99..a2ac44cc2a6d 100644
28373 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
28374 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
28375 @@ -1355,7 +1355,7 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
28376                         }
28377                 }
28378         }
28379 -       return r;
28380 +       return 0;
28383  int amdgpu_display_resume_helper(struct amdgpu_device *adev)
28384 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
28385 index d56f4023ebb3..7e8e46c39dbd 100644
28386 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
28387 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
28388 @@ -533,6 +533,8 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
28390                 if (!ring || !ring->fence_drv.initialized)
28391                         continue;
28392 +               if (!ring->no_scheduler)
28393 +                       drm_sched_fini(&ring->sched);
28394                 r = amdgpu_fence_wait_empty(ring);
28395                 if (r) {
28396                         /* no need to trigger GPU reset as we are unloading */
28397 @@ -541,8 +543,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
28398                 if (ring->fence_drv.irq_src)
28399                         amdgpu_irq_put(adev, ring->fence_drv.irq_src,
28400                                        ring->fence_drv.irq_type);
28401 -               if (!ring->no_scheduler)
28402 -                       drm_sched_fini(&ring->sched);
28404                 del_timer_sync(&ring->fence_drv.fallback_timer);
28405                 for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
28406                         dma_fence_put(ring->fence_drv.fences[j]);
28407 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
28408 index 7645223ea0ef..97c11aa47ad0 100644
28409 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
28410 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
28411 @@ -77,6 +77,8 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
28412                 }
28414                 ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);
28415 +               /* flush the cache before committing the IB */
28416 +               ib->flags = AMDGPU_IB_FLAG_EMIT_MEM_SYNC;
28418                 if (!vm)
28419                         ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
28420 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
28421 index 94b069630db3..b4971e90b98c 100644
28422 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
28423 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
28424 @@ -215,7 +215,11 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
28425         /* Check if we have an idle VMID */
28426         i = 0;
28427         list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
28428 -               fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
28429 +               /* Don't use per engine and per process VMID at the same time */
28430 +               struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
28431 +                       NULL : ring;
28433 +               fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
28434                 if (!fences[i])
28435                         break;
28436                 ++i;
28437 @@ -281,7 +285,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
28438         if (updates && (*id)->flushed_updates &&
28439             updates->context == (*id)->flushed_updates->context &&
28440             !dma_fence_is_later(updates, (*id)->flushed_updates))
28441 -           updates = NULL;
28442 +               updates = NULL;
28444         if ((*id)->owner != vm->immediate.fence_context ||
28445             job->vm_pd_addr != (*id)->pd_gpu_addr ||
28446 @@ -290,6 +294,10 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
28447              !dma_fence_is_signaled((*id)->last_flush))) {
28448                 struct dma_fence *tmp;
28450 +               /* Don't use per engine and per process VMID at the same time */
28451 +               if (adev->vm_manager.concurrent_flush)
28452 +                       ring = NULL;
28454                 /* to prevent one context starved by another context */
28455                 (*id)->pd_gpu_addr = 0;
28456                 tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
28457 @@ -365,12 +373,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
28458                 if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
28459                         needs_flush = true;
28461 -               /* Concurrent flushes are only possible starting with Vega10 and
28462 -                * are broken on Navi10 and Navi14.
28463 -                */
28464 -               if (needs_flush && (adev->asic_type < CHIP_VEGA10 ||
28465 -                                   adev->asic_type == CHIP_NAVI10 ||
28466 -                                   adev->asic_type == CHIP_NAVI14))
28467 +               if (needs_flush && !adev->vm_manager.concurrent_flush)
28468                         continue;
28470                 /* Good, we can use this VMID. Remember this submission as
28471 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
28472 index afbbec82a289..9be945d8e72f 100644
28473 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
28474 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
28475 @@ -535,7 +535,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
28476                 for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
28477                         struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
28479 -                       if (!src)
28480 +                       if (!src || !src->funcs || !src->funcs->set)
28481                                 continue;
28482                         for (k = 0; k < src->num_types; k++)
28483                                 amdgpu_irq_update(adev, src, k);
28484 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
28485 index 19c0a3655228..82e9ecf84352 100644
28486 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
28487 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
28488 @@ -519,8 +519,10 @@ static int init_pmu_entry_by_type_and_add(struct amdgpu_pmu_entry *pmu_entry,
28489         pmu_entry->pmu.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
28490                                                                 GFP_KERNEL);
28492 -       if (!pmu_entry->pmu.attr_groups)
28493 +       if (!pmu_entry->pmu.attr_groups) {
28494 +               ret = -ENOMEM;
28495                 goto err_attr_group;
28496 +       }
28498         snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d", pmu_entry->pmu_file_prefix,
28499                                 adev_to_drm(pmu_entry->adev)->primary->index);
28500 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
28501 index 5efa331e3ee8..6b14626c148e 100644
28502 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
28503 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
28504 @@ -267,7 +267,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
28505         *addr += offset & ~PAGE_MASK;
28507         num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
28508 -       num_bytes = num_pages * 8;
28509 +       num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
28511         r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
28512                                      AMDGPU_IB_POOL_DELAYED, &job);
28513 @@ -942,7 +942,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
28514                 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
28516         /* double check that we don't free the table twice */
28517 -       if (!ttm->sg->sgl)
28518 +       if (!ttm->sg || !ttm->sg->sgl)
28519                 return;
28521         /* unmap the pages mapped to the device */
28522 @@ -1162,13 +1162,13 @@ static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
28523         struct amdgpu_ttm_tt *gtt = (void *)ttm;
28524         int r;
28526 -       if (!gtt->bound)
28527 -               return;
28529         /* if the pages have userptr pinning then clear that first */
28530         if (gtt->userptr)
28531                 amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
28533 +       if (!gtt->bound)
28534 +               return;
28536         if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
28537                 return;
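The map_buffer fix sizes the PTE scratch area in GPU pages, not CPU pages: each CPU page needs one 8-byte entry per GPU page it spans, which matters on kernels whose CPU page is larger than the 4 KiB GPU page. A sketch with illustrative sizes:

#include <stdio.h>

#define CPU_PAGE_SIZE  16384u  /* e.g. an arm64 16K-page kernel */
#define GPU_PAGE_SIZE  4096u   /* the AMDGPU GPU page size */
#define GPU_PAGES_IN_CPU_PAGE (CPU_PAGE_SIZE / GPU_PAGE_SIZE)

/* One 8-byte PTE per GPU page covered by the CPU pages. */
static unsigned int pte_bytes(unsigned int num_cpu_pages)
{
        return num_cpu_pages * 8 * GPU_PAGES_IN_CPU_PAGE;
}

int main(void)
{
        printf("%u bytes of PTEs for 16 CPU pages\n", pte_bytes(16));
        return 0;
}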
28539 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
28540 index e2ed4689118a..c6dbc0801604 100644
28541 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
28542 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
28543 @@ -259,7 +259,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
28544                 if ((adev->asic_type == CHIP_POLARIS10 ||
28545                      adev->asic_type == CHIP_POLARIS11) &&
28546                     (adev->uvd.fw_version < FW_1_66_16))
28547 -                       DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
28548 +                       DRM_ERROR("POLARIS10/11 UVD firmware version %u.%u is too old.\n",
28549                                   version_major, version_minor);
28550         } else {
28551                 unsigned int enc_major, enc_minor, dec_minor;
28552 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
28553 index 326dae31b675..a566bbe26bdd 100644
28554 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
28555 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
28556 @@ -92,13 +92,13 @@ struct amdgpu_prt_cb {
28557  static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
28559         mutex_lock(&vm->eviction_lock);
28560 -       vm->saved_flags = memalloc_nofs_save();
28561 +       vm->saved_flags = memalloc_noreclaim_save();
28564  static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
28566         if (mutex_trylock(&vm->eviction_lock)) {
28567 -               vm->saved_flags = memalloc_nofs_save();
28568 +               vm->saved_flags = memalloc_noreclaim_save();
28569                 return 1;
28570         }
28571         return 0;
28572 @@ -106,7 +106,7 @@ static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
28574  static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
28576 -       memalloc_nofs_restore(vm->saved_flags);
28577 +       memalloc_noreclaim_restore(vm->saved_flags);
28578         mutex_unlock(&vm->eviction_lock);
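memalloc_noreclaim_save()/restore() bracket the eviction lock with a scoped task flag; the save/restore pairing (rather than set/clear) keeps nesting correct, and noreclaim forbids memory reclaim outright where the previous nofs scope only blocked filesystem re-entry. A user-space model of the save/restore pair:

#include <stdio.h>

#define PF_MEMALLOC 0x1u

static unsigned int task_flags;

/* Set the bit, but remember whether it was already set. */
static unsigned int noreclaim_save(void)
{
        unsigned int old = task_flags & PF_MEMALLOC;

        task_flags |= PF_MEMALLOC;
        return old;
}

/* Put back exactly the old state, so nested scopes unwind cleanly. */
static void noreclaim_restore(unsigned int old)
{
        task_flags = (task_flags & ~PF_MEMALLOC) | old;
}

int main(void)
{
        unsigned int saved = noreclaim_save();   /* lock side */
        /* ... eviction work runs with reclaim forbidden ... */
        noreclaim_restore(saved);                /* unlock side */
        printf("flags restored to %#x\n", task_flags);
        return 0;
}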
28581 @@ -3147,6 +3147,12 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
28583         unsigned i;
28585 +       /* Concurrent flushes are only possible starting with Vega10 and
28586 +        * are broken on Navi10 and Navi14.
28587 +        */
28588 +       adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
28589 +                                             adev->asic_type == CHIP_NAVI10 ||
28590 +                                             adev->asic_type == CHIP_NAVI14);
28591         amdgpu_vmid_mgr_init(adev);
28593         adev->vm_manager.fence_context =
28594 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
28595 index 976a12e5a8b9..4e140288159c 100644
28596 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
28597 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
28598 @@ -331,6 +331,7 @@ struct amdgpu_vm_manager {
28599         /* Handling of VMIDs */
28600         struct amdgpu_vmid_mgr                  id_mgr[AMDGPU_MAX_VMHUBS];
28601         unsigned int                            first_kfd_vmid;
28602 +       bool                                    concurrent_flush;
28604         /* Handling of VM fences */
28605         u64                                     fence_context;
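The new concurrent_flush field caches a per-ASIC capability once at init so the VMID-grab paths test a bool instead of re-listing chip names. A sketch of the predicate, with an illustrative enum ordered the way the comment describes (pre-Vega10 chips sort lower):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative ordering only; the real enum lives in amd_shared.h. */
enum chip { CHIP_POLARIS10, CHIP_VEGA10, CHIP_NAVI10, CHIP_NAVI14, CHIP_NAVI12 };

/* Concurrent flushes need Vega10 or newer and are broken on
 * Navi10/Navi14, mirroring the hoisted condition in the hunk.
 */
static bool concurrent_flush_supported(enum chip c)
{
        return !(c < CHIP_VEGA10 || c == CHIP_NAVI10 || c == CHIP_NAVI14);
}

int main(void)
{
        printf("vega10: %d\n", concurrent_flush_supported(CHIP_VEGA10));
        printf("navi10: %d\n", concurrent_flush_supported(CHIP_NAVI10));
        return 0;
}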
28606 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
28607 index 659b385b27b5..4d3a24fdeb9c 100644
28608 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
28609 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
28610 @@ -468,15 +468,22 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
28614 +/*
28615 + * NOTE psp_xgmi_node_info.num_hops layout is as follows:
28616 + * num_hops[7:6] = link type (0 = xGMI2, 1 = xGMI3, 2/3 = reserved)
28617 + * num_hops[5:3] = reserved
28618 + * num_hops[2:0] = number of hops
28619 + */
28620  int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
28621                 struct amdgpu_device *peer_adev)
28623         struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
28624 +       uint8_t num_hops_mask = 0x7;
28625         int i;
28627         for (i = 0 ; i < top->num_nodes; ++i)
28628                 if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
28629 -                       return top->nodes[i].num_hops;
28630 +                       return top->nodes[i].num_hops & num_hops_mask;
28631         return  -EINVAL;
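Per the new comment, num_hops is a packed byte, so the hop count must be masked out before use or the link-type bits inflate it. A standalone extraction sketch:

#include <stdint.h>
#include <stdio.h>

/* num_hops layout per the hunk's comment:
 * bits [7:6] link type, bits [2:0] hop count.
 */
static unsigned int hops(uint8_t num_hops)
{
        return num_hops & 0x7;
}

static unsigned int link_type(uint8_t num_hops)
{
        return (num_hops >> 6) & 0x3;
}

int main(void)
{
        uint8_t raw = 0x42;     /* link type 1 (xGMI3), 2 hops */

        printf("hops=%u link=%u\n", hops(raw), link_type(raw));
        return 0;
}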
28634 diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
28635 index 63691deb7df3..2342c5d216f9 100644
28636 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
28637 +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
28638 @@ -1391,9 +1391,10 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
28639         SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
28640         SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
28641         SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
28642 -       SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04800000),
28643 +       SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04900000),
28644         SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
28645         SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
28646 +       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1800ff, 0x00000044),
28647         SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
28648         SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x00007fff, 0x000001fe),
28649         SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
28650 @@ -1411,12 +1412,13 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
28651         SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000820, 0x00000820),
28652         SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
28653         SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
28654 +       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070104),
28655         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
28656         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
28657         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
28658         SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
28659         SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0xffdf80ff, 0x479c0010),
28660 -       SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
28661 +       SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00c00000)
28662  };
28664  static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
28665 diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
28666 index 65db88bb6cbc..d2c020a91c0b 100644
28667 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
28668 +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
28669 @@ -4864,7 +4864,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
28670         amdgpu_gfx_rlc_enter_safe_mode(adev);
28672         /* Enable 3D CGCG/CGLS */
28673 -       if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
28674 +       if (enable) {
28675                 /* write cmd to clear cgcg/cgls ov */
28676                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
28677                 /* unset CGCG override */
28678 @@ -4876,8 +4876,12 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
28679                 /* enable 3Dcgcg FSM(0x0000363f) */
28680                 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
28682 -               data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
28683 -                       RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
28684 +               if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
28685 +                       data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
28686 +                               RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
28687 +               else
28688 +                       data = 0x0 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT;
28690                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
28691                         data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
28692                                 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
28693 diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
28694 index 2d832fc23119..421d6069c509 100644
28695 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
28696 +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
28697 @@ -59,6 +59,7 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
28698  MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
28699  MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
28700  MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
28701 +MODULE_FIRMWARE("amdgpu/polaris12_32_mc.bin");
28702  MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
28703  MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
28704  MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
28705 @@ -243,10 +244,16 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
28706                         chip_name = "polaris10";
28707                 break;
28708         case CHIP_POLARIS12:
28709 -               if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision))
28710 +               if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
28711                         chip_name = "polaris12_k";
28712 -               else
28713 -                       chip_name = "polaris12";
28714 +               } else {
28715 +                       WREG32(mmMC_SEQ_IO_DEBUG_INDEX, ixMC_IO_DEBUG_UP_159);
28716 +                       /* Polaris12 32bit ASIC needs a special MC firmware */
28717 +                       if (RREG32(mmMC_SEQ_IO_DEBUG_DATA) == 0x05b4dc40)
28718 +                               chip_name = "polaris12_32";
28719 +                       else
28720 +                               chip_name = "polaris12";
28721 +               }
28722                 break;
28723         case CHIP_FIJI:
28724         case CHIP_CARRIZO:
28725 diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
28726 index d345e324837d..2a27fe26232b 100644
28727 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
28728 +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
28729 @@ -123,6 +123,10 @@ static const struct soc15_reg_golden golden_settings_sdma_nv14[] = {
28731  static const struct soc15_reg_golden golden_settings_sdma_nv12[] = {
28732         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
28733 +       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
28734 +       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
28735 +       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
28736 +       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
28737         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
28738  };
28740 diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
28741 index 1221aa6b40a9..d1045a9b37d9 100644
28742 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c
28743 +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
28744 @@ -1151,7 +1151,6 @@ static int soc15_common_early_init(void *handle)
28745                         adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
28746                                 AMD_CG_SUPPORT_GFX_MGLS |
28747                                 AMD_CG_SUPPORT_GFX_CP_LS |
28748 -                               AMD_CG_SUPPORT_GFX_3D_CGCG |
28749                                 AMD_CG_SUPPORT_GFX_3D_CGLS |
28750                                 AMD_CG_SUPPORT_GFX_CGCG |
28751                                 AMD_CG_SUPPORT_GFX_CGLS |
28752 @@ -1170,7 +1169,6 @@ static int soc15_common_early_init(void *handle)
28753                                 AMD_CG_SUPPORT_GFX_MGLS |
28754                                 AMD_CG_SUPPORT_GFX_RLC_LS |
28755                                 AMD_CG_SUPPORT_GFX_CP_LS |
28756 -                               AMD_CG_SUPPORT_GFX_3D_CGCG |
28757                                 AMD_CG_SUPPORT_GFX_3D_CGLS |
28758                                 AMD_CG_SUPPORT_GFX_CGCG |
28759                                 AMD_CG_SUPPORT_GFX_CGLS |
28760 diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
28761 index def583916294..9b844e9fb16f 100644
28762 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
28763 +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
28764 @@ -584,6 +584,10 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
28765         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
28766                         VCN, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0),
28767                         AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
28769 +       /* VCN global tiling registers */
28770 +       WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
28771 +               UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
28774  static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
28775 diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
28776 index 88626d83e07b..ca8efa5c6978 100644
28777 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
28778 +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
28779 @@ -220,10 +220,8 @@ static int vega10_ih_enable_ring(struct amdgpu_device *adev,
28780         tmp = vega10_ih_rb_cntl(ih, tmp);
28781         if (ih == &adev->irq.ih)
28782                 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
28783 -       if (ih == &adev->irq.ih1) {
28784 -               tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
28785 +       if (ih == &adev->irq.ih1)
28786                 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
28787 -       }
28788         if (amdgpu_sriov_vf(adev)) {
28789                 if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
28790                         dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
28791 @@ -265,7 +263,6 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
28792         u32 ih_chicken;
28793         int ret;
28794         int i;
28795 -       u32 tmp;
28797         /* disable irqs */
28798         ret = vega10_ih_toggle_interrupts(adev, false);
28799 @@ -291,15 +288,6 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
28800                 }
28801         }
28803 -       tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
28804 -       tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
28805 -                           CLIENT18_IS_STORM_CLIENT, 1);
28806 -       WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
28808 -       tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
28809 -       tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
28810 -       WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
28812         pci_set_master(adev->pdev);
28814         /* enable interrupts */
28815 @@ -345,11 +333,17 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
28816         u32 wptr, tmp;
28817         struct amdgpu_ih_regs *ih_regs;
28819 -       wptr = le32_to_cpu(*ih->wptr_cpu);
28820 -       ih_regs = &ih->ih_regs;
28821 +       if (ih == &adev->irq.ih) {
28822 +               /* Only ring0 supports writeback. On other rings fall back
28823 +                * to register-based code with overflow checking below.
28824 +                */
28825 +               wptr = le32_to_cpu(*ih->wptr_cpu);
28827 -       if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
28828 -               goto out;
28829 +               if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
28830 +                       goto out;
28831 +       }
28833 +       ih_regs = &ih->ih_regs;
28835         /* Double check that the overflow wasn't already cleared. */
28836         wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
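This vega10 hunk, and the identical vega20 one further down, narrow the writeback fast path to ring0: only ring0 keeps a coherent wptr_cpu copy, so ih1 and ih2 now always take the register read that doubles as the overflow check. The shape of the logic, as a hedged standalone sketch with simplified stand-in types and helpers:

#include <stdbool.h>
#include <stdint.h>

#define RB_OVERFLOW (1u << 0)   /* placeholder bit position */

struct ih_ring {
        volatile uint32_t *wptr_cpu;  /* writeback slot, maintained on ring0 only */
        uint32_t wptr_reg;            /* MMIO offset of IH_RB_WPTR */
};

/* Stand-in for RREG32_NO_KIQ(). */
static uint32_t mmio_read32(uint32_t reg) { (void)reg; return 0; }

static uint32_t ih_get_wptr(struct ih_ring *ih, bool is_ring0)
{
        uint32_t wptr;

        if (is_ring0) {
                wptr = *ih->wptr_cpu;            /* cheap coherent read */
                if (!(wptr & RB_OVERFLOW))
                        return wptr;             /* fast path: no overflow */
        }
        /* ring1/ring2, or a suspected overflow: read the register and
         * let the caller run the overflow handling. */
        return mmio_read32(ih->wptr_reg);
}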
28837 @@ -440,15 +434,11 @@ static int vega10_ih_self_irq(struct amdgpu_device *adev,
28838                               struct amdgpu_irq_src *source,
28839                               struct amdgpu_iv_entry *entry)
28841 -       uint32_t wptr = cpu_to_le32(entry->src_data[0]);
28843         switch (entry->ring_id) {
28844         case 1:
28845 -               *adev->irq.ih1.wptr_cpu = wptr;
28846                 schedule_work(&adev->irq.ih1_work);
28847                 break;
28848         case 2:
28849 -               *adev->irq.ih2.wptr_cpu = wptr;
28850                 schedule_work(&adev->irq.ih2_work);
28851                 break;
28852         default: break;
28853 diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
28854 index 5a3c867d5881..86dcf448e0c2 100644
28855 --- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
28856 +++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
28857 @@ -104,6 +104,8 @@ static int vega20_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
28859         tmp = RREG32(ih_regs->ih_rb_cntl);
28860         tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
28861 +       tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1);
28863         /* enable_intr field is only valid in ring0 */
28864         if (ih == &adev->irq.ih)
28865                 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
28866 @@ -220,10 +222,8 @@ static int vega20_ih_enable_ring(struct amdgpu_device *adev,
28867         tmp = vega20_ih_rb_cntl(ih, tmp);
28868         if (ih == &adev->irq.ih)
28869                 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
28870 -       if (ih == &adev->irq.ih1) {
28871 -               tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
28872 +       if (ih == &adev->irq.ih1)
28873                 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
28874 -       }
28875         if (amdgpu_sriov_vf(adev)) {
28876                 if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
28877                         dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
28878 @@ -297,7 +297,6 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
28879         u32 ih_chicken;
28880         int ret;
28881         int i;
28882 -       u32 tmp;
28884         /* disable irqs */
28885         ret = vega20_ih_toggle_interrupts(adev, false);
28886 @@ -326,15 +325,6 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
28887                 }
28888         }
28890 -       tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
28891 -       tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
28892 -                           CLIENT18_IS_STORM_CLIENT, 1);
28893 -       WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
28895 -       tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
28896 -       tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
28897 -       WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
28899         pci_set_master(adev->pdev);
28901         /* enable interrupts */
28902 @@ -380,11 +370,17 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
28903         u32 wptr, tmp;
28904         struct amdgpu_ih_regs *ih_regs;
28906 -       wptr = le32_to_cpu(*ih->wptr_cpu);
28907 -       ih_regs = &ih->ih_regs;
28908 +       if (ih == &adev->irq.ih) {
28909 +               /* Only ring0 supports writeback. On other rings fall back
28910 +                * to register-based code with overflow checking below.
28911 +                */
28912 +               wptr = le32_to_cpu(*ih->wptr_cpu);
28914 -       if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
28915 -               goto out;
28916 +               if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
28917 +                       goto out;
28918 +       }
28920 +       ih_regs = &ih->ih_regs;
28922         /* Double check that the overflow wasn't already cleared. */
28923         wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
28924 @@ -476,15 +472,11 @@ static int vega20_ih_self_irq(struct amdgpu_device *adev,
28925                               struct amdgpu_irq_src *source,
28926                               struct amdgpu_iv_entry *entry)
28928 -       uint32_t wptr = cpu_to_le32(entry->src_data[0]);
28930         switch (entry->ring_id) {
28931         case 1:
28932 -               *adev->irq.ih1.wptr_cpu = wptr;
28933                 schedule_work(&adev->irq.ih1_work);
28934                 break;
28935         case 2:
28936 -               *adev->irq.ih2.wptr_cpu = wptr;
28937                 schedule_work(&adev->irq.ih2_work);
28938                 break;
28939         default: break;
28940 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
28941 index 511712c2e382..673d5e34f213 100644
28942 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
28943 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
28944 @@ -33,6 +33,11 @@ static int kfd_debugfs_open(struct inode *inode, struct file *file)
28946         return single_open(file, show, NULL);
28948 +static int kfd_debugfs_hang_hws_read(struct seq_file *m, void *data)
28950 +       seq_printf(m, "echo gpu_id > hang_hws\n");
28951 +       return 0;
28954  static ssize_t kfd_debugfs_hang_hws_write(struct file *file,
28955         const char __user *user_buf, size_t size, loff_t *ppos)
28956 @@ -94,7 +99,7 @@ void kfd_debugfs_init(void)
28957         debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
28958                             kfd_debugfs_rls_by_device, &kfd_debugfs_fops);
28959         debugfs_create_file("hang_hws", S_IFREG | 0200, debugfs_root,
28960 -                           NULL, &kfd_debugfs_hang_hws_fops);
28961 +                           kfd_debugfs_hang_hws_read, &kfd_debugfs_hang_hws_fops);
28964  void kfd_debugfs_fini(void)
28965 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
28966 index 4598a9a58125..a4266c4bca13 100644
28967 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
28968 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
28969 @@ -1128,6 +1128,9 @@ static int set_sched_resources(struct device_queue_manager *dqm)
28971  static int initialize_cpsch(struct device_queue_manager *dqm)
28973 +       uint64_t num_sdma_queues;
28974 +       uint64_t num_xgmi_sdma_queues;
28976         pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
28978         mutex_init(&dqm->lock_hidden);
28979 @@ -1136,8 +1139,18 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
28980         dqm->active_cp_queue_count = 0;
28981         dqm->gws_queue_count = 0;
28982         dqm->active_runlist = false;
28983 -       dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
28984 -       dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
28986 +       num_sdma_queues = get_num_sdma_queues(dqm);
28987 +       if (num_sdma_queues >= BITS_PER_TYPE(dqm->sdma_bitmap))
28988 +               dqm->sdma_bitmap = ULLONG_MAX;
28989 +       else
28990 +               dqm->sdma_bitmap = (BIT_ULL(num_sdma_queues) - 1);
28992 +       num_xgmi_sdma_queues = get_num_xgmi_sdma_queues(dqm);
28993 +       if (num_xgmi_sdma_queues >= BITS_PER_TYPE(dqm->xgmi_sdma_bitmap))
28994 +               dqm->xgmi_sdma_bitmap = ULLONG_MAX;
28995 +       else
28996 +               dqm->xgmi_sdma_bitmap = (BIT_ULL(num_xgmi_sdma_queues) - 1);
28998         INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
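The bitmap initialization above sidesteps two undefined shifts: the old ~0ULL >> (64 - n) form is undefined for n == 0, and a bare BIT_ULL(n) - 1 would be undefined for n == 64, hence the explicit BITS_PER_TYPE() guard. The same rule as a self-contained helper; the name here is illustrative:

#include <stdint.h>

/* Mask with the low n bits set, valid for the full range 0..64.
 * Shifting a 64-bit value by 64 or more is undefined behaviour in C,
 * so the boundary case is handled explicitly, exactly as the hunk
 * does with BITS_PER_TYPE()/ULLONG_MAX. */
static uint64_t sdma_queue_mask(unsigned int n)
{
        if (n >= 64)
                return ~(uint64_t)0;        /* every queue available */
        return ((uint64_t)1 << n) - 1;      /* BIT_ULL(n) - 1 */
}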
29000 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
29001 index 66bbca61e3ef..9318936aa805 100644
29002 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
29003 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
29004 @@ -20,6 +20,10 @@
29005   * OTHER DEALINGS IN THE SOFTWARE.
29006   */
29008 +#include <linux/kconfig.h>
29010 +#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2)
29012  #include <linux/printk.h>
29013  #include <linux/device.h>
29014  #include <linux/slab.h>
29015 @@ -355,3 +359,5 @@ int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
29017         return 0;
29020 +#endif
29021 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
29022 index dd23d9fdf6a8..afd420b01a0c 100644
29023 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
29024 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
29025 @@ -23,7 +23,9 @@
29026  #ifndef __KFD_IOMMU_H__
29027  #define __KFD_IOMMU_H__
29029 -#if defined(CONFIG_AMD_IOMMU_V2_MODULE) || defined(CONFIG_AMD_IOMMU_V2)
29030 +#include <linux/kconfig.h>
29032 +#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2)
29034  #define KFD_SUPPORT_IOMMU_V2
29036 @@ -46,6 +48,9 @@ static inline int kfd_iommu_check_device(struct kfd_dev *kfd)
29038  static inline int kfd_iommu_device_init(struct kfd_dev *kfd)
29040 +#if IS_MODULE(CONFIG_AMD_IOMMU_V2)
29041 +       WARN_ONCE(1, "iommu_v2 module is not usable by built-in KFD");
29042 +#endif
29043         return 0;
29046 @@ -73,6 +78,6 @@ static inline int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
29047         return 0;
29050 -#endif /* defined(CONFIG_AMD_IOMMU_V2) */
29051 +#endif /* IS_REACHABLE(CONFIG_AMD_IOMMU_V2) */
29053  #endif /* __KFD_IOMMU_H__ */
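IS_REACHABLE(CONFIG_FOO), which the header now uses in place of the raw CONFIG_AMD_IOMMU_V2/_MODULE defines, is true when FOO is built in, or when FOO is a module and the testing code is itself modular; a built-in caller cannot link against a module's symbols, which is exactly the case the new WARN_ONCE flags. A kernel-flavoured fragment of the pattern (the stub name is hypothetical, not from the patch):

#include <linux/kconfig.h>

/*
 *  CONFIG_FOO   IS_BUILTIN  IS_MODULE  IS_REACHABLE (from built-in code)
 *  =y               1           0           1
 *  =m               0           1           0
 *  unset            0           0           0
 *
 * From modular code, IS_REACHABLE(FOO) is additionally 1 when FOO=m.
 */
#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2)
int kfd_iommu_probe(void);              /* real symbol is linkable here */
#else
static inline int kfd_iommu_probe(void) /* hypothetical stub */
{
        return 0;                       /* quietly no-op, as the header does */
}
#endif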
29054 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
29055 index d699a5cf6c11..b63f55ea8758 100644
29056 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
29057 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
29058 @@ -1191,6 +1191,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
29059         if (adev->dm.dc)
29060                 dc_deinit_callbacks(adev->dm.dc);
29061  #endif
29063 +#if defined(CONFIG_DRM_AMD_DC_DCN)
29064 +       if (adev->dm.vblank_workqueue) {
29065 +               adev->dm.vblank_workqueue->dm = NULL;
29066 +               kfree(adev->dm.vblank_workqueue);
29067 +               adev->dm.vblank_workqueue = NULL;
29068 +       }
29069 +#endif
29071         if (adev->dm.dc->ctx->dmub_srv) {
29072                 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
29073                 adev->dm.dc->ctx->dmub_srv = NULL;
29074 @@ -3841,6 +3850,23 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
29075         scaling_info->src_rect.x = state->src_x >> 16;
29076         scaling_info->src_rect.y = state->src_y >> 16;
29078 +       /*
29079 +        * For reasons we don't (yet) fully understand, a non-zero
29080 +        * src_y coordinate into an NV12 buffer can cause a
29081 +        * system hang. To avoid hangs (and maybe be overly cautious)
29082 +        * let's reject both non-zero src_x and src_y.
29083 +        *
29084 +        * We currently know of only one use-case to reproduce a
29085 +        * scenario with non-zero src_x and src_y for NV12, which
29086 +        * is to gesture the YouTube Android app into full screen
29087 +        * on ChromeOS.
29088 +        */
29089 +       if (state->fb &&
29090 +           state->fb->format->format == DRM_FORMAT_NV12 &&
29091 +           (scaling_info->src_rect.x != 0 ||
29092 +            scaling_info->src_rect.y != 0))
29093 +               return -EINVAL;
29095         scaling_info->src_rect.width = state->src_w >> 16;
29096         if (scaling_info->src_rect.width == 0)
29097                 return -EINVAL;
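The src_rect values above come from DRM plane state, where src_x/src_y (and src_w/src_h) are 16.16 fixed point; the >> 16 extracts the integer pixel coordinate that the NV12 guard compares against zero. A quick illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t src_x = (5u << 16) | 0x8000;           /* 5.5 in 16.16 fixed point */

        printf("integer pixels: %u\n", src_x >> 16);    /* prints 5 */
        printf("fraction/65536: %u\n", src_x & 0xffff); /* prints 32768 */
        return 0;
}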
29098 @@ -5863,6 +5889,15 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
29100         } while (stream == NULL && requested_bpc >= 6);
29102 +       if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
29103 +               DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
29105 +               aconnector->force_yuv420_output = true;
29106 +               stream = create_validate_stream_for_sink(aconnector, drm_mode,
29107 +                                               dm_state, old_stream);
29108 +               aconnector->force_yuv420_output = false;
29109 +       }
29111         return stream;
29114 @@ -7417,10 +7452,6 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
29115         int x, y;
29116         int xorigin = 0, yorigin = 0;
29118 -       position->enable = false;
29119 -       position->x = 0;
29120 -       position->y = 0;
29122         if (!crtc || !plane->state->fb)
29123                 return 0;
29125 @@ -7467,7 +7498,7 @@ static void handle_cursor_update(struct drm_plane *plane,
29126         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
29127         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
29128         uint64_t address = afb ? afb->address : 0;
29129 -       struct dc_cursor_position position;
29130 +       struct dc_cursor_position position = {0};
29131         struct dc_cursor_attributes attributes;
29132         int ret;
29134 @@ -9264,7 +9295,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
29136         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
29137         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
29138 -       if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
29139 +       if (!new_cursor_state || !new_primary_state ||
29140 +           !new_cursor_state->fb || !new_primary_state->fb) {
29141                 return 0;
29142         }
29144 @@ -9312,6 +9344,53 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
29146  #endif
29148 +static int validate_overlay(struct drm_atomic_state *state)
29150 +       int i;
29151 +       struct drm_plane *plane;
29152 +       struct drm_plane_state *old_plane_state, *new_plane_state;
29153 +       struct drm_plane_state *primary_state, *overlay_state = NULL;
29155 +       /* Check if primary plane is contained inside overlay */
29156 +       for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
29157 +               if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
29158 +                       if (drm_atomic_plane_disabling(plane->state, new_plane_state))
29159 +                               return 0;
29161 +                       overlay_state = new_plane_state;
29162 +                       continue;
29163 +               }
29164 +       }
29166 +       /* check if we're making changes to the overlay plane */
29167 +       if (!overlay_state)
29168 +               return 0;
29170 +       /* check if overlay plane is enabled */
29171 +       if (!overlay_state->crtc)
29172 +               return 0;
29174 +       /* find the primary plane for the CRTC that the overlay is enabled on */
29175 +       primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
29176 +       if (IS_ERR(primary_state))
29177 +               return PTR_ERR(primary_state);
29179 +       /* check if primary plane is enabled */
29180 +       if (!primary_state->crtc)
29181 +               return 0;
29183 +       /* Perform the bounds check to ensure the overlay plane covers the primary */
29184 +       if (primary_state->crtc_x < overlay_state->crtc_x ||
29185 +           primary_state->crtc_y < overlay_state->crtc_y ||
29186 +           primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
29187 +           primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
29188 +               DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
29189 +               return -EINVAL;
29190 +       }
29192 +       return 0;
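The final comparison block in validate_overlay() is a plain rectangle-containment test: the primary plane must lie entirely inside the overlay, which, per the DRM_DEBUG_ATOMIC message, matters when the hardware cursor is in use. Isolated, with an illustrative struct:

#include <stdbool.h>

struct rect { int x, y, w, h; };

/* True iff inner sits fully inside outer; mirrors the four
 * comparisons at the end of validate_overlay(). */
static bool rect_inside(struct rect inner, struct rect outer)
{
        return inner.x >= outer.x &&
               inner.y >= outer.y &&
               inner.x + inner.w <= outer.x + outer.w &&
               inner.y + inner.h <= outer.y + outer.h;
}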
29195  /**
29196   * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
29197   * @dev: The DRM device
29198 @@ -9383,7 +9462,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
29199         }
29201  #if defined(CONFIG_DRM_AMD_DC_DCN)
29202 -       if (adev->asic_type >= CHIP_NAVI10) {
29203 +       if (dc_resource_is_dsc_encoding_supported(dc)) {
29204                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
29205                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
29206                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
29207 @@ -9486,6 +9565,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
29208                         goto fail;
29209         }
29211 +       ret = validate_overlay(state);
29212 +       if (ret)
29213 +               goto fail;
29215         /* Add new/modified planes */
29216         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
29217                 ret = dm_update_plane_state(dc, state, plane,
29218 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
29219 index 8bfe901cf237..52cc81705280 100644
29220 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
29221 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
29222 @@ -68,18 +68,6 @@ struct common_irq_params {
29223         enum dc_irq_source irq_src;
29224  };
29226 -/**
29227 - * struct irq_list_head - Linked-list for low context IRQ handlers.
29228 - *
29229 - * @head: The list_head within &struct handler_data
29230 - * @work: A work_struct containing the deferred handler work
29231 - */
29232 -struct irq_list_head {
29233 -       struct list_head head;
29234 -       /* In case this interrupt needs post-processing, 'work' will be queued*/
29235 -       struct work_struct work;
29238  /**
29239   * struct dm_compressor_info - Buffer info used by frame buffer compression
29240   * @cpu_addr: MMIO cpu addr
29241 @@ -293,7 +281,7 @@ struct amdgpu_display_manager {
29242          * Note that handlers are called in the same order as they were
29243          * registered (FIFO).
29244          */
29245 -       struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
29246 +       struct list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
29248         /**
29249          * @irq_handler_list_high_tab:
29250 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
29251 index 360952129b6d..29139b34dbe2 100644
29252 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
29253 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
29254 @@ -150,7 +150,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
29255   *
29256   * --- to get dp configuration
29257   *
29258 - * cat link_settings
29259 + * cat /sys/kernel/debug/dri/0/DP-x/link_settings
29260   *
29261   * It will list current, verified, reported, preferred dp configuration.
29262   * current -- for current video mode
29263 @@ -163,7 +163,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
29264   * echo <lane_count>  <link_rate> > link_settings
29265   *
29266   * for example, to force to  2 lane, 2.7GHz,
29267 - * echo 4 0xa > link_settings
29268 + * echo 4 0xa > /sys/kernel/debug/dri/0/DP-x/link_settings
29269   *
29270   * spread_spectrum could not be changed dynamically.
29271   *
29272 @@ -171,7 +171,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
29273   * done. please check link settings after force operation to see if HW get
29274   * programming.
29275   *
29276 - * cat link_settings
29277 + * cat /sys/kernel/debug/dri/0/DP-x/link_settings
29278   *
29279   * check current and preferred settings.
29280   *
29281 @@ -255,7 +255,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
29282         int max_param_num = 2;
29283         uint8_t param_nums = 0;
29284         long param[2];
29285 -       bool valid_input = false;
29286 +       bool valid_input = true;
29288         if (size == 0)
29289                 return -EINVAL;
29290 @@ -282,9 +282,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
29291         case LANE_COUNT_ONE:
29292         case LANE_COUNT_TWO:
29293         case LANE_COUNT_FOUR:
29294 -               valid_input = true;
29295                 break;
29296         default:
29297 +               valid_input = false;
29298                 break;
29299         }
29301 @@ -294,9 +294,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
29302         case LINK_RATE_RBR2:
29303         case LINK_RATE_HIGH2:
29304         case LINK_RATE_HIGH3:
29305 -               valid_input = true;
29306                 break;
29307         default:
29308 +               valid_input = false;
29309                 break;
29310         }
29312 @@ -310,10 +310,11 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
29313          * spread spectrum will not be changed
29314          */
29315         prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
29316 +       prefer_link_settings.use_link_rate_set = false;
29317         prefer_link_settings.lane_count = param[0];
29318         prefer_link_settings.link_rate = param[1];
29320 -       dc_link_set_preferred_link_settings(dc, &prefer_link_settings, link);
29321 +       dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, true);
29323         kfree(wr_buf);
29324         return size;
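The dp_link_settings_write change above inverts the validation flag: valid_input now starts true and only the default: arms of the two switches clear it, instead of every accepted case setting it. The truth table is unchanged; in isolation the pattern looks like this (the case values mirror LANE_COUNT_ONE/TWO/FOUR as an assumption):

#include <stdbool.h>

static bool lane_count_valid(long lanes)
{
        bool valid = true;

        switch (lanes) {
        case 1:         /* LANE_COUNT_ONE */
        case 2:         /* LANE_COUNT_TWO */
        case 4:         /* LANE_COUNT_FOUR */
                break;
        default:
                valid = false;
                break;
        }
        return valid;
}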
29325 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
29326 index 0cdbfcd475ec..71a15f68514b 100644
29327 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
29328 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
29329 @@ -644,6 +644,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
29331         /* File created at /sys/class/drm/card0/device/hdcp_srm*/
29332         hdcp_work[0].attr = data_attr;
29333 +       sysfs_bin_attr_init(&hdcp_work[0].attr);
29335         if (sysfs_create_bin_file(&adev->dev->kobj, &hdcp_work[0].attr))
29336                 DRM_WARN("Failed to create device file hdcp_srm");
29337 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
29338 index e0000c180ed1..8ce10d0973c5 100644
29339 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
29340 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
29341 @@ -82,6 +82,7 @@ struct amdgpu_dm_irq_handler_data {
29342         struct amdgpu_display_manager *dm;
29343         /* DAL irq source which registered for this interrupt. */
29344         enum dc_irq_source irq_source;
29345 +       struct work_struct work;
29346  };
29348  #define DM_IRQ_TABLE_LOCK(adev, flags) \
29349 @@ -111,20 +112,10 @@ static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
29350   */
29351  static void dm_irq_work_func(struct work_struct *work)
29353 -       struct irq_list_head *irq_list_head =
29354 -               container_of(work, struct irq_list_head, work);
29355 -       struct list_head *handler_list = &irq_list_head->head;
29356 -       struct amdgpu_dm_irq_handler_data *handler_data;
29358 -       list_for_each_entry(handler_data, handler_list, list) {
29359 -               DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
29360 -                               handler_data->irq_source);
29361 +       struct amdgpu_dm_irq_handler_data *handler_data =
29362 +               container_of(work, struct amdgpu_dm_irq_handler_data, work);
29364 -               DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n",
29365 -                       handler_data->irq_source);
29367 -               handler_data->handler(handler_data->handler_arg);
29368 -       }
29369 +       handler_data->handler(handler_data->handler_arg);
29371         /* Call a DAL subcomponent which registered for interrupt notification
29372          * at INTERRUPT_LOW_IRQ_CONTEXT.
29373 @@ -156,7 +147,7 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
29374                 break;
29375         case INTERRUPT_LOW_IRQ_CONTEXT:
29376         default:
29377 -               hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
29378 +               hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
29379                 break;
29380         }
29382 @@ -290,7 +281,8 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
29383                 break;
29384         case INTERRUPT_LOW_IRQ_CONTEXT:
29385         default:
29386 -               hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
29387 +               hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
29388 +               INIT_WORK(&handler_data->work, dm_irq_work_func);
29389                 break;
29390         }
29392 @@ -372,7 +364,7 @@ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
29393  int amdgpu_dm_irq_init(struct amdgpu_device *adev)
29395         int src;
29396 -       struct irq_list_head *lh;
29397 +       struct list_head *lh;
29399         DRM_DEBUG_KMS("DM_IRQ\n");
29401 @@ -381,9 +373,7 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
29402         for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
29403                 /* low context handler list init */
29404                 lh = &adev->dm.irq_handler_list_low_tab[src];
29405 -               INIT_LIST_HEAD(&lh->head);
29406 -               INIT_WORK(&lh->work, dm_irq_work_func);
29408 +               INIT_LIST_HEAD(lh);
29409                 /* high context handler init */
29410                 INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
29411         }
29412 @@ -400,8 +390,11 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
29413  void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
29415         int src;
29416 -       struct irq_list_head *lh;
29417 +       struct list_head *lh;
29418 +       struct list_head *entry, *tmp;
29419 +       struct amdgpu_dm_irq_handler_data *handler;
29420         unsigned long irq_table_flags;
29422         DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
29423         for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
29424                 DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
29425 @@ -410,7 +403,16 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
29426                  * (because no code can schedule a new one). */
29427                 lh = &adev->dm.irq_handler_list_low_tab[src];
29428                 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
29429 -               flush_work(&lh->work);
29431 +               if (!list_empty(lh)) {
29432 +                       list_for_each_safe(entry, tmp, lh) {
29433 +                               handler = list_entry(
29434 +                                       entry,
29435 +                                       struct amdgpu_dm_irq_handler_data,
29436 +                                       list);
29437 +                               flush_work(&handler->work);
29438 +                       }
29439 +               }
29440         }
29443 @@ -420,6 +422,8 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
29444         struct list_head *hnd_list_h;
29445         struct list_head *hnd_list_l;
29446         unsigned long irq_table_flags;
29447 +       struct list_head *entry, *tmp;
29448 +       struct amdgpu_dm_irq_handler_data *handler;
29450         DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
29452 @@ -430,14 +434,22 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
29453          * will be disabled from manage_dm_interrupts on disable CRTC.
29454          */
29455         for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
29456 -               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
29457 +               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
29458                 hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
29459                 if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
29460                         dc_interrupt_set(adev->dm.dc, src, false);
29462                 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
29463 -               flush_work(&adev->dm.irq_handler_list_low_tab[src].work);
29465 +               if (!list_empty(hnd_list_l)) {
29466 +                       list_for_each_safe (entry, tmp, hnd_list_l) {
29467 +                               handler = list_entry(
29468 +                                       entry,
29469 +                                       struct amdgpu_dm_irq_handler_data,
29470 +                                       list);
29471 +                               flush_work(&handler->work);
29472 +                       }
29473 +               }
29474                 DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
29475         }
29477 @@ -457,7 +469,7 @@ int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
29479         /* re-enable short pulse interrupts HW interrupt */
29480         for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
29481 -               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
29482 +               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
29483                 hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
29484                 if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
29485                         dc_interrupt_set(adev->dm.dc, src, true);
29486 @@ -483,7 +495,7 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
29487          * will be enabled from manage_dm_interrupts on enable CRTC.
29488          */
29489         for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
29490 -               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
29491 +               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
29492                 hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
29493                 if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
29494                         dc_interrupt_set(adev->dm.dc, src, true);
29495 @@ -500,22 +512,53 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
29496  static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
29497                                         enum dc_irq_source irq_source)
29499 -       unsigned long irq_table_flags;
29500 -       struct work_struct *work = NULL;
29501 +       struct  list_head *handler_list = &adev->dm.irq_handler_list_low_tab[irq_source];
29502 +       struct  amdgpu_dm_irq_handler_data *handler_data;
29503 +       bool    work_queued = false;
29505 -       DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
29506 +       if (list_empty(handler_list))
29507 +               return;
29509 +       list_for_each_entry (handler_data, handler_list, list) {
29510 +               if (!queue_work(system_highpri_wq, &handler_data->work)) {
29511 +                       continue;
29512 +               } else {
29513 +                       work_queued = true;
29514 +                       break;
29515 +               }
29516 +       }
29518 -       if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
29519 -               work = &adev->dm.irq_handler_list_low_tab[irq_source].work;
29520 +       if (!work_queued) {
29521 +               struct  amdgpu_dm_irq_handler_data *handler_data_add;
29522 +               /* get the amdgpu_dm_irq_handler_data of the first item in handler_list */
29523 +               handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list);
29525 -       DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
29526 +               /* allocate a new amdgpu_dm_irq_handler_data */
29527 +               handler_data_add = kzalloc(sizeof(*handler_data), GFP_KERNEL);
29528 +               if (!handler_data_add) {
29529 +                       DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
29530 +                       return;
29531 +               }
29533 -       if (work) {
29534 -               if (!schedule_work(work))
29535 -                       DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n",
29536 -                                               irq_source);
29537 -       }
29538 +               /* copy the members of handler_data into the new amdgpu_dm_irq_handler_data */
29539 +               handler_data_add->handler       = handler_data->handler;
29540 +               handler_data_add->handler_arg   = handler_data->handler_arg;
29541 +               handler_data_add->dm            = handler_data->dm;
29542 +               handler_data_add->irq_source    = irq_source;
29544 +               list_add_tail(&handler_data_add->list, handler_list);
29546 +               INIT_WORK(&handler_data_add->work, dm_irq_work_func);
29548 +               if (queue_work(system_highpri_wq, &handler_data_add->work))
29549 +                       DRM_DEBUG("Queued work for handling interrupt from "
29550 +                                 "display for IRQ source %d\n",
29551 +                                 irq_source);
29552 +               else
29553 +                       DRM_ERROR("Failed to queue work for handling interrupt "
29554 +                                 "from display for IRQ source %d\n",
29555 +                                 irq_source);
29556 +       }
29559  /*
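The low-context IRQ rework above moves from one shared work item per IRQ source to one work_struct per handler, and when every handler's work is still pending it clones a handler so the new interrupt is not silently dropped (queue_work() returns false for an already-queued item). A simplified user-space model of that policy, with stand-in types; the real code appends the clone with list_add_tail and queues on system_highpri_wq:

#include <stdbool.h>
#include <stdlib.h>

struct handler {
        void (*fn)(void *arg);
        void *arg;
        bool pending;               /* models the queued work_struct state */
        struct handler *next;       /* models the list_head chain */
};

/* Models queue_work(): refuses to re-queue an already pending item. */
static bool try_queue(struct handler *h)
{
        if (h->pending)
                return false;
        h->pending = true;
        return true;
}

static void schedule_irq_source(struct handler *list)
{
        struct handler *h;

        for (h = list; h; h = h->next)
                if (try_queue(h))
                        return;         /* this event has a runner */

        /* All handlers busy: clone the head so the event still runs. */
        h = calloc(1, sizeof(*h));
        if (!h)
                return;                 /* mirrors the DRM_ERROR bail-out */
        h->fn = list->fn;
        h->arg = list->arg;
        h->next = list->next;
        list->next = h;
        try_queue(h);
}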
29560 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
29561 index 995ffbbf64e7..1ee27f2f28f1 100644
29562 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
29563 +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
29564 @@ -217,6 +217,9 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
29565                 if (ASICREV_IS_SIENNA_CICHLID_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
29566                         dcn3_clk_mgr_destroy(clk_mgr);
29567                 }
29568 +               if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
29569 +                       dcn3_clk_mgr_destroy(clk_mgr);
29570 +               }
29571                 break;
29573         case FAMILY_VGH:
29574 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
29575 index c7e5a64e06af..81ea5d3a1947 100644
29576 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
29577 +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
29578 @@ -252,6 +252,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
29579         bool force_reset = false;
29580         bool update_uclk = false;
29581         bool p_state_change_support;
29582 +       int total_plane_count;
29584         if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present)
29585                 return;
29586 @@ -292,7 +293,8 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
29587                 clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
29589         clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
29590 -       p_state_change_support = new_clocks->p_state_change_support || (display_count == 0);
29591 +       total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
29592 +       p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
29593         if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
29594                 clk_mgr_base->clks.p_state_change_support = p_state_change_support;
29596 diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
29597 index 8f8a13c7cf73..4781279024a9 100644
29598 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c
29599 +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
29600 @@ -2398,7 +2398,8 @@ static void commit_planes_do_stream_update(struct dc *dc,
29601                                         if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
29602                                                 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
29604 -                                       dc->hwss.optimize_bandwidth(dc, dc->current_state);
29605 +                                       dc->optimized_required = true;
29607                                 } else {
29608                                         if (dc->optimize_seamless_boot_streams == 0)
29609                                                 dc->hwss.prepare_bandwidth(dc, dc->current_state);
29610 @@ -2545,6 +2546,10 @@ static void commit_planes_for_stream(struct dc *dc,
29611                                                 plane_state->triplebuffer_flips = true;
29612                                 }
29613                         }
29614 +                       if (update_type == UPDATE_TYPE_FULL) {
29615 +                               /* force vsync flip when reconfiguring pipes to prevent underflow */
29616 +                               plane_state->flip_immediate = false;
29617 +                       }
29618                 }
29619         }
29621 diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
29622 index bd0101013ec8..440bf0a0e12a 100644
29623 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
29624 +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
29625 @@ -1603,6 +1603,7 @@ static bool dc_link_construct(struct dc_link *link,
29626         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
29628         DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__);
29629 +       kfree(info);
29630         return true;
29631  device_tag_fail:
29632         link->link_enc->funcs->destroy(&link->link_enc);
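The one-line kfree(info) added to dc_link_construct() plugs a leak on the success path: the temporary info allocation was consulted only during construction but was dropped on the return true path. The shape of the fix, with hypothetical names:

#include <stdbool.h>
#include <stdlib.h>

struct probe_info { int unused; };      /* hypothetical scratch data */

static bool construct(void)
{
        struct probe_info *info = calloc(1, sizeof(*info));

        if (!info)
                return false;

        /* ... info is consulted only while constructing ... */

        free(info);     /* the previously missing success-path release */
        return true;
}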
29633 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
29634 index 4e87e70237e3..874b132fe1d7 100644
29635 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
29636 +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
29637 @@ -283,7 +283,7 @@ struct abm *dce_abm_create(
29638         const struct dce_abm_shift *abm_shift,
29639         const struct dce_abm_mask *abm_mask)
29641 -       struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
29642 +       struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_ATOMIC);
29644         if (abm_dce == NULL) {
29645                 BREAK_TO_DEBUGGER();
29646 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
29647 index 277484cf853e..d4be5954d7aa 100644
29648 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
29649 +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
29650 @@ -99,7 +99,6 @@ struct dce110_aux_registers {
29651         AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
29652         AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
29653         AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
29654 -       AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
29655         AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
29656         AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
29657         AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
29658 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
29659 index ddc789daf3b1..09d4cb5c97b6 100644
29660 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
29661 +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
29662 @@ -1049,7 +1049,7 @@ struct dmcu *dcn10_dmcu_create(
29663         const struct dce_dmcu_shift *dmcu_shift,
29664         const struct dce_dmcu_mask *dmcu_mask)
29666 -       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
29667 +       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
29669         if (dmcu_dce == NULL) {
29670                 BREAK_TO_DEBUGGER();
29671 @@ -1070,7 +1070,7 @@ struct dmcu *dcn20_dmcu_create(
29672         const struct dce_dmcu_shift *dmcu_shift,
29673         const struct dce_dmcu_mask *dmcu_mask)
29675 -       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
29676 +       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
29678         if (dmcu_dce == NULL) {
29679                 BREAK_TO_DEBUGGER();
29680 @@ -1091,7 +1091,7 @@ struct dmcu *dcn21_dmcu_create(
29681         const struct dce_dmcu_shift *dmcu_shift,
29682         const struct dce_dmcu_mask *dmcu_mask)
29684 -       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
29685 +       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
29687         if (dmcu_dce == NULL) {
29688                 BREAK_TO_DEBUGGER();
29689 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
29690 index 69e34bef274c..febccb35ddad 100644
29691 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
29692 +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
29693 @@ -81,13 +81,18 @@ static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state)
29695         struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
29696         uint32_t raw_state;
29697 +       enum dmub_status status = DMUB_STATUS_INVALID;
29699         // Send gpint command and wait for ack
29700 -       dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
29702 -       dmub_srv_get_gpint_response(srv, &raw_state);
29704 -       *state = convert_psr_state(raw_state);
29705 +       status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
29707 +       if (status == DMUB_STATUS_OK) {
29708 +               // GPINT was executed, get response
29709 +               dmub_srv_get_gpint_response(srv, &raw_state);
29710 +               *state = convert_psr_state(raw_state);
29711 +       } else
29712 +               // Return invalid state when GPINT times out
29713 +               *state = 0xFF;
29716  /*
29717 diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
29718 index 62cc2651e00c..8774406120fc 100644
29719 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
29720 +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
29721 @@ -112,7 +112,7 @@ struct dccg *dccg2_create(
29722         const struct dccg_shift *dccg_shift,
29723         const struct dccg_mask *dccg_mask)
29725 -       struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
29726 +       struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_ATOMIC);
29727         struct dccg *base;
29729         if (dccg_dcn == NULL) {
29730 diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
29731 index bec7059f6d5d..a1318c31bcfa 100644
29732 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
29733 +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
29734 @@ -1,5 +1,5 @@
29735  /*
29736 - * Copyright 2012-17 Advanced Micro Devices, Inc.
29737 + * Copyright 2012-2021 Advanced Micro Devices, Inc.
29738   *
29739   * Permission is hereby granted, free of charge, to any person obtaining a
29740   * copy of this software and associated documentation files (the "Software"),
29741 @@ -181,11 +181,14 @@ void hubp2_vready_at_or_After_vsync(struct hubp *hubp,
29742         else
29743                 Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 0
29744         */
29745 -       if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
29746 -               + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
29747 -               value = 1;
29748 -       } else
29749 -               value = 0;
29750 +       if (pipe_dest->htotal != 0) {
29751 +               if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
29752 +                       + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
29753 +                       value = 1;
29754 +               } else
29755 +                       value = 0;
29756 +       }
29758         REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value);
29761 diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
29762 index 2c2dbfcd8957..bfbc23b76cd5 100644
29763 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
29764 +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
29765 @@ -1104,7 +1104,7 @@ struct dpp *dcn20_dpp_create(
29766         uint32_t inst)
29768         struct dcn20_dpp *dpp =
29769 -               kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL);
29770 +               kzalloc(sizeof(struct dcn20_dpp), GFP_ATOMIC);
29772         if (!dpp)
29773                 return NULL;
29774 @@ -1122,7 +1122,7 @@ struct input_pixel_processor *dcn20_ipp_create(
29775         struct dc_context *ctx, uint32_t inst)
29777         struct dcn10_ipp *ipp =
29778 -               kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
29779 +               kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC);
29781         if (!ipp) {
29782                 BREAK_TO_DEBUGGER();
29783 @@ -1139,7 +1139,7 @@ struct output_pixel_processor *dcn20_opp_create(
29784         struct dc_context *ctx, uint32_t inst)
29786         struct dcn20_opp *opp =
29787 -               kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
29788 +               kzalloc(sizeof(struct dcn20_opp), GFP_ATOMIC);
29790         if (!opp) {
29791                 BREAK_TO_DEBUGGER();
29792 @@ -1156,7 +1156,7 @@ struct dce_aux *dcn20_aux_engine_create(
29793         uint32_t inst)
29795         struct aux_engine_dce110 *aux_engine =
29796 -               kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
29797 +               kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC);
29799         if (!aux_engine)
29800                 return NULL;
29801 @@ -1194,7 +1194,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
29802         uint32_t inst)
29804         struct dce_i2c_hw *dce_i2c_hw =
29805 -               kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
29806 +               kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC);
29808         if (!dce_i2c_hw)
29809                 return NULL;
29810 @@ -1207,7 +1207,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
29811  struct mpc *dcn20_mpc_create(struct dc_context *ctx)
29813         struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
29814 -                                         GFP_KERNEL);
29815 +                                         GFP_ATOMIC);
29817         if (!mpc20)
29818                 return NULL;
29819 @@ -1225,7 +1225,7 @@ struct hubbub *dcn20_hubbub_create(struct dc_context *ctx)
29821         int i;
29822         struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
29823 -                                         GFP_KERNEL);
29824 +                                         GFP_ATOMIC);
29826         if (!hubbub)
29827                 return NULL;
29828 @@ -1253,7 +1253,7 @@ struct timing_generator *dcn20_timing_generator_create(
29829                 uint32_t instance)
29831         struct optc *tgn10 =
29832 -               kzalloc(sizeof(struct optc), GFP_KERNEL);
29833 +               kzalloc(sizeof(struct optc), GFP_ATOMIC);
29835         if (!tgn10)
29836                 return NULL;
29837 @@ -1332,7 +1332,7 @@ static struct clock_source *dcn20_clock_source_create(
29838         bool dp_clk_src)
29840         struct dce110_clk_src *clk_src =
29841 -               kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
29842 +               kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC);
29844         if (!clk_src)
29845                 return NULL;
29846 @@ -1438,7 +1438,7 @@ struct display_stream_compressor *dcn20_dsc_create(
29847         struct dc_context *ctx, uint32_t inst)
29849         struct dcn20_dsc *dsc =
29850 -               kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
29851 +               kzalloc(sizeof(struct dcn20_dsc), GFP_ATOMIC);
29853         if (!dsc) {
29854                 BREAK_TO_DEBUGGER();
29855 @@ -1572,7 +1572,7 @@ struct hubp *dcn20_hubp_create(
29856         uint32_t inst)
29858         struct dcn20_hubp *hubp2 =
29859 -               kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
29860 +               kzalloc(sizeof(struct dcn20_hubp), GFP_ATOMIC);
29862         if (!hubp2)
29863                 return NULL;
29864 @@ -3390,7 +3390,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
29866  static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
29868 -       struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
29869 +       struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_ATOMIC);
29871         if (!pp_smu)
29872                 return pp_smu;
29873 @@ -4034,7 +4034,7 @@ struct resource_pool *dcn20_create_resource_pool(
29874                 struct dc *dc)
29876         struct dcn20_resource_pool *pool =
29877 -               kzalloc(sizeof(struct dcn20_resource_pool), GFP_KERNEL);
29878 +               kzalloc(sizeof(struct dcn20_resource_pool), GFP_ATOMIC);
29880         if (!pool)
29881                 return NULL;
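The run of GFP_KERNEL to GFP_ATOMIC conversions in the dce/dcn20 constructors above matters because a GFP_KERNEL allocation may sleep to reclaim memory, which is illegal under a spinlock or in interrupt context; GFP_ATOMIC never sleeps, at the price of failing more readily under memory pressure. The contract in miniature (kzalloc and both flags are the real kernel API; the struct is illustrative, and this fragment builds only in-tree):

#include <linux/slab.h>

struct dc_widget { int id; };           /* illustrative payload */

static struct dc_widget *dc_widget_create_atomic(void)
{
        /* Must not sleep here: callers may hold a spinlock, so
         * GFP_KERNEL (which can block in reclaim) is not allowed. */
        struct dc_widget *w = kzalloc(sizeof(*w), GFP_ATOMIC);

        if (!w)
                return NULL;            /* more likely than with GFP_KERNEL */
        w->id = -1;
        return w;
}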
29882 diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
29883 index 06dc1e2e8383..07c8d2e2c09c 100644
29884 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
29885 +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
29886 @@ -848,7 +848,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
29888                                         cmd.mall.cursor_copy_src.quad_part = cursor_attr.address.quad_part;
29889                                         cmd.mall.cursor_copy_dst.quad_part =
29890 -                                                       plane->address.grph.cursor_cache_addr.quad_part;
29891 +                                                       (plane->address.grph.cursor_cache_addr.quad_part + 2047) & ~2047;
29892                                         cmd.mall.cursor_width = cursor_attr.width;
29893                                         cmd.mall.cursor_height = cursor_attr.height;
29894                                         cmd.mall.cursor_pitch = cursor_attr.pitch;
29895 @@ -858,8 +858,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
29896                                         dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
29898                                         /* Use copied cursor, and it's okay to not switch back */
29899 -                                       cursor_attr.address.quad_part =
29900 -                                                       plane->address.grph.cursor_cache_addr.quad_part;
29901 +                                       cursor_attr.address.quad_part = cmd.mall.cursor_copy_dst.quad_part;
29902                                         dc_stream_set_cursor_attributes(stream, &cursor_attr);
29903                                 }
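The cursor-copy destination above is now rounded up with (addr + 2047) & ~2047, the standard align-up trick for a power-of-two boundary (here 2048 bytes), and the cursor attribute address is then taken from the rounded destination so both sides agree. Worked out as a runnable check:

#include <assert.h>
#include <stdint.h>

static uint64_t align_up_2048(uint64_t addr)
{
        /* Works because 2048 is a power of two: adding size-1 then
         * clearing the low bits rounds up to the next multiple. */
        return (addr + 2047) & ~2047ull;
}

int main(void)
{
        assert(align_up_2048(0)    == 0);
        assert(align_up_2048(1)    == 2048);
        assert(align_up_2048(2048) == 2048);
        assert(align_up_2048(4097) == 6144);
        return 0;
}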
29905 diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
29906 index 3e6f76096119..a7598356f37d 100644
29907 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
29908 +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
29909 @@ -143,16 +143,18 @@ static void mpc3_power_on_ogam_lut(
29911         struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
29913 -       if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) {
29914 -               // Force power on
29915 -               REG_UPDATE(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_DIS, power_on == true ? 1:0);
29916 -               // Wait for confirmation when powering on
29917 -               if (power_on)
29918 -                       REG_WAIT(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_STATE, 0, 10, 10);
29919 -       } else {
29920 -               REG_SET(MPCC_MEM_PWR_CTRL[mpcc_id], 0,
29921 -                               MPCC_OGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1);
29922 -       }
29923 +       /*
29924 +        * Powering on: force memory active so the LUT can be updated.
29925 +        * Powering off: allow entering memory low power mode
29926 +        *
29927 +        * Memory low power mode is controlled during MPC OGAM LUT init.
29928 +        */
29929 +       REG_UPDATE(MPCC_MEM_PWR_CTRL[mpcc_id],
29930 +                  MPCC_OGAM_MEM_PWR_DIS, power_on != 0);
29932 +       /* Wait for memory to be powered on - we won't be able to write to it otherwise. */
29933 +       if (power_on)
29934 +               REG_WAIT(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_STATE, 0, 10, 10);
29937  static void mpc3_configure_ogam_lut(
29938 @@ -1427,7 +1429,7 @@ const struct mpc_funcs dcn30_mpc_funcs = {
29939         .acquire_rmu = mpcc3_acquire_rmu,
29940         .program_3dlut = mpc3_program_3dlut,
29941         .release_rmu = mpcc3_release_rmu,
29942 -       .power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut,
29943 +       .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
29944         .get_mpc_out_mux = mpc1_get_mpc_out_mux,
29946  };
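
The one-line fix above rewires .power_on_mpc_mem_pwr from the DCN2.0 helper to the DCN3.0 one. Because both functions match the mpc_funcs signature, the wrong wiring compiles silently and only misbehaves at runtime; a stripped-down sketch of that failure mode (all names here are hypothetical, not the DC types):

    #include <stdio.h>

    struct mpc; /* opaque for the sketch */

    struct mpc_funcs {
            void (*power_on_mpc_mem_pwr)(struct mpc *mpc, int mpcc_id, int on);
    };

    static void mpc20_power_on(struct mpc *mpc, int id, int on)
    {
            printf("DCN2.0 path: id=%d on=%d\n", id, on);
    }

    static void mpc3_power_on(struct mpc *mpc, int id, int on)
    {
            printf("DCN3.0 path: id=%d on=%d\n", id, on);
    }

    /* Identical signatures: the compiler cannot catch wiring the wrong one. */
    static const struct mpc_funcs dcn30_funcs = {
            .power_on_mpc_mem_pwr = mpc3_power_on, /* was mpc20_power_on */
    };

    int main(void)
    {
            dcn30_funcs.power_on_mpc_mem_pwr(NULL, 0, 1);
            return 0;
    }
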
29947 diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
29948 index fb7f1dea3c46..9b33182f3abd 100644
29949 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
29950 +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
29951 @@ -181,7 +181,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc = {
29952                 },
29953         .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
29954         .num_states = 1,
29955 -       .sr_exit_time_us = 12,
29956 +       .sr_exit_time_us = 15.5,
29957         .sr_enter_plus_exit_time_us = 20,
29958         .urgent_latency_us = 4.0,
29959         .urgent_latency_pixel_data_only_us = 4.0,
29960 @@ -826,10 +826,11 @@ static const struct dc_plane_cap plane_cap = {
29961                         .fp16 = 16000
29962         },
29964 +       /* 6:1 downscaling ratio: 1000/6 = 166.666 */
29965         .max_downscale_factor = {
29966 -                       .argb8888 = 600,
29967 -                       .nv12 = 600,
29968 -                       .fp16 = 600
29969 +                       .argb8888 = 167,
29970 +                       .nv12 = 167,
29971 +                       .fp16 = 167
29972         }
29973  };
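
Per the new comment, max_downscale_factor is the minimum destination:source ratio in thousandths, so 167 ≈ 1000/6 permits 6:1 downscaling where the old value of 600 only permitted about 1.67:1. A sketch of the check this cap implies (the helper name and exact comparison are illustrative, not taken from DC):

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * max_downscale_factor is the minimum dst/src ratio in thousandths:
     * 167 ~= 1000/6 permits up to 6:1 downscaling, 600 only ~1.67:1.
     */
    static bool downscale_ok(unsigned int src, unsigned int dst,
                             unsigned int max_downscale_factor)
    {
            /* dst/src >= factor/1000, rearranged to avoid division */
            return (unsigned long long)dst * 1000 >=
                   (unsigned long long)src * max_downscale_factor;
    }

    int main(void)
    {
            printf("3840->1080 @600: %d\n", downscale_ok(3840, 1080, 600)); /* 0 */
            printf("3840->1080 @167: %d\n", downscale_ok(3840, 1080, 167)); /* 1 */
            return 0;
    }
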
29975 diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
29976 index c494235016e0..00f066f1da0c 100644
29977 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
29978 +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
29979 @@ -843,10 +843,11 @@ static const struct dc_plane_cap plane_cap = {
29980                         .fp16 = 16000
29981         },
29983 +       /* 6:1 downscaling ratio: 1000/6 = 166.666 */
29984         .max_downscale_factor = {
29985 -                       .argb8888 = 600,
29986 -                       .nv12 = 600,
29987 -                       .fp16 = 600
29988 +                       .argb8888 = 167,
29989 +                       .nv12 = 167,
29990 +                       .fp16 = 167
29991         },
29992         64,
29993         64
29994 diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
29995 index 4b659b63f75b..7d9d591de411 100644
29996 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
29997 +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
29998 @@ -164,7 +164,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc = {
30000                 .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
30001                 .num_states = 1,
30002 -               .sr_exit_time_us = 12,
30003 +               .sr_exit_time_us = 15.5,
30004                 .sr_enter_plus_exit_time_us = 20,
30005                 .urgent_latency_us = 4.0,
30006                 .urgent_latency_pixel_data_only_us = 4.0,
30007 @@ -282,10 +282,11 @@ static const struct dc_plane_cap plane_cap = {
30008                                 .nv12 = 16000,
30009                                 .fp16 = 16000
30010                 },
30011 +               /* 6:1 downscaling ratio: 1000/6 = 166.666 */
30012                 .max_downscale_factor = {
30013 -                               .argb8888 = 600,
30014 -                               .nv12 = 600,
30015 -                               .fp16 = 600
30016 +                               .argb8888 = 167,
30017 +                               .nv12 = 167,
30018 +                               .fp16 = 167
30019                 },
30020                 16,
30021                 16
30022 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
30023 index 0f3f510fd83b..9729cf292e84 100644
30024 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
30025 +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
30026 @@ -3437,6 +3437,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
30027                         mode_lib->vba.DCCEnabledInAnyPlane = true;
30028                 }
30029         }
30030 +       mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly;
30031         for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
30032                 locals->FabricAndDRAMBandwidthPerState[i] = dml_min(
30033                                 mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
30034 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
30035 index 210c96cd5b03..51098c2c9854 100644
30036 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
30037 +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
30038 @@ -3544,6 +3544,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
30039                         mode_lib->vba.DCCEnabledInAnyPlane = true;
30040                 }
30041         }
30042 +       mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly;
30043         for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
30044                 locals->FabricAndDRAMBandwidthPerState[i] = dml_min(
30045                                 mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
30046 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
30047 index 72423dc425dc..799bae229e67 100644
30048 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
30049 +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
30050 @@ -293,13 +293,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
30051         if (surf_linear) {
30052                 log2_swath_height_l = 0;
30053                 log2_swath_height_c = 0;
30054 -       } else if (!surf_vert) {
30055 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
30056 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
30057         } else {
30058 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
30059 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
30060 +               unsigned int swath_height_l;
30061 +               unsigned int swath_height_c;
30063 +               if (!surf_vert) {
30064 +                       swath_height_l = rq_param->misc.rq_l.blk256_height;
30065 +                       swath_height_c = rq_param->misc.rq_c.blk256_height;
30066 +               } else {
30067 +                       swath_height_l = rq_param->misc.rq_l.blk256_width;
30068 +                       swath_height_c = rq_param->misc.rq_c.blk256_width;
30069 +               }
30071 +               if (swath_height_l > 0)
30072 +                       log2_swath_height_l = dml_log2(swath_height_l);
30074 +               if (req128_l && log2_swath_height_l > 0)
30075 +                       log2_swath_height_l -= 1;
30077 +               if (swath_height_c > 0)
30078 +                       log2_swath_height_c = dml_log2(swath_height_c);
30080 +               if (req128_c && log2_swath_height_c > 0)
30081 +                       log2_swath_height_c -= 1;
30082         }
30084         rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
30085         rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
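
The rewrite above, repeated for the 20v2, 21, 30, and dml1 variants below, guards against an underflow in the old code: with a swath height of 1, dml_log2() returns 0, subtracting req128 yields -1, and the later 1 << log2_swath_height becomes undefined behavior. A standalone sketch of the clamp (ilog2u stands in for dml_log2):

    #include <stdio.h>

    /* Integer log2 for powers of two, as dml_log2() is used here. */
    static unsigned int ilog2u(unsigned int x)
    {
            unsigned int r = 0;

            while (x > 1) {
                    x >>= 1;
                    r++;
            }
            return r;
    }

    int main(void)
    {
            unsigned int swath_height = 1, req128 = 1;
            unsigned int log2_h = 0;

            /* Old form: ilog2u(1) - req128 == -1, and 1 << -1 is undefined. */

            /* Patched form: only halve the swath when there is room to halve. */
            if (swath_height > 0)
                    log2_h = ilog2u(swath_height);
            if (req128 && log2_h > 0)
                    log2_h -= 1;

            printf("swath_height = %u\n", 1u << log2_h); /* prints 1, no UB */
            return 0;
    }
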
30087 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
30088 index 9c78446c3a9d..6a6d5970d1d5 100644
30089 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
30090 +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
30091 @@ -293,13 +293,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
30092         if (surf_linear) {
30093                 log2_swath_height_l = 0;
30094                 log2_swath_height_c = 0;
30095 -       } else if (!surf_vert) {
30096 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
30097 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
30098         } else {
30099 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
30100 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
30101 +               unsigned int swath_height_l;
30102 +               unsigned int swath_height_c;
30104 +               if (!surf_vert) {
30105 +                       swath_height_l = rq_param->misc.rq_l.blk256_height;
30106 +                       swath_height_c = rq_param->misc.rq_c.blk256_height;
30107 +               } else {
30108 +                       swath_height_l = rq_param->misc.rq_l.blk256_width;
30109 +                       swath_height_c = rq_param->misc.rq_c.blk256_width;
30110 +               }
30112 +               if (swath_height_l > 0)
30113 +                       log2_swath_height_l = dml_log2(swath_height_l);
30115 +               if (req128_l && log2_swath_height_l > 0)
30116 +                       log2_swath_height_l -= 1;
30118 +               if (swath_height_c > 0)
30119 +                       log2_swath_height_c = dml_log2(swath_height_c);
30121 +               if (req128_c && log2_swath_height_c > 0)
30122 +                       log2_swath_height_c -= 1;
30123         }
30125         rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
30126         rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
30128 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
30129 index edd41d358291..dc1c81a6e377 100644
30130 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
30131 +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
30132 @@ -277,13 +277,31 @@ static void handle_det_buf_split(
30133         if (surf_linear) {
30134                 log2_swath_height_l = 0;
30135                 log2_swath_height_c = 0;
30136 -       } else if (!surf_vert) {
30137 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
30138 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
30139         } else {
30140 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
30141 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
30142 +               unsigned int swath_height_l;
30143 +               unsigned int swath_height_c;
30145 +               if (!surf_vert) {
30146 +                       swath_height_l = rq_param->misc.rq_l.blk256_height;
30147 +                       swath_height_c = rq_param->misc.rq_c.blk256_height;
30148 +               } else {
30149 +                       swath_height_l = rq_param->misc.rq_l.blk256_width;
30150 +                       swath_height_c = rq_param->misc.rq_c.blk256_width;
30151 +               }
30153 +               if (swath_height_l > 0)
30154 +                       log2_swath_height_l = dml_log2(swath_height_l);
30156 +               if (req128_l && log2_swath_height_l > 0)
30157 +                       log2_swath_height_l -= 1;
30159 +               if (swath_height_c > 0)
30160 +                       log2_swath_height_c = dml_log2(swath_height_c);
30162 +               if (req128_c && log2_swath_height_c > 0)
30163 +                       log2_swath_height_c -= 1;
30164         }
30166         rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
30167         rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
30169 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
30170 index 0f14f205ebe5..04601a767a8f 100644
30171 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
30172 +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
30173 @@ -237,13 +237,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
30174         if (surf_linear) {
30175                 log2_swath_height_l = 0;
30176                 log2_swath_height_c = 0;
30177 -       } else if (!surf_vert) {
30178 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
30179 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
30180         } else {
30181 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
30182 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
30183 +               unsigned int swath_height_l;
30184 +               unsigned int swath_height_c;
30186 +               if (!surf_vert) {
30187 +                       swath_height_l = rq_param->misc.rq_l.blk256_height;
30188 +                       swath_height_c = rq_param->misc.rq_c.blk256_height;
30189 +               } else {
30190 +                       swath_height_l = rq_param->misc.rq_l.blk256_width;
30191 +                       swath_height_c = rq_param->misc.rq_c.blk256_width;
30192 +               }
30194 +               if (swath_height_l > 0)
30195 +                       log2_swath_height_l = dml_log2(swath_height_l);
30197 +               if (req128_l && log2_swath_height_l > 0)
30198 +                       log2_swath_height_l -= 1;
30200 +               if (swath_height_c > 0)
30201 +                       log2_swath_height_c = dml_log2(swath_height_c);
30203 +               if (req128_c && log2_swath_height_c > 0)
30204 +                       log2_swath_height_c -= 1;
30205         }
30207         rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
30208         rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
30210 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
30211 index 4c3e9cc30167..414da64f5734 100644
30212 --- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
30213 +++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
30214 @@ -344,13 +344,31 @@ static void handle_det_buf_split(
30215         if (surf_linear) {
30216                 log2_swath_height_l = 0;
30217                 log2_swath_height_c = 0;
30218 -       } else if (!surf_vert) {
30219 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
30220 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
30221         } else {
30222 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
30223 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
30224 +               unsigned int swath_height_l;
30225 +               unsigned int swath_height_c;
30227 +               if (!surf_vert) {
30228 +                       swath_height_l = rq_param->misc.rq_l.blk256_height;
30229 +                       swath_height_c = rq_param->misc.rq_c.blk256_height;
30230 +               } else {
30231 +                       swath_height_l = rq_param->misc.rq_l.blk256_width;
30232 +                       swath_height_c = rq_param->misc.rq_c.blk256_width;
30233 +               }
30235 +               if (swath_height_l > 0)
30236 +                       log2_swath_height_l = dml_log2(swath_height_l);
30238 +               if (req128_l && log2_swath_height_l > 0)
30239 +                       log2_swath_height_l -= 1;
30241 +               if (swath_height_c > 0)
30242 +                       log2_swath_height_c = dml_log2(swath_height_c);
30244 +               if (req128_c && log2_swath_height_c > 0)
30245 +                       log2_swath_height_c -= 1;
30246         }
30248         rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
30249         rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
30251 diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
30252 index 5e384a8a83dc..51855a2624cf 100644
30253 --- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
30254 +++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
30255 @@ -39,7 +39,7 @@
30256  #define HDCP14_KSV_SIZE 5
30257  #define HDCP14_MAX_KSV_FIFO_SIZE 127*HDCP14_KSV_SIZE
30259 -static const bool hdcp_cmd_is_read[] = {
30260 +static const bool hdcp_cmd_is_read[HDCP_MESSAGE_ID_MAX] = {
30261         [HDCP_MESSAGE_ID_READ_BKSV] = true,
30262         [HDCP_MESSAGE_ID_READ_RI_R0] = true,
30263         [HDCP_MESSAGE_ID_READ_PJ] = true,
30264 @@ -75,7 +75,7 @@ static const bool hdcp_cmd_is_read[] = {
30265         [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = false
30266  };
30268 -static const uint8_t hdcp_i2c_offsets[] = {
30269 +static const uint8_t hdcp_i2c_offsets[HDCP_MESSAGE_ID_MAX] = {
30270         [HDCP_MESSAGE_ID_READ_BKSV] = 0x0,
30271         [HDCP_MESSAGE_ID_READ_RI_R0] = 0x8,
30272         [HDCP_MESSAGE_ID_READ_PJ] = 0xA,
30273 @@ -106,7 +106,8 @@ static const uint8_t hdcp_i2c_offsets[] = {
30274         [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60,
30275         [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60,
30276         [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80,
30277 -       [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70
30278 +       [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70,
30279 +       [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x0,
30280  };
30282  struct protection_properties {
30283 @@ -184,7 +185,7 @@ static const struct protection_properties hdmi_14_protection = {
30284         .process_transaction = hdmi_14_process_transaction
30285  };
30287 -static const uint32_t hdcp_dpcd_addrs[] = {
30288 +static const uint32_t hdcp_dpcd_addrs[HDCP_MESSAGE_ID_MAX] = {
30289         [HDCP_MESSAGE_ID_READ_BKSV] = 0x68000,
30290         [HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005,
30291         [HDCP_MESSAGE_ID_READ_PJ] = 0xFFFFFFFF,
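
Sizing these designated-initializer arrays with HDCP_MESSAGE_ID_MAX guarantees every message ID indexes in bounds; entries without an explicit initializer read as 0/false instead of out-of-bounds memory. A minimal illustration of the C semantics involved (enum and array names hypothetical):

    #include <stdio.h>

    enum msg_id {
            MSG_READ_A,
            MSG_READ_B,
            MSG_WRITE_C,
            MSG_ID_MAX, /* stands in for HDCP_MESSAGE_ID_MAX */
    };

    /*
     * Without the explicit size, the array length is one past the highest
     * designated index; indexing a later enum value reads out of bounds.
     * With [MSG_ID_MAX], every valid id is in range and defaults to 0.
     */
    static const unsigned char msg_offset[MSG_ID_MAX] = {
            [MSG_READ_A] = 0x00,
            [MSG_READ_B] = 0x08,
            /* MSG_WRITE_C intentionally omitted: reads as 0, not garbage */
    };

    int main(void)
    {
            printf("offset for MSG_WRITE_C: 0x%x\n", msg_offset[MSG_WRITE_C]);
            return 0;
    }
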
30292 diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
30293 index 904ce9b88088..afbe8856468a 100644
30294 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
30295 +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
30296 @@ -791,6 +791,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
30297                            TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
30298                         hdcp->connection.is_hdcp2_revoked = 1;
30299                         status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
30300 +               } else {
30301 +                       status = MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
30302                 }
30303         }
30304         mutex_unlock(&psp->hdcp_context.mutex);
30305 diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
30306 index ed05a30d1139..e2a56a7f3d7a 100644
30307 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
30308 +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
30309 @@ -1526,20 +1526,6 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
30311                 smu10_data->gfx_actual_soft_min_freq = min_freq;
30312                 smu10_data->gfx_actual_soft_max_freq = max_freq;
30314 -               ret = smum_send_msg_to_smc_with_parameter(hwmgr,
30315 -                                       PPSMC_MSG_SetHardMinGfxClk,
30316 -                                       min_freq,
30317 -                                       NULL);
30318 -               if (ret)
30319 -                       return ret;
30321 -               ret = smum_send_msg_to_smc_with_parameter(hwmgr,
30322 -                                       PPSMC_MSG_SetSoftMaxGfxClk,
30323 -                                       max_freq,
30324 -                                       NULL);
30325 -               if (ret)
30326 -                       return ret;
30327         } else if (type == PP_OD_COMMIT_DPM_TABLE) {
30328                 if (size != 0) {
30329                         pr_err("Input parameter number not correct\n");
30330 diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
30331 index 599ec9726601..959143eff651 100644
30332 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
30333 +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
30334 @@ -5160,7 +5160,7 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
30336  out:
30337         smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
30338 -                                               1 << power_profile_mode,
30339 +                                               (!power_profile_mode) ? 0 : 1 << (power_profile_mode - 1),
30340                                                 NULL);
30341         hwmgr->power_profile_mode = power_profile_mode;
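
The corrected mapping above treats the SMC workload mask as offset by one from the profile-mode index: mode 0 selects no bit and mode n selects bit n-1, where the old code set bit n. A sketch of the mapping:

    #include <stdio.h>

    static unsigned int workload_mask(unsigned int power_profile_mode)
    {
            /* Mode 0 selects no workload bit; mode n maps to bit n - 1. */
            return !power_profile_mode ? 0 : 1u << (power_profile_mode - 1);
    }

    int main(void)
    {
            for (unsigned int mode = 0; mode <= 3; mode++)
                    printf("mode %u -> mask 0x%x\n", mode, workload_mask(mode));
            /* 0 -> 0x0, 1 -> 0x1, 2 -> 0x2, 3 -> 0x4 */
            return 0;
    }
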
30343 diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
30344 index cd905e41080e..ec0037a21331 100644
30345 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
30346 +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
30347 @@ -279,35 +279,25 @@ static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_
30348         if (smu->adev->in_suspend)
30349                 return;
30351 -       /*
30352 -        * mclk, fclk and socclk are interdependent
30353 -        * on each other
30354 -        */
30355         if (clk == SMU_MCLK) {
30356 -               /* reset clock dependency */
30357                 smu->user_dpm_profile.clk_dependency = 0;
30358 -               /* set mclk dependent clocks(fclk and socclk) */
30359                 smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
30360         } else if (clk == SMU_FCLK) {
30361 -               /* give priority to mclk, if mclk dependent clocks are set */
30362 +               /* MCLK takes precedence over FCLK */
30363                 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
30364                         return;
30366 -               /* reset clock dependency */
30367                 smu->user_dpm_profile.clk_dependency = 0;
30368 -               /* set fclk dependent clocks(mclk and socclk) */
30369                 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
30370         } else if (clk == SMU_SOCCLK) {
30371 -               /* give priority to mclk, if mclk dependent clocks are set */
30372 +               /* MCLK takes precedence over SOCCLK */
30373                 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
30374                         return;
30376 -               /* reset clock dependency */
30377                 smu->user_dpm_profile.clk_dependency = 0;
30378 -               /* set socclk dependent clocks(mclk and fclk) */
30379                 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
30380         } else
30381 -               /* add clk dependencies here, if any */
30382 +               /* Add clk dependencies here, if any */
30383                 return;
30386 @@ -331,7 +321,7 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
30387                 return;
30389         /* Enable restore flag */
30390 -       smu->user_dpm_profile.flags = SMU_DPM_USER_PROFILE_RESTORE;
30391 +       smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;
30393         /* set the user dpm power limit */
30394         if (smu->user_dpm_profile.power_limit) {
30395 @@ -354,8 +344,8 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
30396                                 ret = smu_force_clk_levels(smu, clk_type,
30397                                                 smu->user_dpm_profile.clk_mask[clk_type]);
30398                                 if (ret)
30399 -                                       dev_err(smu->adev->dev, "Failed to set clock type = %d\n",
30400 -                                                       clk_type);
30401 +                                       dev_err(smu->adev->dev,
30402 +                                               "Failed to set clock type = %d\n", clk_type);
30403                         }
30404                 }
30405         }
30406 @@ -1777,7 +1767,7 @@ int smu_force_clk_levels(struct smu_context *smu,
30408         if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
30409                 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
30410 -               if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE) {
30411 +               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
30412                         smu->user_dpm_profile.clk_mask[clk_type] = mask;
30413                         smu_set_user_clk_dependencies(smu, clk_type);
30414                 }
30415 @@ -2034,7 +2024,7 @@ int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
30416         if (smu->ppt_funcs->set_fan_speed_percent) {
30417                 percent = speed * 100 / smu->fan_max_rpm;
30418                 ret = smu->ppt_funcs->set_fan_speed_percent(smu, percent);
30419 -               if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
30420 +               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
30421                         smu->user_dpm_profile.fan_speed_percent = percent;
30422         }
30424 @@ -2096,6 +2086,7 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
30425                 dev_err(smu->adev->dev,
30426                         "New power limit (%d) is over the max allowed %d\n",
30427                         limit, smu->max_power_limit);
30428 +               ret = -EINVAL;
30429                 goto out;
30430         }
30432 @@ -2104,7 +2095,7 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
30434         if (smu->ppt_funcs->set_power_limit) {
30435                 ret = smu->ppt_funcs->set_power_limit(smu, limit);
30436 -               if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
30437 +               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
30438                         smu->user_dpm_profile.power_limit = limit;
30439         }
30441 @@ -2285,7 +2276,7 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
30443         if (smu->ppt_funcs->set_fan_control_mode) {
30444                 ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
30445 -               if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
30446 +               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
30447                         smu->user_dpm_profile.fan_mode = value;
30448         }
30450 @@ -2293,7 +2284,7 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
30452         /* reset user dpm fan speed */
30453         if (!ret && value != AMD_FAN_CTRL_MANUAL &&
30454 -                       smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
30455 +                       !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
30456                 smu->user_dpm_profile.fan_speed_percent = 0;
30458         return ret;
30459 @@ -2335,7 +2326,7 @@ int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
30460                 if (speed > 100)
30461                         speed = 100;
30462                 ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
30463 -               if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
30464 +               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
30465                         smu->user_dpm_profile.fan_speed_percent = speed;
30466         }
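
Several hunks above convert the SMU_DPM_USER_PROFILE_RESTORE handling from assignment and equality (flags = ..., flags != ...) to proper bit operations (flags |= ..., !(flags & ...)), so the logic keeps working once user_dpm_profile.flags carries more than one bit. A compact illustration (the second flag bit is hypothetical):

    #include <stdio.h>

    #define PROFILE_RESTORE (1u << 0)
    #define PROFILE_CUSTOM  (1u << 1) /* a second, hypothetical flag bit */

    int main(void)
    {
            unsigned int flags = 0;

            flags |= PROFILE_CUSTOM;
            flags |= PROFILE_RESTORE; /* |= preserves other bits; = would not */

            /* Equality comparison fails once a second bit is set... */
            printf("flags == RESTORE: %d\n", flags == PROFILE_RESTORE);    /* 0 */
            /* ...while a mask test still answers "is restore in progress?". */
            printf("flags & RESTORE:  %d\n", !!(flags & PROFILE_RESTORE)); /* 1 */
            return 0;
    }
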
30468 diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
30469 index 6e641f1513d8..fbff3df72e6c 100644
30470 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
30471 +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
30472 @@ -1110,7 +1110,6 @@ static int navi10_force_clk_levels(struct smu_context *smu,
30473         case SMU_SOCCLK:
30474         case SMU_MCLK:
30475         case SMU_UCLK:
30476 -       case SMU_DCEFCLK:
30477         case SMU_FCLK:
30478                 /* There are only 2 levels for fine grained DPM */
30479                 if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
30480 @@ -1130,6 +1129,10 @@ static int navi10_force_clk_levels(struct smu_context *smu,
30481                 if (ret)
30482                         return size;
30483                 break;
30484 +       case SMU_DCEFCLK:
30485 +       dev_info(smu->adev->dev, "Setting DCEFCLK min/max dpm level is not supported!\n");
30486 +               break;
30488         default:
30489                 break;
30490         }
30491 diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
30492 index af73e1430af5..61438940c26e 100644
30493 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
30494 +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
30495 @@ -1127,7 +1127,6 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
30496         case SMU_SOCCLK:
30497         case SMU_MCLK:
30498         case SMU_UCLK:
30499 -       case SMU_DCEFCLK:
30500         case SMU_FCLK:
30501                 /* There are only 2 levels for fine grained DPM */
30502                 if (sienna_cichlid_is_support_fine_grained_dpm(smu, clk_type)) {
30503 @@ -1147,6 +1146,9 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
30504                 if (ret)
30505                         goto forec_level_out;
30506                 break;
30507 +       case SMU_DCEFCLK:
30508 +       dev_info(smu->adev->dev, "Setting DCEFCLK min/max dpm level is not supported!\n");
30509 +               break;
30510         default:
30511                 break;
30512         }
30513 diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
30514 index 101eaa20db9b..a80f551771b9 100644
30515 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
30516 +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
30517 @@ -1462,7 +1462,6 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
30518                                         long input[], uint32_t size)
30520         int ret = 0;
30521 -       int i;
30522         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
30524         if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) {
30525 @@ -1535,43 +1534,6 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
30526                         smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
30527                         smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
30528                         smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
30530 -                       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
30531 -                                                                       smu->gfx_actual_hard_min_freq, NULL);
30532 -                       if (ret) {
30533 -                               dev_err(smu->adev->dev, "Restore the default hard min sclk failed!");
30534 -                               return ret;
30535 -                       }
30537 -                       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
30538 -                                                                       smu->gfx_actual_soft_max_freq, NULL);
30539 -                       if (ret) {
30540 -                               dev_err(smu->adev->dev, "Restore the default soft max sclk failed!");
30541 -                               return ret;
30542 -                       }
30544 -                       if (smu->adev->pm.fw_version < 0x43f1b00) {
30545 -                               dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n");
30546 -                               break;
30547 -                       }
30549 -                       for (i = 0; i < smu->cpu_core_num; i++) {
30550 -                               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
30551 -                                                                     (i << 20) | smu->cpu_actual_soft_min_freq,
30552 -                                                                     NULL);
30553 -                               if (ret) {
30554 -                                       dev_err(smu->adev->dev, "Set hard min cclk failed!");
30555 -                                       return ret;
30556 -                               }
30558 -                               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
30559 -                                                                     (i << 20) | smu->cpu_actual_soft_max_freq,
30560 -                                                                     NULL);
30561 -                               if (ret) {
30562 -                                       dev_err(smu->adev->dev, "Set soft max cclk failed!");
30563 -                                       return ret;
30564 -                               }
30565 -                       }
30566                 }
30567                 break;
30568         case PP_OD_COMMIT_DPM_TABLE:
30569 diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
30570 index 5493388fcb10..dbe6d0caddb7 100644
30571 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
30572 +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
30573 @@ -389,24 +389,6 @@ static int renoir_od_edit_dpm_table(struct smu_context *smu,
30574                 }
30575                 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
30576                 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
30578 -               ret = smu_cmn_send_smc_msg_with_param(smu,
30579 -                                                               SMU_MSG_SetHardMinGfxClk,
30580 -                                                               smu->gfx_actual_hard_min_freq,
30581 -                                                               NULL);
30582 -               if (ret) {
30583 -                       dev_err(smu->adev->dev, "Restore the default hard min sclk failed!");
30584 -                       return ret;
30585 -               }
30587 -               ret = smu_cmn_send_smc_msg_with_param(smu,
30588 -                                                               SMU_MSG_SetSoftMaxGfxClk,
30589 -                                                               smu->gfx_actual_soft_max_freq,
30590 -                                                               NULL);
30591 -               if (ret) {
30592 -                       dev_err(smu->adev->dev, "Restore the default soft max sclk failed!");
30593 -                       return ret;
30594 -               }
30595                 break;
30596         case PP_OD_COMMIT_DPM_TABLE:
30597                 if (size != 0) {
30598 diff --git a/drivers/gpu/drm/arm/display/include/malidp_utils.h b/drivers/gpu/drm/arm/display/include/malidp_utils.h
30599 index 3bc383d5bf73..49a1d7f3539c 100644
30600 --- a/drivers/gpu/drm/arm/display/include/malidp_utils.h
30601 +++ b/drivers/gpu/drm/arm/display/include/malidp_utils.h
30602 @@ -13,9 +13,6 @@
30603  #define has_bit(nr, mask)      (BIT(nr) & (mask))
30604  #define has_bits(bits, mask)   (((bits) & (mask)) == (bits))
30606 -#define dp_for_each_set_bit(bit, mask) \
30607 -       for_each_set_bit((bit), ((unsigned long *)&(mask)), sizeof(mask) * 8)
30609  #define dp_wait_cond(__cond, __tries, __min_range, __max_range)        \
30610  ({                                                     \
30611         int num_tries = __tries;                        \
30612 diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
30613 index 719a79728e24..06c595378dda 100644
30614 --- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
30615 +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
30616 @@ -46,8 +46,9 @@ void komeda_pipeline_destroy(struct komeda_dev *mdev,
30618         struct komeda_component *c;
30619         int i;
30620 +       unsigned long avail_comps = pipe->avail_comps;
30622 -       dp_for_each_set_bit(i, pipe->avail_comps) {
30623 +       for_each_set_bit(i, &avail_comps, 32) {
30624                 c = komeda_pipeline_get_component(pipe, i);
30625                 komeda_component_destroy(mdev, c);
30626         }
30627 @@ -247,6 +248,7 @@ static void komeda_pipeline_dump(struct komeda_pipeline *pipe)
30629         struct komeda_component *c;
30630         int id;
30631 +       unsigned long avail_comps = pipe->avail_comps;
30633         DRM_INFO("Pipeline-%d: n_layers: %d, n_scalers: %d, output: %s.\n",
30634                  pipe->id, pipe->n_layers, pipe->n_scalers,
30635 @@ -258,7 +260,7 @@ static void komeda_pipeline_dump(struct komeda_pipeline *pipe)
30636                  pipe->of_output_links[1] ?
30637                  pipe->of_output_links[1]->full_name : "none");
30639 -       dp_for_each_set_bit(id, pipe->avail_comps) {
30640 +       for_each_set_bit(id, &avail_comps, 32) {
30641                 c = komeda_pipeline_get_component(pipe, id);
30643                 komeda_component_dump(c);
30644 @@ -270,8 +272,9 @@ static void komeda_component_verify_inputs(struct komeda_component *c)
30645         struct komeda_pipeline *pipe = c->pipeline;
30646         struct komeda_component *input;
30647         int id;
30648 +       unsigned long supported_inputs = c->supported_inputs;
30650 -       dp_for_each_set_bit(id, c->supported_inputs) {
30651 +       for_each_set_bit(id, &supported_inputs, 32) {
30652                 input = komeda_pipeline_get_component(pipe, id);
30653                 if (!input) {
30654                         c->supported_inputs &= ~(BIT(id));
30655 @@ -302,8 +305,9 @@ static void komeda_pipeline_assemble(struct komeda_pipeline *pipe)
30656         struct komeda_component *c;
30657         struct komeda_layer *layer;
30658         int i, id;
30659 +       unsigned long avail_comps = pipe->avail_comps;
30661 -       dp_for_each_set_bit(id, pipe->avail_comps) {
30662 +       for_each_set_bit(id, &avail_comps, 32) {
30663                 c = komeda_pipeline_get_component(pipe, id);
30664                 komeda_component_verify_inputs(c);
30665         }
30666 @@ -355,13 +359,15 @@ void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
30668         struct komeda_component *c;
30669         u32 id;
30670 +       unsigned long avail_comps;
30672         seq_printf(sf, "\n======== Pipeline-%d ==========\n", pipe->id);
30674         if (pipe->funcs && pipe->funcs->dump_register)
30675                 pipe->funcs->dump_register(pipe, sf);
30677 -       dp_for_each_set_bit(id, pipe->avail_comps) {
30678 +       avail_comps = pipe->avail_comps;
30679 +       for_each_set_bit(id, &avail_comps, 32) {
30680                 c = komeda_pipeline_get_component(pipe, id);
30682                 seq_printf(sf, "\n------%s------\n", c->name);
30683 diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
30684 index 5c085116de3f..e672b9cffee3 100644
30685 --- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
30686 +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
30687 @@ -1231,14 +1231,15 @@ komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
30688         struct komeda_pipeline_state *old = priv_to_pipe_st(pipe->obj.state);
30689         struct komeda_component_state *c_st;
30690         struct komeda_component *c;
30691 -       u32 disabling_comps, id;
30692 +       u32 id;
30693 +       unsigned long disabling_comps;
30695         WARN_ON(!old);
30697         disabling_comps = (~new->active_comps) & old->active_comps;
30699         /* unbound all disabling component */
30700 -       dp_for_each_set_bit(id, disabling_comps) {
30701 +       for_each_set_bit(id, &disabling_comps, 32) {
30702                 c = komeda_pipeline_get_component(pipe, id);
30703                 c_st = komeda_component_get_state_and_set_user(c,
30704                                 drm_st, NULL, new->crtc);
30705 @@ -1286,7 +1287,8 @@ bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
30706         struct komeda_pipeline_state *old;
30707         struct komeda_component *c;
30708         struct komeda_component_state *c_st;
30709 -       u32 id, disabling_comps = 0;
30710 +       u32 id;
30711 +       unsigned long disabling_comps;
30713         old = komeda_pipeline_get_old_state(pipe, old_state);
30715 @@ -1296,10 +1298,10 @@ bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
30716                 disabling_comps = old->active_comps &
30717                                   pipe->standalone_disabled_comps;
30719 -       DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%x.\n",
30720 +       DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%lx.\n",
30721                          pipe->id, old->active_comps, disabling_comps);
30723 -       dp_for_each_set_bit(id, disabling_comps) {
30724 +       for_each_set_bit(id, &disabling_comps, 32) {
30725                 c = komeda_pipeline_get_component(pipe, id);
30726                 c_st = priv_to_comp_st(c->obj.state);
30728 @@ -1330,16 +1332,17 @@ void komeda_pipeline_update(struct komeda_pipeline *pipe,
30729         struct komeda_pipeline_state *new = priv_to_pipe_st(pipe->obj.state);
30730         struct komeda_pipeline_state *old;
30731         struct komeda_component *c;
30732 -       u32 id, changed_comps = 0;
30733 +       u32 id;
30734 +       unsigned long changed_comps;
30736         old = komeda_pipeline_get_old_state(pipe, old_state);
30738         changed_comps = new->active_comps | old->active_comps;
30740 -       DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%x.\n",
30741 +       DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%lx.\n",
30742                          pipe->id, new->active_comps, changed_comps);
30744 -       dp_for_each_set_bit(id, changed_comps) {
30745 +       for_each_set_bit(id, &changed_comps, 32) {
30746                 c = komeda_pipeline_get_component(pipe, id);
30748                 if (new->active_comps & BIT(c->id))
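
The removed dp_for_each_set_bit() cast a u32 mask to unsigned long *; on a 64-bit build the bit scan then reads 8 bytes from a 4-byte object. The komeda fixes above instead widen the mask by value into a local unsigned long before calling for_each_set_bit(). A userspace sketch of the hazard and the fix:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t comps = 0x5;       /* bits 0 and 2 set */
            unsigned long bits = comps; /* widen by value, as the fix does */

            /*
             * Casting &comps to (unsigned long *) would make the scan read
             * 8 bytes on a 64-bit build: 4 bytes of comps plus 4 adjacent
             * stack bytes. Copying into a real unsigned long first keeps
             * the word-sized reads well defined.
             */
            for (unsigned int id = 0; id < 32; id++)
                    if (bits & (1ul << id))
                            printf("component %u is available\n", id);
            return 0;
    }
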
30749 diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
30750 index ea8164e7a6dc..01837bea18c2 100644
30751 --- a/drivers/gpu/drm/ast/ast_drv.c
30752 +++ b/drivers/gpu/drm/ast/ast_drv.c
30753 @@ -30,6 +30,7 @@
30754  #include <linux/module.h>
30755  #include <linux/pci.h>
30757 +#include <drm/drm_atomic_helper.h>
30758  #include <drm/drm_crtc_helper.h>
30759  #include <drm/drm_drv.h>
30760  #include <drm/drm_fb_helper.h>
30761 @@ -138,6 +139,7 @@ static void ast_pci_remove(struct pci_dev *pdev)
30762         struct drm_device *dev = pci_get_drvdata(pdev);
30764         drm_dev_unregister(dev);
30765 +       drm_atomic_helper_shutdown(dev);
30768  static int ast_drm_freeze(struct drm_device *dev)
30769 diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
30770 index 988b270fea5e..758c69aa7232 100644
30771 --- a/drivers/gpu/drm/ast/ast_mode.c
30772 +++ b/drivers/gpu/drm/ast/ast_mode.c
30773 @@ -688,7 +688,7 @@ ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
30774         unsigned int offset_x, offset_y;
30776         offset_x = AST_MAX_HWC_WIDTH - fb->width;
30777 -       offset_y = AST_MAX_HWC_WIDTH - fb->height;
30778 +       offset_y = AST_MAX_HWC_HEIGHT - fb->height;
30780         if (state->fb != old_state->fb) {
30781                 /* A new cursor image was installed. */
30782 diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
30783 index e4110d6ca7b3..bc60fc4728d7 100644
30784 --- a/drivers/gpu/drm/bridge/Kconfig
30785 +++ b/drivers/gpu/drm/bridge/Kconfig
30786 @@ -67,6 +67,7 @@ config DRM_LONTIUM_LT9611UXC
30787         depends on OF
30788         select DRM_PANEL_BRIDGE
30789         select DRM_KMS_HELPER
30790 +       select DRM_MIPI_DSI
30791         select REGMAP_I2C
30792         help
30793           Driver for Lontium LT9611UXC DSI to HDMI bridge
30794 @@ -151,6 +152,7 @@ config DRM_SII902X
30795         tristate "Silicon Image sii902x RGB/HDMI bridge"
30796         depends on OF
30797         select DRM_KMS_HELPER
30798 +       select DRM_MIPI_DSI
30799         select REGMAP_I2C
30800         select I2C_MUX
30801         select SND_SOC_HDMI_CODEC if SND_SOC
30802 @@ -200,6 +202,7 @@ config DRM_TOSHIBA_TC358767
30803         tristate "Toshiba TC358767 eDP bridge"
30804         depends on OF
30805         select DRM_KMS_HELPER
30806 +       select DRM_MIPI_DSI
30807         select REGMAP_I2C
30808         select DRM_PANEL
30809         help
30810 diff --git a/drivers/gpu/drm/bridge/analogix/Kconfig b/drivers/gpu/drm/bridge/analogix/Kconfig
30811 index 024ea2a570e7..9160fd80dd70 100644
30812 --- a/drivers/gpu/drm/bridge/analogix/Kconfig
30813 +++ b/drivers/gpu/drm/bridge/analogix/Kconfig
30814 @@ -30,6 +30,7 @@ config DRM_ANALOGIX_ANX7625
30815         tristate "Analogix Anx7625 MIPI to DP interface support"
30816         depends on DRM
30817         depends on OF
30818 +       select DRM_MIPI_DSI
30819         help
30820           ANX7625 is an ultra-low power 4K mobile HD transmitter
30821           designed for portable devices. It converts MIPI/DPI to
30822 diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
30823 index 0ddc37551194..c916f4b8907e 100644
30824 --- a/drivers/gpu/drm/bridge/panel.c
30825 +++ b/drivers/gpu/drm/bridge/panel.c
30826 @@ -87,6 +87,18 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
30828  static void panel_bridge_detach(struct drm_bridge *bridge)
30830 +       struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
30831 +       struct drm_connector *connector = &panel_bridge->connector;
30833 +       /*
30834 +        * Cleanup the connector if we know it was initialized.
30835 +        *
30836 +        * FIXME: This wouldn't be needed if the panel_bridge structure was
30837 +        * allocated with drmm_kzalloc(). This might be tricky since the
30838 +        * drm_device pointer can only be retrieved when the bridge is attached.
30839 +        */
30840 +       if (connector->dev)
30841 +               drm_connector_cleanup(connector);
30844  static void panel_bridge_pre_enable(struct drm_bridge *bridge)
30845 diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
30846 index 309afe61afdd..9c75c8815056 100644
30847 --- a/drivers/gpu/drm/drm_dp_mst_topology.c
30848 +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
30849 @@ -1154,6 +1154,7 @@ static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
30851         req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
30852         drm_dp_encode_sideband_req(&req, msg);
30853 +       msg->path_msg = true;
30856  static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
30857 @@ -2824,15 +2825,21 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
30859         req_type = txmsg->msg[0] & 0x7f;
30860         if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
30861 -               req_type == DP_RESOURCE_STATUS_NOTIFY)
30862 +               req_type == DP_RESOURCE_STATUS_NOTIFY ||
30863 +               req_type == DP_CLEAR_PAYLOAD_ID_TABLE)
30864                 hdr->broadcast = 1;
30865         else
30866                 hdr->broadcast = 0;
30867         hdr->path_msg = txmsg->path_msg;
30868 -       hdr->lct = mstb->lct;
30869 -       hdr->lcr = mstb->lct - 1;
30870 -       if (mstb->lct > 1)
30871 -               memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
30872 +       if (hdr->broadcast) {
30873 +               hdr->lct = 1;
30874 +               hdr->lcr = 6;
30875 +       } else {
30876 +               hdr->lct = mstb->lct;
30877 +               hdr->lcr = mstb->lct - 1;
30878 +       }
30880 +       memcpy(hdr->rad, mstb->rad, hdr->lct / 2);
30882         return 0;
30884 diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
30885 index 58f5dc2f6dd5..f6bdec7fa925 100644
30886 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
30887 +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
30888 @@ -84,6 +84,13 @@ static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
30889         .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
30890  };
30892 +static const struct drm_dmi_panel_orientation_data onegx1_pro = {
30893 +       .width = 1200,
30894 +       .height = 1920,
30895 +       .bios_dates = (const char * const []){ "12/17/2020", NULL },
30896 +       .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
30899  static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
30900         .width = 720,
30901         .height = 1280,
30902 @@ -211,6 +218,13 @@ static const struct dmi_system_id orientation_data[] = {
30903                   DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
30904                 },
30905                 .driver_data = (void *)&lcd1200x1920_rightside_up,
30906 +       }, {    /* OneGX1 Pro */
30907 +               .matches = {
30908 +                 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"),
30909 +                 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SYSTEM_PRODUCT_NAME"),
30910 +                 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"),
30911 +               },
30912 +               .driver_data = (void *)&onegx1_pro,
30913         }, {    /* VIOS LTH17 */
30914                 .matches = {
30915                   DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
30916 diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
30917 index ad59a51eab6d..e7e1ee2aa352 100644
30918 --- a/drivers/gpu/drm/drm_probe_helper.c
30919 +++ b/drivers/gpu/drm/drm_probe_helper.c
30920 @@ -624,6 +624,7 @@ static void output_poll_execute(struct work_struct *work)
30921         struct drm_connector_list_iter conn_iter;
30922         enum drm_connector_status old_status;
30923         bool repoll = false, changed;
30924 +       u64 old_epoch_counter;
30926         if (!dev->mode_config.poll_enabled)
30927                 return;
30928 @@ -660,8 +661,9 @@ static void output_poll_execute(struct work_struct *work)
30930                 repoll = true;
30932 +               old_epoch_counter = connector->epoch_counter;
30933                 connector->status = drm_helper_probe_detect(connector, NULL, false);
30934 -               if (old_status != connector->status) {
30935 +               if (old_epoch_counter != connector->epoch_counter) {
30936                         const char *old, *new;
30938                         /*
30939 @@ -690,6 +692,9 @@ static void output_poll_execute(struct work_struct *work)
30940                                       connector->base.id,
30941                                       connector->name,
30942                                       old, new);
30943 +                       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] epoch counter %llu -> %llu\n",
30944 +                                     connector->base.id, connector->name,
30945 +                                     old_epoch_counter, connector->epoch_counter);
30947                         changed = true;
30948                 }
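
The polling change above compares the connector's epoch counter rather than its status, so a probe that leaves status unchanged but updates other probed state (for example, a new EDID) still registers as a change. A toy model of the idea (the probe helper here is illustrative, not the DRM API):

    #include <stdint.h>
    #include <stdio.h>

    struct connector {
            int status;
            uint64_t epoch_counter; /* bumped on any probed change */
    };

    static void probe(struct connector *c, int status, int edid_changed)
    {
            if (status != c->status || edid_changed)
                    c->epoch_counter++;
            c->status = status;
    }

    int main(void)
    {
            struct connector c = { .status = 1, .epoch_counter = 0 };
            uint64_t old = c.epoch_counter;

            /* Same status, different EDID: a status compare would miss this. */
            probe(&c, 1, 1);
            printf("changed: %d\n", old != c.epoch_counter); /* 1 */
            return 0;
    }
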
30949 diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
30950 index 775d89b6c3fc..97a785aa8839 100644
30951 --- a/drivers/gpu/drm/i915/display/intel_dp.c
30952 +++ b/drivers/gpu/drm/i915/display/intel_dp.c
30953 @@ -1174,44 +1174,6 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
30954         return -EINVAL;
30957 -/* Optimize link config in order: max bpp, min lanes, min clock */
30958 -static int
30959 -intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
30960 -                                 struct intel_crtc_state *pipe_config,
30961 -                                 const struct link_config_limits *limits)
30963 -       const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
30964 -       int bpp, clock, lane_count;
30965 -       int mode_rate, link_clock, link_avail;
30967 -       for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
30968 -               int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
30970 -               mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
30971 -                                                  output_bpp);
30973 -               for (lane_count = limits->min_lane_count;
30974 -                    lane_count <= limits->max_lane_count;
30975 -                    lane_count <<= 1) {
30976 -                       for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
30977 -                               link_clock = intel_dp->common_rates[clock];
30978 -                               link_avail = intel_dp_max_data_rate(link_clock,
30979 -                                                                   lane_count);
30981 -                               if (mode_rate <= link_avail) {
30982 -                                       pipe_config->lane_count = lane_count;
30983 -                                       pipe_config->pipe_bpp = bpp;
30984 -                                       pipe_config->port_clock = link_clock;
30986 -                                       return 0;
30987 -                               }
30988 -                       }
30989 -               }
30990 -       }
30992 -       return -EINVAL;
30995  static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
30997         int i, num_bpc;
30998 @@ -1461,22 +1423,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
30999             intel_dp_can_bigjoiner(intel_dp))
31000                 pipe_config->bigjoiner = true;
31002 -       if (intel_dp_is_edp(intel_dp))
31003 -               /*
31004 -                * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
31005 -                * section A.1: "It is recommended that the minimum number of
31006 -                * lanes be used, using the minimum link rate allowed for that
31007 -                * lane configuration."
31008 -                *
31009 -                * Note that we fall back to the max clock and lane count for eDP
31010 -                * panels that fail with the fast optimal settings (see
31011 -                * intel_dp->use_max_params), in which case the fast vs. wide
31012 -                * choice doesn't matter.
31013 -                */
31014 -               ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, &limits);
31015 -       else
31016 -               /* Optimize for slow and wide. */
31017 -               ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
31018 +       /*
31019 +        * Optimize for slow and wide for everything, because some eDP 1.3
31020 +        * and 1.4 panels don't work well with fast and narrow.
31021 +        */
31022 +       ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
31024         /* enable compression if the mode doesn't fit available BW */
31025         drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
31026 @@ -4537,7 +4488,18 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
31027         drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
31029         for (;;) {
31030 -               u8 esi[DP_DPRX_ESI_LEN] = {};
31031 +               /*
31032 +                * The +2 is because DP_DPRX_ESI_LEN is 14, but we then
31033 +                * pass in "esi+10" to drm_dp_channel_eq_ok(), which
31034 +                * takes a 6-byte array. So we actually need 16 bytes
31035 +                * here.
31036 +                *
31037 +                * Somebody who knows what the limits actually are
31038 +                * should check this, but for now this is at least
31039 +                * harmless and avoids a valid compiler warning about
31040 +                * using more of the array than we have allocated.
31041 +                */
31042 +               u8 esi[DP_DPRX_ESI_LEN+2] = {};
31043                 bool handled;
31044                 int retry;
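
To make the sizing argument in the comment above concrete, here is a standalone userspace sketch, with channel_eq_ok() as a stand-in for drm_dp_channel_eq_ok(): a callee that reads 6 bytes starting at offset 10 needs the caller's buffer to span 16 bytes, two more than DP_DPRX_ESI_LEN.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DP_DPRX_ESI_LEN 14 /* same value as the DRM header uses */

/* Stand-in for drm_dp_channel_eq_ok(), which reads a 6-byte lane-status array. */
static int channel_eq_ok(const uint8_t link_status[6])
{
        int ok = 1;
        for (int i = 0; i < 6; i++)
                ok &= (link_status[i] == 0); /* placeholder predicate */
        return ok;
}

int main(void)
{
        uint8_t esi[DP_DPRX_ESI_LEN + 2] = {0}; /* bytes [10, 16) are read below */

        assert(10 + 6 <= (int)sizeof(esi)); /* 16 <= 16: no out-of-bounds read */
        printf("eq ok: %d\n", channel_eq_ok(esi + 10));
        return 0;
}
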
31046 diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
31047 index f455040fa989..7cbc81da80b7 100644
31048 --- a/drivers/gpu/drm/i915/display/intel_overlay.c
31049 +++ b/drivers/gpu/drm/i915/display/intel_overlay.c
31050 @@ -383,7 +383,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
31051                 i830_overlay_clock_gating(dev_priv, true);
31054 -static void
31055 +__i915_active_call static void
31056  intel_overlay_last_flip_retire(struct i915_active *active)
31058         struct intel_overlay *overlay =
31059 diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
31060 index ec28a6cde49b..0b2434e29d00 100644
31061 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
31062 +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
31063 @@ -189,7 +189,7 @@ compute_partial_view(const struct drm_i915_gem_object *obj,
31064         struct i915_ggtt_view view;
31066         if (i915_gem_object_is_tiled(obj))
31067 -               chunk = roundup(chunk, tile_row_pages(obj));
31068 +               chunk = roundup(chunk, tile_row_pages(obj) ?: 1);
31070         view.type = I915_GGTT_VIEW_PARTIAL;
31071         view.partial.offset = rounddown(page_offset, chunk);
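
The `?: 1` added above is the GNU C conditional with an omitted middle operand: it evaluates to the left-hand side when that is nonzero and to the right-hand side otherwise, so a zero tile-row count can no longer reach roundup() as a divisor. A minimal sketch, with roundup() re-declared in the kernel's usual shape (compile with GCC or Clang, since `?:` is an extension):

#include <stdio.h>

#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y)) /* kernel-style roundup */

int main(void)
{
        unsigned int chunk = 37;
        unsigned int rows = 0; /* would divide by zero inside roundup() */

        chunk = roundup(chunk, rows ?: 1); /* rows == 0, so the divisor is 1 */
        printf("chunk = %u\n", chunk);     /* prints 37 instead of crashing */
        return 0;
}
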
31072 diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
31073 index 43028f3539a6..76574e245916 100644
31074 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
31075 +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
31076 @@ -63,6 +63,8 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
31077             i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
31078                 GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
31079                 i915_gem_object_set_tiling_quirk(obj);
31080 +               GEM_BUG_ON(!list_empty(&obj->mm.link));
31081 +               atomic_inc(&obj->mm.shrink_pin);
31082                 shrinkable = false;
31083         }
31085 diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
31086 index de575fdb033f..21f08e53889c 100644
31087 --- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c
31088 +++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
31089 @@ -397,7 +397,10 @@ static void emit_batch(struct i915_vma * const vma,
31090         gen7_emit_pipeline_invalidate(&cmds);
31091         batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
31092         batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
31093 -       batch_add(&cmds, 0xffff0000);
31094 +       batch_add(&cmds, 0xffff0000 |
31095 +                       ((IS_IVB_GT1(i915) || IS_VALLEYVIEW(i915)) ?
31096 +                        HIZ_RAW_STALL_OPT_DISABLE :
31097 +                        0));
31098         batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
31099         batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
31100         gen7_emit_pipeline_invalidate(&cmds);
31101 diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
31102 index 755522ced60d..3ae16945bd43 100644
31103 --- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
31104 +++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
31105 @@ -630,7 +630,6 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
31107                 err = pin_pt_dma(vm, pde->pt.base);
31108                 if (err) {
31109 -                       i915_gem_object_put(pde->pt.base);
31110                         free_pd(vm, pde);
31111                         return err;
31112                 }
31113 diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
31114 index 67de2b189598..4b09490c20c0 100644
31115 --- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
31116 +++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
31117 @@ -670,8 +670,8 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
31118                  * banks of memory are paired and unswizzled on the
31119                  * uneven portion, so leave that as unknown.
31120                  */
31121 -               if (intel_uncore_read(uncore, C0DRB3) ==
31122 -                   intel_uncore_read(uncore, C1DRB3)) {
31123 +               if (intel_uncore_read16(uncore, C0DRB3) ==
31124 +                   intel_uncore_read16(uncore, C1DRB3)) {
31125                         swizzle_x = I915_BIT_6_SWIZZLE_9_10;
31126                         swizzle_y = I915_BIT_6_SWIZZLE_9;
31127                 }
31128 diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
31129 index d1d8ee4a5f16..57578bf28d77 100644
31130 --- a/drivers/gpu/drm/i915/gvt/gvt.c
31131 +++ b/drivers/gpu/drm/i915/gvt/gvt.c
31132 @@ -126,7 +126,7 @@ static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups
31133         return true;
31136 -static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
31137 +static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
31139         int i, j;
31140         struct intel_vgpu_type *type;
31141 @@ -144,7 +144,7 @@ static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
31142                 gvt_vgpu_type_groups[i] = group;
31143         }
31145 -       return true;
31146 +       return 0;
31148  unwind:
31149         for (j = 0; j < i; j++) {
31150 @@ -152,7 +152,7 @@ static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
31151                 kfree(group);
31152         }
31154 -       return false;
31155 +       return -ENOMEM;
31158  static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
31159 @@ -360,7 +360,7 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
31160                 goto out_clean_thread;
31162         ret = intel_gvt_init_vgpu_type_groups(gvt);
31163 -       if (ret == false) {
31164 +       if (ret) {
31165                 gvt_err("failed to init vgpu type groups: %d\n", ret);
31166                 goto out_clean_types;
31167         }
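
The gvt conversion above follows the usual kernel convention: success/failure functions return 0 or a negative errno rather than a bool, so the caller's `if (ret)` test and the `%d` in the error message are both meaningful. A small sketch of the pattern, with an illustrative init function:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Returns 0 on success, -ENOMEM on failure (kernel errno convention). */
static int init_type_groups(void **group)
{
        *group = malloc(64);
        if (!*group)
                return -ENOMEM;
        return 0;
}

int main(void)
{
        void *group;
        int ret = init_type_groups(&group);

        if (ret) { /* any nonzero return is an error code */
                fprintf(stderr, "failed to init type groups: %d\n", ret);
                return 1;
        }
        free(group);
        return 0;
}
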
31168 diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
31169 index 3bc616cc1ad2..ea660e541c90 100644
31170 --- a/drivers/gpu/drm/i915/i915_active.c
31171 +++ b/drivers/gpu/drm/i915/i915_active.c
31172 @@ -1156,7 +1156,8 @@ static int auto_active(struct i915_active *ref)
31173         return 0;
31176 -static void auto_retire(struct i915_active *ref)
31177 +__i915_active_call static void
31178 +auto_retire(struct i915_active *ref)
31180         i915_active_put(ref);
31182 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
31183 index 8e9cb44e66e5..4ecb813c9bc7 100644
31184 --- a/drivers/gpu/drm/i915/i915_drv.c
31185 +++ b/drivers/gpu/drm/i915/i915_drv.c
31186 @@ -1049,6 +1049,8 @@ static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
31187  void i915_driver_shutdown(struct drm_i915_private *i915)
31189         disable_rpm_wakeref_asserts(&i915->runtime_pm);
31190 +       intel_runtime_pm_disable(&i915->runtime_pm);
31191 +       intel_power_domains_disable(i915);
31193         i915_gem_suspend(i915);
31195 @@ -1064,7 +1066,15 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
31196         intel_suspend_encoders(i915);
31197         intel_shutdown_encoders(i915);
31199 +       /*
31200 +        * The only requirement is to reboot with display DC states disabled,
31201 +        * for now leaving all display power wells in the INIT power domain
31202 +        * enabled, matching the driver reload sequence.
31203 +        */
31204 +       intel_power_domains_driver_remove(i915);
31205         enable_rpm_wakeref_asserts(&i915->runtime_pm);
31207 +       intel_runtime_pm_driver_release(&i915->runtime_pm);
31210  static bool suspend_to_idle(struct drm_i915_private *dev_priv)
31211 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
31212 index aa4490934469..19351addb68c 100644
31213 --- a/drivers/gpu/drm/i915/i915_gem.c
31214 +++ b/drivers/gpu/drm/i915/i915_gem.c
31215 @@ -972,12 +972,11 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
31216                 obj->mm.madv = args->madv;
31218         if (i915_gem_object_has_pages(obj)) {
31219 -               struct list_head *list;
31220 +               unsigned long flags;
31222 -               if (i915_gem_object_is_shrinkable(obj)) {
31223 -                       unsigned long flags;
31225 -                       spin_lock_irqsave(&i915->mm.obj_lock, flags);
31226 +               spin_lock_irqsave(&i915->mm.obj_lock, flags);
31227 +               if (!list_empty(&obj->mm.link)) {
31228 +                       struct list_head *list;
31230                         if (obj->mm.madv != I915_MADV_WILLNEED)
31231                                 list = &i915->mm.purge_list;
31232 @@ -985,8 +984,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
31233                                 list = &i915->mm.shrink_list;
31234                         list_move_tail(&obj->mm.link, list);
31236 -                       spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
31237                 }
31238 +               spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
31239         }
31241         /* if the object is no longer attached, discard its backing storage */
31242 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
31243 index 4b4d8d034782..4ba20f959a71 100644
31244 --- a/drivers/gpu/drm/i915/intel_pm.c
31245 +++ b/drivers/gpu/drm/i915/intel_pm.c
31246 @@ -2993,7 +2993,7 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
31248  static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
31249                                    const char *name,
31250 -                                  const u16 wm[8])
31251 +                                  const u16 wm[])
31253         int level, max_level = ilk_wm_max_level(dev_priv);
31255 diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
31256 index 7bb31fbee29d..fd8870edde0e 100644
31257 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
31258 +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
31259 @@ -554,7 +554,7 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
31260                 height = state->src_h >> 16;
31261                 cpp = state->fb->format->cpp[0];
31263 -               if (priv->soc_info->has_osd && plane->type == DRM_PLANE_TYPE_OVERLAY)
31264 +               if (!priv->soc_info->has_osd || plane->type == DRM_PLANE_TYPE_OVERLAY)
31265                         hwdesc = &priv->dma_hwdescs->hwdesc_f0;
31266                 else
31267                         hwdesc = &priv->dma_hwdescs->hwdesc_f1;
31268 @@ -826,6 +826,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
31269         const struct jz_soc_info *soc_info;
31270         struct ingenic_drm *priv;
31271         struct clk *parent_clk;
31272 +       struct drm_plane *primary;
31273         struct drm_bridge *bridge;
31274         struct drm_panel *panel;
31275         struct drm_encoder *encoder;
31276 @@ -940,9 +941,11 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
31277         if (soc_info->has_osd)
31278                 priv->ipu_plane = drm_plane_from_index(drm, 0);
31280 -       drm_plane_helper_add(&priv->f1, &ingenic_drm_plane_helper_funcs);
31281 +       primary = priv->soc_info->has_osd ? &priv->f1 : &priv->f0;
31283 -       ret = drm_universal_plane_init(drm, &priv->f1, 1,
31284 +       drm_plane_helper_add(primary, &ingenic_drm_plane_helper_funcs);
31286 +       ret = drm_universal_plane_init(drm, primary, 1,
31287                                        &ingenic_drm_primary_plane_funcs,
31288                                        priv->soc_info->formats_f1,
31289                                        priv->soc_info->num_formats_f1,
31290 @@ -954,7 +957,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
31292         drm_crtc_helper_add(&priv->crtc, &ingenic_drm_crtc_helper_funcs);
31294 -       ret = drm_crtc_init_with_planes(drm, &priv->crtc, &priv->f1,
31295 +       ret = drm_crtc_init_with_planes(drm, &priv->crtc, primary,
31296                                         NULL, &ingenic_drm_crtc_funcs, NULL);
31297         if (ret) {
31298                 dev_err(dev, "Failed to init CRTC: %i\n", ret);
31299 diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
31300 index 2314c8122992..b3fd3501c412 100644
31301 --- a/drivers/gpu/drm/mcde/mcde_dsi.c
31302 +++ b/drivers/gpu/drm/mcde/mcde_dsi.c
31303 @@ -760,7 +760,7 @@ static void mcde_dsi_start(struct mcde_dsi *d)
31304                 DSI_MCTL_MAIN_DATA_CTL_BTA_EN |
31305                 DSI_MCTL_MAIN_DATA_CTL_READ_EN |
31306                 DSI_MCTL_MAIN_DATA_CTL_REG_TE_EN;
31307 -       if (d->mdsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET)
31308 +       if (!(d->mdsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
31309                 val |= DSI_MCTL_MAIN_DATA_CTL_HOST_EOT_GEN;
31310         writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
31312 diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
31313 index 8ee55f9e2954..7fb358167f8d 100644
31314 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
31315 +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
31316 @@ -153,7 +153,7 @@ struct mtk_hdmi_conf {
31317  struct mtk_hdmi {
31318         struct drm_bridge bridge;
31319         struct drm_bridge *next_bridge;
31320 -       struct drm_connector conn;
31321 +       struct drm_connector *curr_conn; /* current connector (only valid when 'enabled') */
31322         struct device *dev;
31323         const struct mtk_hdmi_conf *conf;
31324         struct phy *phy;
31325 @@ -186,11 +186,6 @@ static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b)
31326         return container_of(b, struct mtk_hdmi, bridge);
31329 -static inline struct mtk_hdmi *hdmi_ctx_from_conn(struct drm_connector *c)
31331 -       return container_of(c, struct mtk_hdmi, conn);
31334  static u32 mtk_hdmi_read(struct mtk_hdmi *hdmi, u32 offset)
31336         return readl(hdmi->regs + offset);
31337 @@ -974,7 +969,7 @@ static int mtk_hdmi_setup_avi_infoframe(struct mtk_hdmi *hdmi,
31338         ssize_t err;
31340         err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
31341 -                                                      &hdmi->conn, mode);
31342 +                                                      hdmi->curr_conn, mode);
31343         if (err < 0) {
31344                 dev_err(hdmi->dev,
31345                         "Failed to get AVI infoframe from mode: %zd\n", err);
31346 @@ -1054,7 +1049,7 @@ static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi,
31347         ssize_t err;
31349         err = drm_hdmi_vendor_infoframe_from_display_mode(&frame,
31350 -                                                         &hdmi->conn, mode);
31351 +                                                         hdmi->curr_conn, mode);
31352         if (err) {
31353                 dev_err(hdmi->dev,
31354                         "Failed to get vendor infoframe from mode: %zd\n", err);
31355 @@ -1201,48 +1196,16 @@ mtk_hdmi_update_plugged_status(struct mtk_hdmi *hdmi)
31356                connector_status_connected : connector_status_disconnected;
31359 -static enum drm_connector_status hdmi_conn_detect(struct drm_connector *conn,
31360 -                                                 bool force)
31361 +static enum drm_connector_status mtk_hdmi_detect(struct mtk_hdmi *hdmi)
31363 -       struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
31364         return mtk_hdmi_update_plugged_status(hdmi);
31367 -static void hdmi_conn_destroy(struct drm_connector *conn)
31369 -       struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
31371 -       mtk_cec_set_hpd_event(hdmi->cec_dev, NULL, NULL);
31373 -       drm_connector_cleanup(conn);
31376 -static int mtk_hdmi_conn_get_modes(struct drm_connector *conn)
31378 -       struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
31379 -       struct edid *edid;
31380 -       int ret;
31382 -       if (!hdmi->ddc_adpt)
31383 -               return -ENODEV;
31385 -       edid = drm_get_edid(conn, hdmi->ddc_adpt);
31386 -       if (!edid)
31387 -               return -ENODEV;
31389 -       hdmi->dvi_mode = !drm_detect_monitor_audio(edid);
31391 -       drm_connector_update_edid_property(conn, edid);
31393 -       ret = drm_add_edid_modes(conn, edid);
31394 -       kfree(edid);
31395 -       return ret;
31398 -static int mtk_hdmi_conn_mode_valid(struct drm_connector *conn,
31399 -                                   struct drm_display_mode *mode)
31400 +static int mtk_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
31401 +                                     const struct drm_display_info *info,
31402 +                                     const struct drm_display_mode *mode)
31404 -       struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
31405 +       struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
31406         struct drm_bridge *next_bridge;
31408         dev_dbg(hdmi->dev, "xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
31409 @@ -1267,74 +1230,57 @@ static int mtk_hdmi_conn_mode_valid(struct drm_connector *conn,
31410         return drm_mode_validate_size(mode, 0x1fff, 0x1fff);
31413 -static struct drm_encoder *mtk_hdmi_conn_best_enc(struct drm_connector *conn)
31415 -       struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
31417 -       return hdmi->bridge.encoder;
31420 -static const struct drm_connector_funcs mtk_hdmi_connector_funcs = {
31421 -       .detect = hdmi_conn_detect,
31422 -       .fill_modes = drm_helper_probe_single_connector_modes,
31423 -       .destroy = hdmi_conn_destroy,
31424 -       .reset = drm_atomic_helper_connector_reset,
31425 -       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
31426 -       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
31429 -static const struct drm_connector_helper_funcs
31430 -               mtk_hdmi_connector_helper_funcs = {
31431 -       .get_modes = mtk_hdmi_conn_get_modes,
31432 -       .mode_valid = mtk_hdmi_conn_mode_valid,
31433 -       .best_encoder = mtk_hdmi_conn_best_enc,
31436  static void mtk_hdmi_hpd_event(bool hpd, struct device *dev)
31438         struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
31440 -       if (hdmi && hdmi->bridge.encoder && hdmi->bridge.encoder->dev)
31441 +       if (hdmi && hdmi->bridge.encoder && hdmi->bridge.encoder->dev) {
31442 +               static enum drm_connector_status status;
31444 +               status = mtk_hdmi_detect(hdmi);
31445                 drm_helper_hpd_irq_event(hdmi->bridge.encoder->dev);
31446 +               drm_bridge_hpd_notify(&hdmi->bridge, status);
31447 +       }
31450  /*
31451   * Bridge callbacks
31452   */
31454 +static enum drm_connector_status mtk_hdmi_bridge_detect(struct drm_bridge *bridge)
31456 +       struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
31458 +       return mtk_hdmi_detect(hdmi);
31461 +static struct edid *mtk_hdmi_bridge_get_edid(struct drm_bridge *bridge,
31462 +                                            struct drm_connector *connector)
31464 +       struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
31465 +       struct edid *edid;
31467 +       if (!hdmi->ddc_adpt)
31468 +               return NULL;
31469 +       edid = drm_get_edid(connector, hdmi->ddc_adpt);
31470 +       if (!edid)
31471 +               return NULL;
31472 +       hdmi->dvi_mode = !drm_detect_monitor_audio(edid);
31473 +       return edid;
31476  static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge,
31477                                   enum drm_bridge_attach_flags flags)
31479         struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
31480         int ret;
31482 -       if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
31483 -               DRM_ERROR("Fix bridge driver to make connector optional!");
31484 +       if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
31485 +               DRM_ERROR("%s: The flag DRM_BRIDGE_ATTACH_NO_CONNECTOR must be supplied\n",
31486 +                         __func__);
31487                 return -EINVAL;
31488         }
31490 -       ret = drm_connector_init_with_ddc(bridge->encoder->dev, &hdmi->conn,
31491 -                                         &mtk_hdmi_connector_funcs,
31492 -                                         DRM_MODE_CONNECTOR_HDMIA,
31493 -                                         hdmi->ddc_adpt);
31494 -       if (ret) {
31495 -               dev_err(hdmi->dev, "Failed to initialize connector: %d\n", ret);
31496 -               return ret;
31497 -       }
31498 -       drm_connector_helper_add(&hdmi->conn, &mtk_hdmi_connector_helper_funcs);
31500 -       hdmi->conn.polled = DRM_CONNECTOR_POLL_HPD;
31501 -       hdmi->conn.interlace_allowed = true;
31502 -       hdmi->conn.doublescan_allowed = false;
31504 -       ret = drm_connector_attach_encoder(&hdmi->conn,
31505 -                                               bridge->encoder);
31506 -       if (ret) {
31507 -               dev_err(hdmi->dev,
31508 -                       "Failed to attach connector to encoder: %d\n", ret);
31509 -               return ret;
31510 -       }
31512         if (hdmi->next_bridge) {
31513                 ret = drm_bridge_attach(bridge->encoder, hdmi->next_bridge,
31514                                         bridge, flags);
31515 @@ -1357,7 +1303,8 @@ static bool mtk_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
31516         return true;
31519 -static void mtk_hdmi_bridge_disable(struct drm_bridge *bridge)
31520 +static void mtk_hdmi_bridge_atomic_disable(struct drm_bridge *bridge,
31521 +                                          struct drm_bridge_state *old_bridge_state)
31523         struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
31525 @@ -1368,10 +1315,13 @@ static void mtk_hdmi_bridge_disable(struct drm_bridge *bridge)
31526         clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
31527         clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
31529 +       hdmi->curr_conn = NULL;
31531         hdmi->enabled = false;
31534 -static void mtk_hdmi_bridge_post_disable(struct drm_bridge *bridge)
31535 +static void mtk_hdmi_bridge_atomic_post_disable(struct drm_bridge *bridge,
31536 +                                               struct drm_bridge_state *old_state)
31538         struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
31540 @@ -1406,7 +1356,8 @@ static void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge,
31541         drm_mode_copy(&hdmi->mode, adjusted_mode);
31544 -static void mtk_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
31545 +static void mtk_hdmi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
31546 +                                             struct drm_bridge_state *old_state)
31548         struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
31550 @@ -1426,10 +1377,16 @@ static void mtk_hdmi_send_infoframe(struct mtk_hdmi *hdmi,
31551                 mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode);
31554 -static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
31555 +static void mtk_hdmi_bridge_atomic_enable(struct drm_bridge *bridge,
31556 +                                         struct drm_bridge_state *old_state)
31558 +       struct drm_atomic_state *state = old_state->base.state;
31559         struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
31561 +       /* Retrieve the connector through the atomic state. */
31562 +       hdmi->curr_conn = drm_atomic_get_new_connector_for_encoder(state,
31563 +                                                                  bridge->encoder);
31565         mtk_hdmi_output_set_display_mode(hdmi, &hdmi->mode);
31566         clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
31567         clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
31568 @@ -1440,13 +1397,19 @@ static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
31571  static const struct drm_bridge_funcs mtk_hdmi_bridge_funcs = {
31572 +       .mode_valid = mtk_hdmi_bridge_mode_valid,
31573 +       .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
31574 +       .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
31575 +       .atomic_reset = drm_atomic_helper_bridge_reset,
31576         .attach = mtk_hdmi_bridge_attach,
31577         .mode_fixup = mtk_hdmi_bridge_mode_fixup,
31578 -       .disable = mtk_hdmi_bridge_disable,
31579 -       .post_disable = mtk_hdmi_bridge_post_disable,
31580 +       .atomic_disable = mtk_hdmi_bridge_atomic_disable,
31581 +       .atomic_post_disable = mtk_hdmi_bridge_atomic_post_disable,
31582         .mode_set = mtk_hdmi_bridge_mode_set,
31583 -       .pre_enable = mtk_hdmi_bridge_pre_enable,
31584 -       .enable = mtk_hdmi_bridge_enable,
31585 +       .atomic_pre_enable = mtk_hdmi_bridge_atomic_pre_enable,
31586 +       .atomic_enable = mtk_hdmi_bridge_atomic_enable,
31587 +       .detect = mtk_hdmi_bridge_detect,
31588 +       .get_edid = mtk_hdmi_bridge_get_edid,
31589  };
31591  static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
31592 @@ -1662,8 +1625,10 @@ static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
31594         struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
31596 -       memcpy(buf, hdmi->conn.eld, min(sizeof(hdmi->conn.eld), len));
31598 +       if (hdmi->enabled)
31599 +               memcpy(buf, hdmi->curr_conn->eld, min(sizeof(hdmi->curr_conn->eld), len));
31600 +       else
31601 +               memset(buf, 0, len);
31602         return 0;
31605 @@ -1755,6 +1720,9 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev)
31607         hdmi->bridge.funcs = &mtk_hdmi_bridge_funcs;
31608         hdmi->bridge.of_node = pdev->dev.of_node;
31609 +       hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
31610 +                        | DRM_BRIDGE_OP_HPD;
31611 +       hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
31612         drm_bridge_add(&hdmi->bridge);
31614         ret = mtk_hdmi_clk_enable_audio(hdmi);
31615 diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
31616 index 91cf46f84025..3d55e153fa9c 100644
31617 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
31618 +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
31619 @@ -246,7 +246,7 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
31622  struct a6xx_gmu_oob_bits {
31623 -       int set, ack, set_new, ack_new;
31624 +       int set, ack, set_new, ack_new, clear, clear_new;
31625         const char *name;
31626  };
31628 @@ -260,6 +260,8 @@ static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
31629                 .ack = 24,
31630                 .set_new = 30,
31631                 .ack_new = 31,
31632 +               .clear = 24,
31633 +               .clear_new = 31,
31634         },
31636         [GMU_OOB_PERFCOUNTER_SET] = {
31637 @@ -268,18 +270,22 @@ static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
31638                 .ack = 25,
31639                 .set_new = 28,
31640                 .ack_new = 30,
31641 +               .clear = 25,
31642 +               .clear_new = 29,
31643         },
31645         [GMU_OOB_BOOT_SLUMBER] = {
31646                 .name = "BOOT_SLUMBER",
31647                 .set = 22,
31648                 .ack = 30,
31649 +               .clear = 30,
31650         },
31652         [GMU_OOB_DCVS_SET] = {
31653                 .name = "GPU_DCVS",
31654                 .set = 23,
31655                 .ack = 31,
31656 +               .clear = 31,
31657         },
31658  };
31660 @@ -335,9 +341,9 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
31661                 return;
31663         if (gmu->legacy)
31664 -               bit = a6xx_gmu_oob_bits[state].ack;
31665 +               bit = a6xx_gmu_oob_bits[state].clear;
31666         else
31667 -               bit = a6xx_gmu_oob_bits[state].ack_new;
31668 +               bit = a6xx_gmu_oob_bits[state].clear_new;
31670         gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
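
The OOB table change above records the clear bit separately from the ack bit because the two are not the same position for every request type. A standalone sketch of the table-lookup-plus-doorbell-write pattern, with the register modeled as a plain variable and only the bit values visible in the hunk:

#include <stdint.h>
#include <stdio.h>

struct oob_bits {
        int set, ack, clear; /* bit positions in the host-to-GMU doorbell */
        const char *name;
};

static const struct oob_bits oob_bits[] = {
        { .name = "GPU_SET",      .set = 16, .ack = 24, .clear = 24 },
        { .name = "BOOT_SLUMBER", .set = 22, .ack = 30, .clear = 30 },
};

static uint32_t host2gmu_intr; /* models REG_A6XX_GMU_HOST2GMU_INTR_SET */

static void clear_oob(int state)
{
        host2gmu_intr |= 1u << oob_bits[state].clear; /* write-1 doorbell */
}

int main(void)
{
        clear_oob(1); /* BOOT_SLUMBER */
        printf("doorbell = 0x%08x\n", host2gmu_intr); /* bit 30 set */
        return 0;
}
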
31672 diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
31673 index d553f62f4eeb..b4d8e1b01ee4 100644
31674 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
31675 +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
31676 @@ -1153,10 +1153,6 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
31678         struct device_node *phandle;
31680 -       a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
31681 -       if (IS_ERR(a6xx_gpu->llc_mmio))
31682 -               return;
31684         /*
31685          * There is a different programming path for targets with an mmu500
31686          * attached, so detect if that is the case
31687 @@ -1166,6 +1162,11 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
31688                 of_device_is_compatible(phandle, "arm,mmu-500"));
31689         of_node_put(phandle);
31691 +       if (a6xx_gpu->have_mmu500)
31692 +               a6xx_gpu->llc_mmio = NULL;
31693 +       else
31694 +               a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
31696         a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
31697         a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
31699 diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
31700 index 189f3533525c..e4444452759c 100644
31701 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
31702 +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
31703 @@ -22,7 +22,7 @@
31704         (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4))
31706  #define VIG_SM8250_MASK \
31707 -       (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3LITE))
31708 +       (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3LITE))
31710  #define DMA_SDM845_MASK \
31711         (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
31712 diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
31713 index ff2c1d583c79..0392d4dfe270 100644
31714 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
31715 +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
31716 @@ -20,7 +20,7 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
31718         struct mdp5_kms *mdp5_kms = get_kms(encoder);
31719         struct device *dev = encoder->dev->dev;
31720 -       u32 total_lines_x100, vclks_line, cfg;
31721 +       u32 total_lines, vclks_line, cfg;
31722         long vsync_clk_speed;
31723         struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
31724         int pp_id = mixer->pp;
31725 @@ -30,8 +30,8 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
31726                 return -EINVAL;
31727         }
31729 -       total_lines_x100 = mode->vtotal * drm_mode_vrefresh(mode);
31730 -       if (!total_lines_x100) {
31731 +       total_lines = mode->vtotal * drm_mode_vrefresh(mode);
31732 +       if (!total_lines) {
31733                 DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
31734                               __func__, mode->vtotal, drm_mode_vrefresh(mode));
31735                 return -EINVAL;
31736 @@ -43,15 +43,23 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
31737                                                         vsync_clk_speed);
31738                 return -EINVAL;
31739         }
31740 -       vclks_line = vsync_clk_speed * 100 / total_lines_x100;
31741 +       vclks_line = vsync_clk_speed / total_lines;
31743         cfg = MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN
31744                 | MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN;
31745         cfg |= MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line);
31747 +       /*
31748 +        * Tearcheck emits a blanking signal every vclks_line * vtotal * 2 ticks on
31749 +        * the vsync_clk, equating to roughly half the desired panel refresh rate.
31750 +        * This is only necessary as a stability fallback if interrupts from the
31751 +        * panel arrive too late or not at all, but is currently used by default
31752 +        * because these panel interrupts are not wired up yet.
31753 +        */
31754         mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id), cfg);
31755         mdp5_write(mdp5_kms,
31756 -               REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), 0xfff0);
31757 +               REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), (2 * mode->vtotal));
31759         mdp5_write(mdp5_kms,
31760                 REG_MDP5_PP_VSYNC_INIT_VAL(pp_id), mode->vdisplay);
31761         mdp5_write(mdp5_kms, REG_MDP5_PP_RD_PTR_IRQ(pp_id), mode->vdisplay + 1);
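
The arithmetic in the tearcheck comment above can be checked directly: vclks_line is vsync_clk ticks per display line, so a sync height of 2 * vtotal lines spans two frame times and the fallback signal fires at half the refresh rate. A worked example with illustrative (not device-measured) numbers:

#include <stdio.h>

int main(void)
{
        long vsync_clk_speed = 19200000; /* 19.2 MHz, assumed clock rate */
        int vtotal = 1600;               /* example total lines per frame */
        int vrefresh = 60;               /* Hz */

        long total_lines = (long)vtotal * vrefresh;      /* lines per second */
        long vclks_line = vsync_clk_speed / total_lines; /* ticks per line */
        long period = vclks_line * vtotal * 2;           /* ticks per signal */

        printf("fallback rate = %.1f Hz (half of %d Hz)\n",
               (double)vsync_clk_speed / period, vrefresh);
        return 0;
}
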
31762 diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
31763 index 82a8673ab8da..d7e4a39a904e 100644
31764 --- a/drivers/gpu/drm/msm/dp/dp_audio.c
31765 +++ b/drivers/gpu/drm/msm/dp/dp_audio.c
31766 @@ -527,6 +527,7 @@ int dp_audio_hw_params(struct device *dev,
31767         dp_audio_setup_acr(audio);
31768         dp_audio_safe_to_exit_level(audio);
31769         dp_audio_enable(audio, true);
31770 +       dp_display_signal_audio_start(dp_display);
31771         dp_display->audio_enabled = true;
31773  end:
31774 diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
31775 index 5a39da6e1eaf..1784e119269b 100644
31776 --- a/drivers/gpu/drm/msm/dp/dp_display.c
31777 +++ b/drivers/gpu/drm/msm/dp/dp_display.c
31778 @@ -178,6 +178,15 @@ static int dp_del_event(struct dp_display_private *dp_priv, u32 event)
31779         return 0;
31782 +void dp_display_signal_audio_start(struct msm_dp *dp_display)
31784 +       struct dp_display_private *dp;
31786 +       dp = container_of(dp_display, struct dp_display_private, dp_display);
31788 +       reinit_completion(&dp->audio_comp);
31791  void dp_display_signal_audio_complete(struct msm_dp *dp_display)
31793         struct dp_display_private *dp;
31794 @@ -586,10 +595,8 @@ static int dp_connect_pending_timeout(struct dp_display_private *dp, u32 data)
31795         mutex_lock(&dp->event_mutex);
31797         state = dp->hpd_state;
31798 -       if (state == ST_CONNECT_PENDING) {
31799 -               dp_display_enable(dp, 0);
31800 +       if (state == ST_CONNECT_PENDING)
31801                 dp->hpd_state = ST_CONNECTED;
31802 -       }
31804         mutex_unlock(&dp->event_mutex);
31806 @@ -651,7 +658,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
31807         dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
31809         /* signal the disconnect event early to ensure proper teardown */
31810 -       reinit_completion(&dp->audio_comp);
31811         dp_display_handle_plugged_change(g_dp_display, false);
31813         dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK |
31814 @@ -669,10 +675,8 @@ static int dp_disconnect_pending_timeout(struct dp_display_private *dp, u32 data
31815         mutex_lock(&dp->event_mutex);
31817         state =  dp->hpd_state;
31818 -       if (state == ST_DISCONNECT_PENDING) {
31819 -               dp_display_disable(dp, 0);
31820 +       if (state == ST_DISCONNECT_PENDING)
31821                 dp->hpd_state = ST_DISCONNECTED;
31822 -       }
31824         mutex_unlock(&dp->event_mutex);
31826 @@ -898,7 +902,6 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
31827         /* wait only if audio was enabled */
31828         if (dp_display->audio_enabled) {
31829                 /* signal the disconnect event */
31830 -               reinit_completion(&dp->audio_comp);
31831                 dp_display_handle_plugged_change(dp_display, false);
31832                 if (!wait_for_completion_timeout(&dp->audio_comp,
31833                                 HZ * 5))
31834 @@ -1272,7 +1275,12 @@ static int dp_pm_resume(struct device *dev)
31836         status = dp_catalog_link_is_connected(dp->catalog);
31838 -       if (status)
31839 +       /*
31840 +        * The display cannot be declared connected unless the
31841 +        * HDMI cable is plugged in and the dongle's sink_count
31842 +        * becomes 1.
31843 +        */
31844 +       if (status && dp->link->sink_count)
31845                 dp->dp_display.is_connected = true;
31846         else
31847                 dp->dp_display.is_connected = false;
31848 diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
31849 index 6092ba1ed85e..5173c89eedf7 100644
31850 --- a/drivers/gpu/drm/msm/dp/dp_display.h
31851 +++ b/drivers/gpu/drm/msm/dp/dp_display.h
31852 @@ -34,6 +34,7 @@ int dp_display_get_modes(struct msm_dp *dp_display,
31853  int dp_display_request_irq(struct msm_dp *dp_display);
31854  bool dp_display_check_video_test(struct msm_dp *dp_display);
31855  int dp_display_get_test_bpp(struct msm_dp *dp_display);
31856 +void dp_display_signal_audio_start(struct msm_dp *dp_display);
31857  void dp_display_signal_audio_complete(struct msm_dp *dp_display);
31859  #endif /* _DP_DISPLAY_H_ */
31860 diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.c b/drivers/gpu/drm/msm/dp/dp_hpd.c
31861 index 5b8fe32022b5..e1c90fa47411 100644
31862 --- a/drivers/gpu/drm/msm/dp/dp_hpd.c
31863 +++ b/drivers/gpu/drm/msm/dp/dp_hpd.c
31864 @@ -34,8 +34,8 @@ int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd)
31866         dp_usbpd->hpd_high = hpd;
31868 -       if (!hpd_priv->dp_cb && !hpd_priv->dp_cb->configure
31869 -                               && !hpd_priv->dp_cb->disconnect) {
31870 +       if (!hpd_priv->dp_cb || !hpd_priv->dp_cb->configure
31871 +                               || !hpd_priv->dp_cb->disconnect) {
31872                 pr_err("hpd dp_cb not initialized\n");
31873                 return -EINVAL;
31874         }
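
The dp_hpd fix above is a De Morgan correction: "bail out unless the callback struct and both callbacks exist" negates to a chain of ORs, whereas the old `&&` version could never be true when dp_cb was valid and would have dereferenced a NULL dp_cb otherwise. A standalone sketch of the corrected check, with hypothetical callback names:

#include <stddef.h>
#include <stdio.h>

struct dp_cb {
        void (*configure)(void);
        void (*disconnect)(void);
};

/* Reject a missing struct or a missing callback; note the ORs (De Morgan). */
static int cb_valid(const struct dp_cb *cb)
{
        if (!cb || !cb->configure || !cb->disconnect)
                return 0; /* short-circuit: cb is never dereferenced if NULL */
        return 1;
}

int main(void)
{
        struct dp_cb partial = { .configure = NULL, .disconnect = NULL };

        printf("NULL struct:    %d\n", cb_valid(NULL));     /* 0 */
        printf("partial struct: %d\n", cb_valid(&partial)); /* 0 */
        return 0;
}
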
31875 diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
31876 index 85ad0babc326..d611cc8e54a4 100644
31877 --- a/drivers/gpu/drm/msm/msm_debugfs.c
31878 +++ b/drivers/gpu/drm/msm/msm_debugfs.c
31879 @@ -111,23 +111,15 @@ static const struct file_operations msm_gpu_fops = {
31880  static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
31882         struct msm_drm_private *priv = dev->dev_private;
31883 -       struct msm_gpu *gpu = priv->gpu;
31884         int ret;
31886 -       ret = mutex_lock_interruptible(&priv->mm_lock);
31887 +       ret = mutex_lock_interruptible(&priv->obj_lock);
31888         if (ret)
31889                 return ret;
31891 -       if (gpu) {
31892 -               seq_printf(m, "Active Objects (%s):\n", gpu->name);
31893 -               msm_gem_describe_objects(&gpu->active_list, m);
31894 -       }
31896 -       seq_printf(m, "Inactive Objects:\n");
31897 -       msm_gem_describe_objects(&priv->inactive_dontneed, m);
31898 -       msm_gem_describe_objects(&priv->inactive_willneed, m);
31899 +       msm_gem_describe_objects(&priv->objects, m);
31901 -       mutex_unlock(&priv->mm_lock);
31902 +       mutex_unlock(&priv->obj_lock);
31904         return 0;
31906 diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
31907 index 196907689c82..18ea1c66de71 100644
31908 --- a/drivers/gpu/drm/msm/msm_drv.c
31909 +++ b/drivers/gpu/drm/msm/msm_drv.c
31910 @@ -446,6 +446,9 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
31912         priv->wq = alloc_ordered_workqueue("msm", 0);
31914 +       INIT_LIST_HEAD(&priv->objects);
31915 +       mutex_init(&priv->obj_lock);
31917         INIT_LIST_HEAD(&priv->inactive_willneed);
31918         INIT_LIST_HEAD(&priv->inactive_dontneed);
31919         mutex_init(&priv->mm_lock);
31920 diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
31921 index 591c47a654e8..6b58e49754cb 100644
31922 --- a/drivers/gpu/drm/msm/msm_drv.h
31923 +++ b/drivers/gpu/drm/msm/msm_drv.h
31924 @@ -174,7 +174,14 @@ struct msm_drm_private {
31925         struct msm_rd_state *hangrd;   /* debugfs to dump hanging submits */
31926         struct msm_perf_state *perf;
31928 -       /*
31929 +       /**
31930 +        * List of all GEM objects (mainly for debugfs), protected by obj_lock
31931 +        * (acquire before the per-GEM-object lock)
31932 +        */
31933 +       struct list_head objects;
31934 +       struct mutex obj_lock;
31936 +       /**
31937          * Lists of inactive GEM objects.  Every bo is either in one of the
31938          * inactive lists (depending on whether or not it is shrinkable) or
31939          * gpu->active_list (for the gpu it is active on[1])
31940 diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
31941 index f091c1e164fa..aeba3eb8ce46 100644
31942 --- a/drivers/gpu/drm/msm/msm_gem.c
31943 +++ b/drivers/gpu/drm/msm/msm_gem.c
31944 @@ -951,7 +951,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
31945         size_t size = 0;
31947         seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
31948 -       list_for_each_entry(msm_obj, list, mm_list) {
31949 +       list_for_each_entry(msm_obj, list, node) {
31950                 struct drm_gem_object *obj = &msm_obj->base;
31951                 seq_puts(m, "   ");
31952                 msm_gem_describe(obj, m);
31953 @@ -970,6 +970,10 @@ void msm_gem_free_object(struct drm_gem_object *obj)
31954         struct drm_device *dev = obj->dev;
31955         struct msm_drm_private *priv = dev->dev_private;
31957 +       mutex_lock(&priv->obj_lock);
31958 +       list_del(&msm_obj->node);
31959 +       mutex_unlock(&priv->obj_lock);
31961         mutex_lock(&priv->mm_lock);
31962         list_del(&msm_obj->mm_list);
31963         mutex_unlock(&priv->mm_lock);
31964 @@ -1157,6 +1161,10 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
31965         list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
31966         mutex_unlock(&priv->mm_lock);
31968 +       mutex_lock(&priv->obj_lock);
31969 +       list_add_tail(&msm_obj->node, &priv->objects);
31970 +       mutex_unlock(&priv->obj_lock);
31972         return obj;
31974  fail:
31975 @@ -1227,6 +1235,10 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
31976         list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
31977         mutex_unlock(&priv->mm_lock);
31979 +       mutex_lock(&priv->obj_lock);
31980 +       list_add_tail(&msm_obj->node, &priv->objects);
31981 +       mutex_unlock(&priv->obj_lock);
31983         return obj;
31985  fail:
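
The msm changes above put every GEM object on a single priv->objects list under its own obj_lock: added at the end of both creation paths, removed first in the free path, and walked by debugfs. A compact userspace sketch of the same intrusive-list-under-a-mutex pattern (list_head here is a two-pointer stand-in for the kernel's):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *prev, *next; };

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
}

static void list_del(struct list_head *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

static struct list_head objects = { &objects, &objects }; /* empty list */
static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;

struct obj { struct list_head node; int id; };

static struct obj *obj_new(int id)
{
        struct obj *o = malloc(sizeof(*o));

        if (!o)
                return NULL;
        o->id = id;
        pthread_mutex_lock(&obj_lock);
        list_add_tail(&o->node, &objects); /* mirrors _msm_gem_new() */
        pthread_mutex_unlock(&obj_lock);
        return o;
}

static void obj_free(struct obj *o)
{
        pthread_mutex_lock(&obj_lock);
        list_del(&o->node); /* mirrors msm_gem_free_object() */
        pthread_mutex_unlock(&obj_lock);
        free(o);
}

int main(void)
{
        struct obj *a = obj_new(1), *b = obj_new(2);

        if (a)
                obj_free(a);
        if (b)
                obj_free(b);
        puts("all objects removed from the list");
        return 0;
}
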
31986 diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
31987 index b3a0a880cbab..99d4c0e9465e 100644
31988 --- a/drivers/gpu/drm/msm/msm_gem.h
31989 +++ b/drivers/gpu/drm/msm/msm_gem.h
31990 @@ -55,8 +55,16 @@ struct msm_gem_object {
31991          */
31992         uint8_t vmap_count;
31994 -       /* And object is either:
31995 -        *  inactive - on priv->inactive_list
31996 +       /**
31997 +        * Node in the list of all objects (mainly for debugfs), protected by
31998 +        * priv->obj_lock.
31999 +        */
32000 +       struct list_head node;
32002 +       /**
32003 +        * An object is either:
32004 +        *  inactive - on priv->inactive_dontneed or priv->inactive_willneed
32005 +        *     (depending on purgeability status)
32006          *  active   - on one of the gpu's active_list..  well, at
32007          *     least for now we don't have (I don't think) hw sync between
32008          *     2d and 3d one devices which have both, meaning we need to
32009 diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
32010 index b31d750c425a..5f1722b040f4 100644
32011 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c
32012 +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
32013 @@ -4327,7 +4327,8 @@ static int omap_dsi_register_te_irq(struct dsi_data *dsi,
32014         irq_set_status_flags(te_irq, IRQ_NOAUTOEN);
32016         err = request_threaded_irq(te_irq, NULL, omap_dsi_te_irq_handler,
32017 -                                  IRQF_TRIGGER_RISING, "TE", dsi);
32018 +                                  IRQF_TRIGGER_RISING | IRQF_ONESHOT,
32019 +                                  "TE", dsi);
32020         if (err) {
32021                 dev_err(dsi->dev, "request irq failed with %d\n", err);
32022                 gpiod_put(dsi->te_gpio);
32023 diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
32024 index b9a0e56f33e2..ef70140c5b09 100644
32025 --- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
32026 +++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
32027 @@ -898,8 +898,7 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
32028          */
32029         dsi->hs_rate = 349440000;
32030         dsi->lp_rate = 9600000;
32031 -       dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
32032 -               MIPI_DSI_MODE_EOT_PACKET;
32033 +       dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
32035         /*
32036          * Every new incarnation of this display must have a unique
32037 diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
32038 index 4aac0d1573dd..70560cac53a9 100644
32039 --- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
32040 +++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
32041 @@ -184,9 +184,7 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi)
32042          * As we only send commands we do not need to be continuously
32043          * clocked.
32044          */
32045 -       dsi->mode_flags =
32046 -               MIPI_DSI_CLOCK_NON_CONTINUOUS |
32047 -               MIPI_DSI_MODE_EOT_PACKET;
32048 +       dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
32050         s6->supply = devm_regulator_get(dev, "vdd1");
32051         if (IS_ERR(s6->supply))
32052 diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
32053 index eec74c10ddda..9c3563c61e8c 100644
32054 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
32055 +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
32056 @@ -97,7 +97,6 @@ static int s6e63m0_dsi_probe(struct mipi_dsi_device *dsi)
32057         dsi->hs_rate = 349440000;
32058         dsi->lp_rate = 9600000;
32059         dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
32060 -               MIPI_DSI_MODE_EOT_PACKET |
32061                 MIPI_DSI_MODE_VIDEO_BURST;
32063         ret = s6e63m0_probe(dev, s6e63m0_dsi_dcs_read, s6e63m0_dsi_dcs_write,
32064 diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
32065 index 4e2dad314c79..e8b1a0e873ea 100644
32066 --- a/drivers/gpu/drm/panel/panel-simple.c
32067 +++ b/drivers/gpu/drm/panel/panel-simple.c
32068 @@ -406,7 +406,7 @@ static int panel_simple_prepare(struct drm_panel *panel)
32069                 if (IS_ERR(p->hpd_gpio)) {
32070                         err = panel_simple_get_hpd_gpio(panel->dev, p, false);
32071                         if (err)
32072 -                               return err;
32073 +                               goto error;
32074                 }
32076                 err = readx_poll_timeout(gpiod_get_value_cansleep, p->hpd_gpio,
32077 @@ -418,13 +418,20 @@ static int panel_simple_prepare(struct drm_panel *panel)
32078                 if (err) {
32079                         dev_err(panel->dev,
32080                                 "error waiting for hpd GPIO: %d\n", err);
32081 -                       return err;
32082 +                       goto error;
32083                 }
32084         }
32086         p->prepared_time = ktime_get();
32088         return 0;
32090 +error:
32091 +       gpiod_set_value_cansleep(p->enable_gpio, 0);
32092 +       regulator_disable(p->supply);
32093 +       p->unprepared_time = ktime_get();
32095 +       return err;
32098  static int panel_simple_enable(struct drm_panel *panel)
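
The panel-simple fix above converts early returns into a single error label so the regulator and enable GPIO are always unwound on failure. A compact sketch of the kernel's goto-unwind idiom, with allocations standing in for the real resources (names are illustrative):

#include <stdio.h>
#include <stdlib.h>

static char *supply, *gpio; /* device state held across prepare()/unprepare() */

static int prepare(void)
{
        int err;

        supply = malloc(16); /* stands in for regulator_enable() */
        if (!supply)
                return -1;

        gpio = malloc(16);   /* stands in for acquiring the HPD GPIO */
        if (!gpio) {
                err = -2;
                goto error;  /* every later failure funnels through one label */
        }
        return 0;

error:
        free(supply);        /* unwind in reverse order of acquisition */
        supply = NULL;
        return err;
}

static void unprepare(void)
{
        free(gpio);
        free(supply);
        gpio = supply = NULL;
}

int main(void)
{
        if (prepare() == 0) {
                puts("prepared");
                unprepare();
        }
        return 0;
}
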
32099 diff --git a/drivers/gpu/drm/panel/panel-sony-acx424akp.c b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
32100 index 065efae213f5..95659a4d15e9 100644
32101 --- a/drivers/gpu/drm/panel/panel-sony-acx424akp.c
32102 +++ b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
32103 @@ -449,8 +449,7 @@ static int acx424akp_probe(struct mipi_dsi_device *dsi)
32104                         MIPI_DSI_MODE_VIDEO_BURST;
32105         else
32106                 dsi->mode_flags =
32107 -                       MIPI_DSI_CLOCK_NON_CONTINUOUS |
32108 -                       MIPI_DSI_MODE_EOT_PACKET;
32109 +                       MIPI_DSI_CLOCK_NON_CONTINUOUS;
32111         acx->supply = devm_regulator_get(dev, "vddi");
32112         if (IS_ERR(acx->supply))
32113 diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
32114 index 7c1b3481b785..21e552d1ac71 100644
32115 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
32116 +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
32117 @@ -488,8 +488,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
32118                 }
32119                 bo->base.pages = pages;
32120                 bo->base.pages_use_count = 1;
32121 -       } else
32122 +       } else {
32123                 pages = bo->base.pages;
32124 +               if (pages[page_offset]) {
32125 +                       /* Pages are already mapped, bail out. */
32126 +                       mutex_unlock(&bo->base.pages_lock);
32127 +                       goto out;
32128 +               }
32129 +       }
32131         mapping = bo->base.base.filp->f_mapping;
32132         mapping_set_unevictable(mapping);
32133 @@ -522,6 +528,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
32135         dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
32137 +out:
32138         panfrost_gem_mapping_put(bomapping);
32140         return 0;
32141 @@ -593,6 +600,8 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
32142                 access_type = (fault_status >> 8) & 0x3;
32143                 source_id = (fault_status >> 16);
32145 +               mmu_write(pfdev, MMU_INT_CLEAR, mask);
32147                 /* Page fault only */
32148                 ret = -1;
32149                 if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 0xC0)
32150 @@ -616,8 +625,6 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
32151                                 access_type, access_type_name(pfdev, fault_status),
32152                                 source_id);
32154 -               mmu_write(pfdev, MMU_INT_CLEAR, mask);
32156                 status &= ~mask;
32157         }
32159 diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
32160 index 54e3c3a97440..741cc983daf1 100644
32161 --- a/drivers/gpu/drm/qxl/qxl_cmd.c
32162 +++ b/drivers/gpu/drm/qxl/qxl_cmd.c
32163 @@ -268,7 +268,7 @@ int qxl_alloc_bo_reserved(struct qxl_device *qdev,
32164         int ret;
32166         ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
32167 -                           false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
32168 +                           false, QXL_GEM_DOMAIN_VRAM, 0, NULL, &bo);
32169         if (ret) {
32170                 DRM_ERROR("failed to allocate VRAM BO\n");
32171                 return ret;
32172 diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
32173 index 10738e04c09b..3f432ec8e771 100644
32174 --- a/drivers/gpu/drm/qxl/qxl_display.c
32175 +++ b/drivers/gpu/drm/qxl/qxl_display.c
32176 @@ -798,8 +798,8 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
32177                                 qdev->dumb_shadow_bo = NULL;
32178                         }
32179                         qxl_bo_create(qdev, surf.height * surf.stride,
32180 -                                     true, true, QXL_GEM_DOMAIN_SURFACE, &surf,
32181 -                                     &qdev->dumb_shadow_bo);
32182 +                                     true, true, QXL_GEM_DOMAIN_SURFACE, 0,
32183 +                                     &surf, &qdev->dumb_shadow_bo);
32184                 }
32185                 if (user_bo->shadow != qdev->dumb_shadow_bo) {
32186                         if (user_bo->shadow) {
32187 @@ -1228,6 +1228,10 @@ int qxl_modeset_init(struct qxl_device *qdev)
32189  void qxl_modeset_fini(struct qxl_device *qdev)
32191 +       if (qdev->dumb_shadow_bo) {
32192 +               drm_gem_object_put(&qdev->dumb_shadow_bo->tbo.base);
32193 +               qdev->dumb_shadow_bo = NULL;
32194 +       }
32195         qxl_destroy_monitors_object(qdev);
32196         drm_mode_config_cleanup(&qdev->ddev);
32198 diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
32199 index 48e096285b4c..a08da0bd9098 100644
32200 --- a/drivers/gpu/drm/qxl/qxl_gem.c
32201 +++ b/drivers/gpu/drm/qxl/qxl_gem.c
32202 @@ -55,7 +55,7 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
32203         /* At least align on page size */
32204         if (alignment < PAGE_SIZE)
32205                 alignment = PAGE_SIZE;
32206 -       r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo);
32207 +       r = qxl_bo_create(qdev, size, kernel, false, initial_domain, 0, surf, &qbo);
32208         if (r) {
32209                 if (r != -ERESTARTSYS)
32210                         DRM_ERROR(
32211 diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
32212 index ceebc5881f68..a5806667697a 100644
32213 --- a/drivers/gpu/drm/qxl/qxl_object.c
32214 +++ b/drivers/gpu/drm/qxl/qxl_object.c
32215 @@ -103,8 +103,8 @@ static const struct drm_gem_object_funcs qxl_object_funcs = {
32216         .print_info = drm_gem_ttm_print_info,
32217  };
32219 -int qxl_bo_create(struct qxl_device *qdev,
32220 -                 unsigned long size, bool kernel, bool pinned, u32 domain,
32221 +int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
32222 +                 bool kernel, bool pinned, u32 domain, u32 priority,
32223                   struct qxl_surface *surf,
32224                   struct qxl_bo **bo_ptr)
32226 @@ -137,6 +137,7 @@ int qxl_bo_create(struct qxl_device *qdev,
32228         qxl_ttm_placement_from_domain(bo, domain);
32230 +       bo->tbo.priority = priority;
32231         r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, size, type,
32232                                  &bo->placement, 0, &ctx, size,
32233                                  NULL, NULL, &qxl_ttm_bo_destroy);
32234 diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
32235 index e60a8f88e226..dc1659e717f1 100644
32236 --- a/drivers/gpu/drm/qxl/qxl_object.h
32237 +++ b/drivers/gpu/drm/qxl/qxl_object.h
32238 @@ -61,6 +61,7 @@ static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
32239  extern int qxl_bo_create(struct qxl_device *qdev,
32240                          unsigned long size,
32241                          bool kernel, bool pinned, u32 domain,
32242 +                        u32 priority,
32243                          struct qxl_surface *surf,
32244                          struct qxl_bo **bo_ptr);
32245  extern int qxl_bo_kmap(struct qxl_bo *bo, struct dma_buf_map *map);
32246 diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
32247 index b372455e2729..801ce77b1dac 100644
32248 --- a/drivers/gpu/drm/qxl/qxl_release.c
32249 +++ b/drivers/gpu/drm/qxl/qxl_release.c
32250 @@ -199,11 +199,12 @@ qxl_release_free(struct qxl_device *qdev,
32253  static int qxl_release_bo_alloc(struct qxl_device *qdev,
32254 -                               struct qxl_bo **bo)
32255 +                               struct qxl_bo **bo,
32256 +                               u32 priority)
32257  {
32258         /* pin release bo's; they are too messy to evict */
32259         return qxl_bo_create(qdev, PAGE_SIZE, false, true,
32260 -                            QXL_GEM_DOMAIN_VRAM, NULL, bo);
32261 +                            QXL_GEM_DOMAIN_VRAM, priority, NULL, bo);
32264  int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
32265 @@ -326,13 +327,18 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
32266         int ret = 0;
32267         union qxl_release_info *info;
32268         int cur_idx;
32269 +       u32 priority;
32271 -       if (type == QXL_RELEASE_DRAWABLE)
32272 +       if (type == QXL_RELEASE_DRAWABLE) {
32273                 cur_idx = 0;
32274 -       else if (type == QXL_RELEASE_SURFACE_CMD)
32275 +               priority = 0;
32276 +       } else if (type == QXL_RELEASE_SURFACE_CMD) {
32277                 cur_idx = 1;
32278 -       else if (type == QXL_RELEASE_CURSOR_CMD)
32279 +               priority = 1;
32280 +       } else if (type == QXL_RELEASE_CURSOR_CMD) {
32281                 cur_idx = 2;
32282 +               priority = 1;
32283 +       }
32284         else {
32285                 DRM_ERROR("got illegal type: %d\n", type);
32286                 return -EINVAL;
32287 @@ -352,7 +358,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
32288                 qdev->current_release_bo[cur_idx] = NULL;
32289         }
32290         if (!qdev->current_release_bo[cur_idx]) {
32291 -               ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
32292 +               ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority);
32293                 if (ret) {
32294                         mutex_unlock(&qdev->release_mutex);
32295                         if (free_bo) {
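The qxl hunks above thread a TTM buffer priority through qxl_bo_create(): drawable releases keep priority 0 while surface-command and cursor-command releases get priority 1, so TTM's eviction walk (which starts at the lowest priority) sheds the short-lived drawables first. A minimal sketch of the resulting call shape, with names taken from the hunks and error handling trimmed:

        /* Sketch only: mirrors qxl_release_bo_alloc() after this patch. */
        static int example_release_bo_alloc(struct qxl_device *qdev,
                                            struct qxl_bo **bo, bool is_cursor)
        {
                /* Priorities as chosen in qxl_alloc_release_reserved() above. */
                u32 priority = is_cursor ? 1 : 0;

                /* One pinned page in VRAM; no kernel mapping, no surface. */
                return qxl_bo_create(qdev, PAGE_SIZE, false, true,
                                     QXL_GEM_DOMAIN_VRAM, priority, NULL, bo);
        }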
32296 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
32297 index 3effc8c71494..ea44423376c4 100644
32298 --- a/drivers/gpu/drm/radeon/radeon.h
32299 +++ b/drivers/gpu/drm/radeon/radeon.h
32300 @@ -1558,6 +1558,7 @@ struct radeon_dpm {
32301         void                    *priv;
32302         u32                     new_active_crtcs;
32303         int                     new_active_crtc_count;
32304 +       int                     high_pixelclock_count;
32305         u32                     current_active_crtcs;
32306         int                     current_active_crtc_count;
32307         bool single_display;
32308 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
32309 index 42301b4e56f5..28c4413f4dc8 100644
32310 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
32311 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
32312 @@ -2120,11 +2120,14 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
32313                 return state_index;
32314         /* last mode is usually default, array is low to high */
32315         for (i = 0; i < num_modes; i++) {
32316 -               rdev->pm.power_state[state_index].clock_info =
32317 -                       kcalloc(1, sizeof(struct radeon_pm_clock_info),
32318 -                               GFP_KERNEL);
32319 +               /* avoid memory leaks from invalid modes or unknown frev. */
32320 +               if (!rdev->pm.power_state[state_index].clock_info) {
32321 +                       rdev->pm.power_state[state_index].clock_info =
32322 +                               kzalloc(sizeof(struct radeon_pm_clock_info),
32323 +                                       GFP_KERNEL);
32324 +               }
32325                 if (!rdev->pm.power_state[state_index].clock_info)
32326 -                       return state_index;
32327 +                       goto out;
32328                 rdev->pm.power_state[state_index].num_clock_modes = 1;
32329                 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
32330                 switch (frev) {
32331 @@ -2243,17 +2246,24 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
32332                         break;
32333                 }
32334         }
32335 +out:
32336 +       /* free any unused clock_info allocation. */
32337 +       if (state_index && state_index < num_modes) {
32338 +               kfree(rdev->pm.power_state[state_index].clock_info);
32339 +               rdev->pm.power_state[state_index].clock_info = NULL;
32340 +       }
32342         /* last mode is usually default */
32343 -       if (rdev->pm.default_power_state_index == -1) {
32344 +       if (state_index && rdev->pm.default_power_state_index == -1) {
32345                 rdev->pm.power_state[state_index - 1].type =
32346                         POWER_STATE_TYPE_DEFAULT;
32347                 rdev->pm.default_power_state_index = state_index - 1;
32348                 rdev->pm.power_state[state_index - 1].default_clock_mode =
32349                         &rdev->pm.power_state[state_index - 1].clock_info[0];
32350 -               rdev->pm.power_state[state_index].flags &=
32351 +               rdev->pm.power_state[state_index - 1].flags &=
32352                         ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
32353 -               rdev->pm.power_state[state_index].misc = 0;
32354 -               rdev->pm.power_state[state_index].misc2 = 0;
32355 +               rdev->pm.power_state[state_index - 1].misc = 0;
32356 +               rdev->pm.power_state[state_index - 1].misc2 = 0;
32357         }
32358         return state_index;
32360 diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
32361 index 2c32186c4acd..4e4c937c36c6 100644
32362 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
32363 +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
32364 @@ -242,6 +242,9 @@ radeon_dp_mst_detect(struct drm_connector *connector,
32365                 to_radeon_connector(connector);
32366         struct radeon_connector *master = radeon_connector->mst_port;
32368 +       if (drm_connector_is_unregistered(connector))
32369 +               return connector_status_disconnected;
32371         return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
32372                                       radeon_connector->port);
32374 diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
32375 index 3808a753127b..04109a2a6fd7 100644
32376 --- a/drivers/gpu/drm/radeon/radeon_gart.c
32377 +++ b/drivers/gpu/drm/radeon/radeon_gart.c
32378 @@ -301,7 +301,8 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
32379         p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
32381         for (i = 0; i < pages; i++, p++) {
32382 -               rdev->gart.pages[p] = pagelist[i];
32383 +               rdev->gart.pages[p] = pagelist ? pagelist[i] :
32384 +                       rdev->dummy_page.page;
32385                 page_base = dma_addr[i];
32386                 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
32387                         page_entry = radeon_gart_get_page_entry(page_base, flags);
32388 diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
32389 index 2479d6ab7a36..58876bb4ef2a 100644
32390 --- a/drivers/gpu/drm/radeon/radeon_kms.c
32391 +++ b/drivers/gpu/drm/radeon/radeon_kms.c
32392 @@ -518,6 +518,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
32393                         *value = rdev->config.si.backend_enable_mask;
32394                 } else {
32395                         DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
32396 +                       return -EINVAL;
32397                 }
32398                 break;
32399         case RADEON_INFO_MAX_SCLK:
32400 diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
32401 index 9b81786782de..499ce55e34cc 100644
32402 --- a/drivers/gpu/drm/radeon/radeon_object.c
32403 +++ b/drivers/gpu/drm/radeon/radeon_object.c
32404 @@ -384,6 +384,8 @@ int radeon_bo_evict_vram(struct radeon_device *rdev)
32405         }
32406  #endif
32407         man = ttm_manager_type(bdev, TTM_PL_VRAM);
32408 +       if (!man)
32409 +               return 0;
32410         return ttm_resource_manager_evict_all(bdev, man);
32413 diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
32414 index 1995dad59dd0..2db4a8b1542d 100644
32415 --- a/drivers/gpu/drm/radeon/radeon_pm.c
32416 +++ b/drivers/gpu/drm/radeon/radeon_pm.c
32417 @@ -1775,6 +1775,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
32418         struct drm_device *ddev = rdev->ddev;
32419         struct drm_crtc *crtc;
32420         struct radeon_crtc *radeon_crtc;
32421 +       struct radeon_connector *radeon_connector;
32423         if (!rdev->pm.dpm_enabled)
32424                 return;
32425 @@ -1784,6 +1785,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
32426         /* update active crtc counts */
32427         rdev->pm.dpm.new_active_crtcs = 0;
32428         rdev->pm.dpm.new_active_crtc_count = 0;
32429 +       rdev->pm.dpm.high_pixelclock_count = 0;
32430         if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
32431                 list_for_each_entry(crtc,
32432                                     &ddev->mode_config.crtc_list, head) {
32433 @@ -1791,6 +1793,12 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
32434                         if (crtc->enabled) {
32435                                 rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
32436                                 rdev->pm.dpm.new_active_crtc_count++;
32437 +                               if (!radeon_crtc->connector)
32438 +                                       continue;
32440 +                               radeon_connector = to_radeon_connector(radeon_crtc->connector);
32441 +                               if (radeon_connector->pixelclock_for_modeset > 297000)
32442 +                                       rdev->pm.dpm.high_pixelclock_count++;
32443                         }
32444                 }
32445         }
32446 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
32447 index 78893bea85ae..c0258d213a72 100644
32448 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
32449 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
32450 @@ -485,13 +485,14 @@ static void radeon_ttm_backend_unbind(struct ttm_bo_device *bdev, struct ttm_tt
32451         struct radeon_ttm_tt *gtt = (void *)ttm;
32452         struct radeon_device *rdev = radeon_get_rdev(bdev);
32454 +       if (gtt->userptr)
32455 +               radeon_ttm_tt_unpin_userptr(bdev, ttm);
32457         if (!gtt->bound)
32458                 return;
32460         radeon_gart_unbind(rdev, gtt->offset, ttm->num_pages);
32462 -       if (gtt->userptr)
32463 -               radeon_ttm_tt_unpin_userptr(bdev, ttm);
32464         gtt->bound = false;
32467 diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
32468 index 91bfc4762767..43b63705d073 100644
32469 --- a/drivers/gpu/drm/radeon/si_dpm.c
32470 +++ b/drivers/gpu/drm/radeon/si_dpm.c
32471 @@ -2979,6 +2979,9 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
32472                     (rdev->pdev->device == 0x6605)) {
32473                         max_sclk = 75000;
32474                 }
32476 +               if (rdev->pm.dpm.high_pixelclock_count > 1)
32477 +                       disable_sclk_switching = true;
32478         }
32480         if (rps->vce_active) {
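Taken together, the radeon_pm.c and si_dpm.c hunks count enabled heads whose pixel clock exceeds 297,000 kHz (the field is in kHz, so the cut is 297 MHz) and then pin the engine clock when more than one such head is active, apparently to avoid the flicker that mid-scanout clock switching can cause with multiple high-bandwidth displays. The threshold check as a sketch:

        /* Sketch: pixelclock_for_modeset is in kHz, so this is a 297 MHz cut. */
        static bool is_high_pixelclock(u32 pixelclock_khz)
        {
                return pixelclock_khz > 297000;
        }

        /* In si_apply_state_adjust_rules(): two or more such heads pin sclk. */
        if (rdev->pm.dpm.high_pixelclock_count > 1)
                disable_sclk_switching = true;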
32481 diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
32482 index 7812094f93d6..6f3b523e16e8 100644
32483 --- a/drivers/gpu/drm/stm/ltdc.c
32484 +++ b/drivers/gpu/drm/stm/ltdc.c
32485 @@ -525,13 +525,42 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
32487         struct ltdc_device *ldev = crtc_to_ltdc(crtc);
32488         struct drm_device *ddev = crtc->dev;
32489 +       struct drm_connector_list_iter iter;
32490 +       struct drm_connector *connector = NULL;
32491 +       struct drm_encoder *encoder = NULL;
32492 +       struct drm_bridge *bridge = NULL;
32493         struct drm_display_mode *mode = &crtc->state->adjusted_mode;
32494         struct videomode vm;
32495         u32 hsync, vsync, accum_hbp, accum_vbp, accum_act_w, accum_act_h;
32496         u32 total_width, total_height;
32497 +       u32 bus_flags = 0;
32498         u32 val;
32499         int ret;
32501 +       /* Get the encoder from the CRTC */
32502 +       drm_for_each_encoder(encoder, ddev)
32503 +               if (encoder->crtc == crtc)
32504 +                       break;
32506 +       if (encoder) {
32507 +               /* Get the bridge from the encoder */
32508 +               list_for_each_entry(bridge, &encoder->bridge_chain, chain_node)
32509 +                       if (bridge->encoder == encoder)
32510 +                               break;
32512 +               /* Get the connector from the encoder */
32513 +               drm_connector_list_iter_begin(ddev, &iter);
32514 +               drm_for_each_connector_iter(connector, &iter)
32515 +                       if (connector->encoder == encoder)
32516 +                               break;
32517 +               drm_connector_list_iter_end(&iter);
32518 +       }
32520 +       if (bridge && bridge->timings)
32521 +               bus_flags = bridge->timings->input_bus_flags;
32522 +       else if (connector)
32523 +               bus_flags = connector->display_info.bus_flags;
32525         if (!pm_runtime_active(ddev->dev)) {
32526                 ret = pm_runtime_get_sync(ddev->dev);
32527                 if (ret) {
32528 @@ -567,10 +596,10 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
32529         if (vm.flags & DISPLAY_FLAGS_VSYNC_HIGH)
32530                 val |= GCR_VSPOL;
32532 -       if (vm.flags & DISPLAY_FLAGS_DE_LOW)
32533 +       if (bus_flags & DRM_BUS_FLAG_DE_LOW)
32534                 val |= GCR_DEPOL;
32536 -       if (vm.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
32537 +       if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
32538                 val |= GCR_PCPOL;
32540         reg_update_bits(ldev->regs, LTDC_GCR,
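The ltdc change stops inferring data-enable and pixel-clock polarity from the adjusted mode's videomode flags, which describe the mode rather than the wiring. Instead it walks the pipeline and prefers a bridge's declared input_bus_flags, falling back to the connector's display_info.bus_flags. Consolidating the two hunks into one view (a sketch using the same fields):

        u32 bus_flags = 0;

        if (bridge && bridge->timings)          /* the bridge knows its input side */
                bus_flags = bridge->timings->input_bus_flags;
        else if (connector)                     /* otherwise trust the panel/connector */
                bus_flags = connector->display_info.bus_flags;

        if (bus_flags & DRM_BUS_FLAG_DE_LOW)
                val |= GCR_DEPOL;
        if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
                val |= GCR_PCPOL;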
32541 diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
32542 index 30213708fc99..d99afd19ca08 100644
32543 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
32544 +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
32545 @@ -515,6 +515,15 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
32547         drm_crtc_vblank_off(crtc);
32549 +       spin_lock_irq(&crtc->dev->event_lock);
32551 +       if (crtc->state->event) {
32552 +               drm_crtc_send_vblank_event(crtc, crtc->state->event);
32553 +               crtc->state->event = NULL;
32554 +       }
32556 +       spin_unlock_irq(&crtc->dev->event_lock);
32558         tilcdc_crtc_disable_irqs(dev);
32560         pm_runtime_put_sync(dev->dev);
32561 diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
32562 index 101a68dc615b..799ec7a7caa4 100644
32563 --- a/drivers/gpu/drm/ttm/ttm_bo.c
32564 +++ b/drivers/gpu/drm/ttm/ttm_bo.c
32565 @@ -153,6 +153,8 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
32567                 swap = &ttm_bo_glob.swap_lru[bo->priority];
32568                 list_move_tail(&bo->swap, swap);
32569 +       } else {
32570 +               list_del_init(&bo->swap);
32571         }
32573         if (bdev->driver->del_from_lru_notify)
32574 diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
32575 index 23eb6d772e40..669f2ee39515 100644
32576 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
32577 +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
32578 @@ -174,7 +174,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
32579                 if (!sync_file) {
32580                         dma_fence_put(&out_fence->f);
32581                         ret = -ENOMEM;
32582 -                       goto out_memdup;
32583 +                       goto out_unresv;
32584                 }
32586                 exbuf->fence_fd = out_fence_fd;
32587 diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
32588 index d69a5b6da553..4ff1ec28e630 100644
32589 --- a/drivers/gpu/drm/virtio/virtgpu_object.c
32590 +++ b/drivers/gpu/drm/virtio/virtgpu_object.c
32591 @@ -248,6 +248,7 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
32593         ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
32594         if (ret != 0) {
32595 +               virtio_gpu_array_put_free(objs);
32596                 virtio_gpu_free_object(&shmem_obj->base);
32597                 return ret;
32598         }
32599 diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
32600 index 0443b7deeaef..758d8a98d96b 100644
32601 --- a/drivers/gpu/drm/vkms/vkms_crtc.c
32602 +++ b/drivers/gpu/drm/vkms/vkms_crtc.c
32603 @@ -18,7 +18,8 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
32605         ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
32606                                           output->period_ns);
32607 -       WARN_ON(ret_overrun != 1);
32608 +       if (ret_overrun != 1)
32609 +               pr_warn("%s: vblank timer overrun\n", __func__);
32611         spin_lock(&output->lock);
32612         ret = drm_crtc_handle_vblank(crtc);
32613 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
32614 index 6c2a569f1fcb..8d7feeb0d7ab 100644
32615 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
32616 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
32617 @@ -201,7 +201,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
32618                         break;
32619                 }
32620                 if (lazy)
32621 -                       schedule_timeout(1);
32622 +                       schedule_min_hrtimeout();
32623                 else if ((++count & 0x0F) == 0) {
32624                         /**
32625                          * FIXME: Use schedule_hr_timeout here for
32626 diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
32627 index 99158ee67d02..59d1fb017da0 100644
32628 --- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
32629 +++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
32630 @@ -866,7 +866,7 @@ static int zynqmp_dp_train(struct zynqmp_dp *dp)
32631                 return ret;
32633         zynqmp_dp_write(dp, ZYNQMP_DP_SCRAMBLING_DISABLE, 1);
32634 -       memset(dp->train_set, 0, 4);
32635 +       memset(dp->train_set, 0, sizeof(dp->train_set));
32636         ret = zynqmp_dp_link_train_cr(dp);
32637         if (ret)
32638                 return ret;
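The zynqmp_dp one-liner replaces a hardcoded memset length with sizeof(dp->train_set); the array is sized by the link's lane count, so clearing a literal 4 bytes can write past it, and a literal would also silently under-clear if the array ever grew. The idiom in isolation (the 2-lane size below is an assumption for illustration; u8 comes from <linux/types.h>):

        u8 train_set[2];                          /* sized by lane count */

        memset(train_set, 0, sizeof(train_set));  /* tracks the real array size */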
32639 diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
32640 index 67fd8a2f5aba..ba338973e968 100644
32641 --- a/drivers/hid/hid-ids.h
32642 +++ b/drivers/hid/hid-ids.h
32643 @@ -946,6 +946,7 @@
32644  #define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S   0x8003
32646  #define USB_VENDOR_ID_PLANTRONICS      0x047f
32647 +#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES        0xc056
32649  #define USB_VENDOR_ID_PANASONIC                0x04da
32650  #define USB_DEVICE_ID_PANABOARD_UBT780 0x1044
32651 diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
32652 index c6c8e20f3e8d..0ff03fed9770 100644
32653 --- a/drivers/hid/hid-lenovo.c
32654 +++ b/drivers/hid/hid-lenovo.c
32655 @@ -33,6 +33,9 @@
32657  #include "hid-ids.h"
32659 +/* Userspace expects F20 for mic-mute; KEY_MICMUTE does not work */
32660 +#define LENOVO_KEY_MICMUTE KEY_F20
32662  struct lenovo_drvdata {
32663         u8 led_report[3]; /* Must be first for proper alignment */
32664         int led_state;
32665 @@ -62,8 +65,8 @@ struct lenovo_drvdata {
32666  #define TP10UBKBD_LED_OFF              1
32667  #define TP10UBKBD_LED_ON               2
32669 -static void lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
32670 -                                    enum led_brightness value)
32671 +static int lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
32672 +                                   enum led_brightness value)
32674         struct lenovo_drvdata *data = hid_get_drvdata(hdev);
32675         int ret;
32676 @@ -75,10 +78,18 @@ static void lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
32677         data->led_report[2] = value ? TP10UBKBD_LED_ON : TP10UBKBD_LED_OFF;
32678         ret = hid_hw_raw_request(hdev, data->led_report[0], data->led_report, 3,
32679                                  HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
32680 -       if (ret)
32681 -               hid_err(hdev, "Set LED output report error: %d\n", ret);
32682 +       if (ret != 3) {
32683 +               if (ret != -ENODEV)
32684 +                       hid_err(hdev, "Set LED output report error: %d\n", ret);
32686 +               ret = ret < 0 ? ret : -EIO;
32687 +       } else {
32688 +               ret = 0;
32689 +       }
32691         mutex_unlock(&data->led_report_mutex);
32693 +       return ret;
32696  static void lenovo_tp10ubkbd_sync_fn_lock(struct work_struct *work)
32697 @@ -126,7 +137,7 @@ static int lenovo_input_mapping_tpkbd(struct hid_device *hdev,
32698         if (usage->hid == (HID_UP_BUTTON | 0x0010)) {
32699                 /* This sub-device contains trackpoint, mark it */
32700                 hid_set_drvdata(hdev, (void *)1);
32701 -               map_key_clear(KEY_MICMUTE);
32702 +               map_key_clear(LENOVO_KEY_MICMUTE);
32703                 return 1;
32704         }
32705         return 0;
32706 @@ -141,7 +152,7 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev,
32707             (usage->hid & HID_USAGE_PAGE) == HID_UP_LNVENDOR) {
32708                 switch (usage->hid & HID_USAGE) {
32709                 case 0x00f1: /* Fn-F4: Mic mute */
32710 -                       map_key_clear(KEY_MICMUTE);
32711 +                       map_key_clear(LENOVO_KEY_MICMUTE);
32712                         return 1;
32713                 case 0x00f2: /* Fn-F5: Brightness down */
32714                         map_key_clear(KEY_BRIGHTNESSDOWN);
32715 @@ -231,7 +242,7 @@ static int lenovo_input_mapping_tp10_ultrabook_kbd(struct hid_device *hdev,
32716                         map_key_clear(KEY_FN_ESC);
32717                         return 1;
32718                 case 9: /* Fn-F4: Mic mute */
32719 -                       map_key_clear(KEY_MICMUTE);
32720 +                       map_key_clear(LENOVO_KEY_MICMUTE);
32721                         return 1;
32722                 case 10: /* Fn-F7: Control panel */
32723                         map_key_clear(KEY_CONFIG);
32724 @@ -349,7 +360,7 @@ static ssize_t attr_fn_lock_store(struct device *dev,
32726         struct hid_device *hdev = to_hid_device(dev);
32727         struct lenovo_drvdata *data = hid_get_drvdata(hdev);
32728 -       int value;
32729 +       int value, ret;
32731         if (kstrtoint(buf, 10, &value))
32732                 return -EINVAL;
32733 @@ -364,7 +375,9 @@ static ssize_t attr_fn_lock_store(struct device *dev,
32734                 lenovo_features_set_cptkbd(hdev);
32735                 break;
32736         case USB_DEVICE_ID_LENOVO_TP10UBKBD:
32737 -               lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value);
32738 +               ret = lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value);
32739 +               if (ret)
32740 +                       return ret;
32741                 break;
32742         }
32744 @@ -498,6 +511,9 @@ static int lenovo_event_cptkbd(struct hid_device *hdev,
32745  static int lenovo_event(struct hid_device *hdev, struct hid_field *field,
32746                 struct hid_usage *usage, __s32 value)
32748 +       if (!hid_get_drvdata(hdev))
32749 +               return 0;
32751         switch (hdev->product) {
32752         case USB_DEVICE_ID_LENOVO_CUSBKBD:
32753         case USB_DEVICE_ID_LENOVO_CBTKBD:
32754 @@ -777,7 +793,7 @@ static enum led_brightness lenovo_led_brightness_get(
32755                                 : LED_OFF;
32758 -static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
32759 +static int lenovo_led_brightness_set(struct led_classdev *led_cdev,
32760                         enum led_brightness value)
32762         struct device *dev = led_cdev->dev->parent;
32763 @@ -785,6 +801,7 @@ static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
32764         struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
32765         u8 tp10ubkbd_led[] = { TP10UBKBD_MUTE_LED, TP10UBKBD_MICMUTE_LED };
32766         int led_nr = 0;
32767 +       int ret = 0;
32769         if (led_cdev == &data_pointer->led_micmute)
32770                 led_nr = 1;
32771 @@ -799,9 +816,11 @@ static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
32772                 lenovo_led_set_tpkbd(hdev);
32773                 break;
32774         case USB_DEVICE_ID_LENOVO_TP10UBKBD:
32775 -               lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value);
32776 +               ret = lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value);
32777                 break;
32778         }
32780 +       return ret;
32783  static int lenovo_register_leds(struct hid_device *hdev)
32784 @@ -822,7 +841,8 @@ static int lenovo_register_leds(struct hid_device *hdev)
32786         data->led_mute.name = name_mute;
32787         data->led_mute.brightness_get = lenovo_led_brightness_get;
32788 -       data->led_mute.brightness_set = lenovo_led_brightness_set;
32789 +       data->led_mute.brightness_set_blocking = lenovo_led_brightness_set;
32790 +       data->led_mute.flags = LED_HW_PLUGGABLE;
32791         data->led_mute.dev = &hdev->dev;
32792         ret = led_classdev_register(&hdev->dev, &data->led_mute);
32793         if (ret < 0)
32794 @@ -830,7 +850,8 @@ static int lenovo_register_leds(struct hid_device *hdev)
32796         data->led_micmute.name = name_micm;
32797         data->led_micmute.brightness_get = lenovo_led_brightness_get;
32798 -       data->led_micmute.brightness_set = lenovo_led_brightness_set;
32799 +       data->led_micmute.brightness_set_blocking = lenovo_led_brightness_set;
32800 +       data->led_micmute.flags = LED_HW_PLUGGABLE;
32801         data->led_micmute.dev = &hdev->dev;
32802         ret = led_classdev_register(&hdev->dev, &data->led_micmute);
32803         if (ret < 0) {
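The hid-lenovo series makes LED updates report failure end to end: lenovo_led_set_tp10ubkbd() now treats anything other than a full 3-byte SET_REPORT transfer as an error, attr_fn_lock_store() passes that error back to the sysfs writer, and the classdevs register brightness_set_blocking (which, unlike brightness_set, may sleep and returns an int) together with LED_HW_PLUGGABLE so -ENODEV from an unplugged keyboard is not logged as a failure. A sketch of the callback shape; example_send_report() is a hypothetical stand-in for the raw HID request above:

        static int example_send_report(struct led_classdev *cdev, u8 on); /* hypothetical */

        /* Blocking setter: may sleep, and a negative errno reaches the caller. */
        static int example_led_set(struct led_classdev *cdev,
                                   enum led_brightness value)
        {
                return example_send_report(cdev, value ? 1 : 0);
        }

        /* At registration time: */
        cdev->brightness_set_blocking = example_led_set;
        cdev->flags = LED_HW_PLUGGABLE;         /* device can vanish at runtime */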
32804 diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
32805 index 85b685efc12f..e81b7cec2d12 100644
32806 --- a/drivers/hid/hid-plantronics.c
32807 +++ b/drivers/hid/hid-plantronics.c
32808 @@ -13,6 +13,7 @@
32810  #include <linux/hid.h>
32811  #include <linux/module.h>
32812 +#include <linux/jiffies.h>
32814  #define PLT_HID_1_0_PAGE       0xffa00000
32815  #define PLT_HID_2_0_PAGE       0xffa20000
32816 @@ -36,6 +37,16 @@
32817  #define PLT_ALLOW_CONSUMER (field->application == HID_CP_CONSUMERCONTROL && \
32818                             (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER)
32820 +#define PLT_QUIRK_DOUBLE_VOLUME_KEYS BIT(0)
32822 +#define PLT_DOUBLE_KEY_TIMEOUT 5 /* ms */
32824 +struct plt_drv_data {
32825 +       unsigned long device_type;
32826 +       unsigned long last_volume_key_ts;
32827 +       u32 quirks;
32828 +};
32830  static int plantronics_input_mapping(struct hid_device *hdev,
32831                                      struct hid_input *hi,
32832                                      struct hid_field *field,
32833 @@ -43,7 +54,8 @@ static int plantronics_input_mapping(struct hid_device *hdev,
32834                                      unsigned long **bit, int *max)
32836         unsigned short mapped_key;
32837 -       unsigned long plt_type = (unsigned long)hid_get_drvdata(hdev);
32838 +       struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
32839 +       unsigned long plt_type = drv_data->device_type;
32841         /* special case for PTT products */
32842         if (field->application == HID_GD_JOYSTICK)
32843 @@ -105,6 +117,30 @@ static int plantronics_input_mapping(struct hid_device *hdev,
32844         return 1;
32847 +static int plantronics_event(struct hid_device *hdev, struct hid_field *field,
32848 +                            struct hid_usage *usage, __s32 value)
32849 +{
32850 +       struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
32852 +       if (drv_data->quirks & PLT_QUIRK_DOUBLE_VOLUME_KEYS) {
32853 +               unsigned long prev_ts, cur_ts;
32855 +               /* Usages are filtered in plantronics_usages. */
32857 +               if (!value) /* Handle key presses only. */
32858 +                       return 0;
32860 +               prev_ts = drv_data->last_volume_key_ts;
32861 +               cur_ts = jiffies;
32862 +               if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_DOUBLE_KEY_TIMEOUT)
32863 +                       return 1; /* Ignore the repeated key. */
32865 +               drv_data->last_volume_key_ts = cur_ts;
32866 +       }
32868 +       return 0;
32869 +}
32871  static unsigned long plantronics_device_type(struct hid_device *hdev)
32873         unsigned i, col_page;
32874 @@ -133,15 +169,24 @@ static unsigned long plantronics_device_type(struct hid_device *hdev)
32875  static int plantronics_probe(struct hid_device *hdev,
32876                              const struct hid_device_id *id)
32877  {
32878 +       struct plt_drv_data *drv_data;
32879         int ret;
32881 +       drv_data = devm_kzalloc(&hdev->dev, sizeof(*drv_data), GFP_KERNEL);
32882 +       if (!drv_data)
32883 +               return -ENOMEM;
32885         ret = hid_parse(hdev);
32886         if (ret) {
32887                 hid_err(hdev, "parse failed\n");
32888                 goto err;
32889         }
32891 -       hid_set_drvdata(hdev, (void *)plantronics_device_type(hdev));
32892 +       drv_data->device_type = plantronics_device_type(hdev);
32893 +       drv_data->quirks = id->driver_data;
32894 +       drv_data->last_volume_key_ts = jiffies - msecs_to_jiffies(PLT_DOUBLE_KEY_TIMEOUT);
32896 +       hid_set_drvdata(hdev, drv_data);
32898         ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT |
32899                 HID_CONNECT_HIDINPUT_FORCE | HID_CONNECT_HIDDEV_FORCE);
32900 @@ -153,15 +198,26 @@ static int plantronics_probe(struct hid_device *hdev,
32903  static const struct hid_device_id plantronics_devices[] = {
32904 +       { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
32905 +                                        USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES),
32906 +               .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
32907         { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
32908         { }
32909  };
32910  MODULE_DEVICE_TABLE(hid, plantronics_devices);
32912 +static const struct hid_usage_id plantronics_usages[] = {
32913 +       { HID_CP_VOLUMEUP, EV_KEY, HID_ANY_ID },
32914 +       { HID_CP_VOLUMEDOWN, EV_KEY, HID_ANY_ID },
32915 +       { HID_TERMINATOR, HID_TERMINATOR, HID_TERMINATOR }
32916 +};
32918  static struct hid_driver plantronics_driver = {
32919         .name = "plantronics",
32920         .id_table = plantronics_devices,
32921 +       .usage_table = plantronics_usages,
32922         .input_mapping = plantronics_input_mapping,
32923 +       .event = plantronics_event,
32924         .probe = plantronics_probe,
32925  };
32926  module_hid_driver(plantronics_driver);
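The Plantronics quirk above is a time-based debounce: the Blackwire 3220 sends each volume press twice, so plantronics_event() records a jiffies timestamp per press and returns 1 (consume) for a second press landing within PLT_DOUBLE_KEY_TIMEOUT (5 ms). The usage_table limits the event hook to the two volume usages, and probe backdates the timestamp so the very first press is never swallowed. The debounce in isolation:

        #include <linux/jiffies.h>

        /* True when this press repeats the previous one within timeout_ms. */
        static bool is_duplicate_press(unsigned long *last_ts, unsigned int timeout_ms)
        {
                unsigned long now = jiffies;

                if (jiffies_to_msecs(now - *last_ts) <= timeout_ms)
                        return true;            /* second edge of a doubled report */

                *last_ts = now;                 /* genuine press: remember it */
                return false;
        }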
32927 diff --git a/drivers/hsi/hsi_core.c b/drivers/hsi/hsi_core.c
32928 index c3fb5beb846e..ec90713564e3 100644
32929 --- a/drivers/hsi/hsi_core.c
32930 +++ b/drivers/hsi/hsi_core.c
32931 @@ -210,8 +210,6 @@ static void hsi_add_client_from_dt(struct hsi_port *port,
32932         if (err)
32933                 goto err;
32935 -       dev_set_name(&cl->device, "%s", name);
32937         err = hsi_of_property_parse_mode(client, "hsi-mode", &mode);
32938         if (err) {
32939                 err = hsi_of_property_parse_mode(client, "hsi-rx-mode",
32940 @@ -293,6 +291,7 @@ static void hsi_add_client_from_dt(struct hsi_port *port,
32941         cl->device.release = hsi_client_release;
32942         cl->device.of_node = client;
32944 +       dev_set_name(&cl->device, "%s", name);
32945         if (device_register(&cl->device) < 0) {
32946                 pr_err("hsi: failed to register client: %s\n", name);
32947                 put_device(&cl->device);
32948 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
32949 index 0bd202de7960..945e41f5e3a8 100644
32950 --- a/drivers/hv/channel.c
32951 +++ b/drivers/hv/channel.c
32952 @@ -653,7 +653,7 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
32954         if (newchannel->rescind) {
32955                 err = -ENODEV;
32956 -               goto error_free_info;
32957 +               goto error_clean_msglist;
32958         }
32960         err = vmbus_post_msg(open_msg,
32961 diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
32962 index f0ed730e2e4e..ecebf1235fd5 100644
32963 --- a/drivers/hv/channel_mgmt.c
32964 +++ b/drivers/hv/channel_mgmt.c
32965 @@ -756,6 +756,12 @@ static void init_vp_index(struct vmbus_channel *channel)
32966         free_cpumask_var(available_mask);
32969 +#define UNLOAD_DELAY_UNIT_MS   10              /* 10 milliseconds */
32970 +#define UNLOAD_WAIT_MS         (100*1000)      /* 100 seconds */
32971 +#define UNLOAD_WAIT_LOOPS      (UNLOAD_WAIT_MS/UNLOAD_DELAY_UNIT_MS)
32972 +#define UNLOAD_MSG_MS          (5*1000)        /* Every 5 seconds */
32973 +#define UNLOAD_MSG_LOOPS       (UNLOAD_MSG_MS/UNLOAD_DELAY_UNIT_MS)
32975  static void vmbus_wait_for_unload(void)
32977         int cpu;
32978 @@ -773,12 +779,17 @@ static void vmbus_wait_for_unload(void)
32979          * vmbus_connection.unload_event. If not, the last thing we can do is
32980          * read message pages for all CPUs directly.
32981          *
32982 -        * Wait no more than 10 seconds so that the panic path can't get
32983 -        * hung forever in case the response message isn't seen.
32984 +        * Wait up to 100 seconds since an Azure host must write back any dirty
32985 +        * data in its disk cache before the VMbus UNLOAD request will
32986 +        * complete. This flushing has been empirically observed to take up
32987 +        * to 50 seconds in cases with a lot of dirty data, so allow additional
32988 +        * leeway, including for inaccuracies in mdelay(). But eventually time out so
32989 +        * that the panic path can't get hung forever in case the response
32990 +        * message isn't seen.
32991          */
32992 -       for (i = 0; i < 1000; i++) {
32993 +       for (i = 1; i <= UNLOAD_WAIT_LOOPS; i++) {
32994                 if (completion_done(&vmbus_connection.unload_event))
32995 -                       break;
32996 +                       goto completed;
32998                 for_each_online_cpu(cpu) {
32999                         struct hv_per_cpu_context *hv_cpu
33000 @@ -801,9 +812,18 @@ static void vmbus_wait_for_unload(void)
33001                         vmbus_signal_eom(msg, message_type);
33002                 }
33004 -               mdelay(10);
33005 +               /*
33006 +                * Give a notice periodically so someone watching the
33007 +                * serial output won't think it is completely hung.
33008 +                */
33009 +               if (!(i % UNLOAD_MSG_LOOPS))
33010 +                       pr_notice("Waiting for VMBus UNLOAD to complete\n");
33012 +               mdelay(UNLOAD_DELAY_UNIT_MS);
33013         }
33014 +       pr_err("Continuing even though VMBus UNLOAD did not complete\n");
33016 +completed:
33017         /*
33018          * We're crashing and already got the UNLOAD_RESPONSE, cleanup all
33019          * maybe-pending messages on all CPUs to be able to receive new
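The vmbus_wait_for_unload() change keeps the crash-path wait bounded while stretching it to match observed Azure behavior: 100,000 ms / 10 ms gives 10,000 polling iterations, with a pr_notice() every 5,000 ms / 10 ms = 500 iterations so someone on the serial console can see the machine is still alive. The same shape as a generic sketch (needs <linux/printk.h> and <linux/delay.h>; mdelay busy-waits because the crash path cannot sleep):

        /* Bounded busy-wait with periodic progress messages (sketch). */
        static bool poll_until_done(bool (*done)(void))
        {
                const unsigned int unit_ms = 10;
                const unsigned int total_loops = (100 * 1000) / unit_ms;  /* 10000 */
                const unsigned int msg_loops = (5 * 1000) / unit_ms;      /* 500 */
                unsigned int i;

                for (i = 1; i <= total_loops; i++) {
                        if (done())
                                return true;
                        if (!(i % msg_loops))
                                pr_notice("still waiting\n");
                        mdelay(unit_ms);
                }
                return false;   /* timed out; the caller logs and moves on */
        }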
33020 diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
33021 index 35833d4d1a1d..ecd82ebfd5bc 100644
33022 --- a/drivers/hv/ring_buffer.c
33023 +++ b/drivers/hv/ring_buffer.c
33024 @@ -313,7 +313,6 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
33025                 rqst_id = vmbus_next_request_id(&channel->requestor, requestid);
33026                 if (rqst_id == VMBUS_RQST_ERROR) {
33027                         spin_unlock_irqrestore(&outring_info->ring_lock, flags);
33028 -                       pr_err("No request id available\n");
33029                         return -EAGAIN;
33030                 }
33031         }
33032 diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
33033 index 29f5fed28c2a..974cb08c7aa7 100644
33034 --- a/drivers/hwmon/fam15h_power.c
33035 +++ b/drivers/hwmon/fam15h_power.c
33036 @@ -221,7 +221,7 @@ static ssize_t power1_average_show(struct device *dev,
33037                 prev_ptsc[cu] = data->cpu_sw_pwr_ptsc[cu];
33038         }
33040 -       leftover = schedule_timeout_interruptible(msecs_to_jiffies(data->power_period));
33041 +       leftover = schedule_msec_hrtimeout_interruptible((data->power_period));
33042         if (leftover)
33043                 return 0;
33045 diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
33046 index ac4adb44b224..97ab491d2922 100644
33047 --- a/drivers/hwmon/lm80.c
33048 +++ b/drivers/hwmon/lm80.c
33049 @@ -596,7 +596,6 @@ static int lm80_probe(struct i2c_client *client)
33050         struct device *dev = &client->dev;
33051         struct device *hwmon_dev;
33052         struct lm80_data *data;
33053 -       int rv;
33055         data = devm_kzalloc(dev, sizeof(struct lm80_data), GFP_KERNEL);
33056         if (!data)
33057 @@ -609,14 +608,8 @@ static int lm80_probe(struct i2c_client *client)
33058         lm80_init_client(client);
33060         /* A few vars need to be filled upon startup */
33061 -       rv = lm80_read_value(client, LM80_REG_FAN_MIN(1));
33062 -       if (rv < 0)
33063 -               return rv;
33064 -       data->fan[f_min][0] = rv;
33065 -       rv = lm80_read_value(client, LM80_REG_FAN_MIN(2));
33066 -       if (rv < 0)
33067 -               return rv;
33068 -       data->fan[f_min][1] = rv;
33069 +       data->fan[f_min][0] = lm80_read_value(client, LM80_REG_FAN_MIN(1));
33070 +       data->fan[f_min][1] = lm80_read_value(client, LM80_REG_FAN_MIN(2));
33072         hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
33073                                                            data, lm80_groups);
33074 diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
33075 index 4382105bf142..2a4bed0ab226 100644
33076 --- a/drivers/hwmon/ltc2992.c
33077 +++ b/drivers/hwmon/ltc2992.c
33078 @@ -900,11 +900,15 @@ static int ltc2992_parse_dt(struct ltc2992_state *st)
33080         fwnode_for_each_available_child_node(fwnode, child) {
33081                 ret = fwnode_property_read_u32(child, "reg", &addr);
33082 -               if (ret < 0)
33083 +               if (ret < 0) {
33084 +                       fwnode_handle_put(child);
33085                         return ret;
33086 +               }
33088 -               if (addr > 1)
33089 +               if (addr > 1) {
33090 +                       fwnode_handle_put(child);
33091                         return -EINVAL;
33092 +               }
33094                 ret = fwnode_property_read_u32(child, "shunt-resistor-micro-ohms", &val);
33095                 if (!ret)
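The ltc2992 fix is pure reference counting: fwnode_for_each_available_child_node() holds a reference on child for each pass and releases it only when the iterator advances, so returning from inside the loop must drop it with fwnode_handle_put() or the node's refcount leaks. The safe early-exit shape:

        fwnode_for_each_available_child_node(fwnode, child) {
                ret = fwnode_property_read_u32(child, "reg", &addr);
                if (ret < 0) {
                        fwnode_handle_put(child);   /* drop the iterator's ref */
                        return ret;
                }
                /* normal path: the iterator releases child when advancing */
        }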
33096 diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
33097 index 7a5e539b567b..580e63d7daa0 100644
33098 --- a/drivers/hwmon/occ/common.c
33099 +++ b/drivers/hwmon/occ/common.c
33100 @@ -217,9 +217,9 @@ int occ_update_response(struct occ *occ)
33101                 return rc;
33103         /* limit the maximum rate of polling the OCC */
33104 -       if (time_after(jiffies, occ->last_update + OCC_UPDATE_FREQUENCY)) {
33105 +       if (time_after(jiffies, occ->next_update)) {
33106                 rc = occ_poll(occ);
33107 -               occ->last_update = jiffies;
33108 +               occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
33109         } else {
33110                 rc = occ->last_error;
33111         }
33112 @@ -1164,6 +1164,7 @@ int occ_setup(struct occ *occ, const char *name)
33113                 return rc;
33114         }
33116 +       occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
33117         occ_parse_poll_response(occ);
33119         rc = occ_setup_sensor_attrs(occ);
33120 diff --git a/drivers/hwmon/occ/common.h b/drivers/hwmon/occ/common.h
33121 index 67e6968b8978..e6df719770e8 100644
33122 --- a/drivers/hwmon/occ/common.h
33123 +++ b/drivers/hwmon/occ/common.h
33124 @@ -99,7 +99,7 @@ struct occ {
33125         u8 poll_cmd_data;               /* to perform OCC poll command */
33126         int (*send_cmd)(struct occ *occ, u8 *cmd);
33128 -       unsigned long last_update;
33129 +       unsigned long next_update;
33130         struct mutex lock;              /* lock OCC access */
33132         struct device *hwmon;
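The OCC change flips the rate limiter from "time since the last poll" to an absolute next_update deadline that occ_setup() now seeds. This avoids comparing against a zero timestamp: jiffies starts near its wrap point, so time_after() against an uninitialized last_update can misfire for a long stretch after boot. The pattern generalizes:

        #include <linux/jiffies.h>

        /* True (and the deadline advances) at most once per interval. */
        static bool rate_limit_ok(unsigned long *next_update, unsigned long interval)
        {
                if (!time_after(jiffies, *next_update))
                        return false;
                *next_update = jiffies + interval;
                return true;
        }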
33133 diff --git a/drivers/hwmon/pmbus/pxe1610.c b/drivers/hwmon/pmbus/pxe1610.c
33134 index da27ce34ee3f..eb4a06003b7f 100644
33135 --- a/drivers/hwmon/pmbus/pxe1610.c
33136 +++ b/drivers/hwmon/pmbus/pxe1610.c
33137 @@ -41,6 +41,15 @@ static int pxe1610_identify(struct i2c_client *client,
33138                                 info->vrm_version[i] = vr13;
33139                                 break;
33140                         default:
33141 +                               /*
33142 +                                * If prior pages are available, limit operation
33143 +                                * to them.
33144 +                                */
33145 +                               if (i != 0) {
33146 +                                       info->pages = i;
33147 +                                       return 0;
33148 +                               }
33150                                 return -ENODEV;
33151                         }
33152                 }
33153 diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
33154 index 0f603b4094f2..a706ba11b93e 100644
33155 --- a/drivers/hwtracing/coresight/coresight-etm-perf.c
33156 +++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
33157 @@ -52,7 +52,7 @@ static ssize_t format_attr_contextid_show(struct device *dev,
33159         int pid_fmt = ETM_OPT_CTXTID;
33161 -#if defined(CONFIG_CORESIGHT_SOURCE_ETM4X)
33162 +#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X)
33163         pid_fmt = is_kernel_in_hyp_mode() ? ETM_OPT_CTXTID2 : ETM_OPT_CTXTID;
33164  #endif
33165         return sprintf(page, "config:%d\n", pid_fmt);
33166 diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
33167 index 3629b7885aca..c594f45319fc 100644
33168 --- a/drivers/hwtracing/coresight/coresight-platform.c
33169 +++ b/drivers/hwtracing/coresight/coresight-platform.c
33170 @@ -90,6 +90,12 @@ static void of_coresight_get_ports_legacy(const struct device_node *node,
33171         struct of_endpoint endpoint;
33172         int in = 0, out = 0;
33174 +       /*
33175 +        * Avoid warnings in of_graph_get_next_endpoint()
33176 +        * if the device doesn't have any graph connections
33177 +        */
33178 +       if (!of_graph_is_present(node))
33179 +               return;
33180         do {
33181                 ep = of_graph_get_next_endpoint(node, ep);
33182                 if (!ep)
33183 diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
33184 index f72803a02391..28509b02a0b5 100644
33185 --- a/drivers/hwtracing/intel_th/gth.c
33186 +++ b/drivers/hwtracing/intel_th/gth.c
33187 @@ -543,7 +543,7 @@ static void intel_th_gth_disable(struct intel_th_device *thdev,
33188         output->active = false;
33190         for_each_set_bit(master, gth->output[output->port].master,
33191 -                        TH_CONFIGURABLE_MASTERS) {
33192 +                        TH_CONFIGURABLE_MASTERS + 1) {
33193                 gth_master_set(gth, master, -1);
33194         }
33195         spin_unlock(&gth->gth_lock);
33196 @@ -697,7 +697,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
33197         othdev->output.port = -1;
33198         othdev->output.active = false;
33199         gth->output[port].output = NULL;
33200 -       for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
33201 +       for (master = 0; master < TH_CONFIGURABLE_MASTERS + 1; master++)
33202                 if (gth->master[master] == port)
33203                         gth->master[master] = -1;
33204         spin_unlock(&gth->gth_lock);
33205 diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
33206 index 251e75c9ba9d..817cdb29bbd8 100644
33207 --- a/drivers/hwtracing/intel_th/pci.c
33208 +++ b/drivers/hwtracing/intel_th/pci.c
33209 @@ -273,11 +273,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
33210                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x51a6),
33211                 .driver_data = (kernel_ulong_t)&intel_th_2x,
33212         },
33213 +       {
33214 +               /* Alder Lake-M */
33215 +               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x54a6),
33216 +               .driver_data = (kernel_ulong_t)&intel_th_2x,
33217 +       },
33218         {
33219                 /* Alder Lake CPU */
33220                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
33221                 .driver_data = (kernel_ulong_t)&intel_th_2x,
33222         },
33223 +       {
33224 +               /* Rocket Lake CPU */
33225 +               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c19),
33226 +               .driver_data = (kernel_ulong_t)&intel_th_2x,
33227 +       },
33228         { 0 },
33229  };
33231 diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
33232 index e4b7f2a951ad..c1bbc4caeb5c 100644
33233 --- a/drivers/i2c/busses/i2c-cadence.c
33234 +++ b/drivers/i2c/busses/i2c-cadence.c
33235 @@ -789,7 +789,7 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
33236         bool change_role = false;
33237  #endif
33239 -       ret = pm_runtime_get_sync(id->dev);
33240 +       ret = pm_runtime_resume_and_get(id->dev);
33241         if (ret < 0)
33242                 return ret;
33244 @@ -911,7 +911,7 @@ static int cdns_reg_slave(struct i2c_client *slave)
33245         if (slave->flags & I2C_CLIENT_TEN)
33246                 return -EAFNOSUPPORT;
33248 -       ret = pm_runtime_get_sync(id->dev);
33249 +       ret = pm_runtime_resume_and_get(id->dev);
33250         if (ret < 0)
33251                 return ret;
33253 @@ -1200,7 +1200,10 @@ static int cdns_i2c_probe(struct platform_device *pdev)
33254         if (IS_ERR(id->membase))
33255                 return PTR_ERR(id->membase);
33257 -       id->irq = platform_get_irq(pdev, 0);
33258 +       ret = platform_get_irq(pdev, 0);
33259 +       if (ret < 0)
33260 +               return ret;
33261 +       id->irq = ret;
33263         id->adap.owner = THIS_MODULE;
33264         id->adap.dev.of_node = pdev->dev.of_node;
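The i2c drivers in this run (cadence above; img-scb, imx-lpi2c, imx and omap below) all move from pm_runtime_get_sync() to pm_runtime_resume_and_get(). The distinction is the failure path: get_sync() raises the usage counter even when the resume fails, so a bare return leaks a reference, while resume_and_get() drops the counter itself on error. Equivalent shapes:

        /* After: the failure path needs no cleanup. */
        ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
                return ret;

        /* Before, written out correctly: the counter must be dropped by hand. */
        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                pm_runtime_put_noidle(dev);
                return ret;
        }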
33265 diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
33266 index a08554c1a570..bdff0e6345d9 100644
33267 --- a/drivers/i2c/busses/i2c-emev2.c
33268 +++ b/drivers/i2c/busses/i2c-emev2.c
33269 @@ -395,7 +395,10 @@ static int em_i2c_probe(struct platform_device *pdev)
33271         em_i2c_reset(&priv->adap);
33273 -       priv->irq = platform_get_irq(pdev, 0);
33274 +       ret = platform_get_irq(pdev, 0);
33275 +       if (ret < 0)
33276 +               goto err_clk;
33277 +       priv->irq = ret;
33278         ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0,
33279                                 "em_i2c", priv);
33280         if (ret)
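The matching probe() changes (cadence and em-i2c above; jz4780, mlxbf, rcar and sh7760 below) all follow the same rule: platform_get_irq() returns a negative errno on failure, so the raw return value must be checked before it is stored in the device's irq field. Keeping the temporary makes that explicit; the handler name below is hypothetical:

        ret = platform_get_irq(pdev, 0);
        if (ret < 0)
                return ret;             /* or goto the driver's unwind label */
        priv->irq = ret;

        ret = devm_request_irq(&pdev->dev, priv->irq, example_irq_handler, 0,
                               dev_name(&pdev->dev), priv);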
33281 diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
33282 index 4acee6f9e5a3..99d446763530 100644
33283 --- a/drivers/i2c/busses/i2c-i801.c
33284 +++ b/drivers/i2c/busses/i2c-i801.c
33285 @@ -73,6 +73,7 @@
33286   * Comet Lake-V (PCH)          0xa3a3  32      hard    yes     yes     yes
33287   * Alder Lake-S (PCH)          0x7aa3  32      hard    yes     yes     yes
33288   * Alder Lake-P (PCH)          0x51a3  32      hard    yes     yes     yes
33289 + * Alder Lake-M (PCH)          0x54a3  32      hard    yes     yes     yes
33290   *
33291   * Features supported by this driver:
33292   * Software PEC                                no
33293 @@ -230,6 +231,7 @@
33294  #define PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS         0x4b23
33295  #define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS          0x4da3
33296  #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS         0x51a3
33297 +#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS         0x54a3
33298  #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS              0x5ad4
33299  #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS         0x7aa3
33300  #define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS            0x8c22
33301 @@ -1087,6 +1089,7 @@ static const struct pci_device_id i801_ids[] = {
33302         { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS) },
33303         { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS) },
33304         { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS) },
33305 +       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS) },
33306         { 0, }
33307  };
33309 @@ -1771,6 +1774,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
33310         case PCI_DEVICE_ID_INTEL_EBG_SMBUS:
33311         case PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS:
33312         case PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS:
33313 +       case PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS:
33314                 priv->features |= FEATURE_BLOCK_PROC;
33315                 priv->features |= FEATURE_I2C_BLOCK_READ;
33316                 priv->features |= FEATURE_IRQ;
33317 diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
33318 index 98a89301ed2a..8e987945ed45 100644
33319 --- a/drivers/i2c/busses/i2c-img-scb.c
33320 +++ b/drivers/i2c/busses/i2c-img-scb.c
33321 @@ -1057,7 +1057,7 @@ static int img_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
33322                         atomic = true;
33323         }
33325 -       ret = pm_runtime_get_sync(adap->dev.parent);
33326 +       ret = pm_runtime_resume_and_get(adap->dev.parent);
33327         if (ret < 0)
33328                 return ret;
33330 @@ -1158,7 +1158,7 @@ static int img_i2c_init(struct img_i2c *i2c)
33331         u32 rev;
33332         int ret;
33334 -       ret = pm_runtime_get_sync(i2c->adap.dev.parent);
33335 +       ret = pm_runtime_resume_and_get(i2c->adap.dev.parent);
33336         if (ret < 0)
33337                 return ret;
33339 diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
33340 index 9db6ccded5e9..8b9ba055c418 100644
33341 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c
33342 +++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
33343 @@ -259,7 +259,7 @@ static int lpi2c_imx_master_enable(struct lpi2c_imx_struct *lpi2c_imx)
33344         unsigned int temp;
33345         int ret;
33347 -       ret = pm_runtime_get_sync(lpi2c_imx->adapter.dev.parent);
33348 +       ret = pm_runtime_resume_and_get(lpi2c_imx->adapter.dev.parent);
33349         if (ret < 0)
33350                 return ret;
33352 diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
33353 index b80fdc1f0092..dc5ca71906db 100644
33354 --- a/drivers/i2c/busses/i2c-imx.c
33355 +++ b/drivers/i2c/busses/i2c-imx.c
33356 @@ -801,7 +801,7 @@ static int i2c_imx_reg_slave(struct i2c_client *client)
33357         i2c_imx->last_slave_event = I2C_SLAVE_STOP;
33359         /* Resume */
33360 -       ret = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
33361 +       ret = pm_runtime_resume_and_get(i2c_imx->adapter.dev.parent);
33362         if (ret < 0) {
33363                 dev_err(&i2c_imx->adapter.dev, "failed to resume i2c controller");
33364                 return ret;
33365 @@ -1253,7 +1253,7 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
33366         struct imx_i2c_struct *i2c_imx = i2c_get_adapdata(adapter);
33367         int result;
33369 -       result = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
33370 +       result = pm_runtime_resume_and_get(i2c_imx->adapter.dev.parent);
33371         if (result < 0)
33372                 return result;
33374 @@ -1496,7 +1496,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
33375         struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
33376         int irq, ret;
33378 -       ret = pm_runtime_get_sync(&pdev->dev);
33379 +       ret = pm_runtime_resume_and_get(&pdev->dev);
33380         if (ret < 0)
33381                 return ret;
33383 diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
33384 index 55177eb21d7b..baa7319eee53 100644
33385 --- a/drivers/i2c/busses/i2c-jz4780.c
33386 +++ b/drivers/i2c/busses/i2c-jz4780.c
33387 @@ -825,7 +825,10 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
33389         jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0);
33391 -       i2c->irq = platform_get_irq(pdev, 0);
33392 +       ret = platform_get_irq(pdev, 0);
33393 +       if (ret < 0)
33394 +               goto err;
33395 +       i2c->irq = ret;
33396         ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
33397                                dev_name(&pdev->dev), i2c);
33398         if (ret)
33399 diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
33400 index 2fb0532d8a16..ab261d762dea 100644
33401 --- a/drivers/i2c/busses/i2c-mlxbf.c
33402 +++ b/drivers/i2c/busses/i2c-mlxbf.c
33403 @@ -2376,6 +2376,8 @@ static int mlxbf_i2c_probe(struct platform_device *pdev)
33404         mlxbf_i2c_init_slave(pdev, priv);
33406         irq = platform_get_irq(pdev, 0);
33407 +       if (irq < 0)
33408 +               return irq;
33409         ret = devm_request_irq(dev, irq, mlxbf_smbus_irq,
33410                                IRQF_ONESHOT | IRQF_SHARED | IRQF_PROBE_SHARED,
33411                                dev_name(dev), priv);
33412 diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
33413 index 2ffd2f354d0a..bf25acba2ed5 100644
33414 --- a/drivers/i2c/busses/i2c-mt65xx.c
33415 +++ b/drivers/i2c/busses/i2c-mt65xx.c
33416 @@ -479,7 +479,7 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
33418         u16 control_reg;
33420 -       if (i2c->dev_comp->dma_sync) {
33421 +       if (i2c->dev_comp->apdma_sync) {
33422                 writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST);
33423                 udelay(10);
33424                 writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
33425 @@ -564,7 +564,7 @@ static const struct i2c_spec_values *mtk_i2c_get_spec(unsigned int speed)
33427  static int mtk_i2c_max_step_cnt(unsigned int target_speed)
33429 -       if (target_speed > I2C_MAX_FAST_MODE_FREQ)
33430 +       if (target_speed > I2C_MAX_FAST_MODE_PLUS_FREQ)
33431                 return MAX_HS_STEP_CNT_DIV;
33432         else
33433                 return MAX_STEP_CNT_DIV;
33434 @@ -635,7 +635,7 @@ static int mtk_i2c_check_ac_timing(struct mtk_i2c *i2c,
33435         if (sda_min > sda_max)
33436                 return -3;
33438 -       if (check_speed > I2C_MAX_FAST_MODE_FREQ) {
33439 +       if (check_speed > I2C_MAX_FAST_MODE_PLUS_FREQ) {
33440                 if (i2c->dev_comp->ltiming_adjust) {
33441                         i2c->ac_timing.hs = I2C_TIME_DEFAULT_VALUE |
33442                                 (sample_cnt << 12) | (high_cnt << 8);
33443 @@ -850,7 +850,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
33445         control_reg = mtk_i2c_readw(i2c, OFFSET_CONTROL) &
33446                         ~(I2C_CONTROL_DIR_CHANGE | I2C_CONTROL_RS);
33447 -       if ((i2c->speed_hz > I2C_MAX_FAST_MODE_FREQ) || (left_num >= 1))
33448 +       if ((i2c->speed_hz > I2C_MAX_FAST_MODE_PLUS_FREQ) || (left_num >= 1))
33449                 control_reg |= I2C_CONTROL_RS;
33451         if (i2c->op == I2C_MASTER_WRRD)
33452 @@ -1067,7 +1067,8 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
33453                 }
33454         }
33456 -       if (i2c->auto_restart && num >= 2 && i2c->speed_hz > I2C_MAX_FAST_MODE_FREQ)
33457 +       if (i2c->auto_restart && num >= 2 &&
33458 +               i2c->speed_hz > I2C_MAX_FAST_MODE_PLUS_FREQ)
33459                 /* ignore the first restart irq after the master code,
33460                  * otherwise the first transfer will be discarded.
33461                  */
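The mt65xx hunks raise the threshold for high-speed handling from I2C_MAX_FAST_MODE_FREQ (400 kHz) to I2C_MAX_FAST_MODE_PLUS_FREQ (1 MHz), both standard constants from <linux/i2c.h>. A one-line check of the intent, as a sketch:

#include <linux/i2c.h>

/* Fast-mode+ (1 MHz) is still an ordinary transfer; only rates above
 * it (e.g. 3.4 MHz high-speed mode) need the master-code / repeated-
 * start handling.  With the old 400 kHz limit, a 1 MHz bus was wrongly
 * driven through the high-speed path.
 */
static bool demo_needs_hs_mode(unsigned int bus_hz)
{
	return bus_hz > I2C_MAX_FAST_MODE_PLUS_FREQ;	/* > 1000000 */
}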
33462 diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
33463 index 12ac4212aded..d4f6c6d60683 100644
33464 --- a/drivers/i2c/busses/i2c-omap.c
33465 +++ b/drivers/i2c/busses/i2c-omap.c
33466 @@ -1404,9 +1404,9 @@ omap_i2c_probe(struct platform_device *pdev)
33467         pm_runtime_set_autosuspend_delay(omap->dev, OMAP_I2C_PM_TIMEOUT);
33468         pm_runtime_use_autosuspend(omap->dev);
33470 -       r = pm_runtime_get_sync(omap->dev);
33471 +       r = pm_runtime_resume_and_get(omap->dev);
33472         if (r < 0)
33473 -               goto err_free_mem;
33474 +               goto err_disable_pm;
33476         /*
33477          * Read the Rev hi bit-[15:14] ie scheme this is 1 indicates ver2.
33478 @@ -1513,8 +1513,8 @@ omap_i2c_probe(struct platform_device *pdev)
33479         omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
33480         pm_runtime_dont_use_autosuspend(omap->dev);
33481         pm_runtime_put_sync(omap->dev);
33482 +err_disable_pm:
33483         pm_runtime_disable(&pdev->dev);
33484 -err_free_mem:
33486         return r;
33488 @@ -1525,7 +1525,7 @@ static int omap_i2c_remove(struct platform_device *pdev)
33489         int ret;
33491         i2c_del_adapter(&omap->adapter);
33492 -       ret = pm_runtime_get_sync(&pdev->dev);
33493 +       ret = pm_runtime_resume_and_get(&pdev->dev);
33494         if (ret < 0)
33495                 return ret;
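The omap probe hunk re-orders the error labels so that pm_runtime_disable() still runs when pm_runtime_resume_and_get() fails. A sketch of the unwind discipline, with hypothetical names:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int demo_probe(struct platform_device *pdev)
{
	int r;

	pm_runtime_enable(&pdev->dev);

	r = pm_runtime_resume_and_get(&pdev->dev);
	if (r < 0)
		goto err_disable_pm;	/* must undo pm_runtime_enable() */

	/* ... rest of probe ... */
	return 0;

err_disable_pm:
	/* Labels unwind in strict reverse order of what succeeded. */
	pm_runtime_disable(&pdev->dev);
	return r;
}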
33497 diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
33498 index 12f6d452c0f7..8722ca23f889 100644
33499 --- a/drivers/i2c/busses/i2c-rcar.c
33500 +++ b/drivers/i2c/busses/i2c-rcar.c
33501 @@ -1027,7 +1027,10 @@ static int rcar_i2c_probe(struct platform_device *pdev)
33502         if (of_property_read_bool(dev->of_node, "smbus"))
33503                 priv->flags |= ID_P_HOST_NOTIFY;
33505 -       priv->irq = platform_get_irq(pdev, 0);
33506 +       ret = platform_get_irq(pdev, 0);
33507 +       if (ret < 0)
33508 +               goto out_pm_disable;
33509 +       priv->irq = ret;
33510         ret = devm_request_irq(dev, priv->irq, irqhandler, irqflags, dev_name(dev), priv);
33511         if (ret < 0) {
33512                 dev_err(dev, "cannot get irq %d\n", priv->irq);
33513 diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
33514 index c2005c789d2b..319d1fa617c8 100644
33515 --- a/drivers/i2c/busses/i2c-sh7760.c
33516 +++ b/drivers/i2c/busses/i2c-sh7760.c
33517 @@ -471,7 +471,10 @@ static int sh7760_i2c_probe(struct platform_device *pdev)
33518                 goto out2;
33519         }
33521 -       id->irq = platform_get_irq(pdev, 0);
33522 +       ret = platform_get_irq(pdev, 0);
33523 +       if (ret < 0)
33524 +               goto out3;
33525 +       id->irq = ret;
33527         id->adap.nr = pdev->id;
33528         id->adap.algo = &sh7760_i2c_algo;
33529 diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
33530 index 2917fecf6c80..8ead7e021008 100644
33531 --- a/drivers/i2c/busses/i2c-sprd.c
33532 +++ b/drivers/i2c/busses/i2c-sprd.c
33533 @@ -290,7 +290,7 @@ static int sprd_i2c_master_xfer(struct i2c_adapter *i2c_adap,
33534         struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
33535         int im, ret;
33537 -       ret = pm_runtime_get_sync(i2c_dev->dev);
33538 +       ret = pm_runtime_resume_and_get(i2c_dev->dev);
33539         if (ret < 0)
33540                 return ret;
33542 @@ -576,7 +576,7 @@ static int sprd_i2c_remove(struct platform_device *pdev)
33543         struct sprd_i2c *i2c_dev = platform_get_drvdata(pdev);
33544         int ret;
33546 -       ret = pm_runtime_get_sync(i2c_dev->dev);
33547 +       ret = pm_runtime_resume_and_get(i2c_dev->dev);
33548         if (ret < 0)
33549                 return ret;
33551 diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
33552 index c62c815b88eb..318abfa7926b 100644
33553 --- a/drivers/i2c/busses/i2c-stm32f7.c
33554 +++ b/drivers/i2c/busses/i2c-stm32f7.c
33555 @@ -1652,7 +1652,7 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
33556         i2c_dev->msg_id = 0;
33557         f7_msg->smbus = false;
33559 -       ret = pm_runtime_get_sync(i2c_dev->dev);
33560 +       ret = pm_runtime_resume_and_get(i2c_dev->dev);
33561         if (ret < 0)
33562                 return ret;
33564 @@ -1698,7 +1698,7 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
33565         f7_msg->read_write = read_write;
33566         f7_msg->smbus = true;
33568 -       ret = pm_runtime_get_sync(dev);
33569 +       ret = pm_runtime_resume_and_get(dev);
33570         if (ret < 0)
33571                 return ret;
33573 @@ -1799,7 +1799,7 @@ static int stm32f7_i2c_reg_slave(struct i2c_client *slave)
33574         if (ret)
33575                 return ret;
33577 -       ret = pm_runtime_get_sync(dev);
33578 +       ret = pm_runtime_resume_and_get(dev);
33579         if (ret < 0)
33580                 return ret;
33582 @@ -1880,7 +1880,7 @@ static int stm32f7_i2c_unreg_slave(struct i2c_client *slave)
33584         WARN_ON(!i2c_dev->slave[id]);
33586 -       ret = pm_runtime_get_sync(i2c_dev->dev);
33587 +       ret = pm_runtime_resume_and_get(i2c_dev->dev);
33588         if (ret < 0)
33589                 return ret;
33591 @@ -2273,7 +2273,7 @@ static int stm32f7_i2c_regs_backup(struct stm32f7_i2c_dev *i2c_dev)
33592         int ret;
33593         struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs;
33595 -       ret = pm_runtime_get_sync(i2c_dev->dev);
33596 +       ret = pm_runtime_resume_and_get(i2c_dev->dev);
33597         if (ret < 0)
33598                 return ret;
33600 @@ -2295,7 +2295,7 @@ static int stm32f7_i2c_regs_restore(struct stm32f7_i2c_dev *i2c_dev)
33601         int ret;
33602         struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs;
33604 -       ret = pm_runtime_get_sync(i2c_dev->dev);
33605 +       ret = pm_runtime_resume_and_get(i2c_dev->dev);
33606         if (ret < 0)
33607                 return ret;
33609 diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
33610 index 087b2951942e..2a8568b97c14 100644
33611 --- a/drivers/i2c/busses/i2c-xiic.c
33612 +++ b/drivers/i2c/busses/i2c-xiic.c
33613 @@ -706,7 +706,7 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
33614         dev_dbg(adap->dev.parent, "%s entry SR: 0x%x\n", __func__,
33615                 xiic_getreg8(i2c, XIIC_SR_REG_OFFSET));
33617 -       err = pm_runtime_get_sync(i2c->dev);
33618 +       err = pm_runtime_resume_and_get(i2c->dev);
33619         if (err < 0)
33620                 return err;
33622 @@ -873,7 +873,7 @@ static int xiic_i2c_remove(struct platform_device *pdev)
33623         /* remove adapter & data */
33624         i2c_del_adapter(&i2c->adap);
33626 -       ret = pm_runtime_get_sync(i2c->dev);
33627 +       ret = pm_runtime_resume_and_get(i2c->dev);
33628         if (ret < 0)
33629                 return ret;
33631 diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
33632 index 6ceb11cc4be1..6ef38a8ee95c 100644
33633 --- a/drivers/i2c/i2c-dev.c
33634 +++ b/drivers/i2c/i2c-dev.c
33635 @@ -440,8 +440,13 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
33636                                    sizeof(rdwr_arg)))
33637                         return -EFAULT;
33639 -               /* Put an arbitrary limit on the number of messages that can
33640 -                * be sent at once */
33641 +               if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0)
33642 +                       return -EINVAL;
33644 +               /*
33645 +                * Put an arbitrary limit on the number of messages that can
33646 +                * be sent at once
33647 +                */
33648                 if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
33649                         return -EINVAL;
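A sketch of the user-pointer validation the i2c-dev hunk adds; the struct fields and I2C_RDWR_IOCTL_MAX_MSGS are the real uapi, the demo_* wrapper is hypothetical:

#include <linux/i2c-dev.h>
#include <linux/uaccess.h>

static long demo_rdwr_ioctl(unsigned long arg)
{
	struct i2c_rdwr_ioctl_data rdwr_arg;

	if (copy_from_user(&rdwr_arg, (void __user *)arg,
			   sizeof(rdwr_arg)))
		return -EFAULT;

	/* Reject a NULL msgs pointer and zero-length requests up front,
	 * before anything is allocated based on nmsgs.
	 */
	if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0)
		return -EINVAL;
	if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
		return -EINVAL;

	/* ... copy in rdwr_arg.nmsgs messages and run the transfer ... */
	return 0;
}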
33651 diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
33652 index f8e9b7305c13..e2e12a5585e5 100644
33653 --- a/drivers/i3c/master.c
33654 +++ b/drivers/i3c/master.c
33655 @@ -2535,7 +2535,7 @@ int i3c_master_register(struct i3c_master_controller *master,
33657         ret = i3c_master_bus_init(master);
33658         if (ret)
33659 -               goto err_destroy_wq;
33660 +               goto err_put_dev;
33662         ret = device_add(&master->dev);
33663         if (ret)
33664 @@ -2566,9 +2566,6 @@ int i3c_master_register(struct i3c_master_controller *master,
33665  err_cleanup_bus:
33666         i3c_master_bus_cleanup(master);
33668 -err_destroy_wq:
33669 -       destroy_workqueue(master->wq);
33671  err_put_dev:
33672         put_device(&master->dev);
33674 diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
33675 index 2e0c62c39155..8acf277b8b25 100644
33676 --- a/drivers/iio/accel/Kconfig
33677 +++ b/drivers/iio/accel/Kconfig
33678 @@ -211,7 +211,6 @@ config DMARD10
33679  config HID_SENSOR_ACCEL_3D
33680         depends on HID_SENSOR_HUB
33681         select IIO_BUFFER
33682 -       select IIO_TRIGGERED_BUFFER
33683         select HID_SENSOR_IIO_COMMON
33684         select HID_SENSOR_IIO_TRIGGER
33685         tristate "HID Accelerometers 3D"
33686 diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c
33687 index 3633a4e302c6..fe225990de24 100644
33688 --- a/drivers/iio/accel/adis16201.c
33689 +++ b/drivers/iio/accel/adis16201.c
33690 @@ -215,7 +215,7 @@ static const struct iio_chan_spec adis16201_channels[] = {
33691         ADIS_AUX_ADC_CHAN(ADIS16201_AUX_ADC_REG, ADIS16201_SCAN_AUX_ADC, 0, 12),
33692         ADIS_INCLI_CHAN(X, ADIS16201_XINCL_OUT_REG, ADIS16201_SCAN_INCLI_X,
33693                         BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
33694 -       ADIS_INCLI_CHAN(X, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y,
33695 +       ADIS_INCLI_CHAN(Y, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y,
33696                         BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
33697         IIO_CHAN_SOFT_TIMESTAMP(7)
33698  };
33699 diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
33700 index e0667c4b3c08..91958da22dcf 100644
33701 --- a/drivers/iio/adc/Kconfig
33702 +++ b/drivers/iio/adc/Kconfig
33703 @@ -249,7 +249,7 @@ config AD799X
33704  config AD9467
33705         tristate "Analog Devices AD9467 High Speed ADC driver"
33706         depends on SPI
33707 -       select ADI_AXI_ADC
33708 +       depends on ADI_AXI_ADC
33709         help
33710           Say yes here to build support for Analog Devices:
33711           * AD9467 16-Bit, 200 MSPS/250 MSPS Analog-to-Digital Converter
33712 @@ -266,8 +266,6 @@ config ADI_AXI_ADC
33713         select IIO_BUFFER
33714         select IIO_BUFFER_HW_CONSUMER
33715         select IIO_BUFFER_DMAENGINE
33716 -       depends on HAS_IOMEM
33717 -       depends on OF
33718         help
33719           Say yes here to build support for Analog Devices Generic
33720           AXI ADC IP core. The IP core is used for interfacing with
33721 diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
33722 index 17402714b387..9e9ff07cf972 100644
33723 --- a/drivers/iio/adc/ad7476.c
33724 +++ b/drivers/iio/adc/ad7476.c
33725 @@ -321,25 +321,15 @@ static int ad7476_probe(struct spi_device *spi)
33726         spi_message_init(&st->msg);
33727         spi_message_add_tail(&st->xfer, &st->msg);
33729 -       ret = iio_triggered_buffer_setup(indio_dev, NULL,
33730 -                       &ad7476_trigger_handler, NULL);
33731 +       ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev, NULL,
33732 +                                             &ad7476_trigger_handler, NULL);
33733         if (ret)
33734 -               goto error_disable_reg;
33735 +               return ret;
33737         if (st->chip_info->reset)
33738                 st->chip_info->reset(st);
33740 -       ret = iio_device_register(indio_dev);
33741 -       if (ret)
33742 -               goto error_ring_unregister;
33743 -       return 0;
33745 -error_ring_unregister:
33746 -       iio_triggered_buffer_cleanup(indio_dev);
33747 -error_disable_reg:
33748 -       regulator_disable(st->reg);
33750 -       return ret;
33751 +       return devm_iio_device_register(&spi->dev, indio_dev);
33754  static const struct spi_device_id ad7476_id[] = {
33755 diff --git a/drivers/iio/common/hid-sensors/Kconfig b/drivers/iio/common/hid-sensors/Kconfig
33756 index 24d492567336..2a3dd3b907be 100644
33757 --- a/drivers/iio/common/hid-sensors/Kconfig
33758 +++ b/drivers/iio/common/hid-sensors/Kconfig
33759 @@ -19,6 +19,7 @@ config HID_SENSOR_IIO_TRIGGER
33760         tristate "Common module (trigger) for all HID Sensor IIO drivers"
33761         depends on HID_SENSOR_HUB && HID_SENSOR_IIO_COMMON && IIO_BUFFER
33762         select IIO_TRIGGER
33763 +       select IIO_TRIGGERED_BUFFER
33764         help
33765           Say yes here to build trigger support for HID sensors.
33766           Triggers will be send if all requested attributes were read.
33767 diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
33768 index 5824f2edf975..20b5ac7ab66a 100644
33769 --- a/drivers/iio/gyro/Kconfig
33770 +++ b/drivers/iio/gyro/Kconfig
33771 @@ -111,7 +111,6 @@ config FXAS21002C_SPI
33772  config HID_SENSOR_GYRO_3D
33773         depends on HID_SENSOR_HUB
33774         select IIO_BUFFER
33775 -       select IIO_TRIGGERED_BUFFER
33776         select HID_SENSOR_IIO_COMMON
33777         select HID_SENSOR_IIO_TRIGGER
33778         tristate "HID Gyroscope 3D"
33779 diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
33780 index ac90be03332a..f17a93519535 100644
33781 --- a/drivers/iio/gyro/mpu3050-core.c
33782 +++ b/drivers/iio/gyro/mpu3050-core.c
33783 @@ -272,7 +272,16 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
33784         case IIO_CHAN_INFO_OFFSET:
33785                 switch (chan->type) {
33786                 case IIO_TEMP:
33787 -                       /* The temperature scaling is (x+23000)/280 Celsius */
33788 +                       /*
33789 +                        * The temperature scaling is (x+23000)/280 Celsius
33790 +                        * for the "best fit straight line" temperature range
33791 +                        * of -30C..85C.  The 23000 includes room temperature
33792 +                        * offset of +35C, 280 is the precision scale and x is
33793 +                        * the 16-bit signed integer reported by hardware.
33794 +                        *
33795 +                        * The temperature value itself represents the
33796 +                        * temperature of the sensor die.
33797 +                        */
33798                         *val = 23000;
33799                         return IIO_VAL_INT;
33800                 default:
33801 @@ -329,7 +338,7 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
33802                                 goto out_read_raw_unlock;
33803                         }
33805 -                       *val = be16_to_cpu(raw_val);
33806 +                       *val = (s16)be16_to_cpu(raw_val);
33807                         ret = IIO_VAL_INT;
33809                         goto out_read_raw_unlock;
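The mpu3050 hunk adds an (s16) cast before widening the raw big-endian sample. A standalone illustration of why the cast matters (plain C, no kernel headers):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A reading of -1 arrives on the wire as 0xFFFF. */
	uint16_t raw = 0xFFFF;
	int no_cast   = raw;           /* zero-extends to 65535 */
	int with_cast = (int16_t)raw;  /* sign-extends to -1    */

	printf("%d vs %d\n", no_cast, with_cast); /* 65535 vs -1 */
	return 0;
}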
33810 diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
33811 index 6549fcf6db69..2de5494e7c22 100644
33812 --- a/drivers/iio/humidity/Kconfig
33813 +++ b/drivers/iio/humidity/Kconfig
33814 @@ -52,7 +52,6 @@ config HID_SENSOR_HUMIDITY
33815         tristate "HID Environmental humidity sensor"
33816         depends on HID_SENSOR_HUB
33817         select IIO_BUFFER
33818 -       select IIO_TRIGGERED_BUFFER
33819         select HID_SENSOR_IIO_COMMON
33820         select HID_SENSOR_IIO_TRIGGER
33821         help
33822 diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
33823 index dfe86c589325..c41b8ef1e250 100644
33824 --- a/drivers/iio/imu/adis16480.c
33825 +++ b/drivers/iio/imu/adis16480.c
33826 @@ -10,6 +10,7 @@
33827  #include <linux/of_irq.h>
33828  #include <linux/interrupt.h>
33829  #include <linux/delay.h>
33830 +#include <linux/math.h>
33831  #include <linux/mutex.h>
33832  #include <linux/device.h>
33833  #include <linux/kernel.h>
33834 @@ -17,6 +18,7 @@
33835  #include <linux/slab.h>
33836  #include <linux/sysfs.h>
33837  #include <linux/module.h>
33838 +#include <linux/lcm.h>
33840  #include <linux/iio/iio.h>
33841  #include <linux/iio/sysfs.h>
33842 @@ -170,6 +172,11 @@ static const char * const adis16480_int_pin_names[4] = {
33843         [ADIS16480_PIN_DIO4] = "DIO4",
33844  };
33846 +static bool low_rate_allow;
33847 +module_param(low_rate_allow, bool, 0444);
33848 +MODULE_PARM_DESC(low_rate_allow,
33849 +                "Allow IMU rates below the minimum advisable when external clk is used in PPS mode (default: N)");
33851  #ifdef CONFIG_DEBUG_FS
33853  static ssize_t adis16480_show_firmware_revision(struct file *file,
33854 @@ -312,7 +319,8 @@ static int adis16480_debugfs_init(struct iio_dev *indio_dev)
33855  static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
33857         struct adis16480 *st = iio_priv(indio_dev);
33858 -       unsigned int t, reg;
33859 +       unsigned int t, sample_rate = st->clk_freq;
33860 +       int ret;
33862         if (val < 0 || val2 < 0)
33863                 return -EINVAL;
33864 @@ -321,28 +329,65 @@ static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
33865         if (t == 0)
33866                 return -EINVAL;
33868 +       mutex_lock(&st->adis.state_lock);
33869         /*
33870 -        * When using PPS mode, the rate of data collection is equal to the
33871 -        * product of the external clock frequency and the scale factor in the
33872 -        * SYNC_SCALE register.
33873 -        * When using sync mode, or internal clock, the output data rate is
33874 -        * equal with  the clock frequency divided by DEC_RATE + 1.
33875 +        * When using PPS mode, the input clock needs to be scaled so that we have an IMU
33876 +        * sample rate between (optimally) 4000 and 4250. After this, we can use the
33877 +        * decimation filter to lower the sampling rate in order to get what the user wants.
33878 +        * Optimally, the user sample rate is a multiple of both the IMU sample rate and
33879 +        * the input clock. Hence, calculating the sync_scale dynamically gives us better
33880 +        * chances of achieving a perfect/integer value for DEC_RATE. The math here is:
33881 +        *      1. lcm of the input clock and the desired output rate.
33882 +        *      2. get the highest multiple of the previous result lower than the adis max rate.
33883 +        *      3. The last result becomes the IMU sample rate. Use that to calculate SYNC_SCALE
33884 +        *         and DEC_RATE (to get the user output rate)
33885          */
33886         if (st->clk_mode == ADIS16480_CLK_PPS) {
33887 -               t = t / st->clk_freq;
33888 -               reg = ADIS16495_REG_SYNC_SCALE;
33889 -       } else {
33890 -               t = st->clk_freq / t;
33891 -               reg = ADIS16480_REG_DEC_RATE;
33892 +               unsigned long scaled_rate = lcm(st->clk_freq, t);
33893 +               int sync_scale;
33895 +               /*
33896 +                * If lcm is bigger than the IMU maximum sampling rate there's no perfect
33897 +                * solution. In this case, we get the highest multiple of the input clock
33898 +                * lower than the IMU max sample rate.
33899 +                */
33900 +               if (scaled_rate > st->chip_info->int_clk)
33901 +                       scaled_rate = st->chip_info->int_clk / st->clk_freq * st->clk_freq;
33902 +               else
33903 +                       scaled_rate = st->chip_info->int_clk / scaled_rate * scaled_rate;
33905 +               /*
33906 +                * This is not a hard requirement, but it is not advised to run the
33907 +                * IMU with a sample rate lower than 4000Hz due to possible
33908 +                * undersampling issues. However, some users might really want to
33909 +                * take the risk. Hence, we provide a module parameter for them. If
33910 +                * set, we allow sample rates lower than 4kHz. By default, we won't
33911 +                * allow this and just round the rate up to the next multiple of the
33912 +                * input clock bigger than 4kHz, because in some cases (when DEC_RATE
33913 +                * is 0) that gives the value closest to the one the user asked for.
33914 +                */
33915 +               if (scaled_rate < 4000000 && !low_rate_allow)
33916 +                       scaled_rate = roundup(4000000, st->clk_freq);
33918 +               sync_scale = scaled_rate / st->clk_freq;
33919 +               ret = __adis_write_reg_16(&st->adis, ADIS16495_REG_SYNC_SCALE, sync_scale);
33920 +               if (ret)
33921 +                       goto error;
33923 +               sample_rate = scaled_rate;
33924         }
33926 +       t = DIV_ROUND_CLOSEST(sample_rate, t);
33927 +       if (t)
33928 +               t--;
33930         if (t > st->chip_info->max_dec_rate)
33931                 t = st->chip_info->max_dec_rate;
33933 -       if ((t != 0) && (st->clk_mode != ADIS16480_CLK_PPS))
33934 -               t--;
33936 -       return adis_write_reg_16(&st->adis, reg, t);
33937 +       ret = __adis_write_reg_16(&st->adis, ADIS16480_REG_DEC_RATE, t);
33938 +error:
33939 +       mutex_unlock(&st->adis.state_lock);
33940 +       return ret;
33943  static int adis16480_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
33944 @@ -350,34 +395,35 @@ static int adis16480_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
33945         struct adis16480 *st = iio_priv(indio_dev);
33946         uint16_t t;
33947         int ret;
33948 -       unsigned int freq;
33949 -       unsigned int reg;
33950 +       unsigned int freq, sample_rate = st->clk_freq;
33952 -       if (st->clk_mode == ADIS16480_CLK_PPS)
33953 -               reg = ADIS16495_REG_SYNC_SCALE;
33954 -       else
33955 -               reg = ADIS16480_REG_DEC_RATE;
33956 +       mutex_lock(&st->adis.state_lock);
33958 +       if (st->clk_mode == ADIS16480_CLK_PPS) {
33959 +               u16 sync_scale;
33961 +               ret = __adis_read_reg_16(&st->adis, ADIS16495_REG_SYNC_SCALE, &sync_scale);
33962 +               if (ret)
33963 +                       goto error;
33965 -       ret = adis_read_reg_16(&st->adis, reg, &t);
33966 +               sample_rate = st->clk_freq * sync_scale;
33967 +       }
33969 +       ret = __adis_read_reg_16(&st->adis, ADIS16480_REG_DEC_RATE, &t);
33970         if (ret)
33971 -               return ret;
33972 +               goto error;
33974 -       /*
33975 -        * When using PPS mode, the rate of data collection is equal to the
33976 -        * product of the external clock frequency and the scale factor in the
33977 -        * SYNC_SCALE register.
33978 -        * When using sync mode, or internal clock, the output data rate is
33979 -        * equal with  the clock frequency divided by DEC_RATE + 1.
33980 -        */
33981 -       if (st->clk_mode == ADIS16480_CLK_PPS)
33982 -               freq = st->clk_freq * t;
33983 -       else
33984 -               freq = st->clk_freq / (t + 1);
33985 +       mutex_unlock(&st->adis.state_lock);
33987 +       freq = DIV_ROUND_CLOSEST(sample_rate, (t + 1));
33989         *val = freq / 1000;
33990         *val2 = (freq % 1000) * 1000;
33992         return IIO_VAL_INT_PLUS_MICRO;
33993 +error:
33994 +       mutex_unlock(&st->adis.state_lock);
33995 +       return ret;
33998  enum {
33999 @@ -1278,6 +1324,20 @@ static int adis16480_probe(struct spi_device *spi)
34001                 st->clk_freq = clk_get_rate(st->ext_clk);
34002                 st->clk_freq *= 1000; /* micro */
34003 +               if (st->clk_mode == ADIS16480_CLK_PPS) {
34004 +                       u16 sync_scale;
34006 +                       /*
34007 +                        * In PPS mode, the IMU sample rate is the clk_freq * sync_scale. Hence,
34008 +                        * default the IMU sample rate to the highest multiple of the input clock
34009 +                        * lower than the IMU max sample rate. The internal sample rate is the
34010 +                        * max...
34011 +                        */
34012 +                       sync_scale = st->chip_info->int_clk / st->clk_freq;
34013 +                       ret = __adis_write_reg_16(&st->adis, ADIS16495_REG_SYNC_SCALE, sync_scale);
34014 +                       if (ret)
34015 +                               return ret;
34016 +               }
34017         } else {
34018                 st->clk_freq = st->chip_info->int_clk;
34019         }
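A standalone walk-through of the sync-scale arithmetic the adis16480 hunks introduce, under the assumption that rates are kept in the driver's milli-Hz units; the numbers (4250 Hz max internal rate, 1 kHz PPS clock, 400 Hz requested output) are made up for illustration:

#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;
		a = b;
		b = t;
	}
	return a;
}

static unsigned long lcm(unsigned long a, unsigned long b)
{
	return a / gcd(a, b) * b;
}

int main(void)
{
	unsigned long int_clk = 4250000, clk_freq = 1000000, want = 400000;

	/* Step 1: lcm of input clock and desired rate; step 2: largest
	 * multiple of it (or of the clock, if the lcm is too big) that
	 * fits under the IMU maximum; step 3: derive the registers.
	 */
	unsigned long scaled = lcm(clk_freq, want);
	if (scaled > int_clk)
		scaled = int_clk / clk_freq * clk_freq;
	else
		scaled = int_clk / scaled * scaled;

	unsigned long sync_scale = scaled / clk_freq;   /* SYNC_SCALE */
	unsigned long dec = (scaled + want / 2) / want; /* ~DIV_ROUND_CLOSEST */
	if (dec)
		dec--;                                  /* DEC_RATE */

	/* Prints: IMU rate 4000000 mHz, SYNC_SCALE 4, DEC_RATE 9,
	 * i.e. 4000000 / (9 + 1) = an exact 400 Hz output.
	 */
	printf("IMU rate %lu mHz, SYNC_SCALE %lu, DEC_RATE %lu\n",
	       scaled, sync_scale, dec);
	return 0;
}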
34020 diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
34021 index 453c51c79655..69ab94ab7297 100644
34022 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
34023 +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
34024 @@ -731,12 +731,16 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
34025         }
34028 -static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val)
34029 +static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val,
34030 +                                       int val2)
34032         int result, i;
34034 +       if (val != 0)
34035 +               return -EINVAL;
34037         for (i = 0; i < ARRAY_SIZE(gyro_scale_6050); ++i) {
34038 -               if (gyro_scale_6050[i] == val) {
34039 +               if (gyro_scale_6050[i] == val2) {
34040                         result = inv_mpu6050_set_gyro_fsr(st, i);
34041                         if (result)
34042                                 return result;
34043 @@ -767,13 +771,17 @@ static int inv_write_raw_get_fmt(struct iio_dev *indio_dev,
34044         return -EINVAL;
34047 -static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
34048 +static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val,
34049 +                                        int val2)
34051         int result, i;
34052         u8 d;
34054 +       if (val != 0)
34055 +               return -EINVAL;
34057         for (i = 0; i < ARRAY_SIZE(accel_scale); ++i) {
34058 -               if (accel_scale[i] == val) {
34059 +               if (accel_scale[i] == val2) {
34060                         d = (i << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
34061                         result = regmap_write(st->map, st->reg->accl_config, d);
34062                         if (result)
34063 @@ -814,10 +822,10 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
34064         case IIO_CHAN_INFO_SCALE:
34065                 switch (chan->type) {
34066                 case IIO_ANGL_VEL:
34067 -                       result = inv_mpu6050_write_gyro_scale(st, val2);
34068 +                       result = inv_mpu6050_write_gyro_scale(st, val, val2);
34069                         break;
34070                 case IIO_ACCEL:
34071 -                       result = inv_mpu6050_write_accel_scale(st, val2);
34072 +                       result = inv_mpu6050_write_accel_scale(st, val, val2);
34073                         break;
34074                 default:
34075                         result = -EINVAL;
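The inv_mpu6050 hunks exist because IIO scale writes arrive as an (integer, micro) pair; the old code matched on val2 alone, so a request like "1.000133" was accepted as if it were "0.000133". A sketch of the convention, with a hypothetical scale table:

#include <linux/kernel.h>

/* A fractional scale such as 0.000133 is passed to write_raw() as
 * val = 0, val2 = 133 (IIO_VAL_INT_PLUS_MICRO).  Any non-zero integer
 * part must be rejected before comparing the micro part.
 */
static const int demo_scale_micro[] = { 133, 266, 532, 1064 };

static int demo_write_scale(int val, int val2)
{
	int i;

	if (val != 0)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(demo_scale_micro); i++)
		if (demo_scale_micro[i] == val2)
			return i;	/* index into the FSR register */

	return -EINVAL;
}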
34076 diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
34077 index 7db761afa578..36f3a900878d 100644
34078 --- a/drivers/iio/industrialio-core.c
34079 +++ b/drivers/iio/industrialio-core.c
34080 @@ -1734,7 +1734,6 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
34081         if (!indio_dev->info)
34082                 goto out_unlock;
34084 -       ret = -EINVAL;
34085         list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) {
34086                 ret = h->ioctl(indio_dev, filp, cmd, arg);
34087                 if (ret != IIO_IOCTL_UNHANDLED)
34088 @@ -1742,7 +1741,7 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
34089         }
34091         if (ret == IIO_IOCTL_UNHANDLED)
34092 -               ret = -EINVAL;
34093 +               ret = -ENODEV;
34095  out_unlock:
34096         mutex_unlock(&indio_dev->info_exist_lock);
34097 @@ -1864,9 +1863,6 @@ EXPORT_SYMBOL(__iio_device_register);
34098   **/
34099  void iio_device_unregister(struct iio_dev *indio_dev)
34101 -       struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
34102 -       struct iio_ioctl_handler *h, *t;
34104         cdev_device_del(&indio_dev->chrdev, &indio_dev->dev);
34106         mutex_lock(&indio_dev->info_exist_lock);
34107 @@ -1877,9 +1873,6 @@ void iio_device_unregister(struct iio_dev *indio_dev)
34109         indio_dev->info = NULL;
34111 -       list_for_each_entry_safe(h, t, &iio_dev_opaque->ioctl_handlers, entry)
34112 -               list_del(&h->entry);
34114         iio_device_wakeup_eventset(indio_dev);
34115         iio_buffer_wakeup_poll(indio_dev);
34117 diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
34118 index 33ad4dd0b5c7..917f9becf9c7 100644
34119 --- a/drivers/iio/light/Kconfig
34120 +++ b/drivers/iio/light/Kconfig
34121 @@ -256,7 +256,6 @@ config ISL29125
34122  config HID_SENSOR_ALS
34123         depends on HID_SENSOR_HUB
34124         select IIO_BUFFER
34125 -       select IIO_TRIGGERED_BUFFER
34126         select HID_SENSOR_IIO_COMMON
34127         select HID_SENSOR_IIO_TRIGGER
34128         tristate "HID ALS"
34129 @@ -270,7 +269,6 @@ config HID_SENSOR_ALS
34130  config HID_SENSOR_PROX
34131         depends on HID_SENSOR_HUB
34132         select IIO_BUFFER
34133 -       select IIO_TRIGGERED_BUFFER
34134         select HID_SENSOR_IIO_COMMON
34135         select HID_SENSOR_IIO_TRIGGER
34136         tristate "HID PROX"
34137 diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c
34138 index 7ba7aa59437c..040d8429a6e0 100644
34139 --- a/drivers/iio/light/gp2ap002.c
34140 +++ b/drivers/iio/light/gp2ap002.c
34141 @@ -583,7 +583,7 @@ static int gp2ap002_probe(struct i2c_client *client,
34142                                         "gp2ap002", indio_dev);
34143         if (ret) {
34144                 dev_err(dev, "unable to request IRQ\n");
34145 -               goto out_disable_vio;
34146 +               goto out_put_pm;
34147         }
34148         gp2ap002->irq = client->irq;
34150 @@ -613,8 +613,9 @@ static int gp2ap002_probe(struct i2c_client *client,
34152         return 0;
34154 -out_disable_pm:
34155 +out_put_pm:
34156         pm_runtime_put_noidle(dev);
34157 +out_disable_pm:
34158         pm_runtime_disable(dev);
34159  out_disable_vio:
34160         regulator_disable(gp2ap002->vio);
34161 diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
34162 index 5bf2bfbc5379..6ce37819fb73 100644
34163 --- a/drivers/iio/light/tsl2563.c
34164 +++ b/drivers/iio/light/tsl2563.c
34165 @@ -271,11 +271,7 @@ static void tsl2563_wait_adc(struct tsl2563_chip *chip)
34166         default:
34167                 delay = 402;
34168         }
34169 -       /*
34170 -        * TODO: Make sure that we wait at least required delay but why we
34171 -        * have to extend it one tick more?
34172 -        */
34173 -       schedule_timeout_interruptible(msecs_to_jiffies(delay) + 2);
34174 +       schedule_msec_hrtimeout_interruptible(delay + 1);
34177  static int tsl2563_adjust_gainlevel(struct tsl2563_chip *chip, u16 adc)
34178 diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
34179 index 0f787bfc88fc..c9d8f07a6fcd 100644
34180 --- a/drivers/iio/light/tsl2583.c
34181 +++ b/drivers/iio/light/tsl2583.c
34182 @@ -341,6 +341,14 @@ static int tsl2583_als_calibrate(struct iio_dev *indio_dev)
34183                 return lux_val;
34184         }
34186 +       /* Avoid division by zero of lux_value later on */
34187 +       if (lux_val == 0) {
34188 +               dev_err(&chip->client->dev,
34189 +                       "%s: lux_val of 0 will produce out of range trim_value\n",
34190 +                       __func__);
34191 +               return -ENODATA;
34192 +       }
34194         gain_trim_val = (unsigned int)(((chip->als_settings.als_cal_target)
34195                         * chip->als_settings.als_gain_trim) / lux_val);
34196         if ((gain_trim_val < 250) || (gain_trim_val > 4000)) {
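A minimal illustration of the guard the tsl2583 hunk adds, with hypothetical numbers; -1 stands in for the driver's -ENODATA:

/* gain_trim = target * trim / lux.  A lux reading of 0 (a fully dark
 * sensor during calibration) would divide by zero, and no trim inside
 * the valid 250..4000 window could result from it anyway.
 */
static int demo_calibrate(unsigned int target, unsigned int trim,
			  unsigned int lux)
{
	unsigned int gain_trim;

	if (lux == 0)
		return -1;

	gain_trim = target * trim / lux;
	if (gain_trim < 250 || gain_trim > 4000)
		return -1;

	return gain_trim;
}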
34197 diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
34198 index 5d4ffd66032e..74ad5701c6c2 100644
34199 --- a/drivers/iio/magnetometer/Kconfig
34200 +++ b/drivers/iio/magnetometer/Kconfig
34201 @@ -95,7 +95,6 @@ config MAG3110
34202  config HID_SENSOR_MAGNETOMETER_3D
34203         depends on HID_SENSOR_HUB
34204         select IIO_BUFFER
34205 -       select IIO_TRIGGERED_BUFFER
34206         select HID_SENSOR_IIO_COMMON
34207         select HID_SENSOR_IIO_TRIGGER
34208         tristate "HID Magenetometer 3D"
34209 diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c
34210 index d46f23d82b3d..2f2f8cb3c26c 100644
34211 --- a/drivers/iio/magnetometer/yamaha-yas530.c
34212 +++ b/drivers/iio/magnetometer/yamaha-yas530.c
34213 @@ -32,13 +32,14 @@
34214  #include <linux/regmap.h>
34215  #include <linux/regulator/consumer.h>
34216  #include <linux/random.h>
34217 -#include <linux/unaligned/be_byteshift.h>
34219  #include <linux/iio/buffer.h>
34220  #include <linux/iio/iio.h>
34221  #include <linux/iio/trigger_consumer.h>
34222  #include <linux/iio/triggered_buffer.h>
34224 +#include <asm/unaligned.h>
34226  /* This register map covers YAS530 and YAS532 but differs in YAS 537 and YAS539 */
34227  #define YAS5XX_DEVICE_ID               0x80
34228  #define YAS5XX_ACTUATE_INIT_COIL       0x81
34229 @@ -887,6 +888,7 @@ static int yas5xx_probe(struct i2c_client *i2c,
34230                 strncpy(yas5xx->name, "yas532", sizeof(yas5xx->name));
34231                 break;
34232         default:
34233 +               ret = -ENODEV;
34234                 dev_err(dev, "unhandled device ID %02x\n", yas5xx->devid);
34235                 goto assert_reset;
34236         }
34237 diff --git a/drivers/iio/orientation/Kconfig b/drivers/iio/orientation/Kconfig
34238 index a505583cc2fd..396cbbb867f4 100644
34239 --- a/drivers/iio/orientation/Kconfig
34240 +++ b/drivers/iio/orientation/Kconfig
34241 @@ -9,7 +9,6 @@ menu "Inclinometer sensors"
34242  config HID_SENSOR_INCLINOMETER_3D
34243         depends on HID_SENSOR_HUB
34244         select IIO_BUFFER
34245 -       select IIO_TRIGGERED_BUFFER
34246         select HID_SENSOR_IIO_COMMON
34247         select HID_SENSOR_IIO_TRIGGER
34248         tristate "HID Inclinometer 3D"
34249 @@ -20,7 +19,6 @@ config HID_SENSOR_INCLINOMETER_3D
34250  config HID_SENSOR_DEVICE_ROTATION
34251         depends on HID_SENSOR_HUB
34252         select IIO_BUFFER
34253 -       select IIO_TRIGGERED_BUFFER
34254         select HID_SENSOR_IIO_COMMON
34255         select HID_SENSOR_IIO_TRIGGER
34256         tristate "HID Device Rotation"
34257 diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c
34258 index 18e4ef060096..c087d8f72a54 100644
34259 --- a/drivers/iio/orientation/hid-sensor-rotation.c
34260 +++ b/drivers/iio/orientation/hid-sensor-rotation.c
34261 @@ -21,7 +21,7 @@ struct dev_rot_state {
34262         struct hid_sensor_common common_attributes;
34263         struct hid_sensor_hub_attribute_info quaternion;
34264         struct {
34265 -               u32 sampled_vals[4] __aligned(16);
34266 +               s32 sampled_vals[4] __aligned(16);
34267                 u64 timestamp __aligned(8);
34268         } scan;
34269         int scale_pre_decml;
34270 @@ -170,8 +170,15 @@ static int dev_rot_capture_sample(struct hid_sensor_hub_device *hsdev,
34271         struct dev_rot_state *rot_state = iio_priv(indio_dev);
34273         if (usage_id == HID_USAGE_SENSOR_ORIENT_QUATERNION) {
34274 -               memcpy(&rot_state->scan.sampled_vals, raw_data,
34275 -                      sizeof(rot_state->scan.sampled_vals));
34276 +               if (raw_len / 4 == sizeof(s16)) {
34277 +                       rot_state->scan.sampled_vals[0] = ((s16 *)raw_data)[0];
34278 +                       rot_state->scan.sampled_vals[1] = ((s16 *)raw_data)[1];
34279 +                       rot_state->scan.sampled_vals[2] = ((s16 *)raw_data)[2];
34280 +                       rot_state->scan.sampled_vals[3] = ((s16 *)raw_data)[3];
34281 +               } else {
34282 +                       memcpy(&rot_state->scan.sampled_vals, raw_data,
34283 +                              sizeof(rot_state->scan.sampled_vals));
34284 +               }
34286                 dev_dbg(&indio_dev->dev, "Recd Quat len:%zu::%zu\n", raw_len,
34287                         sizeof(rot_state->scan.sampled_vals));
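A sketch of the quaternion handling the hid-sensor-rotation hunk adds: some firmwares report four 16-bit components, others four 32-bit ones, and raw_len is the only way to tell them apart. The demo_* helper is hypothetical:

#include <linux/string.h>
#include <linux/types.h>

static void demo_capture_quat(s32 out[4], const void *raw, size_t raw_len)
{
	if (raw_len / 4 == sizeof(s16)) {
		/* 8-byte report: widen each s16 so userspace always
		 * sees signed 32-bit components.
		 */
		const s16 *v = raw;
		int i;

		for (i = 0; i < 4; i++)
			out[i] = v[i];
	} else {
		/* 16-byte report: already s32, copy straight through. */
		memcpy(out, raw, 4 * sizeof(s32));
	}
}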
34288 diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
34289 index 689b978db4f9..fc0d3cfca418 100644
34290 --- a/drivers/iio/pressure/Kconfig
34291 +++ b/drivers/iio/pressure/Kconfig
34292 @@ -79,7 +79,6 @@ config DPS310
34293  config HID_SENSOR_PRESS
34294         depends on HID_SENSOR_HUB
34295         select IIO_BUFFER
34296 -       select IIO_TRIGGERED_BUFFER
34297         select HID_SENSOR_IIO_COMMON
34298         select HID_SENSOR_IIO_TRIGGER
34299         tristate "HID PRESS"
34300 diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
34301 index c685f10b5ae4..cc206bfa09c7 100644
34302 --- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
34303 +++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
34304 @@ -160,6 +160,7 @@ static int lidar_get_measurement(struct lidar_data *data, u16 *reg)
34305         ret = lidar_write_control(data, LIDAR_REG_CONTROL_ACQUIRE);
34306         if (ret < 0) {
34307                 dev_err(&client->dev, "cannot send start measurement command");
34308 +               pm_runtime_put_noidle(&client->dev);
34309                 return ret;
34310         }
34312 diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c
34313 index 37fd0b65a014..ea82cfaf7f42 100644
34314 --- a/drivers/iio/proximity/sx9310.c
34315 +++ b/drivers/iio/proximity/sx9310.c
34316 @@ -763,7 +763,11 @@ static int sx9310_write_far_debounce(struct sx9310_data *data, int val)
34317         int ret;
34318         unsigned int regval;
34320 -       val = ilog2(val);
34321 +       if (val > 0)
34322 +               val = ilog2(val);
34323 +       if (!FIELD_FIT(SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_MASK, val))
34324 +               return -EINVAL;
34326         regval = FIELD_PREP(SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_MASK, val);
34328         mutex_lock(&data->mutex);
34329 @@ -780,7 +784,11 @@ static int sx9310_write_close_debounce(struct sx9310_data *data, int val)
34330         int ret;
34331         unsigned int regval;
34333 -       val = ilog2(val);
34334 +       if (val > 0)
34335 +               val = ilog2(val);
34336 +       if (!FIELD_FIT(SX9310_REG_PROX_CTRL10_CLOSE_DEBOUNCE_MASK, val))
34337 +               return -EINVAL;
34339         regval = FIELD_PREP(SX9310_REG_PROX_CTRL10_CLOSE_DEBOUNCE_MASK, val);
34341         mutex_lock(&data->mutex);
34342 @@ -1213,17 +1221,17 @@ static int sx9310_init_compensation(struct iio_dev *indio_dev)
34345  static const struct sx9310_reg_default *
34346 -sx9310_get_default_reg(struct sx9310_data *data, int i,
34347 +sx9310_get_default_reg(struct sx9310_data *data, int idx,
34348                        struct sx9310_reg_default *reg_def)
34350 -       int ret;
34351         const struct device_node *np = data->client->dev.of_node;
34352 -       u32 combined[SX9310_NUM_CHANNELS] = { 4, 4, 4, 4 };
34353 +       u32 combined[SX9310_NUM_CHANNELS];
34354 +       u32 start = 0, raw = 0, pos = 0;
34355         unsigned long comb_mask = 0;
34356 +       int ret, i, count;
34357         const char *res;
34358 -       u32 start = 0, raw = 0, pos = 0;
34360 -       memcpy(reg_def, &sx9310_default_regs[i], sizeof(*reg_def));
34361 +       memcpy(reg_def, &sx9310_default_regs[idx], sizeof(*reg_def));
34362         if (!np)
34363                 return reg_def;
34365 @@ -1234,15 +1242,31 @@ sx9310_get_default_reg(struct sx9310_data *data, int i,
34366                         reg_def->def |= SX9310_REG_PROX_CTRL2_SHIELDEN_GROUND;
34367                 }
34369 -               reg_def->def &= ~SX9310_REG_PROX_CTRL2_COMBMODE_MASK;
34370 -               of_property_read_u32_array(np, "semtech,combined-sensors",
34371 -                                          combined, ARRAY_SIZE(combined));
34372 -               for (i = 0; i < ARRAY_SIZE(combined); i++) {
34373 -                       if (combined[i] <= SX9310_NUM_CHANNELS)
34374 -                               comb_mask |= BIT(combined[i]);
34375 +               count = of_property_count_elems_of_size(np, "semtech,combined-sensors",
34376 +                                                       sizeof(u32));
34377 +               if (count > 0 && count <= ARRAY_SIZE(combined)) {
34378 +                       ret = of_property_read_u32_array(np, "semtech,combined-sensors",
34379 +                                                        combined, count);
34380 +                       if (ret)
34381 +                               break;
34382 +               } else {
34383 +                       /*
34384 +                        * Either the property does not exist in the DT or the
34385 +                        * number of entries is incorrect.
34386 +                        */
34387 +                       break;
34388                 }
34389 +               for (i = 0; i < count; i++) {
34390 +                       if (combined[i] >= SX9310_NUM_CHANNELS) {
34391 +                               /* Invalid sensor (invalid DT). */
34392 +                               break;
34393 +                       }
34394 +                       comb_mask |= BIT(combined[i]);
34395 +               }
34396 +               if (i < count)
34397 +                       break;
34399 -               comb_mask &= 0xf;
34400 +               reg_def->def &= ~SX9310_REG_PROX_CTRL2_COMBMODE_MASK;
34401                 if (comb_mask == (BIT(3) | BIT(2) | BIT(1) | BIT(0)))
34402                         reg_def->def |= SX9310_REG_PROX_CTRL2_COMBMODE_CS0_CS1_CS2_CS3;
34403                 else if (comb_mask == (BIT(1) | BIT(2)))
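The sx9310 debounce hunks guard two traps: ilog2(0) is undefined, and an over-wide exponent would be silently truncated by FIELD_PREP(). A sketch using the real bitfield helpers; the mask is illustrative:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/log2.h>

#define DEMO_DEBOUNCE_MASK	GENMASK(3, 2)	/* 2-bit exponent field */

static int demo_write_debounce(int val)
{
	/* val == 0 means "debounce disabled" and must bypass ilog2(). */
	if (val > 0)
		val = ilog2(val);

	/* Reject exponents that do not fit rather than letting
	 * FIELD_PREP() truncate them into a wrong register setting.
	 */
	if (!FIELD_FIT(DEMO_DEBOUNCE_MASK, val))
		return -EINVAL;

	return FIELD_PREP(DEMO_DEBOUNCE_MASK, val);
}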
34404 diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig
34405 index f1f2a1499c9e..4df60082c1fa 100644
34406 --- a/drivers/iio/temperature/Kconfig
34407 +++ b/drivers/iio/temperature/Kconfig
34408 @@ -45,7 +45,6 @@ config HID_SENSOR_TEMP
34409         tristate "HID Environmental temperature sensor"
34410         depends on HID_SENSOR_HUB
34411         select IIO_BUFFER
34412 -       select IIO_TRIGGERED_BUFFER
34413         select HID_SENSOR_IIO_COMMON
34414         select HID_SENSOR_IIO_TRIGGER
34415         help
34416 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
34417 index 3d194bb60840..6adbaea358ae 100644
34418 --- a/drivers/infiniband/core/cm.c
34419 +++ b/drivers/infiniband/core/cm.c
34420 @@ -2138,7 +2138,8 @@ static int cm_req_handler(struct cm_work *work)
34421                 goto destroy;
34422         }
34424 -       cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
34425 +       if (cm_id_priv->av.ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE)
34426 +               cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
34428         memset(&work->path[0], 0, sizeof(work->path[0]));
34429         if (cm_req_has_alt_path(req_msg))
34430 diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
34431 index 94096511599f..5b9022a8c9ec 100644
34432 --- a/drivers/infiniband/core/cma.c
34433 +++ b/drivers/infiniband/core/cma.c
34434 @@ -463,7 +463,6 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
34435         id_priv->id.route.addr.dev_addr.transport =
34436                 rdma_node_get_transport(cma_dev->device->node_type);
34437         list_add_tail(&id_priv->list, &cma_dev->id_list);
34438 -       rdma_restrack_add(&id_priv->res);
34440         trace_cm_id_attach(id_priv, cma_dev->device);
34442 @@ -483,6 +482,7 @@ static void cma_release_dev(struct rdma_id_private *id_priv)
34443         list_del(&id_priv->list);
34444         cma_dev_put(id_priv->cma_dev);
34445         id_priv->cma_dev = NULL;
34446 +       id_priv->id.device = NULL;
34447         if (id_priv->id.route.addr.dev_addr.sgid_attr) {
34448                 rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
34449                 id_priv->id.route.addr.dev_addr.sgid_attr = NULL;
34450 @@ -700,6 +700,7 @@ static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,
34451         mutex_lock(&lock);
34452         cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);
34453         mutex_unlock(&lock);
34454 +       rdma_restrack_add(&id_priv->res);
34455         return 0;
34458 @@ -754,8 +755,10 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
34459         }
34461  out:
34462 -       if (!ret)
34463 +       if (!ret) {
34464                 cma_attach_to_dev(id_priv, cma_dev);
34465 +               rdma_restrack_add(&id_priv->res);
34466 +       }
34468         mutex_unlock(&lock);
34469         return ret;
34470 @@ -816,6 +819,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
34472  found:
34473         cma_attach_to_dev(id_priv, cma_dev);
34474 +       rdma_restrack_add(&id_priv->res);
34475         mutex_unlock(&lock);
34476         addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
34477         memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
34478 @@ -1861,6 +1865,7 @@ static void _destroy_id(struct rdma_id_private *id_priv,
34479                                 iw_destroy_cm_id(id_priv->cm_id.iw);
34480                 }
34481                 cma_leave_mc_groups(id_priv);
34482 +               rdma_restrack_del(&id_priv->res);
34483                 cma_release_dev(id_priv);
34484         }
34486 @@ -1874,7 +1879,6 @@ static void _destroy_id(struct rdma_id_private *id_priv,
34487         kfree(id_priv->id.route.path_rec);
34489         put_net(id_priv->id.route.addr.dev_addr.net);
34490 -       rdma_restrack_del(&id_priv->res);
34491         kfree(id_priv);
34494 @@ -2529,6 +2533,7 @@ static int cma_listen_on_dev(struct rdma_id_private *id_priv,
34495                rdma_addr_size(cma_src_addr(id_priv)));
34497         _cma_attach_to_dev(dev_id_priv, cma_dev);
34498 +       rdma_restrack_add(&dev_id_priv->res);
34499         cma_id_get(id_priv);
34500         dev_id_priv->internal_id = 1;
34501         dev_id_priv->afonly = id_priv->afonly;
34502 @@ -3169,6 +3174,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
34503         ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
34504         id_priv->id.port_num = p;
34505         cma_attach_to_dev(id_priv, cma_dev);
34506 +       rdma_restrack_add(&id_priv->res);
34507         cma_set_loopback(cma_src_addr(id_priv));
34508  out:
34509         mutex_unlock(&lock);
34510 @@ -3201,6 +3207,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
34511                 if (status)
34512                         pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
34513                                              status);
34514 +               rdma_restrack_add(&id_priv->res);
34515         } else if (status) {
34516                 pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
34517         }
34518 @@ -3734,7 +3741,7 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
34519         }
34521         id_priv->backlog = backlog;
34522 -       if (id->device) {
34523 +       if (id_priv->cma_dev) {
34524                 if (rdma_cap_ib_cm(id->device, 1)) {
34525                         ret = cma_ib_listen(id_priv);
34526                         if (ret)
34527 @@ -3812,6 +3819,8 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
34528         if (ret)
34529                 goto err2;
34531 +       if (!cma_any_addr(addr))
34532 +               rdma_restrack_add(&id_priv->res);
34533         return 0;
34534  err2:
34535         if (id_priv->cma_dev)
34536 diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c
34537 index 9ec6971056fa..049684880ae0 100644
34538 --- a/drivers/infiniband/core/uverbs_std_types_device.c
34539 +++ b/drivers/infiniband/core/uverbs_std_types_device.c
34540 @@ -117,8 +117,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_INFO_HANDLES)(
34541                 return ret;
34543         uapi_object = uapi_get_object(attrs->ufile->device->uapi, object_id);
34544 -       if (!uapi_object)
34545 -               return -EINVAL;
34546 +       if (IS_ERR(uapi_object))
34547 +               return PTR_ERR(uapi_object);
34549         handles = gather_objects_handle(attrs->ufile, uapi_object, attrs,
34550                                         out_len, &total);
34551 @@ -331,6 +331,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_TABLE)(
34552         if (ret)
34553                 return ret;
34555 +       if (!user_entry_size)
34556 +               return -EINVAL;
34558         max_entries = uverbs_attr_ptr_get_array_size(
34559                 attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
34560                 user_entry_size);
34561 diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
34562 index 995d4633b0a1..d4d4959c2434 100644
34563 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
34564 +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
34565 @@ -2784,6 +2784,7 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
34566                 dev_err(&cq->hwq.pdev->dev,
34567                         "FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
34568                         cqe_cons, rq->max_wqe);
34569 +               rc = -EINVAL;
34570                 goto done;
34571         }
34573 diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
34574 index fa7878336100..3ca47004b752 100644
34575 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
34576 +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
34577 @@ -854,6 +854,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res     *res,
34579  unmap_io:
34580         pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
34581 +       dpit->dbr_bar_reg_iomem = NULL;
34582         return -ENOMEM;
34585 diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
34586 index 5c95c789f302..e800e8e8bed5 100644
34587 --- a/drivers/infiniband/hw/cxgb4/resource.c
34588 +++ b/drivers/infiniband/hw/cxgb4/resource.c
34589 @@ -216,7 +216,7 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
34590                         goto out;
34591                 entry->qid = qid;
34592                 list_add_tail(&entry->entry, &uctx->cqids);
34593 -               for (i = qid; i & rdev->qpmask; i++) {
34594 +               for (i = qid + 1; i & rdev->qpmask; i++) {
34595                         entry = kmalloc(sizeof(*entry), GFP_KERNEL);
34596                         if (!entry)
34597                                 goto out;
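The cxgb4 change is a plain off-by-one: the freshly allocated qid was itself pushed back onto the free list by a loop meant to store only the spare ids of the block. A standalone sketch of the intended bookkeeping, with an illustrative mask:

#include <stdio.h>

int main(void)
{
	unsigned int qpmask = 3;	/* hardware hands out 4-aligned blocks */
	unsigned int qid = 8;		/* id just allocated for use          */
	unsigned int i;

	/* Starting at qid would also list the in-use id; qid + 1 walks
	 * only the genuinely spare ids (9, 10, 11) of the block.
	 */
	for (i = qid + 1; i & qpmask; i++)
		printf("free spare qid %u\n", i);

	return 0;
}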
34598 diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
34599 index 0e83d4b61e46..2cf102b5abd4 100644
34600 --- a/drivers/infiniband/hw/hfi1/firmware.c
34601 +++ b/drivers/infiniband/hw/hfi1/firmware.c
34602 @@ -1916,6 +1916,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
34603                         dd_dev_err(dd, "%s: Failed CRC check at offset %ld\n",
34604                                    __func__, (ptr -
34605                                    (u32 *)dd->platform_config.data));
34606 +                       ret = -EINVAL;
34607                         goto bail;
34608                 }
34609                 /* Jump the CRC DWORD */
34610 diff --git a/drivers/infiniband/hw/hfi1/ipoib.h b/drivers/infiniband/hw/hfi1/ipoib.h
34611 index f650cac9d424..d30c23b6527a 100644
34612 --- a/drivers/infiniband/hw/hfi1/ipoib.h
34613 +++ b/drivers/infiniband/hw/hfi1/ipoib.h
34614 @@ -52,8 +52,9 @@ union hfi1_ipoib_flow {
34615   * @producer_lock: producer sync lock
34616   * @consumer_lock: consumer sync lock
34617   */
34618 +struct ipoib_txreq;
34619  struct hfi1_ipoib_circ_buf {
34620 -       void **items;
34621 +       struct ipoib_txreq **items;
34622         unsigned long head;
34623         unsigned long tail;
34624         unsigned long max_items;
34625 diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
34626 index edd4eeac8dd1..cdc26ee3cf52 100644
34627 --- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
34628 +++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
34629 @@ -702,14 +702,14 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
34631         priv->tx_napis = kcalloc_node(dev->num_tx_queues,
34632                                       sizeof(struct napi_struct),
34633 -                                     GFP_ATOMIC,
34634 +                                     GFP_KERNEL,
34635                                       priv->dd->node);
34636         if (!priv->tx_napis)
34637                 goto free_txreq_cache;
34639         priv->txqs = kcalloc_node(dev->num_tx_queues,
34640                                   sizeof(struct hfi1_ipoib_txq),
34641 -                                 GFP_ATOMIC,
34642 +                                 GFP_KERNEL,
34643                                   priv->dd->node);
34644         if (!priv->txqs)
34645                 goto free_tx_napis;
34646 @@ -741,9 +741,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
34647                                              priv->dd->node);
34649                 txq->tx_ring.items =
34650 -                       vzalloc_node(array_size(tx_ring_size,
34651 -                                               sizeof(struct ipoib_txreq)),
34652 -                                    priv->dd->node);
34653 +                       kcalloc_node(tx_ring_size,
34654 +                                    sizeof(struct ipoib_txreq *),
34655 +                                    GFP_KERNEL, priv->dd->node);
34656                 if (!txq->tx_ring.items)
34657                         goto free_txqs;
34659 @@ -764,7 +764,7 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
34660                 struct hfi1_ipoib_txq *txq = &priv->txqs[i];
34662                 netif_napi_del(txq->napi);
34663 -               vfree(txq->tx_ring.items);
34664 +               kfree(txq->tx_ring.items);
34665         }
34667         kfree(priv->txqs);
34668 @@ -817,7 +817,7 @@ void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
34669                 hfi1_ipoib_drain_tx_list(txq);
34670                 netif_napi_del(txq->napi);
34671                 (void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
34672 -               vfree(txq->tx_ring.items);
34673 +               kfree(txq->tx_ring.items);
34674         }
34676         kfree(priv->txqs);
34677 diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
34678 index f3fb28e3d5d7..d213f65d4cdd 100644
34679 --- a/drivers/infiniband/hw/hfi1/mmu_rb.c
34680 +++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
34681 @@ -89,7 +89,7 @@ int hfi1_mmu_rb_register(void *ops_arg,
34682         struct mmu_rb_handler *h;
34683         int ret;
34685 -       h = kmalloc(sizeof(*h), GFP_KERNEL);
34686 +       h = kzalloc(sizeof(*h), GFP_KERNEL);
34687         if (!h)
34688                 return -ENOMEM;
34690 diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
34691 index ce26f97b2ca2..ad3cee54140e 100644
34692 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
34693 +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
34694 @@ -5068,6 +5068,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
34695         qp_attr->cur_qp_state = qp_attr->qp_state;
34696         qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
34697         qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
34698 +       qp_attr->cap.max_inline_data = hr_qp->max_inline_data;
34700         if (!ibqp->uobject) {
34701                 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
34702 diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c
34703 index 53e5cd1a2bd6..146a4148219b 100644
34704 --- a/drivers/infiniband/hw/i40iw/i40iw_pble.c
34705 +++ b/drivers/infiniband/hw/i40iw/i40iw_pble.c
34706 @@ -393,12 +393,9 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
34707         i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n",
34708                     pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
34709         pble_rsrc->unallocated_pble -= (chunk->size >> 3);
34710 -       list_add(&chunk->list, &pble_rsrc->pinfo.clist);
34711         sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ?
34712                         sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa;
34713 -       if (sd_entry->valid)
34714 -               return 0;
34715 -       if (dev->is_pf) {
34716 +       if (dev->is_pf && !sd_entry->valid) {
34717                 ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id,
34718                                             sd_reg_val, idx->sd_idx,
34719                                             sd_entry->entry_type, true);
34720 @@ -409,6 +406,7 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
34721         }
34723         sd_entry->valid = true;
34724 +       list_add(&chunk->list, &pble_rsrc->pinfo.clist);
34725         return 0;
34726   error:
34727         kfree(chunk);
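
The i40iw_pble.c reordering fixes a lifetime bug: add_pble_pool() used to list_add() the chunk before programming the SD entry, so the error path could kfree() a chunk that was already reachable on pinfo.clist. The fix publishes the chunk as the last step and folds the is_pf and sd_entry->valid tests into one condition. A hedged sketch of the publish-after-init shape:

#include <linux/slab.h>
#include <linux/list.h>

struct demo_chunk {
	struct list_head list;
	unsigned int size;
};

/* Publish-after-init: the chunk becomes reachable on the shared
 * list only once every fallible step has succeeded, so the error
 * path may still kfree() it without leaving a dangling entry. */
static int demo_add_chunk(struct list_head *pool, struct demo_chunk *chunk,
			  int (*program_hw)(struct demo_chunk *))
{
	int ret = program_hw(chunk);

	if (ret) {
		kfree(chunk);		/* never linked, safe to free */
		return ret;
	}
	list_add(&chunk->list, pool);	/* final step: make it visible */
	return 0;
}
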
34728 diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
34729 index 07b8350929cd..81276b4247f8 100644
34730 --- a/drivers/infiniband/hw/mlx5/devx.c
34731 +++ b/drivers/infiniband/hw/mlx5/devx.c
34732 @@ -630,9 +630,8 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
34733         case UVERBS_OBJECT_QP:
34734         {
34735                 struct mlx5_ib_qp *qp = to_mqp(uobj->object);
34736 -               enum ib_qp_type qp_type = qp->ibqp.qp_type;
34738 -               if (qp_type == IB_QPT_RAW_PACKET ||
34739 +               if (qp->type == IB_QPT_RAW_PACKET ||
34740                     (qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
34741                         struct mlx5_ib_raw_packet_qp *raw_packet_qp =
34742                                                          &qp->raw_packet_qp;
34743 @@ -649,10 +648,9 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
34744                                                sq->tisn) == obj_id);
34745                 }
34747 -               if (qp_type == MLX5_IB_QPT_DCT)
34748 +               if (qp->type == MLX5_IB_QPT_DCT)
34749                         return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
34750                                               qp->dct.mdct.mqp.qpn) == obj_id;
34752                 return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
34753                                       qp->ibqp.qp_num) == obj_id;
34754         }
34755 diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
34756 index 25da0b05b4e2..f0af3f1ae039 100644
34757 --- a/drivers/infiniband/hw/mlx5/fs.c
34758 +++ b/drivers/infiniband/hw/mlx5/fs.c
34759 @@ -1528,8 +1528,8 @@ static struct mlx5_ib_flow_handler *raw_fs_rule_add(
34760                 dst_num++;
34761         }
34763 -       handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher,
34764 -                                       flow_context, flow_act,
34765 +       handler = _create_raw_flow_rule(dev, ft_prio, dst_num ? dst : NULL,
34766 +                                       fs_matcher, flow_context, flow_act,
34767                                         cmd_in, inlen, dst_num);
34769         if (IS_ERR(handler)) {
34770 @@ -1885,8 +1885,9 @@ static int get_dests(struct uverbs_attr_bundle *attrs,
34771                 else
34772                         *dest_id = mqp->raw_packet_qp.rq.tirn;
34773                 *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
34774 -       } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
34775 -                  fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
34776 +       } else if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
34777 +                   fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) &&
34778 +                  !(*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)) {
34779                 *dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
34780         }
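
Two related fs.c fixes share one contract: raw_fs_rule_add() now passes NULL instead of a pointer to an unused destination array when dst_num is zero, and get_dests() stops forcing a port destination onto egress and RDMA-TX rules created with the DROP flag. In both cases a zero destination count must travel with a NULL table. A small sketch under that assumed contract:

#include <linux/errno.h>

struct demo_dest {
	int id;
};

/* Callee contract: dst may be NULL exactly when dst_num == 0. */
static int demo_add_rule(const struct demo_dest *dst, unsigned int dst_num)
{
	if (!dst_num)
		return dst ? -EINVAL : 0;	/* drop / no-forward rule */
	return dst ? 0 : -EINVAL;
}

static int demo_build_and_add(struct demo_dest *dst, unsigned int dst_num)
{
	/* Mirrors the hunk above: never pass a pointer to an array
	 * that logically contains nothing. */
	return demo_add_rule(dst_num ? dst : NULL, dst_num);
}
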
34782 diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
34783 index 0d69a697d75f..59ffbbdda317 100644
34784 --- a/drivers/infiniband/hw/mlx5/main.c
34785 +++ b/drivers/infiniband/hw/mlx5/main.c
34786 @@ -499,7 +499,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
34787         translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
34788                                  &props->active_width, ext);
34790 -       if (!dev->is_rep && mlx5_is_roce_enabled(mdev)) {
34791 +       if (!dev->is_rep && dev->mdev->roce.roce_en) {
34792                 u16 qkey_viol_cntr;
34794                 props->port_cap_flags |= IB_PORT_CM_SUP;
34795 @@ -4174,7 +4174,7 @@ static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
34797                 /* Register only for native ports */
34798                 err = mlx5_add_netdev_notifier(dev, port_num);
34799 -               if (err || dev->is_rep || !mlx5_is_roce_enabled(mdev))
34800 +               if (err || dev->is_rep || !mlx5_is_roce_init_enabled(mdev))
34801                         /*
34802                          * We don't enable ETH interface for
34803                          * 1. IB representors
34804 @@ -4655,6 +4655,7 @@ static int mlx5r_mp_probe(struct auxiliary_device *adev,
34806                 if (bound) {
34807                         rdma_roce_rescan_device(&dev->ib_dev);
34808 +                       mpi->ibdev->ib_active = true;
34809                         break;
34810                 }
34811         }
34812 @@ -4711,7 +4712,7 @@ static int mlx5r_probe(struct auxiliary_device *adev,
34813         dev->mdev = mdev;
34814         dev->num_ports = num_ports;
34816 -       if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_enabled(mdev))
34817 +       if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_init_enabled(mdev))
34818                 profile = &raw_eth_profile;
34819         else
34820                 profile = &pf_profile;
34821 diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
34822 index 88cc26e008fc..b085c02b53d0 100644
34823 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
34824 +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
34825 @@ -547,11 +547,6 @@ static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
34826         return container_of(wr, struct mlx5_umr_wr, wr);
34829 -struct mlx5_shared_mr_info {
34830 -       int mr_id;
34831 -       struct ib_umem          *umem;
34832 -};
34833 -
34834  enum mlx5_ib_cq_pr_flags {
34835         MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD = 1 << 0,
34836  };
34837 @@ -654,47 +649,69 @@ struct mlx5_ib_dm {
34838         atomic64_add(value, &((mr)->odp_stats.counter_name))
34840  struct mlx5_ib_mr {
34841 -       struct ib_mr            ibmr;
34842 -       void                    *descs;
34843 -       dma_addr_t              desc_map;
34844 -       int                     ndescs;
34845 -       int                     data_length;
34846 -       int                     meta_ndescs;
34847 -       int                     meta_length;
34848 -       int                     max_descs;
34849 -       int                     desc_size;
34850 -       int                     access_mode;
34851 -       unsigned int            page_shift;
34852 -       struct mlx5_core_mkey   mmkey;
34853 -       struct ib_umem         *umem;
34854 -       struct mlx5_shared_mr_info      *smr_info;
34855 -       struct list_head        list;
34856 -       struct mlx5_cache_ent  *cache_ent;
34857 -       u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
34858 -       struct mlx5_core_sig_ctx    *sig;
34859 -       void                    *descs_alloc;
34860 -       int                     access_flags; /* Needed for rereg MR */
34862 -       struct mlx5_ib_mr      *parent;
34863 -       /* Needed for IB_MR_TYPE_INTEGRITY */
34864 -       struct mlx5_ib_mr      *pi_mr;
34865 -       struct mlx5_ib_mr      *klm_mr;
34866 -       struct mlx5_ib_mr      *mtt_mr;
34867 -       u64                     data_iova;
34868 -       u64                     pi_iova;
34870 -       /* For ODP and implicit */
34871 -       struct xarray           implicit_children;
34872 -       union {
34873 -               struct list_head elm;
34874 -               struct work_struct work;
34875 -       } odp_destroy;
34876 -       struct ib_odp_counters  odp_stats;
34877 -       bool                    is_odp_implicit;
34878 +       struct ib_mr ibmr;
34879 +       struct mlx5_core_mkey mmkey;
34881 -       struct mlx5_async_work  cb_work;
34882 +       /* User MR data */
34883 +       struct mlx5_cache_ent *cache_ent;
34884 +       struct ib_umem *umem;
34886 +       /* This is zero'd when the MR is allocated */
34887 +       struct {
34888 +               /* Used only while the MR is in the cache */
34889 +               struct {
34890 +                       u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
34891 +                       struct mlx5_async_work cb_work;
34892 +                       /* Cache list element */
34893 +                       struct list_head list;
34894 +               };
34896 +               /* Used only by kernel MRs (umem == NULL) */
34897 +               struct {
34898 +                       void *descs;
34899 +                       void *descs_alloc;
34900 +                       dma_addr_t desc_map;
34901 +                       int max_descs;
34902 +                       int ndescs;
34903 +                       int desc_size;
34904 +                       int access_mode;
34906 +                       /* For Kernel IB_MR_TYPE_INTEGRITY */
34907 +                       struct mlx5_core_sig_ctx *sig;
34908 +                       struct mlx5_ib_mr *pi_mr;
34909 +                       struct mlx5_ib_mr *klm_mr;
34910 +                       struct mlx5_ib_mr *mtt_mr;
34911 +                       u64 data_iova;
34912 +                       u64 pi_iova;
34913 +                       int meta_ndescs;
34914 +                       int meta_length;
34915 +                       int data_length;
34916 +               };
34918 +               /* Used only by User MRs (umem != NULL) */
34919 +               struct {
34920 +                       unsigned int page_shift;
34921 +                       /* Current access_flags */
34922 +                       int access_flags;
34924 +                       /* For User ODP */
34925 +                       struct mlx5_ib_mr *parent;
34926 +                       struct xarray implicit_children;
34927 +                       union {
34928 +                               struct work_struct work;
34929 +                       } odp_destroy;
34930 +                       struct ib_odp_counters odp_stats;
34931 +                       bool is_odp_implicit;
34932 +               };
34933 +       };
34934  };
34936 +/* Zero the fields in the mr that are variant depending on usage */
34937 +static inline void mlx5_clear_mr(struct mlx5_ib_mr *mr)
34938 +{
34939 +       memset(mr->out, 0, sizeof(*mr) - offsetof(struct mlx5_ib_mr, out));
34940 +}
34941 +
34942  static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
34944         return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
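
The struct mlx5_ib_mr reorganisation above gathers the mutually exclusive cache, kernel and user fields into anonymous structs inside one anonymous union, and mlx5_clear_mr() wipes everything from the first variant member (out) to the end of the struct with a single memset. The idiom generalises to any object with a stable header and a per-use tail; a sketch with hypothetical names:

#include <linux/string.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct demo_obj {
	int id;				/* stable across reuse */

	/* Everything from here down is per-use state. The roles are
	 * mutually exclusive, so they can share storage. */
	union {
		struct {		/* role A */
			int a_count;
			void *a_buf;
		};
		struct {		/* role B */
			u64 b_iova;
		};
	};
};

/* One memset covers the whole variant tail, and stays correct when
 * fields are later added to either role. */
static inline void demo_obj_clear(struct demo_obj *obj)
{
	memset(&obj->a_count, 0,
	       sizeof(*obj) - offsetof(struct demo_obj, a_count));
}
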
34945 diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
34946 index db05b0e0a8d7..ea8f068a6da3 100644
34947 --- a/drivers/infiniband/hw/mlx5/mr.c
34948 +++ b/drivers/infiniband/hw/mlx5/mr.c
34949 @@ -590,6 +590,8 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
34950                 ent->available_mrs--;
34951                 queue_adjust_cache_locked(ent);
34952                 spin_unlock_irq(&ent->lock);
34954 +               mlx5_clear_mr(mr);
34955         }
34956         mr->access_flags = access_flags;
34957         return mr;
34958 @@ -615,16 +617,14 @@ static struct mlx5_ib_mr *get_cache_mr(struct mlx5_cache_ent *req_ent)
34959                         ent->available_mrs--;
34960                         queue_adjust_cache_locked(ent);
34961                         spin_unlock_irq(&ent->lock);
34962 -                       break;
34963 +                       mlx5_clear_mr(mr);
34964 +                       return mr;
34965                 }
34966                 queue_adjust_cache_locked(ent);
34967                 spin_unlock_irq(&ent->lock);
34968         }
34970 -       if (!mr)
34971 -               req_ent->miss++;
34973 -       return mr;
34974 +       req_ent->miss++;
34975 +       return NULL;
34978  static void detach_mr_from_cache(struct mlx5_ib_mr *mr)
34979 @@ -993,8 +993,6 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
34981         mr->ibmr.pd = pd;
34982         mr->umem = umem;
34983 -       mr->access_flags = access_flags;
34984 -       mr->desc_size = sizeof(struct mlx5_mtt);
34985         mr->mmkey.iova = iova;
34986         mr->mmkey.size = umem->length;
34987         mr->mmkey.pd = to_mpd(pd)->pdn;
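
mr.c then applies mlx5_clear_mr() on every cache hit, in both mlx5_mr_cache_alloc() and get_cache_mr(), so no state from an MR's previous life leaks to the next owner; that is also why alloc_cacheable_mr() can drop its manual access_flags and desc_size writes. get_cache_mr() additionally counts a miss only on the path that really returns NULL. Clear-on-hit in miniature, with hypothetical names:

#include <linux/list.h>

struct demo_cached {
	struct list_head node;	/* cache linkage */
	int scratch;		/* per-use state from the last owner */
};

static struct demo_cached *demo_cache_get(struct list_head *cache)
{
	struct demo_cached *obj =
		list_first_entry_or_null(cache, struct demo_cached, node);

	if (!obj)
		return NULL;		/* miss: caller allocates fresh */
	list_del(&obj->node);
	obj->scratch = 0;		/* scrub stale state on every hit */
	return obj;
}
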
34988 diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
34989 index b103555b1f5d..d98755e78362 100644
34990 --- a/drivers/infiniband/hw/mlx5/odp.c
34991 +++ b/drivers/infiniband/hw/mlx5/odp.c
34992 @@ -227,7 +227,6 @@ static void free_implicit_child_mr(struct mlx5_ib_mr *mr, bool need_imr_xlt)
34994         dma_fence_odp_mr(mr);
34996 -       mr->parent = NULL;
34997         mlx5_mr_cache_free(mr_to_mdev(mr), mr);
34998         ib_umem_odp_release(odp);
35000 diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
35001 index f5a52a6fae43..843f9e7fe96f 100644
35002 --- a/drivers/infiniband/hw/mlx5/qp.c
35003 +++ b/drivers/infiniband/hw/mlx5/qp.c
35004 @@ -3146,6 +3146,19 @@ enum {
35005         MLX5_PATH_FLAG_COUNTER  = 1 << 2,
35006  };
35008 +static int mlx5_to_ib_rate_map(u8 rate)
35009 +{
35010 +       static const int rates[] = { IB_RATE_PORT_CURRENT, IB_RATE_56_GBPS,
35011 +                                    IB_RATE_25_GBPS,      IB_RATE_100_GBPS,
35012 +                                    IB_RATE_200_GBPS,     IB_RATE_50_GBPS,
35013 +                                    IB_RATE_400_GBPS };
35014 +
35015 +       if (rate < ARRAY_SIZE(rates))
35016 +               return rates[rate];
35017 +
35018 +       return rate - MLX5_STAT_RATE_OFFSET;
35019 +}
35020 +
35021  static int ib_to_mlx5_rate_map(u8 rate)
35023         switch (rate) {
35024 @@ -4485,7 +4498,7 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
35025         rdma_ah_set_path_bits(ah_attr, MLX5_GET(ads, path, mlid));
35027         static_rate = MLX5_GET(ads, path, stat_rate);
35028 -       rdma_ah_set_static_rate(ah_attr, static_rate ? static_rate - 5 : 0);
35029 +       rdma_ah_set_static_rate(ah_attr, mlx5_to_ib_rate_map(static_rate));
35030         if (MLX5_GET(ads, path, grh) ||
35031             ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
35032                 rdma_ah_set_grh(ah_attr, NULL, MLX5_GET(ads, path, flow_label),
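
The qp.c decode fix: the old expression static_rate ? static_rate - 5 : 0 treated every hardware rate code as linear, but codes below MLX5_STAT_RATE_OFFSET are discrete rates (56, 25, 100, 200, 50 and 400 Gbps), so mlx5_to_ib_rate_map() looks them up in a bounds-checked table and applies the offset only above it. The same idiom, sketched with made-up values:

#include <linux/kernel.h>	/* ARRAY_SIZE */
#include <linux/types.h>

#define DEMO_RATE_OFFSET 5	/* stand-in for MLX5_STAT_RATE_OFFSET */

static int demo_decode_rate(u8 hw_rate)
{
	/* Low codes are discrete rates; decode them by table. */
	static const int gbps[] = { 0 /* port current */, 56, 25, 100 };

	if (hw_rate < ARRAY_SIZE(gbps))
		return gbps[hw_rate];
	/* Codes at or above the offset really are linear, as the
	 * old arithmetic assumed for everything. */
	return hw_rate - DEMO_RATE_OFFSET;
}
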
35033 diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
35034 index c4bc58736e48..1715fbe0719d 100644
35035 --- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
35036 +++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
35037 @@ -636,8 +636,10 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35038         memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN);
35040         if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
35041 -                            &qp->iwarp_cm_flags))
35042 +                            &qp->iwarp_cm_flags)) {
35043 +               rc = -ENODEV;
35044                 goto err; /* QP already being destroyed */
35045 +       }
35047         rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
35048         if (rc) {
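
The qedr_iw_cm.c fix is error-code discipline in goto-style unwinding: the QEDR_IWARP_CM_WAIT_FOR_CONNECT branch jumped to err without assigning rc, so connecting to a QP that was being destroyed could report stale success. A sketch of the rule that every branch jumping to the unwind label sets rc first, with hypothetical names:

#include <linux/bitops.h>
#include <linux/errno.h>

struct demo_conn {
	unsigned long flags;
};

static int demo_hw_connect(struct demo_conn *c)
{
	return 0;	/* stand-in for the real firmware call */
}

static int demo_connect(struct demo_conn *c)
{
	int rc;

	if (test_and_set_bit(0, &c->flags)) {
		rc = -ENODEV;	/* the missing assignment the fix adds */
		goto err;
	}
	rc = demo_hw_connect(c);
	if (rc)
		goto err;
	return 0;
err:
	/* unwind would go here; rc is guaranteed meaningful */
	return rc;
}
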
35049 diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
35050 index df0d173d6acb..da2e867a1ed9 100644
35051 --- a/drivers/infiniband/sw/rxe/rxe_av.c
35052 +++ b/drivers/infiniband/sw/rxe/rxe_av.c
35053 @@ -88,7 +88,7 @@ void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr)
35054                 type = RXE_NETWORK_TYPE_IPV4;
35055                 break;
35056         case RDMA_NETWORK_IPV6:
35057 -               type = RXE_NETWORK_TYPE_IPV4;
35058 +               type = RXE_NETWORK_TYPE_IPV6;
35059                 break;
35060         default:
35061                 /* not reached - checked in rxe_av_chk_attr */
35062 diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
35063 index 17a361b8dbb1..06b556169867 100644
35064 --- a/drivers/infiniband/sw/rxe/rxe_comp.c
35065 +++ b/drivers/infiniband/sw/rxe/rxe_comp.c
35066 @@ -345,14 +345,16 @@ static inline enum comp_state do_read(struct rxe_qp *qp,
35068         ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
35069                         &wqe->dma, payload_addr(pkt),
35070 -                       payload_size(pkt), to_mem_obj, NULL);
35071 -       if (ret)
35072 +                       payload_size(pkt), to_mr_obj, NULL);
35073 +       if (ret) {
35074 +               wqe->status = IB_WC_LOC_PROT_ERR;
35075                 return COMPST_ERROR;
35076 +       }
35078         if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
35079                 return COMPST_COMP_ACK;
35080 -       else
35081 -               return COMPST_UPDATE_COMP;
35083 +       return COMPST_UPDATE_COMP;
35086  static inline enum comp_state do_atomic(struct rxe_qp *qp,
35087 @@ -365,11 +367,13 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp,
35089         ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
35090                         &wqe->dma, &atomic_orig,
35091 -                       sizeof(u64), to_mem_obj, NULL);
35092 -       if (ret)
35093 +                       sizeof(u64), to_mr_obj, NULL);
35094 +       if (ret) {
35095 +               wqe->status = IB_WC_LOC_PROT_ERR;
35096                 return COMPST_ERROR;
35097 -       else
35098 -               return COMPST_COMP_ACK;
35099 +       }
35101 +       return COMPST_COMP_ACK;
35104  static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
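
Beyond the to_mem_obj to to_mr_obj rename, the rxe_comp.c hunks make do_read() and do_atomic() record IB_WC_LOC_PROT_ERR in the WQE before returning COMPST_ERROR, so the completion a user eventually sees carries the actual cause rather than a stale status. In outline, with hypothetical names:

enum demo_comp_state { DEMO_COMP_ACK, DEMO_COMP_ERROR };

enum demo_wc_status { DEMO_WC_SUCCESS, DEMO_WC_LOC_PROT_ERR };

struct demo_wqe {
	enum demo_wc_status status;
};

/* Record the specific failure on the WQE before returning the
 * generic error state; the state machine only propagates the
 * latter, so this is the one place the cause can be captured. */
static enum demo_comp_state demo_copy_done(struct demo_wqe *wqe, int err)
{
	if (err) {
		wqe->status = DEMO_WC_LOC_PROT_ERR;
		return DEMO_COMP_ERROR;
	}
	return DEMO_COMP_ACK;
}
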
35105 diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
35106 index 0d758760b9ae..08e21fa9ec97 100644
35107 --- a/drivers/infiniband/sw/rxe/rxe_loc.h
35108 +++ b/drivers/infiniband/sw/rxe/rxe_loc.h
35109 @@ -72,40 +72,37 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
35111  /* rxe_mr.c */
35112  enum copy_direction {
35113 -       to_mem_obj,
35114 -       from_mem_obj,
35115 +       to_mr_obj,
35116 +       from_mr_obj,
35117  };
35119 -void rxe_mem_init_dma(struct rxe_pd *pd,
35120 -                     int access, struct rxe_mem *mem);
35121 +void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr);
35123 -int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
35124 -                     u64 length, u64 iova, int access, struct ib_udata *udata,
35125 -                     struct rxe_mem *mr);
35126 +int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
35127 +                    int access, struct ib_udata *udata, struct rxe_mr *mr);
35129 -int rxe_mem_init_fast(struct rxe_pd *pd,
35130 -                     int max_pages, struct rxe_mem *mem);
35131 +int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr);
35133 -int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
35134 -                int length, enum copy_direction dir, u32 *crcp);
35135 +int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
35136 +               enum copy_direction dir, u32 *crcp);
35138  int copy_data(struct rxe_pd *pd, int access,
35139               struct rxe_dma_info *dma, void *addr, int length,
35140               enum copy_direction dir, u32 *crcp);
35142 -void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length);
35143 +void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length);
35145  enum lookup_type {
35146         lookup_local,
35147         lookup_remote,
35148  };
35150 -struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
35151 -                          enum lookup_type type);
35152 +struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
35153 +                        enum lookup_type type);
35155 -int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);
35156 +int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
35158 -void rxe_mem_cleanup(struct rxe_pool_entry *arg);
35159 +void rxe_mr_cleanup(struct rxe_pool_entry *arg);
35161  int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
35163 diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
35164 index 6e8c41567ba0..9f63947bab12 100644
35165 --- a/drivers/infiniband/sw/rxe/rxe_mr.c
35166 +++ b/drivers/infiniband/sw/rxe/rxe_mr.c
35167 @@ -24,16 +24,15 @@ static u8 rxe_get_key(void)
35168         return key;
35171 -int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
35172 +int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
35174 -       switch (mem->type) {
35175 -       case RXE_MEM_TYPE_DMA:
35176 +       switch (mr->type) {
35177 +       case RXE_MR_TYPE_DMA:
35178                 return 0;
35180 -       case RXE_MEM_TYPE_MR:
35181 -               if (iova < mem->iova ||
35182 -                   length > mem->length ||
35183 -                   iova > mem->iova + mem->length - length)
35184 +       case RXE_MR_TYPE_MR:
35185 +               if (iova < mr->iova || length > mr->length ||
35186 +                   iova > mr->iova + mr->length - length)
35187                         return -EFAULT;
35188                 return 0;
35190 @@ -46,85 +45,83 @@ int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
35191                                 | IB_ACCESS_REMOTE_WRITE        \
35192                                 | IB_ACCESS_REMOTE_ATOMIC)
35194 -static void rxe_mem_init(int access, struct rxe_mem *mem)
35195 +static void rxe_mr_init(int access, struct rxe_mr *mr)
35197 -       u32 lkey = mem->pelem.index << 8 | rxe_get_key();
35198 +       u32 lkey = mr->pelem.index << 8 | rxe_get_key();
35199         u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;
35201 -       mem->ibmr.lkey          = lkey;
35202 -       mem->ibmr.rkey          = rkey;
35203 -       mem->state              = RXE_MEM_STATE_INVALID;
35204 -       mem->type               = RXE_MEM_TYPE_NONE;
35205 -       mem->map_shift          = ilog2(RXE_BUF_PER_MAP);
35206 +       mr->ibmr.lkey = lkey;
35207 +       mr->ibmr.rkey = rkey;
35208 +       mr->state = RXE_MR_STATE_INVALID;
35209 +       mr->type = RXE_MR_TYPE_NONE;
35210 +       mr->map_shift = ilog2(RXE_BUF_PER_MAP);
35213 -void rxe_mem_cleanup(struct rxe_pool_entry *arg)
35214 +void rxe_mr_cleanup(struct rxe_pool_entry *arg)
35216 -       struct rxe_mem *mem = container_of(arg, typeof(*mem), pelem);
35217 +       struct rxe_mr *mr = container_of(arg, typeof(*mr), pelem);
35218         int i;
35220 -       ib_umem_release(mem->umem);
35221 +       ib_umem_release(mr->umem);
35223 -       if (mem->map) {
35224 -               for (i = 0; i < mem->num_map; i++)
35225 -                       kfree(mem->map[i]);
35226 +       if (mr->map) {
35227 +               for (i = 0; i < mr->num_map; i++)
35228 +                       kfree(mr->map[i]);
35230 -               kfree(mem->map);
35231 +               kfree(mr->map);
35232         }
35235 -static int rxe_mem_alloc(struct rxe_mem *mem, int num_buf)
35236 +static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
35238         int i;
35239         int num_map;
35240 -       struct rxe_map **map = mem->map;
35241 +       struct rxe_map **map = mr->map;
35243         num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
35245 -       mem->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
35246 -       if (!mem->map)
35247 +       mr->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
35248 +       if (!mr->map)
35249                 goto err1;
35251         for (i = 0; i < num_map; i++) {
35252 -               mem->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
35253 -               if (!mem->map[i])
35254 +               mr->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
35255 +               if (!mr->map[i])
35256                         goto err2;
35257         }
35259         BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));
35261 -       mem->map_shift  = ilog2(RXE_BUF_PER_MAP);
35262 -       mem->map_mask   = RXE_BUF_PER_MAP - 1;
35263 +       mr->map_shift = ilog2(RXE_BUF_PER_MAP);
35264 +       mr->map_mask = RXE_BUF_PER_MAP - 1;
35266 -       mem->num_buf = num_buf;
35267 -       mem->num_map = num_map;
35268 -       mem->max_buf = num_map * RXE_BUF_PER_MAP;
35269 +       mr->num_buf = num_buf;
35270 +       mr->num_map = num_map;
35271 +       mr->max_buf = num_map * RXE_BUF_PER_MAP;
35273         return 0;
35275  err2:
35276         for (i--; i >= 0; i--)
35277 -               kfree(mem->map[i]);
35278 +               kfree(mr->map[i]);
35280 -       kfree(mem->map);
35281 +       kfree(mr->map);
35282  err1:
35283         return -ENOMEM;
35286 -void rxe_mem_init_dma(struct rxe_pd *pd,
35287 -                     int access, struct rxe_mem *mem)
35288 +void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
35290 -       rxe_mem_init(access, mem);
35291 +       rxe_mr_init(access, mr);
35293 -       mem->ibmr.pd            = &pd->ibpd;
35294 -       mem->access             = access;
35295 -       mem->state              = RXE_MEM_STATE_VALID;
35296 -       mem->type               = RXE_MEM_TYPE_DMA;
35297 +       mr->ibmr.pd = &pd->ibpd;
35298 +       mr->access = access;
35299 +       mr->state = RXE_MR_STATE_VALID;
35300 +       mr->type = RXE_MR_TYPE_DMA;
35303 -int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
35304 -                     u64 length, u64 iova, int access, struct ib_udata *udata,
35305 -                     struct rxe_mem *mem)
35306 +int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
35307 +                    int access, struct ib_udata *udata, struct rxe_mr *mr)
35309         struct rxe_map          **map;
35310         struct rxe_phys_buf     *buf = NULL;
35311 @@ -142,23 +139,23 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
35312                 goto err1;
35313         }
35315 -       mem->umem = umem;
35316 +       mr->umem = umem;
35317         num_buf = ib_umem_num_pages(umem);
35319 -       rxe_mem_init(access, mem);
35320 +       rxe_mr_init(access, mr);
35322 -       err = rxe_mem_alloc(mem, num_buf);
35323 +       err = rxe_mr_alloc(mr, num_buf);
35324         if (err) {
35325 -               pr_warn("err %d from rxe_mem_alloc\n", err);
35326 +               pr_warn("err %d from rxe_mr_alloc\n", err);
35327                 ib_umem_release(umem);
35328                 goto err1;
35329         }
35331 -       mem->page_shift         = PAGE_SHIFT;
35332 -       mem->page_mask = PAGE_SIZE - 1;
35333 +       mr->page_shift = PAGE_SHIFT;
35334 +       mr->page_mask = PAGE_SIZE - 1;
35336         num_buf                 = 0;
35337 -       map                     = mem->map;
35338 +       map = mr->map;
35339         if (length > 0) {
35340                 buf = map[0]->buf;
35342 @@ -185,15 +182,15 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
35343                 }
35344         }
35346 -       mem->ibmr.pd            = &pd->ibpd;
35347 -       mem->umem               = umem;
35348 -       mem->access             = access;
35349 -       mem->length             = length;
35350 -       mem->iova               = iova;
35351 -       mem->va                 = start;
35352 -       mem->offset             = ib_umem_offset(umem);
35353 -       mem->state              = RXE_MEM_STATE_VALID;
35354 -       mem->type               = RXE_MEM_TYPE_MR;
35355 +       mr->ibmr.pd = &pd->ibpd;
35356 +       mr->umem = umem;
35357 +       mr->access = access;
35358 +       mr->length = length;
35359 +       mr->iova = iova;
35360 +       mr->va = start;
35361 +       mr->offset = ib_umem_offset(umem);
35362 +       mr->state = RXE_MR_STATE_VALID;
35363 +       mr->type = RXE_MR_TYPE_MR;
35365         return 0;
35367 @@ -201,24 +198,23 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
35368         return err;
35371 -int rxe_mem_init_fast(struct rxe_pd *pd,
35372 -                     int max_pages, struct rxe_mem *mem)
35373 +int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
35375         int err;
35377 -       rxe_mem_init(0, mem);
35378 +       rxe_mr_init(0, mr);
35380         /* In fastreg, we also set the rkey */
35381 -       mem->ibmr.rkey = mem->ibmr.lkey;
35382 +       mr->ibmr.rkey = mr->ibmr.lkey;
35384 -       err = rxe_mem_alloc(mem, max_pages);
35385 +       err = rxe_mr_alloc(mr, max_pages);
35386         if (err)
35387                 goto err1;
35389 -       mem->ibmr.pd            = &pd->ibpd;
35390 -       mem->max_buf            = max_pages;
35391 -       mem->state              = RXE_MEM_STATE_FREE;
35392 -       mem->type               = RXE_MEM_TYPE_MR;
35393 +       mr->ibmr.pd = &pd->ibpd;
35394 +       mr->max_buf = max_pages;
35395 +       mr->state = RXE_MR_STATE_FREE;
35396 +       mr->type = RXE_MR_TYPE_MR;
35398         return 0;
35400 @@ -226,28 +222,24 @@ int rxe_mem_init_fast(struct rxe_pd *pd,
35401         return err;
35404 -static void lookup_iova(
35405 -       struct rxe_mem  *mem,
35406 -       u64                     iova,
35407 -       int                     *m_out,
35408 -       int                     *n_out,
35409 -       size_t                  *offset_out)
35410 +static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
35411 +                       size_t *offset_out)
35413 -       size_t                  offset = iova - mem->iova + mem->offset;
35414 +       size_t offset = iova - mr->iova + mr->offset;
35415         int                     map_index;
35416         int                     buf_index;
35417         u64                     length;
35419 -       if (likely(mem->page_shift)) {
35420 -               *offset_out = offset & mem->page_mask;
35421 -               offset >>= mem->page_shift;
35422 -               *n_out = offset & mem->map_mask;
35423 -               *m_out = offset >> mem->map_shift;
35424 +       if (likely(mr->page_shift)) {
35425 +               *offset_out = offset & mr->page_mask;
35426 +               offset >>= mr->page_shift;
35427 +               *n_out = offset & mr->map_mask;
35428 +               *m_out = offset >> mr->map_shift;
35429         } else {
35430                 map_index = 0;
35431                 buf_index = 0;
35433 -               length = mem->map[map_index]->buf[buf_index].size;
35434 +               length = mr->map[map_index]->buf[buf_index].size;
35436                 while (offset >= length) {
35437                         offset -= length;
35438 @@ -257,7 +249,7 @@ static void lookup_iova(
35439                                 map_index++;
35440                                 buf_index = 0;
35441                         }
35442 -                       length = mem->map[map_index]->buf[buf_index].size;
35443 +                       length = mr->map[map_index]->buf[buf_index].size;
35444                 }
35446                 *m_out = map_index;
35447 @@ -266,49 +258,49 @@ static void lookup_iova(
35448         }
35451 -void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length)
35452 +void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
35454         size_t offset;
35455         int m, n;
35456         void *addr;
35458 -       if (mem->state != RXE_MEM_STATE_VALID) {
35459 -               pr_warn("mem not in valid state\n");
35460 +       if (mr->state != RXE_MR_STATE_VALID) {
35461 +               pr_warn("mr not in valid state\n");
35462                 addr = NULL;
35463                 goto out;
35464         }
35466 -       if (!mem->map) {
35467 +       if (!mr->map) {
35468                 addr = (void *)(uintptr_t)iova;
35469                 goto out;
35470         }
35472 -       if (mem_check_range(mem, iova, length)) {
35473 +       if (mr_check_range(mr, iova, length)) {
35474                 pr_warn("range violation\n");
35475                 addr = NULL;
35476                 goto out;
35477         }
35479 -       lookup_iova(mem, iova, &m, &n, &offset);
35480 +       lookup_iova(mr, iova, &m, &n, &offset);
35482 -       if (offset + length > mem->map[m]->buf[n].size) {
35483 +       if (offset + length > mr->map[m]->buf[n].size) {
35484                 pr_warn("crosses page boundary\n");
35485                 addr = NULL;
35486                 goto out;
35487         }
35489 -       addr = (void *)(uintptr_t)mem->map[m]->buf[n].addr + offset;
35490 +       addr = (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset;
35492  out:
35493         return addr;
35496  /* copy data from a range (vaddr, vaddr+length-1) to or from
35497 - * a mem object starting at iova. Compute incremental value of
35498 - * crc32 if crcp is not zero. caller must hold a reference to mem
35499 + * a mr object starting at iova. Compute incremental value of
35500 + * crc32 if crcp is not zero. caller must hold a reference to mr
35501   */
35502 -int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
35503 -                enum copy_direction dir, u32 *crcp)
35504 +int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
35505 +               enum copy_direction dir, u32 *crcp)
35507         int                     err;
35508         int                     bytes;
35509 @@ -323,43 +315,41 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
35510         if (length == 0)
35511                 return 0;
35513 -       if (mem->type == RXE_MEM_TYPE_DMA) {
35514 +       if (mr->type == RXE_MR_TYPE_DMA) {
35515                 u8 *src, *dest;
35517 -               src  = (dir == to_mem_obj) ?
35518 -                       addr : ((void *)(uintptr_t)iova);
35519 +               src = (dir == to_mr_obj) ? addr : ((void *)(uintptr_t)iova);
35521 -               dest = (dir == to_mem_obj) ?
35522 -                       ((void *)(uintptr_t)iova) : addr;
35523 +               dest = (dir == to_mr_obj) ? ((void *)(uintptr_t)iova) : addr;
35525                 memcpy(dest, src, length);
35527                 if (crcp)
35528 -                       *crcp = rxe_crc32(to_rdev(mem->ibmr.device),
35529 -                                       *crcp, dest, length);
35530 +                       *crcp = rxe_crc32(to_rdev(mr->ibmr.device), *crcp, dest,
35531 +                                         length);
35533                 return 0;
35534         }
35536 -       WARN_ON_ONCE(!mem->map);
35537 +       WARN_ON_ONCE(!mr->map);
35539 -       err = mem_check_range(mem, iova, length);
35540 +       err = mr_check_range(mr, iova, length);
35541         if (err) {
35542                 err = -EFAULT;
35543                 goto err1;
35544         }
35546 -       lookup_iova(mem, iova, &m, &i, &offset);
35547 +       lookup_iova(mr, iova, &m, &i, &offset);
35549 -       map     = mem->map + m;
35550 +       map = mr->map + m;
35551         buf     = map[0]->buf + i;
35553         while (length > 0) {
35554                 u8 *src, *dest;
35556                 va      = (u8 *)(uintptr_t)buf->addr + offset;
35557 -               src  = (dir == to_mem_obj) ? addr : va;
35558 -               dest = (dir == to_mem_obj) ? va : addr;
35559 +               src = (dir == to_mr_obj) ? addr : va;
35560 +               dest = (dir == to_mr_obj) ? va : addr;
35562                 bytes   = buf->size - offset;
35564 @@ -369,8 +359,8 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
35565                 memcpy(dest, src, bytes);
35567                 if (crcp)
35568 -                       crc = rxe_crc32(to_rdev(mem->ibmr.device),
35569 -                                       crc, dest, bytes);
35570 +                       crc = rxe_crc32(to_rdev(mr->ibmr.device), crc, dest,
35571 +                                       bytes);
35573                 length  -= bytes;
35574                 addr    += bytes;
35575 @@ -411,7 +401,7 @@ int copy_data(
35576         struct rxe_sge          *sge    = &dma->sge[dma->cur_sge];
35577         int                     offset  = dma->sge_offset;
35578         int                     resid   = dma->resid;
35579 -       struct rxe_mem          *mem    = NULL;
35580 +       struct rxe_mr           *mr     = NULL;
35581         u64                     iova;
35582         int                     err;
35584 @@ -424,8 +414,8 @@ int copy_data(
35585         }
35587         if (sge->length && (offset < sge->length)) {
35588 -               mem = lookup_mem(pd, access, sge->lkey, lookup_local);
35589 -               if (!mem) {
35590 +               mr = lookup_mr(pd, access, sge->lkey, lookup_local);
35591 +               if (!mr) {
35592                         err = -EINVAL;
35593                         goto err1;
35594                 }
35595 @@ -435,9 +425,9 @@ int copy_data(
35596                 bytes = length;
35598                 if (offset >= sge->length) {
35599 -                       if (mem) {
35600 -                               rxe_drop_ref(mem);
35601 -                               mem = NULL;
35602 +                       if (mr) {
35603 +                               rxe_drop_ref(mr);
35604 +                               mr = NULL;
35605                         }
35606                         sge++;
35607                         dma->cur_sge++;
35608 @@ -449,9 +439,9 @@ int copy_data(
35609                         }
35611                         if (sge->length) {
35612 -                               mem = lookup_mem(pd, access, sge->lkey,
35613 -                                                lookup_local);
35614 -                               if (!mem) {
35615 +                               mr = lookup_mr(pd, access, sge->lkey,
35616 +                                              lookup_local);
35617 +                               if (!mr) {
35618                                         err = -EINVAL;
35619                                         goto err1;
35620                                 }
35621 @@ -466,7 +456,7 @@ int copy_data(
35622                 if (bytes > 0) {
35623                         iova = sge->addr + offset;
35625 -                       err = rxe_mem_copy(mem, iova, addr, bytes, dir, crcp);
35626 +                       err = rxe_mr_copy(mr, iova, addr, bytes, dir, crcp);
35627                         if (err)
35628                                 goto err2;
35630 @@ -480,14 +470,14 @@ int copy_data(
35631         dma->sge_offset = offset;
35632         dma->resid      = resid;
35634 -       if (mem)
35635 -               rxe_drop_ref(mem);
35636 +       if (mr)
35637 +               rxe_drop_ref(mr);
35639         return 0;
35641  err2:
35642 -       if (mem)
35643 -               rxe_drop_ref(mem);
35644 +       if (mr)
35645 +               rxe_drop_ref(mr);
35646  err1:
35647         return err;
35649 @@ -525,31 +515,30 @@ int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
35650         return 0;
35653 -/* (1) find the mem (mr or mw) corresponding to lkey/rkey
35654 +/* (1) find the mr corresponding to lkey/rkey
35655   *     depending on lookup_type
35656 - * (2) verify that the (qp) pd matches the mem pd
35657 - * (3) verify that the mem can support the requested access
35658 - * (4) verify that mem state is valid
35659 + * (2) verify that the (qp) pd matches the mr pd
35660 + * (3) verify that the mr can support the requested access
35661 + * (4) verify that mr state is valid
35662   */
35663 -struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
35664 -                          enum lookup_type type)
35665 +struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
35666 +                        enum lookup_type type)
35668 -       struct rxe_mem *mem;
35669 +       struct rxe_mr *mr;
35670         struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
35671         int index = key >> 8;
35673 -       mem = rxe_pool_get_index(&rxe->mr_pool, index);
35674 -       if (!mem)
35675 +       mr = rxe_pool_get_index(&rxe->mr_pool, index);
35676 +       if (!mr)
35677                 return NULL;
35679 -       if (unlikely((type == lookup_local && mr_lkey(mem) != key) ||
35680 -                    (type == lookup_remote && mr_rkey(mem) != key) ||
35681 -                    mr_pd(mem) != pd ||
35682 -                    (access && !(access & mem->access)) ||
35683 -                    mem->state != RXE_MEM_STATE_VALID)) {
35684 -               rxe_drop_ref(mem);
35685 -               mem = NULL;
35686 +       if (unlikely((type == lookup_local && mr_lkey(mr) != key) ||
35687 +                    (type == lookup_remote && mr_rkey(mr) != key) ||
35688 +                    mr_pd(mr) != pd || (access && !(access & mr->access)) ||
35689 +                    mr->state != RXE_MR_STATE_VALID)) {
35690 +               rxe_drop_ref(mr);
35691 +               mr = NULL;
35692         }
35694 -       return mem;
35695 +       return mr;
35697 diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
35698 index 307d8986e7c9..d24901f2af3f 100644
35699 --- a/drivers/infiniband/sw/rxe/rxe_pool.c
35700 +++ b/drivers/infiniband/sw/rxe/rxe_pool.c
35701 @@ -8,8 +8,6 @@
35702  #include "rxe_loc.h"
35704  /* info about object pools
35705 - * note that mr and mw share a single index space
35706 - * so that one can map an lkey to the correct type of object
35707   */
35708  struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
35709         [RXE_TYPE_UC] = {
35710 @@ -56,18 +54,18 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
35711         },
35712         [RXE_TYPE_MR] = {
35713                 .name           = "rxe-mr",
35714 -               .size           = sizeof(struct rxe_mem),
35715 -               .elem_offset    = offsetof(struct rxe_mem, pelem),
35716 -               .cleanup        = rxe_mem_cleanup,
35717 +               .size           = sizeof(struct rxe_mr),
35718 +               .elem_offset    = offsetof(struct rxe_mr, pelem),
35719 +               .cleanup        = rxe_mr_cleanup,
35720                 .flags          = RXE_POOL_INDEX,
35721                 .max_index      = RXE_MAX_MR_INDEX,
35722                 .min_index      = RXE_MIN_MR_INDEX,
35723         },
35724         [RXE_TYPE_MW] = {
35725                 .name           = "rxe-mw",
35726 -               .size           = sizeof(struct rxe_mem),
35727 -               .elem_offset    = offsetof(struct rxe_mem, pelem),
35728 -               .flags          = RXE_POOL_INDEX,
35729 +               .size           = sizeof(struct rxe_mw),
35730 +               .elem_offset    = offsetof(struct rxe_mw, pelem),
35731 +               .flags          = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
35732                 .max_index      = RXE_MAX_MW_INDEX,
35733                 .min_index      = RXE_MIN_MW_INDEX,
35734         },
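
rxe_pool.c completes the MR/MW split: each pool type now records the size and pelem offset of its real struct (rxe_mw rather than the old shared rxe_mem), and RXE_POOL_NO_ALLOC marks MW objects as embedded in core-allocated memory via the INIT_RDMA_OBJ_SIZE(ib_mw, ...) entry added to rxe_verbs.c below. A sketch of the per-type descriptor table, with hypothetical types:

#include <linux/stddef.h>

struct demo_pool_entry {
	int index;
};

struct demo_mr {
	long payload;
	struct demo_pool_entry pelem;	/* not at offset 0 */
};

struct demo_mw {
	struct demo_pool_entry pelem;	/* at offset 0 */
	char state;
};

/* Per-type descriptor: real object size plus where the embedded
 * pool element lives, so one pool core serves distinct structs
 * instead of forcing them to share a union. */
static const struct {
	size_t size;
	size_t elem_offset;
} demo_types[] = {
	{ sizeof(struct demo_mr), offsetof(struct demo_mr, pelem) },
	{ sizeof(struct demo_mw), offsetof(struct demo_mw, pelem) },
};

/* Recover the containing object from its pool element. */
static void *demo_obj_from_elem(struct demo_pool_entry *elem, int type)
{
	return (char *)elem - demo_types[type].elem_offset;
}
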
35735 diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
35736 index 34ae957a315c..b0f350d674fd 100644
35737 --- a/drivers/infiniband/sw/rxe/rxe_qp.c
35738 +++ b/drivers/infiniband/sw/rxe/rxe_qp.c
35739 @@ -242,6 +242,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
35740         if (err) {
35741                 vfree(qp->sq.queue->buf);
35742                 kfree(qp->sq.queue);
35743 +               qp->sq.queue = NULL;
35744                 return err;
35745         }
35747 @@ -295,6 +296,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
35748                 if (err) {
35749                         vfree(qp->rq.queue->buf);
35750                         kfree(qp->rq.queue);
35751 +                       qp->rq.queue = NULL;
35752                         return err;
35753                 }
35754         }
35755 @@ -355,6 +357,11 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
35756  err2:
35757         rxe_queue_cleanup(qp->sq.queue);
35758  err1:
35759 +       qp->pd = NULL;
35760 +       qp->rcq = NULL;
35761 +       qp->scq = NULL;
35762 +       qp->srq = NULL;
35764         if (srq)
35765                 rxe_drop_ref(srq);
35766         rxe_drop_ref(scq);
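
The rxe_qp.c hunks clear qp->sq.queue and qp->rq.queue right after freeing them on the init error paths, and NULL out pd, rcq, scq and srq before dropping the refs, so the destroy path that runs after a failed create sees NULL and skips resources that are already gone rather than double-freeing them. Free-then-NULL teardown, sketched with hypothetical names:

#include <linux/slab.h>
#include <linux/vmalloc.h>

struct demo_queue {
	void *buf;
};

struct demo_qp {
	struct demo_queue *queue;
};

/* Safe to call twice: the first teardown clears the pointer, so a
 * later cleanup pass (destroy after a failed create) skips it
 * instead of double-freeing. */
static void demo_queue_teardown(struct demo_qp *qp)
{
	if (!qp->queue)
		return;
	vfree(qp->queue->buf);
	kfree(qp->queue);
	qp->queue = NULL;
}
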
35767 diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
35768 index 889290793d75..3664cdae7e1f 100644
35769 --- a/drivers/infiniband/sw/rxe/rxe_req.c
35770 +++ b/drivers/infiniband/sw/rxe/rxe_req.c
35771 @@ -464,7 +464,7 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
35772                 } else {
35773                         err = copy_data(qp->pd, 0, &wqe->dma,
35774                                         payload_addr(pkt), paylen,
35775 -                                       from_mem_obj,
35776 +                                       from_mr_obj,
35777                                         &crc);
35778                         if (err)
35779                                 return err;
35780 @@ -596,7 +596,7 @@ int rxe_requester(void *arg)
35781         if (wqe->mask & WR_REG_MASK) {
35782                 if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
35783                         struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
35784 -                       struct rxe_mem *rmr;
35785 +                       struct rxe_mr *rmr;
35787                         rmr = rxe_pool_get_index(&rxe->mr_pool,
35788                                                  wqe->wr.ex.invalidate_rkey >> 8);
35789 @@ -607,14 +607,14 @@ int rxe_requester(void *arg)
35790                                 wqe->status = IB_WC_MW_BIND_ERR;
35791                                 goto exit;
35792                         }
35793 -                       rmr->state = RXE_MEM_STATE_FREE;
35794 +                       rmr->state = RXE_MR_STATE_FREE;
35795                         rxe_drop_ref(rmr);
35796                         wqe->state = wqe_state_done;
35797                         wqe->status = IB_WC_SUCCESS;
35798                 } else if (wqe->wr.opcode == IB_WR_REG_MR) {
35799 -                       struct rxe_mem *rmr = to_rmr(wqe->wr.wr.reg.mr);
35800 +                       struct rxe_mr *rmr = to_rmr(wqe->wr.wr.reg.mr);
35802 -                       rmr->state = RXE_MEM_STATE_VALID;
35803 +                       rmr->state = RXE_MR_STATE_VALID;
35804                         rmr->access = wqe->wr.wr.reg.access;
35805                         rmr->ibmr.lkey = wqe->wr.wr.reg.key;
35806                         rmr->ibmr.rkey = wqe->wr.wr.reg.key;
35807 diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
35808 index 142f3d8014d8..8e237b623b31 100644
35809 --- a/drivers/infiniband/sw/rxe/rxe_resp.c
35810 +++ b/drivers/infiniband/sw/rxe/rxe_resp.c
35811 @@ -391,7 +391,7 @@ static enum resp_states check_length(struct rxe_qp *qp,
35812  static enum resp_states check_rkey(struct rxe_qp *qp,
35813                                    struct rxe_pkt_info *pkt)
35815 -       struct rxe_mem *mem = NULL;
35816 +       struct rxe_mr *mr = NULL;
35817         u64 va;
35818         u32 rkey;
35819         u32 resid;
35820 @@ -430,18 +430,18 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
35821         resid   = qp->resp.resid;
35822         pktlen  = payload_size(pkt);
35824 -       mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
35825 -       if (!mem) {
35826 +       mr = lookup_mr(qp->pd, access, rkey, lookup_remote);
35827 +       if (!mr) {
35828                 state = RESPST_ERR_RKEY_VIOLATION;
35829                 goto err;
35830         }
35832 -       if (unlikely(mem->state == RXE_MEM_STATE_FREE)) {
35833 +       if (unlikely(mr->state == RXE_MR_STATE_FREE)) {
35834                 state = RESPST_ERR_RKEY_VIOLATION;
35835                 goto err;
35836         }
35838 -       if (mem_check_range(mem, va, resid)) {
35839 +       if (mr_check_range(mr, va, resid)) {
35840                 state = RESPST_ERR_RKEY_VIOLATION;
35841                 goto err;
35842         }
35843 @@ -469,12 +469,12 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
35845         WARN_ON_ONCE(qp->resp.mr);
35847 -       qp->resp.mr = mem;
35848 +       qp->resp.mr = mr;
35849         return RESPST_EXECUTE;
35851  err:
35852 -       if (mem)
35853 -               rxe_drop_ref(mem);
35854 +       if (mr)
35855 +               rxe_drop_ref(mr);
35856         return state;
35859 @@ -484,7 +484,7 @@ static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
35860         int err;
35862         err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
35863 -                       data_addr, data_len, to_mem_obj, NULL);
35864 +                       data_addr, data_len, to_mr_obj, NULL);
35865         if (unlikely(err))
35866                 return (err == -ENOSPC) ? RESPST_ERR_LENGTH
35867                                         : RESPST_ERR_MALFORMED_WQE;
35868 @@ -499,8 +499,8 @@ static enum resp_states write_data_in(struct rxe_qp *qp,
35869         int     err;
35870         int data_len = payload_size(pkt);
35872 -       err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt),
35873 -                          data_len, to_mem_obj, NULL);
35874 +       err = rxe_mr_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt), data_len,
35875 +                         to_mr_obj, NULL);
35876         if (err) {
35877                 rc = RESPST_ERR_RKEY_VIOLATION;
35878                 goto out;
35879 @@ -522,9 +522,9 @@ static enum resp_states process_atomic(struct rxe_qp *qp,
35880         u64 iova = atmeth_va(pkt);
35881         u64 *vaddr;
35882         enum resp_states ret;
35883 -       struct rxe_mem *mr = qp->resp.mr;
35884 +       struct rxe_mr *mr = qp->resp.mr;
35886 -       if (mr->state != RXE_MEM_STATE_VALID) {
35887 +       if (mr->state != RXE_MR_STATE_VALID) {
35888                 ret = RESPST_ERR_RKEY_VIOLATION;
35889                 goto out;
35890         }
35891 @@ -700,8 +700,8 @@ static enum resp_states read_reply(struct rxe_qp *qp,
35892         if (!skb)
35893                 return RESPST_ERR_RNR;
35895 -       err = rxe_mem_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
35896 -                          payload, from_mem_obj, &icrc);
35897 +       err = rxe_mr_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
35898 +                         payload, from_mr_obj, &icrc);
35899         if (err)
35900                 pr_err("Failed copying memory\n");
35902 @@ -883,7 +883,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
35903                         }
35905                         if (pkt->mask & RXE_IETH_MASK) {
35906 -                               struct rxe_mem *rmr;
35907 +                               struct rxe_mr *rmr;
35909                                 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
35910                                 wc->ex.invalidate_rkey = ieth_rkey(pkt);
35911 @@ -895,7 +895,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
35912                                                wc->ex.invalidate_rkey);
35913                                         return RESPST_ERROR;
35914                                 }
35915 -                               rmr->state = RXE_MEM_STATE_FREE;
35916 +                               rmr->state = RXE_MR_STATE_FREE;
35917                                 rxe_drop_ref(rmr);
35918                         }
35920 diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
35921 index dee5e0e919d2..38249c1a76a8 100644
35922 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c
35923 +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
35924 @@ -865,7 +865,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
35926         struct rxe_dev *rxe = to_rdev(ibpd->device);
35927         struct rxe_pd *pd = to_rpd(ibpd);
35928 -       struct rxe_mem *mr;
35929 +       struct rxe_mr *mr;
35931         mr = rxe_alloc(&rxe->mr_pool);
35932         if (!mr)
35933 @@ -873,7 +873,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
35935         rxe_add_index(mr);
35936         rxe_add_ref(pd);
35937 -       rxe_mem_init_dma(pd, access, mr);
35938 +       rxe_mr_init_dma(pd, access, mr);
35940         return &mr->ibmr;
35942 @@ -887,7 +887,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
35943         int err;
35944         struct rxe_dev *rxe = to_rdev(ibpd->device);
35945         struct rxe_pd *pd = to_rpd(ibpd);
35946 -       struct rxe_mem *mr;
35947 +       struct rxe_mr *mr;
35949         mr = rxe_alloc(&rxe->mr_pool);
35950         if (!mr) {
35951 @@ -899,8 +899,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
35953         rxe_add_ref(pd);
35955 -       err = rxe_mem_init_user(pd, start, length, iova,
35956 -                               access, udata, mr);
35957 +       err = rxe_mr_init_user(pd, start, length, iova, access, udata, mr);
35958         if (err)
35959                 goto err3;
35961 @@ -916,9 +915,9 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
35963  static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
35965 -       struct rxe_mem *mr = to_rmr(ibmr);
35966 +       struct rxe_mr *mr = to_rmr(ibmr);
35968 -       mr->state = RXE_MEM_STATE_ZOMBIE;
35969 +       mr->state = RXE_MR_STATE_ZOMBIE;
35970         rxe_drop_ref(mr_pd(mr));
35971         rxe_drop_index(mr);
35972         rxe_drop_ref(mr);
35973 @@ -930,7 +929,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
35975         struct rxe_dev *rxe = to_rdev(ibpd->device);
35976         struct rxe_pd *pd = to_rpd(ibpd);
35977 -       struct rxe_mem *mr;
35978 +       struct rxe_mr *mr;
35979         int err;
35981         if (mr_type != IB_MR_TYPE_MEM_REG)
35982 @@ -946,7 +945,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
35984         rxe_add_ref(pd);
35986 -       err = rxe_mem_init_fast(pd, max_num_sg, mr);
35987 +       err = rxe_mr_init_fast(pd, max_num_sg, mr);
35988         if (err)
35989                 goto err2;
35991 @@ -962,7 +961,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
35993  static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
35995 -       struct rxe_mem *mr = to_rmr(ibmr);
35996 +       struct rxe_mr *mr = to_rmr(ibmr);
35997         struct rxe_map *map;
35998         struct rxe_phys_buf *buf;
36000 @@ -982,7 +981,7 @@ static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
36001  static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
36002                          int sg_nents, unsigned int *sg_offset)
36004 -       struct rxe_mem *mr = to_rmr(ibmr);
36005 +       struct rxe_mr *mr = to_rmr(ibmr);
36006         int n;
36008         mr->nbuf = 0;
36009 @@ -1110,6 +1109,7 @@ static const struct ib_device_ops rxe_dev_ops = {
36010         INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
36011         INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
36012         INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
36013 +       INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
36014  };
36016  int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
36017 diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
36018 index 79e0a5a878da..11eba7a3ba8f 100644
36019 --- a/drivers/infiniband/sw/rxe/rxe_verbs.h
36020 +++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
36021 @@ -156,7 +156,7 @@ struct resp_res {
36022                         struct sk_buff  *skb;
36023                 } atomic;
36024                 struct {
36025 -                       struct rxe_mem  *mr;
36026 +                       struct rxe_mr   *mr;
36027                         u64             va_org;
36028                         u32             rkey;
36029                         u32             length;
36030 @@ -183,7 +183,7 @@ struct rxe_resp_info {
36032         /* RDMA read / atomic only */
36033         u64                     va;
36034 -       struct rxe_mem          *mr;
36035 +       struct rxe_mr           *mr;
36036         u32                     resid;
36037         u32                     rkey;
36038         u32                     length;
36039 @@ -262,18 +262,18 @@ struct rxe_qp {
36040         struct execute_work     cleanup_work;
36041  };
36043 -enum rxe_mem_state {
36044 -       RXE_MEM_STATE_ZOMBIE,
36045 -       RXE_MEM_STATE_INVALID,
36046 -       RXE_MEM_STATE_FREE,
36047 -       RXE_MEM_STATE_VALID,
36048 +enum rxe_mr_state {
36049 +       RXE_MR_STATE_ZOMBIE,
36050 +       RXE_MR_STATE_INVALID,
36051 +       RXE_MR_STATE_FREE,
36052 +       RXE_MR_STATE_VALID,
36053  };
36055 -enum rxe_mem_type {
36056 -       RXE_MEM_TYPE_NONE,
36057 -       RXE_MEM_TYPE_DMA,
36058 -       RXE_MEM_TYPE_MR,
36059 -       RXE_MEM_TYPE_MW,
36060 +enum rxe_mr_type {
36061 +       RXE_MR_TYPE_NONE,
36062 +       RXE_MR_TYPE_DMA,
36063 +       RXE_MR_TYPE_MR,
36064 +       RXE_MR_TYPE_MW,
36065  };
36067  #define RXE_BUF_PER_MAP                (PAGE_SIZE / sizeof(struct rxe_phys_buf))
36068 @@ -287,17 +287,14 @@ struct rxe_map {
36069         struct rxe_phys_buf     buf[RXE_BUF_PER_MAP];
36070  };
36072 -struct rxe_mem {
36073 +struct rxe_mr {
36074         struct rxe_pool_entry   pelem;
36075 -       union {
36076 -               struct ib_mr            ibmr;
36077 -               struct ib_mw            ibmw;
36078 -       };
36079 +       struct ib_mr            ibmr;
36081         struct ib_umem          *umem;
36083 -       enum rxe_mem_state      state;
36084 -       enum rxe_mem_type       type;
36085 +       enum rxe_mr_state       state;
36086 +       enum rxe_mr_type        type;
36087         u64                     va;
36088         u64                     iova;
36089         size_t                  length;
36090 @@ -318,6 +315,17 @@ struct rxe_mem {
36091         struct rxe_map          **map;
36092  };
36094 +enum rxe_mw_state {
36095 +       RXE_MW_STATE_INVALID = RXE_MR_STATE_INVALID,
36096 +       RXE_MW_STATE_FREE = RXE_MR_STATE_FREE,
36097 +       RXE_MW_STATE_VALID = RXE_MR_STATE_VALID,
36100 +struct rxe_mw {
36101 +       struct ib_mw ibmw;
36102 +       struct rxe_pool_entry pelem;
36105  struct rxe_mc_grp {
36106         struct rxe_pool_entry   pelem;
36107         spinlock_t              mcg_lock; /* guard group */
36108 @@ -422,27 +430,27 @@ static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
36109         return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
36112 -static inline struct rxe_mem *to_rmr(struct ib_mr *mr)
36113 +static inline struct rxe_mr *to_rmr(struct ib_mr *mr)
36115 -       return mr ? container_of(mr, struct rxe_mem, ibmr) : NULL;
36116 +       return mr ? container_of(mr, struct rxe_mr, ibmr) : NULL;
36119 -static inline struct rxe_mem *to_rmw(struct ib_mw *mw)
36120 +static inline struct rxe_mw *to_rmw(struct ib_mw *mw)
36122 -       return mw ? container_of(mw, struct rxe_mem, ibmw) : NULL;
36123 +       return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;
36126 -static inline struct rxe_pd *mr_pd(struct rxe_mem *mr)
36127 +static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
36129         return to_rpd(mr->ibmr.pd);
36132 -static inline u32 mr_lkey(struct rxe_mem *mr)
36133 +static inline u32 mr_lkey(struct rxe_mr *mr)
36135         return mr->ibmr.lkey;
36138 -static inline u32 mr_rkey(struct rxe_mem *mr)
36139 +static inline u32 mr_rkey(struct rxe_mr *mr)
36141         return mr->ibmr.rkey;
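The to_rmr()/to_rmw() converters above work because struct rxe_mr and struct rxe_mw now each embed their ib_* counterpart directly instead of sharing a union, and container_of() recovers the outer object from a pointer to that member. A minimal user-space sketch of the pattern; the types here are hypothetical stand-ins, not the kernel structs:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_mr_like { unsigned int lkey; };    /* stand-in for struct ib_mr */

struct mr_like {                             /* stand-in for struct rxe_mr */
        int state;
        struct ib_mr_like ibmr;              /* embedded member, no union */
};

static struct mr_like *to_rmr_like(struct ib_mr_like *mr)
{
        return mr ? container_of(mr, struct mr_like, ibmr) : NULL;
}

int main(void)
{
        struct mr_like mr = { .state = 1, .ibmr = { .lkey = 42 } };

        /* Recover the outer object from a pointer to the embedded member. */
        struct mr_like *back = to_rmr_like(&mr.ibmr);

        printf("state=%d lkey=%u\n", back->state, back->ibmr.lkey);
        return 0;
}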
36143 diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
36144 index 34a910cf0edb..61c17db70d65 100644
36145 --- a/drivers/infiniband/sw/siw/siw_mem.c
36146 +++ b/drivers/infiniband/sw/siw/siw_mem.c
36147 @@ -106,8 +106,6 @@ int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
36148         mem->perms = rights & IWARP_ACCESS_MASK;
36149         kref_init(&mem->ref);
36151 -       mr->mem = mem;
36153         get_random_bytes(&next, 4);
36154         next &= 0x00ffffff;
36156 @@ -116,6 +114,8 @@ int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
36157                 kfree(mem);
36158                 return -ENOMEM;
36159         }
36161 +       mr->mem = mem;
36162         /* Set the STag index part */
36163         mem->stag = id << 8;
36164         mr->base_mr.lkey = mr->base_mr.rkey = mem->stag;
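The siw_mem.c hunk above delays the mr->mem assignment until after the ID allocation has succeeded, so the error path (which frees mem) can never leave mr->mem pointing at freed memory. A user-space sketch of that ordering, with hypothetical stand-in types and a stubbed allocator:

#include <stdlib.h>

struct mem { int stag; };
struct mr  { struct mem *mem; };

static int alloc_id(int *id) { *id = 7; return 0; }   /* may fail for real */

static int mr_add_mem(struct mr *mr)
{
        struct mem *mem = calloc(1, sizeof(*mem));
        int id;

        if (!mem)
                return -1;
        if (alloc_id(&id)) {
                free(mem);            /* mr->mem never set: nothing dangles */
                return -1;
        }
        mr->mem = mem;                /* publish only after full success */
        mem->stag = id << 8;
        return 0;
}

int main(void)
{
        struct mr mr = { 0 };
        return mr_add_mem(&mr);
}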
36165 diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
36166 index e389d44e5591..8a00c06e5f56 100644
36167 --- a/drivers/infiniband/sw/siw/siw_verbs.c
36168 +++ b/drivers/infiniband/sw/siw/siw_verbs.c
36169 @@ -300,7 +300,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
36170         struct siw_ucontext *uctx =
36171                 rdma_udata_to_drv_context(udata, struct siw_ucontext,
36172                                           base_ucontext);
36173 -       struct siw_cq *scq = NULL, *rcq = NULL;
36174         unsigned long flags;
36175         int num_sqe, num_rqe, rv = 0;
36176         size_t length;
36177 @@ -343,10 +342,8 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
36178                 rv = -EINVAL;
36179                 goto err_out;
36180         }
36181 -       scq = to_siw_cq(attrs->send_cq);
36182 -       rcq = to_siw_cq(attrs->recv_cq);
36184 -       if (!scq || (!rcq && !attrs->srq)) {
36185 +       if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) {
36186                 siw_dbg(base_dev, "send CQ or receive CQ invalid\n");
36187                 rv = -EINVAL;
36188                 goto err_out;
36189 @@ -378,7 +375,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
36190         else {
36191                 /* Zero sized SQ is not supported */
36192                 rv = -EINVAL;
36193 -               goto err_out;
36194 +               goto err_out_xa;
36195         }
36196         if (num_rqe)
36197                 num_rqe = roundup_pow_of_two(num_rqe);
36198 @@ -401,8 +398,8 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
36199                 }
36200         }
36201         qp->pd = pd;
36202 -       qp->scq = scq;
36203 -       qp->rcq = rcq;
36204 +       qp->scq = to_siw_cq(attrs->send_cq);
36205 +       qp->rcq = to_siw_cq(attrs->recv_cq);
36207         if (attrs->srq) {
36208                 /*
36209 diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
36210 index 7305ed8976c2..18266f07c58d 100644
36211 --- a/drivers/infiniband/ulp/isert/ib_isert.c
36212 +++ b/drivers/infiniband/ulp/isert/ib_isert.c
36213 @@ -438,23 +438,23 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
36214         isert_init_conn(isert_conn);
36215         isert_conn->cm_id = cma_id;
36217 -       ret = isert_alloc_login_buf(isert_conn, cma_id->device);
36218 -       if (ret)
36219 -               goto out;
36221         device = isert_device_get(cma_id);
36222         if (IS_ERR(device)) {
36223                 ret = PTR_ERR(device);
36224 -               goto out_rsp_dma_map;
36225 +               goto out;
36226         }
36227         isert_conn->device = device;
36229 +       ret = isert_alloc_login_buf(isert_conn, cma_id->device);
36230 +       if (ret)
36231 +               goto out_conn_dev;
36233         isert_set_nego_params(isert_conn, &event->param.conn);
36235         isert_conn->qp = isert_create_qp(isert_conn, cma_id);
36236         if (IS_ERR(isert_conn->qp)) {
36237                 ret = PTR_ERR(isert_conn->qp);
36238 -               goto out_conn_dev;
36239 +               goto out_rsp_dma_map;
36240         }
36242         ret = isert_login_post_recv(isert_conn);
36243 @@ -473,10 +473,10 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
36245  out_destroy_qp:
36246         isert_destroy_qp(isert_conn);
36247 -out_conn_dev:
36248 -       isert_device_put(device);
36249  out_rsp_dma_map:
36250         isert_free_login_buf(isert_conn);
36251 +out_conn_dev:
36252 +       isert_device_put(device);
36253  out:
36254         kfree(isert_conn);
36255         rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
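The isert hunks above reorder both the setup calls and the error labels so teardown runs in exact reverse order of setup (device ref before login buffer before QP). A generic sketch of that goto-unwind idiom with stubbed, hypothetical helpers:

static int get_device_ref(void)  { return 0; }
static int alloc_login_buf(void) { return 0; }
static int create_qp(void)       { return -1; }   /* pretend this fails */
static void free_login_buf(void) { }
static void put_device_ref(void) { }

static int connect_setup(void)
{
        int ret;

        ret = get_device_ref();            /* step 1 */
        if (ret)
                goto out;
        ret = alloc_login_buf();           /* step 2 */
        if (ret)
                goto out_put_device;
        ret = create_qp();                 /* step 3 */
        if (ret)
                goto out_free_login_buf;
        return 0;

out_free_login_buf:                        /* undo step 2 */
        free_login_buf();
out_put_device:                            /* undo step 1 */
        put_device_ref();
out:
        return ret;
}

int main(void)
{
        return connect_setup() ? 1 : 0;
}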
36256 diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
36257 index 6734329cca33..959ba0462ef0 100644
36258 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
36259 +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
36260 @@ -2784,8 +2784,8 @@ int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
36261         } while (!changed && old_state != RTRS_CLT_DEAD);
36263         if (likely(changed)) {
36264 -               rtrs_clt_destroy_sess_files(sess, sysfs_self);
36265                 rtrs_clt_remove_path_from_arr(sess);
36266 +               rtrs_clt_destroy_sess_files(sess, sysfs_self);
36267                 kobject_put(&sess->kobj);
36268         }
36270 diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
36271 index 6be60aa5ffe2..7f0420ad9057 100644
36272 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c
36273 +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
36274 @@ -2378,6 +2378,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
36275                 pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
36276                         dev_name(&sdev->device->dev), port_num);
36277                 mutex_unlock(&sport->mutex);
36278 +               ret = -EINVAL;
36279                 goto reject;
36280         }
36282 diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
36283 index 5f7706febcb0..17540bdb1eaf 100644
36284 --- a/drivers/input/touchscreen/elants_i2c.c
36285 +++ b/drivers/input/touchscreen/elants_i2c.c
36286 @@ -38,6 +38,7 @@
36287  #include <linux/of.h>
36288  #include <linux/gpio/consumer.h>
36289  #include <linux/regulator/consumer.h>
36290 +#include <linux/uuid.h>
36291  #include <asm/unaligned.h>
36293  /* Device, Driver information */
36294 @@ -1334,6 +1335,40 @@ static void elants_i2c_power_off(void *_data)
36295         }
36298 +#ifdef CONFIG_ACPI
36299 +static const struct acpi_device_id i2c_hid_ids[] = {
36300 +       {"ACPI0C50", 0 },
36301 +       {"PNP0C50", 0 },
36302 +       { },
36305 +static const guid_t i2c_hid_guid =
36306 +       GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
36307 +                 0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
36309 +static bool elants_acpi_is_hid_device(struct device *dev)
36311 +       acpi_handle handle = ACPI_HANDLE(dev);
36312 +       union acpi_object *obj;
36314 +       if (acpi_match_device_ids(ACPI_COMPANION(dev), i2c_hid_ids))
36315 +               return false;
36317 +       obj = acpi_evaluate_dsm_typed(handle, &i2c_hid_guid, 1, 1, NULL, ACPI_TYPE_INTEGER);
36318 +       if (obj) {
36319 +               ACPI_FREE(obj);
36320 +               return true;
36321 +       }
36323 +       return false;
36325 +#else
36326 +static bool elants_acpi_is_hid_device(struct device *dev)
36328 +       return false;
36330 +#endif
36332  static int elants_i2c_probe(struct i2c_client *client,
36333                             const struct i2c_device_id *id)
36335 @@ -1342,9 +1377,14 @@ static int elants_i2c_probe(struct i2c_client *client,
36336         unsigned long irqflags;
36337         int error;
36339 +       /* Don't bind to i2c-hid compatible devices; these are handled by the i2c-hid driver. */
36340 +       if (elants_acpi_is_hid_device(&client->dev)) {
36341 +               dev_warn(&client->dev, "This device appears to be an I2C-HID device, not binding\n");
36342 +               return -ENODEV;
36343 +       }
36345         if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
36346 -               dev_err(&client->dev,
36347 -                       "%s: i2c check functionality error\n", DEVICE_NAME);
36348 +               dev_err(&client->dev, "I2C check functionality error\n");
36349                 return -ENXIO;
36350         }
36352 diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
36353 index d8fccf048bf4..30576a5f2f04 100644
36354 --- a/drivers/input/touchscreen/ili210x.c
36355 +++ b/drivers/input/touchscreen/ili210x.c
36356 @@ -87,7 +87,7 @@ static bool ili210x_touchdata_to_coords(const u8 *touchdata,
36357                                         unsigned int *x, unsigned int *y,
36358                                         unsigned int *z)
36360 -       if (touchdata[0] & BIT(finger))
36361 +       if (!(touchdata[0] & BIT(finger)))
36362                 return false;
36364         *x = get_unaligned_be16(touchdata + 1 + (finger * 4) + 0);
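The ili210x one-liner above fixes an inverted presence test: a set bit in the first status byte means the finger is down, so the early "no touch" return must fire on a clear bit. A standalone sketch with made-up values:

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

static bool finger_present(unsigned char status, unsigned int finger)
{
        if (!(status & BIT(finger)))   /* bit clear -> no contact */
                return false;
        return true;
}

int main(void)
{
        unsigned char status = BIT(0) | BIT(2);   /* fingers 0 and 2 down */

        for (unsigned int f = 0; f < 4; f++)
                printf("finger %u: %s\n", f,
                       finger_present(status, f) ? "down" : "up");
        return 0;
}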
36365 diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
36366 index 8fa2f3b7cfd8..e8b6c3137420 100644
36367 --- a/drivers/input/touchscreen/silead.c
36368 +++ b/drivers/input/touchscreen/silead.c
36369 @@ -20,6 +20,7 @@
36370  #include <linux/input/mt.h>
36371  #include <linux/input/touchscreen.h>
36372  #include <linux/pm.h>
36373 +#include <linux/pm_runtime.h>
36374  #include <linux/irq.h>
36375  #include <linux/regulator/consumer.h>
36377 @@ -335,10 +336,8 @@ static int silead_ts_get_id(struct i2c_client *client)
36379         error = i2c_smbus_read_i2c_block_data(client, SILEAD_REG_ID,
36380                                               sizeof(chip_id), (u8 *)&chip_id);
36381 -       if (error < 0) {
36382 -               dev_err(&client->dev, "Chip ID read error %d\n", error);
36383 +       if (error < 0)
36384                 return error;
36385 -       }
36387         data->chip_id = le32_to_cpu(chip_id);
36388         dev_info(&client->dev, "Silead chip ID: 0x%8X", data->chip_id);
36389 @@ -351,12 +350,49 @@ static int silead_ts_setup(struct i2c_client *client)
36390         int error;
36391         u32 status;
36393 +       /*
36394 +        * Some buggy BIOSes bring up the chip in a stuck state where it
36395 +        * blocks the I2C bus. The following steps are necessary to
36396 +        * get the chip / bus unstuck:
36397 +        * 1. Turn off the Silead chip.
36398 +        * 2. Try to do an I2C transfer with the chip; this will fail, in
36399 +        *    response to which the I2C bus driver will call
36400 +        *    i2c_recover_bus(), which unsticks the I2C bus. Note that
36401 +        *    unsticking the I2C bus only works if we first drop the
36402 +        *    chip off the bus by turning it off.
36403 +        * 3. Turn the chip back on.
36404 +        *
36405 +        * On the x86/ACPI systems where this problem is seen, steps 1 and
36406 +        * 3 require making ACPI calls and dealing with ACPI Power
36407 +        * Resources. The workaround below runtime-suspends the chip to
36408 +        * turn it off, leaving it up to the ACPI subsystem to deal with
36409 +        * this.
36410 +        */
36412 +       if (device_property_read_bool(&client->dev,
36413 +                                     "silead,stuck-controller-bug")) {
36414 +               pm_runtime_set_active(&client->dev);
36415 +               pm_runtime_enable(&client->dev);
36416 +               pm_runtime_allow(&client->dev);
36418 +               pm_runtime_suspend(&client->dev);
36420 +               dev_warn(&client->dev, FW_BUG "Stuck I2C bus: please ignore the next 'controller timed out' error\n");
36421 +               silead_ts_get_id(client);
36423 +               /* The forbid will also resume the device */
36424 +               pm_runtime_forbid(&client->dev);
36425 +               pm_runtime_disable(&client->dev);
36426 +       }
36428         silead_ts_set_power(client, SILEAD_POWER_OFF);
36429         silead_ts_set_power(client, SILEAD_POWER_ON);
36431         error = silead_ts_get_id(client);
36432 -       if (error)
36433 +       if (error) {
36434 +               dev_err(&client->dev, "Chip ID read error %d\n", error);
36435                 return error;
36436 +       }
36438         error = silead_ts_init(client);
36439         if (error)
36440 diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
36441 index 321f5906e6ed..df7b19ff0a9e 100644
36442 --- a/drivers/iommu/amd/init.c
36443 +++ b/drivers/iommu/amd/init.c
36444 @@ -12,7 +12,6 @@
36445  #include <linux/acpi.h>
36446  #include <linux/list.h>
36447  #include <linux/bitmap.h>
36448 -#include <linux/delay.h>
36449  #include <linux/slab.h>
36450  #include <linux/syscore_ops.h>
36451  #include <linux/interrupt.h>
36452 @@ -257,8 +256,6 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
36453  static int amd_iommu_enable_interrupts(void);
36454  static int __init iommu_go_to_state(enum iommu_init_state state);
36455  static void init_device_table_dma(void);
36456 -static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
36457 -                               u8 fxn, u64 *value, bool is_write);
36459  static bool amd_iommu_pre_enabled = true;
36461 @@ -1717,53 +1714,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
36462         return 0;
36465 -static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
36466 +static void init_iommu_perf_ctr(struct amd_iommu *iommu)
36468 -       int retry;
36469 +       u64 val;
36470         struct pci_dev *pdev = iommu->dev;
36471 -       u64 val = 0xabcd, val2 = 0, save_reg, save_src;
36473         if (!iommu_feature(iommu, FEATURE_PC))
36474                 return;
36476         amd_iommu_pc_present = true;
36478 -       /* save the value to restore, if writable */
36479 -       if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false) ||
36480 -           iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, false))
36481 -               goto pc_false;
36483 -       /*
36484 -        * Disable power gating by programing the performance counter
36485 -        * source to 20 (i.e. counts the reads and writes from/to IOMMU
36486 -        * Reserved Register [MMIO Offset 1FF8h] that are ignored.),
36487 -        * which never get incremented during this init phase.
36488 -        * (Note: The event is also deprecated.)
36489 -        */
36490 -       val = 20;
36491 -       if (iommu_pc_get_set_reg(iommu, 0, 0, 8, &val, true))
36492 -               goto pc_false;
36494 -       /* Check if the performance counters can be written to */
36495 -       val = 0xabcd;
36496 -       for (retry = 5; retry; retry--) {
36497 -               if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true) ||
36498 -                   iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false) ||
36499 -                   val2)
36500 -                       break;
36502 -               /* Wait about 20 msec for power gating to disable and retry. */
36503 -               msleep(20);
36504 -       }
36506 -       /* restore */
36507 -       if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true) ||
36508 -           iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, true))
36509 -               goto pc_false;
36511 -       if (val != val2)
36512 -               goto pc_false;
36514         pci_info(pdev, "IOMMU performance counters supported\n");
36516         val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
36517 @@ -1771,11 +1731,6 @@ static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
36518         iommu->max_counters = (u8) ((val >> 7) & 0xf);
36520         return;
36522 -pc_false:
36523 -       pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
36524 -       amd_iommu_pc_present = false;
36525 -       return;
36528  static ssize_t amd_iommu_show_cap(struct device *dev,
36529 @@ -1837,7 +1792,7 @@ static void __init late_iommu_features_init(struct amd_iommu *iommu)
36530          * IVHD and MMIO conflict.
36531          */
36532         if (features != iommu->features)
36533 -               pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx\n).",
36534 +               pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
36535                         features, iommu->features);
36538 diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
36539 index 8594b4a83043..941ba5484731 100644
36540 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
36541 +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
36542 @@ -2305,6 +2305,9 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
36544         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
36546 +       if (!gather->pgsize)
36547 +               return;
36549         arm_smmu_tlb_inv_range_domain(gather->start,
36550                                       gather->end - gather->start + 1,
36551                                       gather->pgsize, true, smmu_domain);
36552 diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
36553 index f985817c967a..230b6f6b3901 100644
36554 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
36555 +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
36556 @@ -115,7 +115,7 @@
36557  #define GERROR_PRIQ_ABT_ERR            (1 << 3)
36558  #define GERROR_EVTQ_ABT_ERR            (1 << 2)
36559  #define GERROR_CMDQ_ERR                        (1 << 0)
36560 -#define GERROR_ERR_MASK                        0xfd
36561 +#define GERROR_ERR_MASK                        0x1fd
36563  #define ARM_SMMU_GERRORN               0x64
36565 diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
36566 index af765c813cc8..fdd095e1fa52 100644
36567 --- a/drivers/iommu/dma-iommu.c
36568 +++ b/drivers/iommu/dma-iommu.c
36569 @@ -52,6 +52,17 @@ struct iommu_dma_cookie {
36570  };
36572  static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
36573 +bool iommu_dma_forcedac __read_mostly;
36575 +static int __init iommu_dma_forcedac_setup(char *str)
36577 +       int ret = kstrtobool(str, &iommu_dma_forcedac);
36579 +       if (!ret && iommu_dma_forcedac)
36580 +               pr_info("Forcing DAC for PCI devices\n");
36581 +       return ret;
36583 +early_param("iommu.forcedac", iommu_dma_forcedac_setup);
36585  void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
36586                 struct iommu_domain *domain)
36587 @@ -444,7 +455,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
36588                 dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
36590         /* Try to get PCI devices a SAC address */
36591 -       if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
36592 +       if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
36593                 iova = alloc_iova_fast(iovad, iova_len,
36594                                        DMA_BIT_MASK(32) >> shift, false);
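The new iommu.forcedac early_param above is parsed with kstrtobool(). A simplified user-space sketch of that kind of boolean parameter parsing (the real kernel helper also understands "on"/"off"):

#include <stdbool.h>
#include <stdio.h>

static int parse_bool(const char *s, bool *res)
{
        if (!s)
                return -1;
        switch (s[0]) {
        case 'y': case 'Y': case '1':
                *res = true;
                return 0;
        case 'n': case 'N': case '0':
                *res = false;
                return 0;
        default:
                return -1;
        }
}

int main(void)
{
        bool forcedac = false;

        if (!parse_bool("1", &forcedac) && forcedac)
                printf("Forcing DAC for PCI devices\n");
        return 0;
}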
36596 diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
36597 index ee0932307d64..7e551da6c1fb 100644
36598 --- a/drivers/iommu/intel/iommu.c
36599 +++ b/drivers/iommu/intel/iommu.c
36600 @@ -360,7 +360,6 @@ int intel_iommu_enabled = 0;
36601  EXPORT_SYMBOL_GPL(intel_iommu_enabled);
36603  static int dmar_map_gfx = 1;
36604 -static int dmar_forcedac;
36605  static int intel_iommu_strict;
36606  static int intel_iommu_superpage = 1;
36607  static int iommu_identity_mapping;
36608 @@ -451,8 +450,8 @@ static int __init intel_iommu_setup(char *str)
36609                         dmar_map_gfx = 0;
36610                         pr_info("Disable GFX device mapping\n");
36611                 } else if (!strncmp(str, "forcedac", 8)) {
36612 -                       pr_info("Forcing DAC for PCI devices\n");
36613 -                       dmar_forcedac = 1;
36614 +                       pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
36615 +                       iommu_dma_forcedac = true;
36616                 } else if (!strncmp(str, "strict", 6)) {
36617                         pr_info("Disable batched IOTLB flush\n");
36618                         intel_iommu_strict = 1;
36619 @@ -658,7 +657,14 @@ static int domain_update_iommu_snooping(struct intel_iommu *skip)
36620         rcu_read_lock();
36621         for_each_active_iommu(iommu, drhd) {
36622                 if (iommu != skip) {
36623 -                       if (!ecap_sc_support(iommu->ecap)) {
36624 +                       /*
36625 +                        * If the hardware is operating in scalable mode,
36626 +                        * snooping control is always supported, since we
36627 +                        * always set the PASID-table-entry PGSNP bit when
36628 +                        * the domain is managed externally (UNMANAGED).
36629 +                        */
36630 +                       if (!sm_supported(iommu) &&
36631 +                           !ecap_sc_support(iommu->ecap)) {
36632                                 ret = 0;
36633                                 break;
36634                         }
36635 @@ -1340,6 +1346,11 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
36636                       readl, (sts & DMA_GSTS_RTPS), sts);
36638         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
36640 +       iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
36641 +       if (sm_supported(iommu))
36642 +               qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
36643 +       iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
36646  void iommu_flush_write_buffer(struct intel_iommu *iommu)
36647 @@ -2289,6 +2300,41 @@ static inline int hardware_largepage_caps(struct dmar_domain *domain,
36648         return level;
36652 + * Ensure that old small page tables are removed to make room for superpage(s).
36653 + * We're going to add new large pages, so make sure we don't remove their parent
36654 + * tables. The IOTLB/devTLBs should be flushed if any PDE/PTEs are cleared.
36655 + */
36656 +static void switch_to_super_page(struct dmar_domain *domain,
36657 +                                unsigned long start_pfn,
36658 +                                unsigned long end_pfn, int level)
36660 +       unsigned long lvl_pages = lvl_to_nr_pages(level);
36661 +       struct dma_pte *pte = NULL;
36662 +       int i;
36664 +       while (start_pfn <= end_pfn) {
36665 +               if (!pte)
36666 +                       pte = pfn_to_dma_pte(domain, start_pfn, &level);
36668 +               if (dma_pte_present(pte)) {
36669 +                       dma_pte_free_pagetable(domain, start_pfn,
36670 +                                              start_pfn + lvl_pages - 1,
36671 +                                              level + 1);
36673 +                       for_each_domain_iommu(i, domain)
36674 +                               iommu_flush_iotlb_psi(g_iommus[i], domain,
36675 +                                                     start_pfn, lvl_pages,
36676 +                                                     0, 0);
36677 +               }
36679 +               pte++;
36680 +               start_pfn += lvl_pages;
36681 +               if (first_pte_in_page(pte))
36682 +                       pte = NULL;
36683 +       }
36686  static int
36687  __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
36688                  unsigned long phys_pfn, unsigned long nr_pages, int prot)
36689 @@ -2305,8 +2351,9 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
36690                 return -EINVAL;
36692         attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
36693 +       attr |= DMA_FL_PTE_PRESENT;
36694         if (domain_use_first_level(domain)) {
36695 -               attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;
36696 +               attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
36698                 if (domain->domain.type == IOMMU_DOMAIN_DMA) {
36699                         attr |= DMA_FL_PTE_ACCESS;
36700 @@ -2329,22 +2376,11 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
36701                                 return -ENOMEM;
36702                         /* It is large page*/
36703                         if (largepage_lvl > 1) {
36704 -                               unsigned long nr_superpages, end_pfn;
36705 +                               unsigned long end_pfn;
36707                                 pteval |= DMA_PTE_LARGE_PAGE;
36708 -                               lvl_pages = lvl_to_nr_pages(largepage_lvl);
36710 -                               nr_superpages = nr_pages / lvl_pages;
36711 -                               end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
36713 -                               /*
36714 -                                * Ensure that old small page tables are
36715 -                                * removed to make room for superpage(s).
36716 -                                * We're adding new large pages, so make sure
36717 -                                * we don't remove their parent tables.
36718 -                                */
36719 -                               dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
36720 -                                                      largepage_lvl + 1);
36721 +                               end_pfn = ((iov_pfn + nr_pages) & level_mask(largepage_lvl)) - 1;
36722 +                               switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
36723                         } else {
36724                                 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
36725                         }
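In the __domain_mapping() hunk above, end_pfn is rounded down to a superpage boundary so switch_to_super_page() only touches ranges covered by whole large pages. A standalone sketch of that masking arithmetic, assuming the VT-d stride of 9 address bits per page-table level; level_mask() here is a stand-in, not the kernel helper:

#include <stdio.h>

static unsigned long level_mask(int level)
{
        /* Clear the bits addressed below this level (9 bits per level). */
        return ~0UL << (9 * (level - 1));
}

int main(void)
{
        unsigned long iov_pfn = 0x200, nr_pages = 0x450;
        int level = 2;                      /* 2MiB pages: 512 x 4KiB */

        unsigned long end_pfn = ((iov_pfn + nr_pages) & level_mask(level)) - 1;

        printf("last pfn covered by whole superpages: %#lx\n", end_pfn);
        return 0;
}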
36726 @@ -2422,6 +2458,10 @@ static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn
36727                                    (((u16)bus) << 8) | devfn,
36728                                    DMA_CCMD_MASK_NOBIT,
36729                                    DMA_CCMD_DEVICE_INVL);
36731 +       if (sm_supported(iommu))
36732 +               qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0);
36734         iommu->flush.flush_iotlb(iommu,
36735                                  did_old,
36736                                  0,
36737 @@ -2505,6 +2545,9 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
36739         flags |= (level == 5) ? PASID_FLAG_FL5LP : 0;
36741 +       if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
36742 +               flags |= PASID_FLAG_PAGE_SNOOP;
36744         return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
36745                                              domain->iommu_did[iommu->seq_id],
36746                                              flags);
36747 @@ -3267,8 +3310,6 @@ static int __init init_dmars(void)
36748                 register_pasid_allocator(iommu);
36749  #endif
36750                 iommu_set_root_entry(iommu);
36751 -               iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
36752 -               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
36753         }
36755  #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
36756 @@ -3458,12 +3499,7 @@ static int init_iommu_hw(void)
36757                 }
36759                 iommu_flush_write_buffer(iommu);
36761                 iommu_set_root_entry(iommu);
36763 -               iommu->flush.flush_context(iommu, 0, 0, 0,
36764 -                                          DMA_CCMD_GLOBAL_INVL);
36765 -               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
36766                 iommu_enable_translation(iommu);
36767                 iommu_disable_protect_mem_regions(iommu);
36768         }
36769 @@ -3846,8 +3882,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
36770                 goto disable_iommu;
36772         iommu_set_root_entry(iommu);
36773 -       iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
36774 -       iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
36775         iommu_enable_translation(iommu);
36777         iommu_disable_protect_mem_regions(iommu);
36778 diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
36779 index 611ef5243cb6..5c16ebe037a1 100644
36780 --- a/drivers/iommu/intel/irq_remapping.c
36781 +++ b/drivers/iommu/intel/irq_remapping.c
36782 @@ -736,7 +736,7 @@ static int __init intel_prepare_irq_remapping(void)
36783                 return -ENODEV;
36785         if (intel_cap_audit(CAP_AUDIT_STATIC_IRQR, NULL))
36786 -               goto error;
36787 +               return -ENODEV;
36789         if (!dmar_ir_support())
36790                 return -ENODEV;
36791 diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
36792 index f26cb6195b2c..5093d317ff1a 100644
36793 --- a/drivers/iommu/intel/pasid.c
36794 +++ b/drivers/iommu/intel/pasid.c
36795 @@ -411,6 +411,16 @@ static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
36796         pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
36800 + * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
36801 + * PASID entry.
36802 + */
36803 +static inline void
36804 +pasid_set_pgsnp(struct pasid_entry *pe)
36806 +       pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
36809  /*
36810   * Setup the First Level Page table Pointer field (Bit 140~191)
36811   * of a scalable mode PASID entry.
36812 @@ -565,6 +575,9 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
36813                 }
36814         }
36816 +       if (flags & PASID_FLAG_PAGE_SNOOP)
36817 +               pasid_set_pgsnp(pte);
36819         pasid_set_domain_id(pte, did);
36820         pasid_set_address_width(pte, iommu->agaw);
36821         pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
36822 @@ -643,6 +656,9 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
36823         pasid_set_fault_enable(pte);
36824         pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
36826 +       if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
36827 +               pasid_set_pgsnp(pte);
36829         /*
36830          * Since it is a second level only translation setup, we should
36831          * set SRE bit as well (addresses are expected to be GPAs).
36832 diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
36833 index 444c0bec221a..086ebd697319 100644
36834 --- a/drivers/iommu/intel/pasid.h
36835 +++ b/drivers/iommu/intel/pasid.h
36836 @@ -48,6 +48,7 @@
36837   */
36838  #define PASID_FLAG_SUPERVISOR_MODE     BIT(0)
36839  #define PASID_FLAG_NESTED              BIT(1)
36840 +#define PASID_FLAG_PAGE_SNOOP          BIT(2)
36842  /*
36843   * The PASID_FLAG_FL5LP flag Indicates using 5-level paging for first-
36844 diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
36845 index 574a7e657a9a..ecb6314fdd5c 100644
36846 --- a/drivers/iommu/intel/svm.c
36847 +++ b/drivers/iommu/intel/svm.c
36848 @@ -862,7 +862,7 @@ intel_svm_prq_report(struct device *dev, struct page_req_dsc *desc)
36849         /* Fill in event data for device specific processing */
36850         memset(&event, 0, sizeof(struct iommu_fault_event));
36851         event.fault.type = IOMMU_FAULT_PAGE_REQ;
36852 -       event.fault.prm.addr = desc->addr;
36853 +       event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT;
36854         event.fault.prm.pasid = desc->pasid;
36855         event.fault.prm.grpid = desc->prg_index;
36856         event.fault.prm.perm = prq_to_iommu_prot(desc);
36857 @@ -920,7 +920,17 @@ static irqreturn_t prq_event_thread(int irq, void *d)
36858                                ((unsigned long long *)req)[1]);
36859                         goto no_pasid;
36860                 }
36862 +               /* We shall not receive page request for supervisor SVM */
36863 +               /* We shall not receive page requests for supervisor SVM */
36864 +               if (req->pm_req && (req->rd_req | req->wr_req)) {
36865 +                       pr_err("Unexpected page request in Privilege Mode\n");
36866 +                       goto no_pasid;
36867 +               }
36868 +               /* DMA read with exec requeset is not supported. */
36869 +               /* DMA read with exec request is not supported. */
36870 +                       pr_err("Execution request not supported\n");
36871 +                       goto no_pasid;
36872 +               }
36873                 if (!svm || svm->pasid != req->pasid) {
36874                         rcu_read_lock();
36875                         svm = ioasid_find(NULL, req->pasid, NULL);
36876 @@ -1021,12 +1031,12 @@ static irqreturn_t prq_event_thread(int irq, void *d)
36877                                 QI_PGRP_RESP_TYPE;
36878                         resp.qw1 = QI_PGRP_IDX(req->prg_index) |
36879                                 QI_PGRP_LPIG(req->lpig);
36880 +                       resp.qw2 = 0;
36881 +                       resp.qw3 = 0;
36883                         if (req->priv_data_present)
36884                                 memcpy(&resp.qw2, req->priv_data,
36885                                        sizeof(req->priv_data));
36886 -                       resp.qw2 = 0;
36887 -                       resp.qw3 = 0;
36888                         qi_submit_sync(iommu, &resp, 1, 0);
36889                 }
36890  prq_advance:
36891 diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
36892 index d0b0a15dba84..e10cfa99057c 100644
36893 --- a/drivers/iommu/iommu.c
36894 +++ b/drivers/iommu/iommu.c
36895 @@ -2878,10 +2878,12 @@ EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
36896   */
36897  int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
36899 -       const struct iommu_ops *ops = dev->bus->iommu_ops;
36900 +       if (dev->iommu && dev->iommu->iommu_dev) {
36901 +               const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
36903 -       if (ops && ops->dev_enable_feat)
36904 -               return ops->dev_enable_feat(dev, feat);
36905 +               if (ops->dev_enable_feat)
36906 +                       return ops->dev_enable_feat(dev, feat);
36907 +       }
36909         return -ENODEV;
36911 @@ -2894,10 +2896,12 @@ EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
36912   */
36913  int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
36915 -       const struct iommu_ops *ops = dev->bus->iommu_ops;
36916 +       if (dev->iommu && dev->iommu->iommu_dev) {
36917 +               const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
36919 -       if (ops && ops->dev_disable_feat)
36920 -               return ops->dev_disable_feat(dev, feat);
36921 +               if (ops->dev_disable_feat)
36922 +                       return ops->dev_disable_feat(dev, feat);
36923 +       }
36925         return -EBUSY;
36927 @@ -2905,10 +2909,12 @@ EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
36929  bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
36931 -       const struct iommu_ops *ops = dev->bus->iommu_ops;
36932 +       if (dev->iommu && dev->iommu->iommu_dev) {
36933 +               const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
36935 -       if (ops && ops->dev_feat_enabled)
36936 -               return ops->dev_feat_enabled(dev, feat);
36937 +               if (ops->dev_feat_enabled)
36938 +                       return ops->dev_feat_enabled(dev, feat);
36939 +       }
36941         return false;
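The iommu.c hunks above stop consulting the bus-wide dev->bus->iommu_ops and instead walk the per-device chain dev->iommu->iommu_dev->ops, checking each link before dereferencing it. A user-space sketch of that guarded-chain shape with hypothetical stand-in types:

#include <stdio.h>

struct ops       { int (*feature)(void); };
struct iommu_dev { const struct ops *ops; };
struct dev_iommu { struct iommu_dev *iommu_dev; };
struct device    { struct dev_iommu *iommu; };

static int enable_feature(struct device *dev)
{
        if (dev->iommu && dev->iommu->iommu_dev) {
                const struct ops *ops = dev->iommu->iommu_dev->ops;

                if (ops->feature)
                        return ops->feature();
        }
        return -1;   /* -ENODEV in the kernel code */
}

int main(void)
{
        struct device dev = { 0 };

        printf("%d\n", enable_feature(&dev));   /* safely -1, no deref */
        return 0;
}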
36943 diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
36944 index 6ecc007f07cd..e168a682806a 100644
36945 --- a/drivers/iommu/mtk_iommu.c
36946 +++ b/drivers/iommu/mtk_iommu.c
36947 @@ -688,13 +688,6 @@ static const struct iommu_ops mtk_iommu_ops = {
36948  static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
36950         u32 regval;
36951 -       int ret;
36953 -       ret = clk_prepare_enable(data->bclk);
36954 -       if (ret) {
36955 -               dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
36956 -               return ret;
36957 -       }
36959         if (data->plat_data->m4u_plat == M4U_MT8173) {
36960                 regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
36961 @@ -760,7 +753,6 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
36962         if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
36963                              dev_name(data->dev), (void *)data)) {
36964                 writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
36965 -               clk_disable_unprepare(data->bclk);
36966                 dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
36967                 return -ENODEV;
36968         }
36969 @@ -977,14 +969,19 @@ static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev)
36970         void __iomem *base = data->base;
36971         int ret;
36973 -       /* Avoid first resume to affect the default value of registers below. */
36974 -       if (!m4u_dom)
36975 -               return 0;
36976         ret = clk_prepare_enable(data->bclk);
36977         if (ret) {
36978                 dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
36979                 return ret;
36980         }
36982 +       /*
36983 +        * Upon first resume, only enable the clk and return, since the values of the
36984 +        * registers are not yet set.
36985 +        */
36986 +       if (!m4u_dom)
36987 +               return 0;
36989         writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL);
36990         writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL);
36991         writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
36992 diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
36993 index 563a9b366294..e81e89a81cb5 100644
36994 --- a/drivers/irqchip/irq-gic-v3-mbi.c
36995 +++ b/drivers/irqchip/irq-gic-v3-mbi.c
36996 @@ -303,7 +303,7 @@ int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent)
36997         reg = of_get_property(np, "mbi-alias", NULL);
36998         if (reg) {
36999                 mbi_phys_base = of_translate_address(np, reg);
37000 -               if (mbi_phys_base == OF_BAD_ADDR) {
37001 +               if (mbi_phys_base == (phys_addr_t)OF_BAD_ADDR) {
37002                         ret = -ENXIO;
37003                         goto err_free_mbi;
37004                 }
37005 diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
37006 index eb0ee356a629..00404024d7cd 100644
37007 --- a/drivers/irqchip/irq-gic-v3.c
37008 +++ b/drivers/irqchip/irq-gic-v3.c
37009 @@ -648,6 +648,10 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
37011         irqnr = gic_read_iar();
37013 +       /* Check for special IDs first */
37014 +       if ((irqnr >= 1020 && irqnr <= 1023))
37015 +               return;
37017         if (gic_supports_nmi() &&
37018             unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) {
37019                 gic_handle_nmi(irqnr, regs);
37020 @@ -659,10 +663,6 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
37021                 gic_arch_enable_irqs();
37022         }
37024 -       /* Check for special IDs first */
37025 -       if ((irqnr >= 1020 && irqnr <= 1023))
37026 -               return;
37028         if (static_branch_likely(&supports_deactivate_key))
37029                 gic_write_eoir(irqnr);
37030         else
37031 diff --git a/drivers/leds/blink/Kconfig b/drivers/leds/blink/Kconfig
37032 index 265b53476a80..6dedc58c47b3 100644
37033 --- a/drivers/leds/blink/Kconfig
37034 +++ b/drivers/leds/blink/Kconfig
37035 @@ -9,6 +9,7 @@ if LEDS_BLINK
37037  config LEDS_BLINK_LGM
37038         tristate "LED support for Intel LGM SoC series"
37039 +       depends on GPIOLIB
37040         depends on LEDS_CLASS
37041         depends on MFD_SYSCON
37042         depends on OF
37043 diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
37044 index fc433e63b1dc..b1590cb4a188 100644
37045 --- a/drivers/leds/leds-lp5523.c
37046 +++ b/drivers/leds/leds-lp5523.c
37047 @@ -307,7 +307,7 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
37048         usleep_range(3000, 6000);
37049         ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
37050         if (ret)
37051 -               return ret;
37052 +               goto out;
37053         status &= LP5523_ENG_STATUS_MASK;
37055         if (status != LP5523_ENG_STATUS_MASK) {
37056 diff --git a/drivers/mailbox/sprd-mailbox.c b/drivers/mailbox/sprd-mailbox.c
37057 index 4c325301a2fe..94d9067dc8d0 100644
37058 --- a/drivers/mailbox/sprd-mailbox.c
37059 +++ b/drivers/mailbox/sprd-mailbox.c
37060 @@ -60,6 +60,8 @@ struct sprd_mbox_priv {
37061         struct clk              *clk;
37062         u32                     outbox_fifo_depth;
37064 +       struct mutex            lock;
37065 +       u32                     refcnt;
37066         struct mbox_chan        chan[SPRD_MBOX_CHAN_MAX];
37067  };
37069 @@ -115,7 +117,11 @@ static irqreturn_t sprd_mbox_outbox_isr(int irq, void *data)
37070                 id = readl(priv->outbox_base + SPRD_MBOX_ID);
37072                 chan = &priv->chan[id];
37073 -               mbox_chan_received_data(chan, (void *)msg);
37074 +               if (chan->cl)
37075 +                       mbox_chan_received_data(chan, (void *)msg);
37076 +               else
37077 +                       dev_warn_ratelimited(priv->dev,
37078 +                                   "message has been dropped at ch[%d]\n", id);
37080                 /* Trigger to update outbox FIFO pointer */
37081                 writel(0x1, priv->outbox_base + SPRD_MBOX_TRIGGER);
37082 @@ -215,18 +221,22 @@ static int sprd_mbox_startup(struct mbox_chan *chan)
37083         struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
37084         u32 val;
37086 -       /* Select outbox FIFO mode and reset the outbox FIFO status */
37087 -       writel(0x0, priv->outbox_base + SPRD_MBOX_FIFO_RST);
37088 +       mutex_lock(&priv->lock);
37089 +       if (priv->refcnt++ == 0) {
37090 +               /* Select outbox FIFO mode and reset the outbox FIFO status */
37091 +               writel(0x0, priv->outbox_base + SPRD_MBOX_FIFO_RST);
37093 -       /* Enable inbox FIFO overflow and delivery interrupt */
37094 -       val = readl(priv->inbox_base + SPRD_MBOX_IRQ_MSK);
37095 -       val &= ~(SPRD_INBOX_FIFO_OVERFLOW_IRQ | SPRD_INBOX_FIFO_DELIVER_IRQ);
37096 -       writel(val, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
37097 +               /* Enable inbox FIFO overflow and delivery interrupt */
37098 +               val = readl(priv->inbox_base + SPRD_MBOX_IRQ_MSK);
37099 +               val &= ~(SPRD_INBOX_FIFO_OVERFLOW_IRQ | SPRD_INBOX_FIFO_DELIVER_IRQ);
37100 +               writel(val, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
37102 -       /* Enable outbox FIFO not empty interrupt */
37103 -       val = readl(priv->outbox_base + SPRD_MBOX_IRQ_MSK);
37104 -       val &= ~SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ;
37105 -       writel(val, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
37106 +               /* Enable outbox FIFO not empty interrupt */
37107 +               val = readl(priv->outbox_base + SPRD_MBOX_IRQ_MSK);
37108 +               val &= ~SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ;
37109 +               writel(val, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
37110 +       }
37111 +       mutex_unlock(&priv->lock);
37113         return 0;
37115 @@ -235,9 +245,13 @@ static void sprd_mbox_shutdown(struct mbox_chan *chan)
37117         struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
37119 -       /* Disable inbox & outbox interrupt */
37120 -       writel(SPRD_INBOX_FIFO_IRQ_MASK, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
37121 -       writel(SPRD_OUTBOX_FIFO_IRQ_MASK, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
37122 +       mutex_lock(&priv->lock);
37123 +       if (--priv->refcnt == 0) {
37124 +               /* Disable inbox & outbox interrupt */
37125 +               writel(SPRD_INBOX_FIFO_IRQ_MASK, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
37126 +               writel(SPRD_OUTBOX_FIFO_IRQ_MASK, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
37127 +       }
37128 +       mutex_unlock(&priv->lock);
37131  static const struct mbox_chan_ops sprd_mbox_ops = {
37132 @@ -266,6 +280,7 @@ static int sprd_mbox_probe(struct platform_device *pdev)
37133                 return -ENOMEM;
37135         priv->dev = dev;
37136 +       mutex_init(&priv->lock);
37138         /*
37139          * The Spreadtrum mailbox uses an inbox to send messages to the target
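The startup/shutdown hunks above serialize channel users behind one mutex and refcount them, so the shared FIFO/IRQ registers are initialized only by the first user and masked again only by the last. A standalone sketch of that first-user/last-user pattern; hw_init()/hw_teardown() are hypothetical stubs:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int refcnt;

static void hw_init(void)     { /* program FIFO/IRQ registers once */ }
static void hw_teardown(void) { /* mask the IRQs once */ }

static void chan_startup(void)
{
        pthread_mutex_lock(&lock);
        if (refcnt++ == 0)
                hw_init();           /* only the first user touches hw */
        pthread_mutex_unlock(&lock);
}

static void chan_shutdown(void)
{
        pthread_mutex_lock(&lock);
        if (--refcnt == 0)
                hw_teardown();       /* only the last user tears down */
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        chan_startup();
        chan_startup();
        chan_shutdown();
        chan_shutdown();   /* hw torn down exactly once */
        return 0;
}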
37140 diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
37141 index 82d4e0880a99..4fb635c0baa0 100644
37142 --- a/drivers/md/bcache/writeback.c
37143 +++ b/drivers/md/bcache/writeback.c
37144 @@ -110,13 +110,13 @@ static void __update_writeback_rate(struct cached_dev *dc)
37145                 int64_t fps;
37147                 if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID) {
37148 -                       fp_term = dc->writeback_rate_fp_term_low *
37149 +                       fp_term = (int64_t)dc->writeback_rate_fp_term_low *
37150                         (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW);
37151                 } else if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH) {
37152 -                       fp_term = dc->writeback_rate_fp_term_mid *
37153 +                       fp_term = (int64_t)dc->writeback_rate_fp_term_mid *
37154                         (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID);
37155                 } else {
37156 -                       fp_term = dc->writeback_rate_fp_term_high *
37157 +                       fp_term = (int64_t)dc->writeback_rate_fp_term_high *
37158                         (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH);
37159                 }
37160                 fps = div_s64(dirty, dirty_buckets) * fp_term;
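The three (int64_t) casts above widen the multiplication itself; without them, the product of two 32-bit operands is computed modulo 2^32 and wraps before it is ever stored in the 64-bit term. A self-contained demonstration with made-up magnitudes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t term = 200000;      /* e.g. a writeback_rate_fp_term_* value */
        int32_t  use  = 30000;       /* e.g. an in_use delta */

        int64_t wrong = term * use;            /* 32-bit multiply, wraps */
        int64_t right = (int64_t)term * use;   /* 64-bit multiply */

        printf("wrong=%lld right=%lld\n", (long long)wrong, (long long)right);
        return 0;
}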
37161 diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
37162 index 46b5d542b8fe..362c887d33b3 100644
37163 --- a/drivers/md/dm-integrity.c
37164 +++ b/drivers/md/dm-integrity.c
37165 @@ -4039,6 +4039,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
37166                         if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
37167                                 r = -EINVAL;
37168                                 ti->error = "Invalid bitmap_flush_interval argument";
37169 +                               goto bad;
37170                         }
37171                         ic->bitmap_flush_interval = msecs_to_jiffies(val);
37172                 } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
37173 diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
37174 index cab12b2251ba..91461b6904c1 100644
37175 --- a/drivers/md/dm-raid.c
37176 +++ b/drivers/md/dm-raid.c
37177 @@ -1868,6 +1868,14 @@ static bool rs_takeover_requested(struct raid_set *rs)
37178         return rs->md.new_level != rs->md.level;
37181 +/* True if layout is set to reshape. */
37182 +static bool rs_is_layout_change(struct raid_set *rs, bool use_mddev)
37184 +       return (use_mddev ? rs->md.delta_disks : rs->delta_disks) ||
37185 +              rs->md.new_layout != rs->md.layout ||
37186 +              rs->md.new_chunk_sectors != rs->md.chunk_sectors;
37189  /* True if @rs is requested to reshape by ctr */
37190  static bool rs_reshape_requested(struct raid_set *rs)
37192 @@ -1880,9 +1888,7 @@ static bool rs_reshape_requested(struct raid_set *rs)
37193         if (rs_is_raid0(rs))
37194                 return false;
37196 -       change = mddev->new_layout != mddev->layout ||
37197 -                mddev->new_chunk_sectors != mddev->chunk_sectors ||
37198 -                rs->delta_disks;
37199 +       change = rs_is_layout_change(rs, false);
37201         /* Historical case to support raid1 reshape without delta disks */
37202         if (rs_is_raid1(rs)) {
37203 @@ -2817,7 +2823,7 @@ static sector_t _get_reshape_sectors(struct raid_set *rs)
37206  /*
37207 - *
37208 + * Reshape:
37209   * - change raid layout
37210   * - change chunk size
37211   * - add disks
37212 @@ -2926,6 +2932,20 @@ static int rs_setup_reshape(struct raid_set *rs)
37213         return r;
37217 + * If the md resync thread has updated superblock with max reshape position
37218 + * at the end of a reshape but not (yet) reset the layout configuration
37219 + * changes -> reset the latter.
37220 + */
37221 +static void rs_reset_inconclusive_reshape(struct raid_set *rs)
37223 +       if (!rs_is_reshaping(rs) && rs_is_layout_change(rs, true)) {
37224 +               rs_set_cur(rs);
37225 +               rs->md.delta_disks = 0;
37226 +               rs->md.reshape_backwards = 0;
37227 +       }
37230  /*
37231   * Enable/disable discard support on RAID set depending on
37232   * RAID level and discard properties of underlying RAID members.
37233 @@ -3212,11 +3232,14 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
37234         if (r)
37235                 goto bad;
37237 +       /* Catch any inconclusive reshape superblock content. */
37238 +       rs_reset_inconclusive_reshape(rs);
37240         /* Start raid set read-only and assumed clean to change in raid_resume() */
37241         rs->md.ro = 1;
37242         rs->md.in_sync = 1;
37244 -       /* Keep array frozen */
37245 +       /* Keep array frozen until resume. */
37246         set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
37248         /* Has to be held on running the array */
37249 @@ -3230,7 +3253,6 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
37250         }
37252         r = md_start(&rs->md);
37254         if (r) {
37255                 ti->error = "Failed to start raid array";
37256                 mddev_unlock(&rs->md);
37257 diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
37258 index 13b4385f4d5a..9c3bc3711b33 100644
37259 --- a/drivers/md/dm-rq.c
37260 +++ b/drivers/md/dm-rq.c
37261 @@ -569,6 +569,7 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
37262         blk_mq_free_tag_set(md->tag_set);
37263  out_kfree_tag_set:
37264         kfree(md->tag_set);
37265 +       md->tag_set = NULL;
37267         return err;
37269 @@ -578,6 +579,7 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md)
37270         if (md->tag_set) {
37271                 blk_mq_free_tag_set(md->tag_set);
37272                 kfree(md->tag_set);
37273 +               md->tag_set = NULL;
37274         }
37277 diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
37278 index 11890db71f3f..962f7df0691e 100644
37279 --- a/drivers/md/dm-snap.c
37280 +++ b/drivers/md/dm-snap.c
37281 @@ -1408,6 +1408,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
37283         if (!s->store->chunk_size) {
37284                 ti->error = "Chunk size not set";
37285 +               r = -EINVAL;
37286                 goto bad_read_metadata;
37287         }
37289 diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
37290 index 200c5d0f08bf..ea3130e11680 100644
37291 --- a/drivers/md/md-bitmap.c
37292 +++ b/drivers/md/md-bitmap.c
37293 @@ -1722,6 +1722,8 @@ void md_bitmap_flush(struct mddev *mddev)
37294         md_bitmap_daemon_work(mddev);
37295         bitmap->daemon_lastrun -= sleep;
37296         md_bitmap_daemon_work(mddev);
37297 +       if (mddev->bitmap_info.external)
37298 +               md_super_wait(mddev);
37299         md_bitmap_update_sb(bitmap);
37302 diff --git a/drivers/md/md.c b/drivers/md/md.c
37303 index 21da0c48f6c2..2a9553efc2d1 100644
37304 --- a/drivers/md/md.c
37305 +++ b/drivers/md/md.c
37306 @@ -734,7 +734,34 @@ void mddev_init(struct mddev *mddev)
37308  EXPORT_SYMBOL_GPL(mddev_init);
37310 +static struct mddev *mddev_find_locked(dev_t unit)
37312 +       struct mddev *mddev;
37314 +       list_for_each_entry(mddev, &all_mddevs, all_mddevs)
37315 +               if (mddev->unit == unit)
37316 +                       return mddev;
37318 +       return NULL;
37321  static struct mddev *mddev_find(dev_t unit)
37323 +       struct mddev *mddev;
37325 +       if (MAJOR(unit) != MD_MAJOR)
37326 +               unit &= ~((1 << MdpMinorShift) - 1);
37328 +       spin_lock(&all_mddevs_lock);
37329 +       mddev = mddev_find_locked(unit);
37330 +       if (mddev)
37331 +               mddev_get(mddev);
37332 +       spin_unlock(&all_mddevs_lock);
37334 +       return mddev;
37337 +static struct mddev *mddev_find_or_alloc(dev_t unit)
37339         struct mddev *mddev, *new = NULL;
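mddev_find_locked() above factors the walk of all_mddevs out of its two call sites: the raw search assumes the lock is held, while the public lookup takes the lock, searches, and pins the object with a reference before unlocking so it cannot vanish under the caller. A user-space sketch of that split with hypothetical stand-ins:

#include <pthread.h>
#include <stddef.h>

struct obj { int unit; int refcnt; };

static pthread_mutex_t all_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj table[4] = { { 1, 0 }, { 2, 0 }, { 3, 0 }, { 4, 0 } };

/* Caller must hold all_lock. */
static struct obj *find_locked(int unit)
{
        for (size_t i = 0; i < 4; i++)
                if (table[i].unit == unit)
                        return &table[i];
        return NULL;
}

static struct obj *find_and_get(int unit)
{
        struct obj *o;

        pthread_mutex_lock(&all_lock);
        o = find_locked(unit);
        if (o)
                o->refcnt++;        /* pin before dropping the lock */
        pthread_mutex_unlock(&all_lock);
        return o;
}

int main(void)
{
        struct obj *o = find_and_get(3);

        return o ? 0 : 1;
}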
37341 @@ -745,13 +772,13 @@ static struct mddev *mddev_find(dev_t unit)
37342         spin_lock(&all_mddevs_lock);
37344         if (unit) {
37345 -               list_for_each_entry(mddev, &all_mddevs, all_mddevs)
37346 -                       if (mddev->unit == unit) {
37347 -                               mddev_get(mddev);
37348 -                               spin_unlock(&all_mddevs_lock);
37349 -                               kfree(new);
37350 -                               return mddev;
37351 -                       }
37352 +               mddev = mddev_find_locked(unit);
37353 +               if (mddev) {
37354 +                       mddev_get(mddev);
37355 +                       spin_unlock(&all_mddevs_lock);
37356 +                       kfree(new);
37357 +                       return mddev;
37358 +               }
37360                 if (new) {
37361                         list_add(&new->all_mddevs, &all_mddevs);
37362 @@ -777,12 +804,7 @@ static struct mddev *mddev_find(dev_t unit)
37363                                 return NULL;
37364                         }
37366 -                       is_free = 1;
37367 -                       list_for_each_entry(mddev, &all_mddevs, all_mddevs)
37368 -                               if (mddev->unit == dev) {
37369 -                                       is_free = 0;
37370 -                                       break;
37371 -                               }
37372 +                       is_free = !mddev_find_locked(dev);
37373                 }
37374                 new->unit = dev;
37375                 new->md_minor = MINOR(dev);
37376 @@ -5644,7 +5666,7 @@ static int md_alloc(dev_t dev, char *name)
37377          * writing to /sys/module/md_mod/parameters/new_array.
37378          */
37379         static DEFINE_MUTEX(disks_mutex);
37380 -       struct mddev *mddev = mddev_find(dev);
37381 +       struct mddev *mddev = mddev_find_or_alloc(dev);
37382         struct gendisk *disk;
37383         int partitioned;
37384         int shift;
37385 @@ -6524,11 +6546,9 @@ static void autorun_devices(int part)
37387                 md_probe(dev);
37388                 mddev = mddev_find(dev);
37389 -               if (!mddev || !mddev->gendisk) {
37390 -                       if (mddev)
37391 -                               mddev_put(mddev);
37392 +               if (!mddev)
37393                         break;
37394 -               }
37396                 if (mddev_lock(mddev))
37397                         pr_warn("md: %s locked, cannot run\n", mdname(mddev));
37398                 else if (mddev->raid_disks || mddev->major_version
37399 @@ -7821,8 +7841,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
37400                 /* Wait until bdev->bd_disk is definitely gone */
37401                 if (work_pending(&mddev->del_work))
37402                         flush_workqueue(md_misc_wq);
37403 -               /* Then retry the open from the top */
37404 -               return -ERESTARTSYS;
37405 +               return -EBUSY;
37406         }
37407         BUG_ON(mddev != bdev->bd_disk->private_data);
37409 @@ -8153,7 +8172,11 @@ static void *md_seq_start(struct seq_file *seq, loff_t *pos)
37410         loff_t l = *pos;
37411         struct mddev *mddev;
37413 -       if (l >= 0x10000)
37414 +       if (l == 0x10000) {
37415 +               ++*pos;
37416 +               return (void *)2;
37417 +       }
37418 +       if (l > 0x10000)
37419                 return NULL;
37420         if (!l--)
37421                 /* header */
37422 @@ -9251,11 +9274,11 @@ void md_check_recovery(struct mddev *mddev)
37423                 }
37425                 if (mddev_is_clustered(mddev)) {
37426 -                       struct md_rdev *rdev;
37427 +                       struct md_rdev *rdev, *tmp;
37428                         /* kick the device if another node issued a
37429                          * remove disk.
37430                          */
37431 -                       rdev_for_each(rdev, mddev) {
37432 +                       rdev_for_each_safe(rdev, tmp, mddev) {
37433                                 if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
37434                                                 rdev->raid_disk < 0)
37435                                         md_kick_rdev_from_array(rdev);
37436 @@ -9569,7 +9592,7 @@ static int __init md_init(void)
37437  static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
37439         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
37440 -       struct md_rdev *rdev2;
37441 +       struct md_rdev *rdev2, *tmp;
37442         int role, ret;
37443         char b[BDEVNAME_SIZE];
37445 @@ -9586,7 +9609,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
37446         }
37448         /* Check for change of roles in the active devices */
37449 -       rdev_for_each(rdev2, mddev) {
37450 +       rdev_for_each_safe(rdev2, tmp, mddev) {
37451                 if (test_bit(Faulty, &rdev2->flags))
37452                         continue;
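
The two loop conversions in md.c matter because md_kick_rdev_from_array() unlinks the entry being visited; the plain rdev_for_each() would then follow a stale next pointer. rdev_for_each_safe() caches the successor before the body runs. Roughly the same idea with a hand-rolled list (userspace sketch, not the kernel macros):

    #include <stdlib.h>

    struct node { struct node *next; int dead; };

    /* Safe iteration: save the successor before the current node may die. */
    static void prune(struct node **pp)
    {
            struct node *n, *tmp;

            while ((n = *pp)) {
                    tmp = n->next;            /* cached first, like the _safe macro */
                    if (n->dead) {
                            *pp = tmp;        /* unlink */
                            free(n);          /* n is gone; tmp is still valid */
                    } else {
                            pp = &n->next;
                    }
            }
    }

    int main(void) { struct node *l = NULL; prune(&l); return 0; }
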
37454 diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
37455 index fe073d92f01e..70cfdea27efd 100644
37456 --- a/drivers/md/persistent-data/dm-btree-internal.h
37457 +++ b/drivers/md/persistent-data/dm-btree-internal.h
37458 @@ -34,12 +34,12 @@ struct node_header {
37459         __le32 max_entries;
37460         __le32 value_size;
37461         __le32 padding;
37462 -} __packed;
37463 +} __attribute__((packed, aligned(8)));
37465  struct btree_node {
37466         struct node_header header;
37467         __le64 keys[];
37468 -} __packed;
37469 +} __attribute__((packed, aligned(8)));
37472  /*
37473 diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
37474 index d8b4125e338c..a213bf11738f 100644
37475 --- a/drivers/md/persistent-data/dm-space-map-common.c
37476 +++ b/drivers/md/persistent-data/dm-space-map-common.c
37477 @@ -339,6 +339,8 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
37478          */
37479         begin = do_div(index_begin, ll->entries_per_block);
37480         end = do_div(end, ll->entries_per_block);
37481 +       if (end == 0)
37482 +               end = ll->entries_per_block;
37484         for (i = index_begin; i < index_end; i++, begin = 0) {
37485                 struct dm_block *blk;
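
The sm_ll_find_free_block() hunk handles the boundary case of do_div(): it leaves the quotient in its first argument and returns the remainder, so an 'end' that lands exactly on a block boundary yields remainder 0 and the final block would be searched for zero entries. A worked example with assumed numbers:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t entries_per_block = 126;         /* assumed, for illustration */
            uint64_t end = 2 * entries_per_block;     /* exactly on a boundary */
            uint64_t rem = end % entries_per_block;   /* what do_div() returns: 0 */

            if (rem == 0)
                    rem = entries_per_block;  /* the fix: cover the whole last block */

            printf("entries considered in the last block: %llu\n",
                   (unsigned long long)rem);
            return 0;
    }
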
37486 diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h
37487 index 8de63ce39bdd..87e17909ef52 100644
37488 --- a/drivers/md/persistent-data/dm-space-map-common.h
37489 +++ b/drivers/md/persistent-data/dm-space-map-common.h
37490 @@ -33,7 +33,7 @@ struct disk_index_entry {
37491         __le64 blocknr;
37492         __le32 nr_free;
37493         __le32 none_free_before;
37494 -} __packed;
37495 +} __attribute__ ((packed, aligned(8)));
37498  #define MAX_METADATA_BITMAPS 255
37499 @@ -43,7 +43,7 @@ struct disk_metadata_index {
37500         __le64 blocknr;
37502         struct disk_index_entry index[MAX_METADATA_BITMAPS];
37503 -} __packed;
37504 +} __attribute__ ((packed, aligned(8)));
37506  struct ll_disk;
37508 @@ -86,7 +86,7 @@ struct disk_sm_root {
37509         __le64 nr_allocated;
37510         __le64 bitmap_root;
37511         __le64 ref_count_root;
37512 -} __packed;
37513 +} __attribute__ ((packed, aligned(8)));
37515  #define ENTRIES_PER_BYTE 4
37517 @@ -94,7 +94,7 @@ struct disk_bitmap_header {
37518         __le32 csum;
37519         __le32 not_used;
37520         __le64 blocknr;
37521 -} __packed;
37522 +} __attribute__ ((packed, aligned(8)));
37524  enum allocation_event {
37525         SM_NONE,
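
The __packed to __attribute__((packed, aligned(8))) changes in these persistent-data headers keep the on-disk structures padding-free but restore their natural 8-byte alignment, so the compiler may again emit aligned accesses; sizeof is rounded to a multiple of 8, which is safe because all of these structs already are. A compilable illustration (the struct is a stand-in, not the kernel one):

    #include <stdio.h>

    struct on_disk_packed {
            unsigned int csum, flags;
            unsigned long long blocknr;
    } __attribute__((packed));                 /* size 16, alignment 1 */

    struct on_disk_aligned {
            unsigned int csum, flags;
            unsigned long long blocknr;
    } __attribute__((packed, aligned(8)));     /* size 16, alignment 8 */

    int main(void)
    {
            printf("packed:         size %zu, align %zu\n",
                   sizeof(struct on_disk_packed), _Alignof(struct on_disk_packed));
            printf("packed+aligned: size %zu, align %zu\n",
                   sizeof(struct on_disk_aligned), _Alignof(struct on_disk_aligned));
            return 0;
    }
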
37526 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
37527 index d2378765dc15..ced076ba560e 100644
37528 --- a/drivers/md/raid1.c
37529 +++ b/drivers/md/raid1.c
37530 @@ -478,6 +478,8 @@ static void raid1_end_write_request(struct bio *bio)
37531                 if (!test_bit(Faulty, &rdev->flags))
37532                         set_bit(R1BIO_WriteError, &r1_bio->state);
37533                 else {
37534 +                       /* Fail the request */
37535 +                       set_bit(R1BIO_Degraded, &r1_bio->state);
37536                         /* Finished with this branch */
37537                         r1_bio->bios[mirror] = NULL;
37538                         to_put = bio;
37539 diff --git a/drivers/media/common/saa7146/saa7146_core.c b/drivers/media/common/saa7146/saa7146_core.c
37540 index f2d13b71416c..e50fa0ff7c5d 100644
37541 --- a/drivers/media/common/saa7146/saa7146_core.c
37542 +++ b/drivers/media/common/saa7146/saa7146_core.c
37543 @@ -253,7 +253,7 @@ int saa7146_pgtable_build_single(struct pci_dev *pci, struct saa7146_pgtable *pt
37544                          i, sg_dma_address(list), sg_dma_len(list),
37545                          list->offset);
37546  */
37547 -               for (p = 0; p * 4096 < list->length; p++, ptr++) {
37548 +               for (p = 0; p * 4096 < sg_dma_len(list); p++, ptr++) {
37549                         *ptr = cpu_to_le32(sg_dma_address(list) + p * 4096);
37550                         nr_pages++;
37551                 }
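
This hunk, its twin in saa7146_video.c just below, and the saa7134 one further down all build DMA page tables from sg_dma_len() instead of list->length: once dma_map_sg() has run behind an IOMMU, the mapped length of a scatterlist entry can differ from the CPU-side length. A hedged kernel-style sketch of the corrected walk (not the driver code itself):

    #include <linux/scatterlist.h>

    /* After dma_map_sg(), only sg_dma_address()/sg_dma_len() describe
     * what the device will actually see. */
    static void fill_pagetable(__le32 *ptr, struct scatterlist *sgl, int nents)
    {
            struct scatterlist *sg;
            unsigned int p;
            int i;

            for_each_sg(sgl, sg, nents, i)
                    for (p = 0; p * 4096 < sg_dma_len(sg); p++)
                            *ptr++ = cpu_to_le32(sg_dma_address(sg) + p * 4096);
    }
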
37552 diff --git a/drivers/media/common/saa7146/saa7146_video.c b/drivers/media/common/saa7146/saa7146_video.c
37553 index 7b8795eca589..66215d9106a4 100644
37554 --- a/drivers/media/common/saa7146/saa7146_video.c
37555 +++ b/drivers/media/common/saa7146/saa7146_video.c
37556 @@ -247,9 +247,8 @@ static int saa7146_pgtable_build(struct saa7146_dev *dev, struct saa7146_buf *bu
37558                 /* walk all pages, copy all page addresses to ptr1 */
37559                 for (i = 0; i < length; i++, list++) {
37560 -                       for (p = 0; p * 4096 < list->length; p++, ptr1++) {
37561 +                       for (p = 0; p * 4096 < sg_dma_len(list); p++, ptr1++)
37562                                 *ptr1 = cpu_to_le32(sg_dma_address(list) - list->offset);
37563 -                       }
37564                 }
37565  /*
37566                 ptr1 = pt1->cpu;
37567 diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
37568 index 5ff7bedee247..3862ddc86ec4 100644
37569 --- a/drivers/media/dvb-core/dvbdev.c
37570 +++ b/drivers/media/dvb-core/dvbdev.c
37571 @@ -241,6 +241,7 @@ static void dvb_media_device_free(struct dvb_device *dvbdev)
37573         if (dvbdev->adapter->conn) {
37574                 media_device_unregister_entity(dvbdev->adapter->conn);
37575 +               kfree(dvbdev->adapter->conn);
37576                 dvbdev->adapter->conn = NULL;
37577                 kfree(dvbdev->adapter->conn_pads);
37578                 dvbdev->adapter->conn_pads = NULL;
37579 diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
37580 index cfa4cdde99d8..02e8aa11e36e 100644
37581 --- a/drivers/media/dvb-frontends/m88ds3103.c
37582 +++ b/drivers/media/dvb-frontends/m88ds3103.c
37583 @@ -1904,8 +1904,8 @@ static int m88ds3103_probe(struct i2c_client *client,
37585                 dev->dt_client = i2c_new_dummy_device(client->adapter,
37586                                                       dev->dt_addr);
37587 -               if (!dev->dt_client) {
37588 -                       ret = -ENODEV;
37589 +               if (IS_ERR(dev->dt_client)) {
37590 +                       ret = PTR_ERR(dev->dt_client);
37591                         goto err_kfree;
37592                 }
37593         }
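
The m88ds3103 fix reflects that i2c_new_dummy_device() reports failure with ERR_PTR(), never NULL (NULL returns belong to the older i2c_new_dummy()), so a '!ptr' check silently accepts an error pointer. The convention, as a fragment:

    client = i2c_new_dummy_device(adapter, addr);
    if (IS_ERR(client))
            return PTR_ERR(client);   /* negative errno encoded in the pointer */
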
37594 diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
37595 index a3161d709015..ab7883cff8b2 100644
37596 --- a/drivers/media/i2c/adv7511-v4l2.c
37597 +++ b/drivers/media/i2c/adv7511-v4l2.c
37598 @@ -1964,7 +1964,7 @@ static int adv7511_remove(struct i2c_client *client)
37600         adv7511_set_isr(sd, false);
37601         adv7511_init_setup(sd);
37602 -       cancel_delayed_work(&state->edid_handler);
37603 +       cancel_delayed_work_sync(&state->edid_handler);
37604         i2c_unregister_device(state->i2c_edid);
37605         i2c_unregister_device(state->i2c_cec);
37606         i2c_unregister_device(state->i2c_pktmem);
37607 diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
37608 index 09004d928d11..d1f58795794f 100644
37609 --- a/drivers/media/i2c/adv7604.c
37610 +++ b/drivers/media/i2c/adv7604.c
37611 @@ -3616,7 +3616,7 @@ static int adv76xx_remove(struct i2c_client *client)
37612         io_write(sd, 0x6e, 0);
37613         io_write(sd, 0x73, 0);
37615 -       cancel_delayed_work(&state->delayed_work_enable_hotplug);
37616 +       cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
37617         v4l2_async_unregister_subdev(sd);
37618         media_entity_cleanup(&sd->entity);
37619         adv76xx_unregister_clients(to_state(sd));
37620 diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
37621 index 0855f648416d..f7d2b6cd3008 100644
37622 --- a/drivers/media/i2c/adv7842.c
37623 +++ b/drivers/media/i2c/adv7842.c
37624 @@ -3586,7 +3586,7 @@ static int adv7842_remove(struct i2c_client *client)
37625         struct adv7842_state *state = to_state(sd);
37627         adv7842_irq_enable(sd, false);
37628 -       cancel_delayed_work(&state->delayed_work_enable_hotplug);
37629 +       cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
37630         v4l2_device_unregister_subdev(sd);
37631         media_entity_cleanup(&sd->entity);
37632         adv7842_unregister_clients(sd);
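
The three remove()-path fixes above (and the tc358743/tda1997x ones below) share one point: cancel_delayed_work() only dequeues a pending item, while cancel_delayed_work_sync() also waits for a handler that is already running, so state torn down afterwards can no longer be touched concurrently. The teardown shape, as a sketch with an illustrative 'state':

    cancel_delayed_work_sync(&state->work);  /* waits if the handler is mid-run */
    kfree(state);                            /* safe: no handler can still use it */
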
37633 diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
37634 index 15afbb4f5b31..4505594996bd 100644
37635 --- a/drivers/media/i2c/ccs/ccs-core.c
37636 +++ b/drivers/media/i2c/ccs/ccs-core.c
37637 @@ -3522,11 +3522,11 @@ static int ccs_probe(struct i2c_client *client)
37638         sensor->pll.scale_n = CCS_LIM(sensor, SCALER_N_MIN);
37640         ccs_create_subdev(sensor, sensor->scaler, " scaler", 2,
37641 -                         MEDIA_ENT_F_CAM_SENSOR);
37642 +                         MEDIA_ENT_F_PROC_VIDEO_SCALER);
37643         ccs_create_subdev(sensor, sensor->binner, " binner", 2,
37644                           MEDIA_ENT_F_PROC_VIDEO_SCALER);
37645         ccs_create_subdev(sensor, sensor->pixel_array, " pixel_array", 1,
37646 -                         MEDIA_ENT_F_PROC_VIDEO_SCALER);
37647 +                         MEDIA_ENT_F_CAM_SENSOR);
37649         rval = ccs_init_controls(sensor);
37650         if (rval < 0)
37651 diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
37652 index 6e3382b85a90..49ba39418360 100644
37653 --- a/drivers/media/i2c/imx219.c
37654 +++ b/drivers/media/i2c/imx219.c
37655 @@ -1035,29 +1035,47 @@ static int imx219_start_streaming(struct imx219 *imx219)
37656         const struct imx219_reg_list *reg_list;
37657         int ret;
37659 +       ret = pm_runtime_get_sync(&client->dev);
37660 +       if (ret < 0) {
37661 +               pm_runtime_put_noidle(&client->dev);
37662 +               return ret;
37663 +       }
37665         /* Apply default values of current mode */
37666         reg_list = &imx219->mode->reg_list;
37667         ret = imx219_write_regs(imx219, reg_list->regs, reg_list->num_of_regs);
37668         if (ret) {
37669                 dev_err(&client->dev, "%s failed to set mode\n", __func__);
37670 -               return ret;
37671 +               goto err_rpm_put;
37672         }
37674         ret = imx219_set_framefmt(imx219);
37675         if (ret) {
37676                 dev_err(&client->dev, "%s failed to set frame format: %d\n",
37677                         __func__, ret);
37678 -               return ret;
37679 +               goto err_rpm_put;
37680         }
37682         /* Apply customized values from user */
37683         ret =  __v4l2_ctrl_handler_setup(imx219->sd.ctrl_handler);
37684         if (ret)
37685 -               return ret;
37686 +               goto err_rpm_put;
37688         /* set stream on register */
37689 -       return imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
37690 -                               IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
37691 +       ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
37692 +                              IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
37693 +       if (ret)
37694 +               goto err_rpm_put;
37696 +       /* vflip and hflip cannot change during streaming */
37697 +       __v4l2_ctrl_grab(imx219->vflip, true);
37698 +       __v4l2_ctrl_grab(imx219->hflip, true);
37700 +       return 0;
37702 +err_rpm_put:
37703 +       pm_runtime_put(&client->dev);
37704 +       return ret;
37707  static void imx219_stop_streaming(struct imx219 *imx219)
37708 @@ -1070,12 +1088,16 @@ static void imx219_stop_streaming(struct imx219 *imx219)
37709                                IMX219_REG_VALUE_08BIT, IMX219_MODE_STANDBY);
37710         if (ret)
37711                 dev_err(&client->dev, "%s failed to set stream\n", __func__);
37713 +       __v4l2_ctrl_grab(imx219->vflip, false);
37714 +       __v4l2_ctrl_grab(imx219->hflip, false);
37716 +       pm_runtime_put(&client->dev);
37719  static int imx219_set_stream(struct v4l2_subdev *sd, int enable)
37721         struct imx219 *imx219 = to_imx219(sd);
37722 -       struct i2c_client *client = v4l2_get_subdevdata(sd);
37723         int ret = 0;
37725         mutex_lock(&imx219->mutex);
37726 @@ -1085,36 +1107,23 @@ static int imx219_set_stream(struct v4l2_subdev *sd, int enable)
37727         }
37729         if (enable) {
37730 -               ret = pm_runtime_get_sync(&client->dev);
37731 -               if (ret < 0) {
37732 -                       pm_runtime_put_noidle(&client->dev);
37733 -                       goto err_unlock;
37734 -               }
37736                 /*
37737                  * Apply default & customized values
37738                  * and then start streaming.
37739                  */
37740                 ret = imx219_start_streaming(imx219);
37741                 if (ret)
37742 -                       goto err_rpm_put;
37743 +                       goto err_unlock;
37744         } else {
37745                 imx219_stop_streaming(imx219);
37746 -               pm_runtime_put(&client->dev);
37747         }
37749         imx219->streaming = enable;
37751 -       /* vflip and hflip cannot change during streaming */
37752 -       __v4l2_ctrl_grab(imx219->vflip, enable);
37753 -       __v4l2_ctrl_grab(imx219->hflip, enable);
37755         mutex_unlock(&imx219->mutex);
37757         return ret;
37759 -err_rpm_put:
37760 -       pm_runtime_put(&client->dev);
37761  err_unlock:
37762         mutex_unlock(&imx219->mutex);
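
The imx219 rework moves the runtime-PM reference into imx219_start_streaming() so every failure after the get unwinds through err_rpm_put. The idiom matters because pm_runtime_get_sync() increments the usage count even when it fails, hence the put_noidle() on error. In outline (do_hardware_setup() is a hypothetical stand-in):

    ret = pm_runtime_get_sync(dev);
    if (ret < 0) {
            pm_runtime_put_noidle(dev);  /* drop the count taken despite failure */
            return ret;
    }

    ret = do_hardware_setup();
    if (ret)
            goto err_rpm_put;
    return 0;

    err_rpm_put:
            pm_runtime_put(dev);
            return ret;
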
37764 diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
37765 index 39530d43590e..a7caf2eb5771 100644
37766 --- a/drivers/media/i2c/msp3400-driver.c
37767 +++ b/drivers/media/i2c/msp3400-driver.c
37768 @@ -170,7 +170,7 @@ static int msp_read(struct i2c_client *client, int dev, int addr)
37769                         break;
37770                 dev_warn(&client->dev, "I/O error #%d (read 0x%02x/0x%02x)\n", err,
37771                        dev, addr);
37772 -               schedule_timeout_interruptible(msecs_to_jiffies(10));
37773 +               schedule_msec_hrtimeout_interruptible((10));
37774         }
37775         if (err == 3) {
37776                 dev_warn(&client->dev, "resetting chip, sound will go off.\n");
37777 @@ -211,7 +211,7 @@ static int msp_write(struct i2c_client *client, int dev, int addr, int val)
37778                         break;
37779                 dev_warn(&client->dev, "I/O error #%d (write 0x%02x/0x%02x)\n", err,
37780                        dev, addr);
37781 -               schedule_timeout_interruptible(msecs_to_jiffies(10));
37782 +               schedule_msec_hrtimeout_interruptible((10));
37783         }
37784         if (err == 3) {
37785                 dev_warn(&client->dev, "resetting chip, sound will go off.\n");
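
schedule_msec_hrtimeout_interruptible() and its relatives are not mainline; they are defined elsewhere in this xanmod/-ck patch set and sleep on an hrtimer, so a 10 ms wait is not rounded up to whole jiffies (at HZ=100 a jiffies-based 10 ms sleep can stretch toward 20 ms). The doubled parentheses in these conversions are harmless leftovers of the scripted msecs_to_jiffies(...) substitution. For one call site:

    /* before: granularity limited by HZ */
    schedule_timeout_interruptible(msecs_to_jiffies(10));
    /* after: hrtimer-backed, close to the requested 10 ms */
    schedule_msec_hrtimeout_interruptible(10);
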
37786 diff --git a/drivers/media/i2c/rdacm21.c b/drivers/media/i2c/rdacm21.c
37787 index dcc21515e5a4..179d107f494c 100644
37788 --- a/drivers/media/i2c/rdacm21.c
37789 +++ b/drivers/media/i2c/rdacm21.c
37790 @@ -345,7 +345,7 @@ static int ov10640_initialize(struct rdacm21_device *dev)
37791         /* Read OV10640 ID to test communications. */
37792         ov490_write_reg(dev, OV490_SCCB_SLAVE0_DIR, OV490_SCCB_SLAVE_READ);
37793         ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_HIGH, OV10640_CHIP_ID >> 8);
37794 -       ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_LOW, (u8)OV10640_CHIP_ID);
37795 +       ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_LOW, OV10640_CHIP_ID & 0xff);
37797         /* Trigger SCCB slave transaction and give it some time to complete. */
37798         ov490_write_reg(dev, OV490_HOST_CMD, OV490_HOST_CMD_TRIGGER);
37799 diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
37800 index 831b5b54fd78..1b309bb743c7 100644
37801 --- a/drivers/media/i2c/tc358743.c
37802 +++ b/drivers/media/i2c/tc358743.c
37803 @@ -2193,7 +2193,7 @@ static int tc358743_remove(struct i2c_client *client)
37804                 del_timer_sync(&state->timer);
37805                 flush_work(&state->work_i2c_poll);
37806         }
37807 -       cancel_delayed_work(&state->delayed_work_enable_hotplug);
37808 +       cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
37809         cec_unregister_adapter(state->cec_adap);
37810         v4l2_async_unregister_subdev(sd);
37811         v4l2_device_unregister_subdev(sd);
37812 diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
37813 index a09bf0a39d05..89bb7e6dc7a4 100644
37814 --- a/drivers/media/i2c/tda1997x.c
37815 +++ b/drivers/media/i2c/tda1997x.c
37816 @@ -2804,7 +2804,7 @@ static int tda1997x_remove(struct i2c_client *client)
37817         media_entity_cleanup(&sd->entity);
37818         v4l2_ctrl_handler_free(&state->hdl);
37819         regulator_bulk_disable(TDA1997X_NUM_SUPPLIES, state->supplies);
37820 -       cancel_delayed_work(&state->delayed_work_enable_hpd);
37821 +       cancel_delayed_work_sync(&state->delayed_work_enable_hpd);
37822         mutex_destroy(&state->page_lock);
37823         mutex_destroy(&state->lock);
37825 diff --git a/drivers/media/pci/cx18/cx18-gpio.c b/drivers/media/pci/cx18/cx18-gpio.c
37826 index cf7cfda94107..f63e17489547 100644
37827 --- a/drivers/media/pci/cx18/cx18-gpio.c
37828 +++ b/drivers/media/pci/cx18/cx18-gpio.c
37829 @@ -81,11 +81,11 @@ static void gpio_reset_seq(struct cx18 *cx, u32 active_lo, u32 active_hi,
37831         /* Assert */
37832         gpio_update(cx, mask, ~active_lo);
37833 -       schedule_timeout_uninterruptible(msecs_to_jiffies(assert_msecs));
37834 +       schedule_msec_hrtimeout_uninterruptible((assert_msecs));
37836         /* Deassert */
37837         gpio_update(cx, mask, ~active_hi);
37838 -       schedule_timeout_uninterruptible(msecs_to_jiffies(recovery_msecs));
37839 +       schedule_msec_hrtimeout_uninterruptible((recovery_msecs));
37842  /*
37843 diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
37844 index 22f55a7840a6..d0ca260ecf70 100644
37845 --- a/drivers/media/pci/cx23885/cx23885-core.c
37846 +++ b/drivers/media/pci/cx23885/cx23885-core.c
37847 @@ -2077,6 +2077,15 @@ static struct {
37848          * 0x1423 is the PCI ID for the IOMMU found on Kaveri
37849          */
37850         { PCI_VENDOR_ID_AMD, 0x1423 },
37851 +       /* 0x1481 is the PCI ID for the IOMMU found on Starship/Matisse
37852 +        */
37853 +       { PCI_VENDOR_ID_AMD, 0x1481 },
37854 +       /* 0x1419 is the PCI ID for the IOMMU found on 15h (Models 10h-1fh) family
37855 +        */
37856 +       { PCI_VENDOR_ID_AMD, 0x1419 },
37857 +       /* 0x5a23 is the PCI ID for the IOMMU found on RD890S/RD990
37858 +        */
37859 +       { PCI_VENDOR_ID_ATI, 0x5a23 },
37860  };
37862  static bool cx23885_does_need_dma_reset(void)
37863 diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
37864 index 6e8c0c230e11..fecef85bd62e 100644
37865 --- a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
37866 +++ b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
37867 @@ -302,7 +302,7 @@ static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
37868         if (!q->sensor)
37869                 return -ENODEV;
37871 -       freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes);
37872 +       freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes * 2);
37873         if (freq < 0) {
37874                 dev_err(dev, "error %lld, invalid link_freq\n", freq);
37875                 return freq;
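
The ipu3-cio2 fix doubles the divisor passed to v4l2_get_link_freq(): when the link frequency must be derived from the sensor's pixel rate, MIPI CSI-2 D-PHY lanes move two bits per link-clock cycle (double data rate), so the divisor is 2 * lanes, not lanes. A worked example with assumed numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long pixel_rate = 240000000ULL;  /* pixels/s, assumed */
            unsigned int bpp = 10, lanes = 2;

            /* link_freq = pixel_rate * bpp / (2 * lanes) for a DDR D-PHY */
            unsigned long long link_freq = pixel_rate * bpp / (2ULL * lanes);

            printf("link frequency: %llu Hz\n", link_freq);  /* 600000000 */
            return 0;
    }
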
37876 diff --git a/drivers/media/pci/ivtv/ivtv-gpio.c b/drivers/media/pci/ivtv/ivtv-gpio.c
37877 index 856e7ab7f33e..766a26251337 100644
37878 --- a/drivers/media/pci/ivtv/ivtv-gpio.c
37879 +++ b/drivers/media/pci/ivtv/ivtv-gpio.c
37880 @@ -105,7 +105,7 @@ void ivtv_reset_ir_gpio(struct ivtv *itv)
37881         curout = (curout & ~0xF) | 1;
37882         write_reg(curout, IVTV_REG_GPIO_OUT);
37883         /* We could use something else for smaller time */
37884 -       schedule_timeout_interruptible(msecs_to_jiffies(1));
37885 +       schedule_msec_hrtimeout_interruptible((1));
37886         curout |= 2;
37887         write_reg(curout, IVTV_REG_GPIO_OUT);
37888         curdir &= ~0x80;
37889 @@ -125,11 +125,11 @@ int ivtv_reset_tuner_gpio(void *dev, int component, int cmd, int value)
37890         curout = read_reg(IVTV_REG_GPIO_OUT);
37891         curout &= ~(1 << itv->card->xceive_pin);
37892         write_reg(curout, IVTV_REG_GPIO_OUT);
37893 -       schedule_timeout_interruptible(msecs_to_jiffies(1));
37894 +       schedule_msec_hrtimeout_interruptible((1));
37896         curout |= 1 << itv->card->xceive_pin;
37897         write_reg(curout, IVTV_REG_GPIO_OUT);
37898 -       schedule_timeout_interruptible(msecs_to_jiffies(1));
37899 +       schedule_msec_hrtimeout_interruptible((1));
37900         return 0;
37903 diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
37904 index 35dccb31174c..8181cd65e876 100644
37905 --- a/drivers/media/pci/ivtv/ivtv-ioctl.c
37906 +++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
37907 @@ -1139,7 +1139,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id std)
37908                                 TASK_UNINTERRUPTIBLE);
37909                 if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
37910                         break;
37911 -               schedule_timeout(msecs_to_jiffies(25));
37912 +               schedule_msec_hrtimeout((25));
37913         }
37914         finish_wait(&itv->vsync_waitq, &wait);
37915         mutex_lock(&itv->serialize_lock);
37916 diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
37917 index f04ee84bab5f..c4469b4b8f99 100644
37918 --- a/drivers/media/pci/ivtv/ivtv-streams.c
37919 +++ b/drivers/media/pci/ivtv/ivtv-streams.c
37920 @@ -849,7 +849,7 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
37921                         while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) &&
37922                                 time_before(jiffies,
37923                                             then + msecs_to_jiffies(2000))) {
37924 -                               schedule_timeout(msecs_to_jiffies(10));
37925 +                               schedule_msec_hrtimeout((10));
37926                         }
37928                         /* To convert jiffies to ms, we must multiply by 1000
37929 diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
37930 index 391572a6ec76..efb757d5168a 100644
37931 --- a/drivers/media/pci/saa7134/saa7134-core.c
37932 +++ b/drivers/media/pci/saa7134/saa7134-core.c
37933 @@ -243,7 +243,7 @@ int saa7134_pgtable_build(struct pci_dev *pci, struct saa7134_pgtable *pt,
37935         ptr = pt->cpu + startpage;
37936         for (i = 0; i < length; i++, list = sg_next(list)) {
37937 -               for (p = 0; p * 4096 < list->length; p++, ptr++)
37938 +               for (p = 0; p * 4096 < sg_dma_len(list); p++, ptr++)
37939                         *ptr = cpu_to_le32(sg_dma_address(list) +
37940                                                 list->offset + p * 4096);
37941         }
37942 diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
37943 index 11e1eb6a6809..1d1d32e043f1 100644
37944 --- a/drivers/media/pci/saa7164/saa7164-encoder.c
37945 +++ b/drivers/media/pci/saa7164/saa7164-encoder.c
37946 @@ -1008,7 +1008,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
37947                 printk(KERN_ERR "%s() failed (errno = %d), NO PCI configuration\n",
37948                         __func__, result);
37949                 result = -ENOMEM;
37950 -               goto failed;
37951 +               goto fail_pci;
37952         }
37954         /* Establish encoder defaults here */
37955 @@ -1062,7 +1062,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
37956                           100000, ENCODER_DEF_BITRATE);
37957         if (hdl->error) {
37958                 result = hdl->error;
37959 -               goto failed;
37960 +               goto fail_hdl;
37961         }
37963         port->std = V4L2_STD_NTSC_M;
37964 @@ -1080,7 +1080,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
37965                 printk(KERN_INFO "%s: can't allocate mpeg device\n",
37966                         dev->name);
37967                 result = -ENOMEM;
37968 -               goto failed;
37969 +               goto fail_hdl;
37970         }
37972         port->v4l_device->ctrl_handler = hdl;
37973 @@ -1091,10 +1091,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
37974         if (result < 0) {
37975                 printk(KERN_INFO "%s: can't register mpeg device\n",
37976                         dev->name);
37977 -               /* TODO: We're going to leak here if we don't dealloc
37978 -                The buffers above. The unreg function can't deal wit it.
37979 -               */
37980 -               goto failed;
37981 +               goto fail_reg;
37982         }
37984         printk(KERN_INFO "%s: registered device video%d [mpeg]\n",
37985 @@ -1116,9 +1113,14 @@ int saa7164_encoder_register(struct saa7164_port *port)
37987         saa7164_api_set_encoder(port);
37988         saa7164_api_get_encoder(port);
37989 +       return 0;
37991 -       result = 0;
37992 -failed:
37993 +fail_reg:
37994 +       video_device_release(port->v4l_device);
37995 +       port->v4l_device = NULL;
37996 +fail_hdl:
37997 +       v4l2_ctrl_handler_free(hdl);
37998 +fail_pci:
37999         return result;
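
The saa7164 rework replaces the single catch-all 'failed' label, which leaked the control handler and video device, with one label per acquired resource, each undoing exactly what succeeded before it. The skeleton of that idiom, as a userspace sketch (the success-path frees are for the demo only):

    #include <stdlib.h>

    static int setup(void)
    {
            void *a, *b;
            int ret;

            a = malloc(16);
            if (!a) {
                    ret = -1;
                    goto fail_a;    /* nothing to undo yet */
            }
            b = malloc(16);
            if (!b) {
                    ret = -1;
                    goto fail_b;    /* must release a */
            }
            free(b);
            free(a);
            return 0;

    fail_b:
            free(a);
    fail_a:
            return ret;
    }

    int main(void) { return setup() ? 1 : 0; }
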
38002 diff --git a/drivers/media/pci/sta2x11/Kconfig b/drivers/media/pci/sta2x11/Kconfig
38003 index 4dd98f94a91e..27bb78513631 100644
38004 --- a/drivers/media/pci/sta2x11/Kconfig
38005 +++ b/drivers/media/pci/sta2x11/Kconfig
38006 @@ -3,6 +3,7 @@ config STA2X11_VIP
38007         tristate "STA2X11 VIP Video For Linux"
38008         depends on PCI && VIDEO_V4L2 && VIRT_TO_BUS && I2C
38009         depends on STA2X11 || COMPILE_TEST
38010 +       select GPIOLIB if MEDIA_SUBDRV_AUTOSELECT
38011         select VIDEO_ADV7180 if MEDIA_SUBDRV_AUTOSELECT
38012         select VIDEOBUF2_DMA_CONTIG
38013         select MEDIA_CONTROLLER
38014 diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
38015 index fd1831e97b22..1ddb5d6354cf 100644
38016 --- a/drivers/media/platform/Kconfig
38017 +++ b/drivers/media/platform/Kconfig
38018 @@ -244,6 +244,7 @@ config VIDEO_MEDIATEK_JPEG
38019         depends on MTK_IOMMU_V1 || MTK_IOMMU || COMPILE_TEST
38020         depends on VIDEO_DEV && VIDEO_V4L2
38021         depends on ARCH_MEDIATEK || COMPILE_TEST
38022 +       depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
38023         select VIDEOBUF2_DMA_CONTIG
38024         select V4L2_MEM2MEM_DEV
38025         help
38026 @@ -271,6 +272,7 @@ config VIDEO_MEDIATEK_MDP
38027         depends on MTK_IOMMU || COMPILE_TEST
38028         depends on VIDEO_DEV && VIDEO_V4L2
38029         depends on ARCH_MEDIATEK || COMPILE_TEST
38030 +       depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
38031         select VIDEOBUF2_DMA_CONTIG
38032         select V4L2_MEM2MEM_DEV
38033         select VIDEO_MEDIATEK_VPU
38034 @@ -291,6 +293,7 @@ config VIDEO_MEDIATEK_VCODEC
38035         # our dependencies, to avoid missing symbols during link.
38036         depends on VIDEO_MEDIATEK_VPU || !VIDEO_MEDIATEK_VPU
38037         depends on MTK_SCP || !MTK_SCP
38038 +       depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
38039         select VIDEOBUF2_DMA_CONTIG
38040         select V4L2_MEM2MEM_DEV
38041         select VIDEO_MEDIATEK_VCODEC_VPU if VIDEO_MEDIATEK_VPU
38042 diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
38043 index f2c4dadd6a0e..7bb6babdcade 100644
38044 --- a/drivers/media/platform/aspeed-video.c
38045 +++ b/drivers/media/platform/aspeed-video.c
38046 @@ -514,8 +514,8 @@ static void aspeed_video_off(struct aspeed_video *video)
38047         aspeed_video_write(video, VE_INTERRUPT_STATUS, 0xffffffff);
38049         /* Turn off the relevant clocks */
38050 -       clk_disable(video->vclk);
38051         clk_disable(video->eclk);
38052 +       clk_disable(video->vclk);
38054         clear_bit(VIDEO_CLOCKS_ON, &video->flags);
38056 @@ -526,8 +526,8 @@ static void aspeed_video_on(struct aspeed_video *video)
38057                 return;
38059         /* Turn on the relevant clocks */
38060 -       clk_enable(video->eclk);
38061         clk_enable(video->vclk);
38062 +       clk_enable(video->eclk);
38064         set_bit(VIDEO_CLOCKS_ON, &video->flags);
38066 @@ -1719,8 +1719,11 @@ static int aspeed_video_probe(struct platform_device *pdev)
38067                 return rc;
38069         rc = aspeed_video_setup_video(video);
38070 -       if (rc)
38071 +       if (rc) {
38072 +               clk_unprepare(video->vclk);
38073 +               clk_unprepare(video->eclk);
38074                 return rc;
38075 +       }
38077         return 0;
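
Two things happen in the aspeed-video hunks: the clocks are now disabled in the reverse of their enable order, and the probe error path unprepares both clocks instead of leaking them. The reverse-order rule in miniature (ordering as the patch establishes it; the underlying dependency is the driver's):

    /* bring-up */
    clk_enable(vclk);
    clk_enable(eclk);
    /* tear-down: strictly the mirror image */
    clk_disable(eclk);
    clk_disable(vclk);
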
38079 diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
38080 index 995e95272e51..e600764dce96 100644
38081 --- a/drivers/media/platform/coda/coda-common.c
38082 +++ b/drivers/media/platform/coda/coda-common.c
38083 @@ -2062,7 +2062,9 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
38084         if (q_data_dst->fourcc == V4L2_PIX_FMT_JPEG)
38085                 ctx->params.gop_size = 1;
38086         ctx->gopcounter = ctx->params.gop_size - 1;
38087 -       v4l2_ctrl_s_ctrl(ctx->mb_err_cnt_ctrl, 0);
38088 +       /* Only decoders have this control */
38089 +       if (ctx->mb_err_cnt_ctrl)
38090 +               v4l2_ctrl_s_ctrl(ctx->mb_err_cnt_ctrl, 0);
38092         ret = ctx->ops->start_streaming(ctx);
38093         if (ctx->inst_type == CODA_INST_DECODER) {
38094 diff --git a/drivers/media/platform/meson/ge2d/ge2d.c b/drivers/media/platform/meson/ge2d/ge2d.c
38095 index 153612ca96fc..a1393fefa8ae 100644
38096 --- a/drivers/media/platform/meson/ge2d/ge2d.c
38097 +++ b/drivers/media/platform/meson/ge2d/ge2d.c
38098 @@ -757,7 +757,7 @@ static int ge2d_s_ctrl(struct v4l2_ctrl *ctrl)
38100                 if (ctrl->val == 90) {
38101                         ctx->hflip = 0;
38102 -                       ctx->vflip = 0;
38103 +                       ctx->vflip = 1;
38104                         ctx->xy_swap = 1;
38105                 } else if (ctrl->val == 180) {
38106                         ctx->hflip = 1;
38107 @@ -765,7 +765,7 @@ static int ge2d_s_ctrl(struct v4l2_ctrl *ctrl)
38108                         ctx->xy_swap = 0;
38109                 } else if (ctrl->val == 270) {
38110                         ctx->hflip = 1;
38111 -                       ctx->vflip = 1;
38112 +                       ctx->vflip = 0;
38113                         ctx->xy_swap = 1;
38114                 } else {
38115                         ctx->hflip = 0;
38116 diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
38117 index f9896c121fd8..ae374bb2a48f 100644
38118 --- a/drivers/media/platform/qcom/venus/core.c
38119 +++ b/drivers/media/platform/qcom/venus/core.c
38120 @@ -218,18 +218,17 @@ static int venus_probe(struct platform_device *pdev)
38121                 return -ENOMEM;
38123         core->dev = dev;
38124 -       platform_set_drvdata(pdev, core);
38126         r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
38127         core->base = devm_ioremap_resource(dev, r);
38128         if (IS_ERR(core->base))
38129                 return PTR_ERR(core->base);
38131 -       core->video_path = of_icc_get(dev, "video-mem");
38132 +       core->video_path = devm_of_icc_get(dev, "video-mem");
38133         if (IS_ERR(core->video_path))
38134                 return PTR_ERR(core->video_path);
38136 -       core->cpucfg_path = of_icc_get(dev, "cpu-cfg");
38137 +       core->cpucfg_path = devm_of_icc_get(dev, "cpu-cfg");
38138         if (IS_ERR(core->cpucfg_path))
38139                 return PTR_ERR(core->cpucfg_path);
38141 @@ -248,7 +247,7 @@ static int venus_probe(struct platform_device *pdev)
38142                 return -ENODEV;
38144         if (core->pm_ops->core_get) {
38145 -               ret = core->pm_ops->core_get(dev);
38146 +               ret = core->pm_ops->core_get(core);
38147                 if (ret)
38148                         return ret;
38149         }
38150 @@ -273,6 +272,12 @@ static int venus_probe(struct platform_device *pdev)
38151         if (ret)
38152                 goto err_core_put;
38154 +       ret = v4l2_device_register(dev, &core->v4l2_dev);
38155 +       if (ret)
38156 +               goto err_core_deinit;
38158 +       platform_set_drvdata(pdev, core);
38160         pm_runtime_enable(dev);
38162         ret = pm_runtime_get_sync(dev);
38163 @@ -307,10 +312,6 @@ static int venus_probe(struct platform_device *pdev)
38164         if (ret)
38165                 goto err_venus_shutdown;
38167 -       ret = v4l2_device_register(dev, &core->v4l2_dev);
38168 -       if (ret)
38169 -               goto err_core_deinit;
38171         ret = pm_runtime_put_sync(dev);
38172         if (ret) {
38173                 pm_runtime_get_noresume(dev);
38174 @@ -323,8 +324,6 @@ static int venus_probe(struct platform_device *pdev)
38176  err_dev_unregister:
38177         v4l2_device_unregister(&core->v4l2_dev);
38178 -err_core_deinit:
38179 -       hfi_core_deinit(core, false);
38180  err_venus_shutdown:
38181         venus_shutdown(core);
38182  err_runtime_disable:
38183 @@ -332,9 +331,11 @@ static int venus_probe(struct platform_device *pdev)
38184         pm_runtime_set_suspended(dev);
38185         pm_runtime_disable(dev);
38186         hfi_destroy(core);
38187 +err_core_deinit:
38188 +       hfi_core_deinit(core, false);
38189  err_core_put:
38190         if (core->pm_ops->core_put)
38191 -               core->pm_ops->core_put(dev);
38192 +               core->pm_ops->core_put(core);
38193         return ret;
38196 @@ -360,14 +361,14 @@ static int venus_remove(struct platform_device *pdev)
38197         pm_runtime_disable(dev);
38199         if (pm_ops->core_put)
38200 -               pm_ops->core_put(dev);
38201 +               pm_ops->core_put(core);
38203 -       hfi_destroy(core);
38204 +       v4l2_device_unregister(&core->v4l2_dev);
38206 -       icc_put(core->video_path);
38207 -       icc_put(core->cpucfg_path);
38208 +       hfi_destroy(core);
38210         v4l2_device_unregister(&core->v4l2_dev);
38212         mutex_destroy(&core->pm_lock);
38213         mutex_destroy(&core->lock);
38214         venus_dbgfs_deinit(core);
38215 @@ -396,7 +397,7 @@ static __maybe_unused int venus_runtime_suspend(struct device *dev)
38216                 return ret;
38218         if (pm_ops->core_power) {
38219 -               ret = pm_ops->core_power(dev, POWER_OFF);
38220 +               ret = pm_ops->core_power(core, POWER_OFF);
38221                 if (ret)
38222                         return ret;
38223         }
38224 @@ -414,7 +415,7 @@ static __maybe_unused int venus_runtime_suspend(struct device *dev)
38225  err_video_path:
38226         icc_set_bw(core->cpucfg_path, kbps_to_icc(1000), 0);
38227  err_cpucfg_path:
38228 -       pm_ops->core_power(dev, POWER_ON);
38229 +       pm_ops->core_power(core, POWER_ON);
38231         return ret;
38233 @@ -434,7 +435,7 @@ static __maybe_unused int venus_runtime_resume(struct device *dev)
38234                 return ret;
38236         if (pm_ops->core_power) {
38237 -               ret = pm_ops->core_power(dev, POWER_ON);
38238 +               ret = pm_ops->core_power(core, POWER_ON);
38239                 if (ret)
38240                         return ret;
38241         }
38242 diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c
38243 index 4f7565834469..558510a8dfc8 100644
38244 --- a/drivers/media/platform/qcom/venus/hfi_cmds.c
38245 +++ b/drivers/media/platform/qcom/venus/hfi_cmds.c
38246 @@ -1039,6 +1039,18 @@ static int pkt_session_set_property_1x(struct hfi_session_set_property_pkt *pkt,
38247                 pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hierp);
38248                 break;
38249         }
38250 +       case HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO: {
38251 +               struct hfi_uncompressed_plane_actual_info *in = pdata;
38252 +               struct hfi_uncompressed_plane_actual_info *info = prop_data;
38254 +               info->buffer_type = in->buffer_type;
38255 +               info->num_planes = in->num_planes;
38256 +               info->plane_format[0] = in->plane_format[0];
38257 +               if (in->num_planes > 1)
38258 +                       info->plane_format[1] = in->plane_format[1];
38259 +               pkt->shdr.hdr.size += sizeof(u32) + sizeof(*info);
38260 +               break;
38261 +       }
38263         /* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
38264         case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
38265 @@ -1205,18 +1217,6 @@ pkt_session_set_property_4xx(struct hfi_session_set_property_pkt *pkt,
38266                 pkt->shdr.hdr.size += sizeof(u32) + sizeof(*cu);
38267                 break;
38268         }
38269 -       case HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO: {
38270 -               struct hfi_uncompressed_plane_actual_info *in = pdata;
38271 -               struct hfi_uncompressed_plane_actual_info *info = prop_data;
38273 -               info->buffer_type = in->buffer_type;
38274 -               info->num_planes = in->num_planes;
38275 -               info->plane_format[0] = in->plane_format[0];
38276 -               if (in->num_planes > 1)
38277 -                       info->plane_format[1] = in->plane_format[1];
38278 -               pkt->shdr.hdr.size += sizeof(u32) + sizeof(*info);
38279 -               break;
38280 -       }
38281         case HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE:
38282         case HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER:
38283         case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE:
38284 diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
38285 index 7263c0c32695..5b8389b98299 100644
38286 --- a/drivers/media/platform/qcom/venus/hfi_parser.c
38287 +++ b/drivers/media/platform/qcom/venus/hfi_parser.c
38288 @@ -235,13 +235,13 @@ static int hfi_platform_parser(struct venus_core *core, struct venus_inst *inst)
38289         u32 enc_codecs, dec_codecs, count = 0;
38290         unsigned int entries;
38292 -       if (inst)
38293 -               return 0;
38295         plat = hfi_platform_get(core->res->hfi_version);
38296         if (!plat)
38297                 return -EINVAL;
38299 +       if (inst)
38300 +               return 0;
38302         if (plat->codecs)
38303                 plat->codecs(&enc_codecs, &dec_codecs, &count);
38305 @@ -277,8 +277,10 @@ u32 hfi_parser(struct venus_core *core, struct venus_inst *inst, void *buf,
38307         parser_init(inst, &codecs, &domain);
38309 -       core->codecs_count = 0;
38310 -       memset(core->caps, 0, sizeof(core->caps));
38311 +       if (core->res->hfi_version > HFI_VERSION_1XX) {
38312 +               core->codecs_count = 0;
38313 +               memset(core->caps, 0, sizeof(core->caps));
38314 +       }
38316         while (words_count) {
38317                 data = word + 1;
38318 diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
38319 index 43c4e3d9e281..95b4d40ff6a5 100644
38320 --- a/drivers/media/platform/qcom/venus/pm_helpers.c
38321 +++ b/drivers/media/platform/qcom/venus/pm_helpers.c
38322 @@ -277,16 +277,28 @@ static int load_scale_v1(struct venus_inst *inst)
38323         return 0;
38326 -static int core_get_v1(struct device *dev)
38327 +static int core_get_v1(struct venus_core *core)
38329 -       struct venus_core *core = dev_get_drvdata(dev);
38330 +       int ret;
38332 +       ret = core_clks_get(core);
38333 +       if (ret)
38334 +               return ret;
38336 -       return core_clks_get(core);
38337 +       core->opp_table = dev_pm_opp_set_clkname(core->dev, "core");
38338 +       if (IS_ERR(core->opp_table))
38339 +               return PTR_ERR(core->opp_table);
38341 +       return 0;
38344 -static int core_power_v1(struct device *dev, int on)
38345 +static void core_put_v1(struct venus_core *core)
38347 +       dev_pm_opp_put_clkname(core->opp_table);
38350 +static int core_power_v1(struct venus_core *core, int on)
38352 -       struct venus_core *core = dev_get_drvdata(dev);
38353         int ret = 0;
38355         if (on == POWER_ON)
38356 @@ -299,6 +311,7 @@ static int core_power_v1(struct device *dev, int on)
38358  static const struct venus_pm_ops pm_ops_v1 = {
38359         .core_get = core_get_v1,
38360 +       .core_put = core_put_v1,
38361         .core_power = core_power_v1,
38362         .load_scale = load_scale_v1,
38363  };
38364 @@ -371,6 +384,7 @@ static int venc_power_v3(struct device *dev, int on)
38366  static const struct venus_pm_ops pm_ops_v3 = {
38367         .core_get = core_get_v1,
38368 +       .core_put = core_put_v1,
38369         .core_power = core_power_v1,
38370         .vdec_get = vdec_get_v3,
38371         .vdec_power = vdec_power_v3,
38372 @@ -753,12 +767,12 @@ static int venc_power_v4(struct device *dev, int on)
38373         return ret;
38376 -static int vcodec_domains_get(struct device *dev)
38377 +static int vcodec_domains_get(struct venus_core *core)
38379         int ret;
38380         struct opp_table *opp_table;
38381         struct device **opp_virt_dev;
38382 -       struct venus_core *core = dev_get_drvdata(dev);
38383 +       struct device *dev = core->dev;
38384         const struct venus_resources *res = core->res;
38385         struct device *pd;
38386         unsigned int i;
38387 @@ -809,9 +823,8 @@ static int vcodec_domains_get(struct device *dev)
38388         return ret;
38391 -static void vcodec_domains_put(struct device *dev)
38392 +static void vcodec_domains_put(struct venus_core *core)
38394 -       struct venus_core *core = dev_get_drvdata(dev);
38395         const struct venus_resources *res = core->res;
38396         unsigned int i;
38398 @@ -834,9 +847,9 @@ static void vcodec_domains_put(struct device *dev)
38399         dev_pm_opp_detach_genpd(core->opp_table);
38402 -static int core_get_v4(struct device *dev)
38403 +static int core_get_v4(struct venus_core *core)
38405 -       struct venus_core *core = dev_get_drvdata(dev);
38406 +       struct device *dev = core->dev;
38407         const struct venus_resources *res = core->res;
38408         int ret;
38410 @@ -875,7 +888,7 @@ static int core_get_v4(struct device *dev)
38411                 }
38412         }
38414 -       ret = vcodec_domains_get(dev);
38415 +       ret = vcodec_domains_get(core);
38416         if (ret) {
38417                 if (core->has_opp_table)
38418                         dev_pm_opp_of_remove_table(dev);
38419 @@ -886,14 +899,14 @@ static int core_get_v4(struct device *dev)
38420         return 0;
38423 -static void core_put_v4(struct device *dev)
38424 +static void core_put_v4(struct venus_core *core)
38426 -       struct venus_core *core = dev_get_drvdata(dev);
38427 +       struct device *dev = core->dev;
38429         if (legacy_binding)
38430                 return;
38432 -       vcodec_domains_put(dev);
38433 +       vcodec_domains_put(core);
38435         if (core->has_opp_table)
38436                 dev_pm_opp_of_remove_table(dev);
38437 @@ -901,9 +914,9 @@ static void core_put_v4(struct device *dev)
38441 -static int core_power_v4(struct device *dev, int on)
38442 +static int core_power_v4(struct venus_core *core, int on)
38444 -       struct venus_core *core = dev_get_drvdata(dev);
38445 +       struct device *dev = core->dev;
38446         struct device *pmctrl = core->pmdomains[0];
38447         int ret = 0;
38449 diff --git a/drivers/media/platform/qcom/venus/pm_helpers.h b/drivers/media/platform/qcom/venus/pm_helpers.h
38450 index aa2f6afa2354..a492c50c5543 100644
38451 --- a/drivers/media/platform/qcom/venus/pm_helpers.h
38452 +++ b/drivers/media/platform/qcom/venus/pm_helpers.h
38453 @@ -4,14 +4,15 @@
38454  #define __VENUS_PM_HELPERS_H__
38456  struct device;
38457 +struct venus_core;
38459  #define POWER_ON       1
38460  #define POWER_OFF      0
38462  struct venus_pm_ops {
38463 -       int (*core_get)(struct device *dev);
38464 -       void (*core_put)(struct device *dev);
38465 -       int (*core_power)(struct device *dev, int on);
38466 +       int (*core_get)(struct venus_core *core);
38467 +       void (*core_put)(struct venus_core *core);
38468 +       int (*core_power)(struct venus_core *core, int on);
38470         int (*vdec_get)(struct device *dev);
38471         void (*vdec_put)(struct device *dev);
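
The venus_pm_ops refactor threads struct venus_core through the callbacks instead of struct device, removing the dev_get_drvdata() round-trip in every implementation and letting core_get_v1()/core_put_v1() manage the OPP clkname. The shape of each converted callback, sketched with empty bodies:

    /* before */
    static int core_power(struct device *dev, int on)
    {
            struct venus_core *core = dev_get_drvdata(dev);

            return 0;
    }

    /* after */
    static int core_power(struct venus_core *core, int on)
    {
            struct device *dev = core->dev;   /* still available when needed */

            return 0;
    }
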
38472 diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c
38473 index a52b80055173..abef0037bf55 100644
38474 --- a/drivers/media/platform/qcom/venus/venc_ctrls.c
38475 +++ b/drivers/media/platform/qcom/venus/venc_ctrls.c
38476 @@ -359,7 +359,7 @@ int venc_ctrl_init(struct venus_inst *inst)
38477                 V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
38478                 ~((1 << V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) |
38479                 (1 << V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME)),
38480 -               V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE);
38481 +               V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME);
38483         v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
38484                 V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
38485 diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
38486 index 83bd9a412a56..1e3b68a8743a 100644
38487 --- a/drivers/media/platform/rcar_drif.c
38488 +++ b/drivers/media/platform/rcar_drif.c
38489 @@ -915,7 +915,6 @@ static int rcar_drif_g_fmt_sdr_cap(struct file *file, void *priv,
38491         struct rcar_drif_sdr *sdr = video_drvdata(file);
38493 -       memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
38494         f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
38495         f->fmt.sdr.buffersize = sdr->fmt->buffersize;
38497 diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
38498 index 813670ed9577..79deed8adcea 100644
38499 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
38500 +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
38501 @@ -520,14 +520,15 @@ static void rkisp1_rsz_set_src_fmt(struct rkisp1_resizer *rsz,
38502                                    struct v4l2_mbus_framefmt *format,
38503                                    unsigned int which)
38505 -       const struct rkisp1_isp_mbus_info *mbus_info;
38506 -       struct v4l2_mbus_framefmt *src_fmt;
38507 +       const struct rkisp1_isp_mbus_info *sink_mbus_info;
38508 +       struct v4l2_mbus_framefmt *src_fmt, *sink_fmt;
38510 +       sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SINK, which);
38511         src_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SRC, which);
38512 -       mbus_info = rkisp1_isp_mbus_info_get(src_fmt->code);
38513 +       sink_mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
38515         /* for YUV formats, userspace can change the mbus code on the src pad if it is supported */
38516 -       if (mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV &&
38517 +       if (sink_mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV &&
38518             rkisp1_rsz_get_yuv_mbus_info(format->code))
38519                 src_fmt->code = format->code;
38521 diff --git a/drivers/media/platform/sti/bdisp/bdisp-debug.c b/drivers/media/platform/sti/bdisp/bdisp-debug.c
38522 index 2b270093009c..a27f638df11c 100644
38523 --- a/drivers/media/platform/sti/bdisp/bdisp-debug.c
38524 +++ b/drivers/media/platform/sti/bdisp/bdisp-debug.c
38525 @@ -480,7 +480,7 @@ static int regs_show(struct seq_file *s, void *data)
38526         int ret;
38527         unsigned int i;
38529 -       ret = pm_runtime_get_sync(bdisp->dev);
38530 +       ret = pm_runtime_resume_and_get(bdisp->dev);
38531         if (ret < 0) {
38532                 seq_puts(s, "Cannot wake up IP\n");
38533                 return 0;
38534 diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
38535 index b55de9ab64d8..3181d0781b61 100644
38536 --- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
38537 +++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
38538 @@ -151,8 +151,10 @@ static int sun6i_video_start_streaming(struct vb2_queue *vq, unsigned int count)
38539         }
38541         subdev = sun6i_video_remote_subdev(video, NULL);
38542 -       if (!subdev)
38543 +       if (!subdev) {
38544 +               ret = -EINVAL;
38545                 goto stop_media_pipeline;
38546 +       }
38548         config.pixelformat = video->fmt.fmt.pix.pixelformat;
38549         config.code = video->mbus_code;
38550 diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
38551 index ed863bf5ea80..671e4a928993 100644
38552 --- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
38553 +++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
38554 @@ -589,7 +589,7 @@ static int deinterlace_start_streaming(struct vb2_queue *vq, unsigned int count)
38555         int ret;
38557         if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
38558 -               ret = pm_runtime_get_sync(dev);
38559 +               ret = pm_runtime_resume_and_get(dev);
38560                 if (ret < 0) {
38561                         dev_err(dev, "Failed to enable module\n");
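
This hunk and the bdisp-debug.c one above switch to pm_runtime_resume_and_get(), the helper added in v5.10 that performs the put_noidle() internally when resume fails, so callers hold a reference only on success:

    ret = pm_runtime_resume_and_get(dev);
    if (ret < 0)
            return ret;   /* usage count already dropped by the helper */
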
38563 diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
38564 index cb0437b4c331..163fffc0e1d4 100644
38565 --- a/drivers/media/radio/radio-mr800.c
38566 +++ b/drivers/media/radio/radio-mr800.c
38567 @@ -366,7 +366,7 @@ static int vidioc_s_hw_freq_seek(struct file *file, void *priv,
38568                         retval = -ENODATA;
38569                         break;
38570                 }
38571 -               if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
38572 +               if (schedule_msec_hrtimeout_interruptible((10))) {
38573                         retval = -ERESTARTSYS;
38574                         break;
38575                 }
38576 diff --git a/drivers/media/radio/radio-tea5777.c b/drivers/media/radio/radio-tea5777.c
38577 index fb9de7bbcd19..e53cf45e7f3f 100644
38578 --- a/drivers/media/radio/radio-tea5777.c
38579 +++ b/drivers/media/radio/radio-tea5777.c
38580 @@ -235,7 +235,7 @@ static int radio_tea5777_update_read_reg(struct radio_tea5777 *tea, int wait)
38581         }
38583         if (wait) {
38584 -               if (schedule_timeout_interruptible(msecs_to_jiffies(wait)))
38585 +               if (schedule_msec_hrtimeout_interruptible((wait)))
38586                         return -ERESTARTSYS;
38587         }
38589 diff --git a/drivers/media/radio/tea575x.c b/drivers/media/radio/tea575x.c
38590 index c37315226c42..e73e6393403c 100644
38591 --- a/drivers/media/radio/tea575x.c
38592 +++ b/drivers/media/radio/tea575x.c
38593 @@ -401,7 +401,7 @@ int snd_tea575x_s_hw_freq_seek(struct file *file, struct snd_tea575x *tea,
38594         for (;;) {
38595                 if (time_after(jiffies, timeout))
38596                         break;
38597 -               if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
38598 +               if (schedule_msec_hrtimeout_interruptible((10))) {
38599                         /* some signal arrived, stop search */
38600                         tea->val &= ~TEA575X_BIT_SEARCH;
38601                         snd_tea575x_set_freq(tea);
38602 diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
38603 index 0c6229592e13..e5c4a6941d26 100644
38604 --- a/drivers/media/rc/ite-cir.c
38605 +++ b/drivers/media/rc/ite-cir.c
38606 @@ -276,8 +276,14 @@ static irqreturn_t ite_cir_isr(int irq, void *data)
38607         /* read the interrupt flags */
38608         iflags = dev->params.get_irq_causes(dev);
38610 +       /* Check for RX overflow */
38611 +       if (iflags & ITE_IRQ_RX_FIFO_OVERRUN) {
38612 +               dev_warn(&dev->rdev->dev, "receive overflow\n");
38613 +               ir_raw_event_reset(dev->rdev);
38614 +       }
38616         /* check for the receive interrupt */
38617 -       if (iflags & (ITE_IRQ_RX_FIFO | ITE_IRQ_RX_FIFO_OVERRUN)) {
38618 +       if (iflags & ITE_IRQ_RX_FIFO) {
38619                 /* read the FIFO bytes */
38620                 rx_bytes =
38621                         dev->params.get_rx_bytes(dev, rx_buf,
38622 diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c
38623 index 0dc65ef3aa14..ca0ebf6ad9cc 100644
38624 --- a/drivers/media/test-drivers/vivid/vivid-core.c
38625 +++ b/drivers/media/test-drivers/vivid/vivid-core.c
38626 @@ -205,13 +205,13 @@ static const u8 vivid_hdmi_edid[256] = {
38627         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
38628         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x7b,
38630 -       0x02, 0x03, 0x3f, 0xf0, 0x51, 0x61, 0x60, 0x5f,
38631 +       0x02, 0x03, 0x3f, 0xf1, 0x51, 0x61, 0x60, 0x5f,
38632         0x5e, 0x5d, 0x10, 0x1f, 0x04, 0x13, 0x22, 0x21,
38633         0x20, 0x05, 0x14, 0x02, 0x11, 0x01, 0x23, 0x09,
38634         0x07, 0x07, 0x83, 0x01, 0x00, 0x00, 0x6d, 0x03,
38635         0x0c, 0x00, 0x10, 0x00, 0x00, 0x3c, 0x21, 0x00,
38636         0x60, 0x01, 0x02, 0x03, 0x67, 0xd8, 0x5d, 0xc4,
38637 -       0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xea, 0xe3,
38638 +       0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xca, 0xe3,
38639         0x05, 0x00, 0x00, 0xe3, 0x06, 0x01, 0x00, 0x4d,
38640         0xd0, 0x00, 0xa0, 0xf0, 0x70, 0x3e, 0x80, 0x30,
38641         0x20, 0x35, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00,
38642 @@ -220,7 +220,7 @@ static const u8 vivid_hdmi_edid[256] = {
38643         0x00, 0x00, 0x1a, 0x1a, 0x1d, 0x00, 0x80, 0x51,
38644         0xd0, 0x1c, 0x20, 0x40, 0x80, 0x35, 0x00, 0xc0,
38645         0x1c, 0x32, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00,
38646 -       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63,
38647 +       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x82,
38648  };
38650  static int vidioc_querycap(struct file *file, void  *priv,
38651 diff --git a/drivers/media/test-drivers/vivid/vivid-vid-out.c b/drivers/media/test-drivers/vivid/vivid-vid-out.c
38652 index ac1e981e8342..9f731f085179 100644
38653 --- a/drivers/media/test-drivers/vivid/vivid-vid-out.c
38654 +++ b/drivers/media/test-drivers/vivid/vivid-vid-out.c
38655 @@ -1021,7 +1021,7 @@ int vivid_vid_out_s_fbuf(struct file *file, void *fh,
38656                 return -EINVAL;
38657         }
38658         dev->fbuf_out_flags &= ~(chroma_flags | alpha_flags);
38659 -       dev->fbuf_out_flags = a->flags & (chroma_flags | alpha_flags);
38660 +       dev->fbuf_out_flags |= a->flags & (chroma_flags | alpha_flags);
38661         return 0;
38662  }
38664 diff --git a/drivers/media/tuners/m88rs6000t.c b/drivers/media/tuners/m88rs6000t.c
38665 index b3505f402476..8647c50b66e5 100644
38666 --- a/drivers/media/tuners/m88rs6000t.c
38667 +++ b/drivers/media/tuners/m88rs6000t.c
38668 @@ -525,7 +525,7 @@ static int m88rs6000t_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
38669         PGA2_cri = PGA2_GC >> 2;
38670         PGA2_crf = PGA2_GC & 0x03;
38672 -       for (i = 0; i <= RF_GC; i++)
38673 +       for (i = 0; i <= RF_GC && i < ARRAY_SIZE(RFGS); i++)
38674                 RFG += RFGS[i];
38676         if (RF_GC == 0)
38677 @@ -537,12 +537,12 @@ static int m88rs6000t_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
38678         if (RF_GC == 3)
38679                 RFG += 100;
38681 -       for (i = 0; i <= IF_GC; i++)
38682 +       for (i = 0; i <= IF_GC && i < ARRAY_SIZE(IFGS); i++)
38683                 IFG += IFGS[i];
38685         TIAG = TIA_GC * TIA_GS;
38687 -       for (i = 0; i <= BB_GC; i++)
38688 +       for (i = 0; i <= BB_GC && i < ARRAY_SIZE(BBGS); i++)
38689                 BBG += BBGS[i];
38691         PGA2G = PGA2_cri * PGA2_cri_GS + PGA2_crf * PGA2_crf_GS;
38692 diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
38693 index c1a7634e27b4..28e1fd64dd3c 100644
38694 --- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
38695 +++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
38696 @@ -79,11 +79,17 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
38697                         }
38698                 }
38700 -               if ((ret = dvb_usb_adapter_stream_init(adap)) ||
38701 -                       (ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs)) ||
38702 -                       (ret = dvb_usb_adapter_frontend_init(adap))) {
38703 +               ret = dvb_usb_adapter_stream_init(adap);
38704 +               if (ret)
38705                         return ret;
38706 -               }
38708 +               ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs);
38709 +               if (ret)
38710 +                       goto dvb_init_err;
38712 +               ret = dvb_usb_adapter_frontend_init(adap);
38713 +               if (ret)
38714 +                       goto frontend_init_err;
38716                 /* use exclusive FE lock if there is multiple shared FEs */
38717                 if (adap->fe_adap[1].fe)
38718 @@ -103,6 +109,12 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
38719         }
38721         return 0;
38723 +frontend_init_err:
38724 +       dvb_usb_adapter_dvb_exit(adap);
38725 +dvb_init_err:
38726 +       dvb_usb_adapter_stream_exit(adap);
38727 +       return ret;
38728  }
38730  static int dvb_usb_adapter_exit(struct dvb_usb_device *d)
38731 @@ -158,22 +170,20 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
38733                 if (d->props.priv_init != NULL) {
38734                         ret = d->props.priv_init(d);
38735 -                       if (ret != 0) {
38736 -                               kfree(d->priv);
38737 -                               d->priv = NULL;
38738 -                               return ret;
38739 -                       }
38740 +                       if (ret != 0)
38741 +                               goto err_priv_init;
38742                 }
38743         }
38745         /* check the capabilities and set appropriate variables */
38746         dvb_usb_device_power_ctrl(d, 1);
38748 -       if ((ret = dvb_usb_i2c_init(d)) ||
38749 -               (ret = dvb_usb_adapter_init(d, adapter_nums))) {
38750 -               dvb_usb_exit(d);
38751 -               return ret;
38752 -       }
38753 +       ret = dvb_usb_i2c_init(d);
38754 +       if (ret)
38755 +               goto err_i2c_init;
38756 +       ret = dvb_usb_adapter_init(d, adapter_nums);
38757 +       if (ret)
38758 +               goto err_adapter_init;
38760         if ((ret = dvb_usb_remote_init(d)))
38761                 err("could not initialize remote control.");
38762 @@ -181,6 +191,17 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
38763         dvb_usb_device_power_ctrl(d, 0);
38765         return 0;
38767 +err_adapter_init:
38768 +       dvb_usb_adapter_exit(d);
38769 +err_i2c_init:
38770 +       dvb_usb_i2c_exit(d);
38771 +       if (d->priv && d->props.priv_destroy)
38772 +               d->props.priv_destroy(d);
38773 +err_priv_init:
38774 +       kfree(d->priv);
38775 +       d->priv = NULL;
38776 +       return ret;
38777  }
38779  /* determine the name and the state of the just found USB device */
38780 @@ -255,41 +276,50 @@ int dvb_usb_device_init(struct usb_interface *intf,
38781         if (du != NULL)
38782                 *du = NULL;
38784 -       if ((desc = dvb_usb_find_device(udev, props, &cold)) == NULL) {
38785 +       d = kzalloc(sizeof(*d), GFP_KERNEL);
38786 +       if (!d) {
38787 +               err("no memory for 'struct dvb_usb_device'");
38788 +               return -ENOMEM;
38789 +       }
38791 +       memcpy(&d->props, props, sizeof(struct dvb_usb_device_properties));
38793 +       desc = dvb_usb_find_device(udev, &d->props, &cold);
38794 +       if (!desc) {
38795                 deb_err("something went very wrong, device was not found in current device list - let's see what comes next.\n");
38796 -               return -ENODEV;
38797 +               ret = -ENODEV;
38798 +               goto error;
38799         }
38801         if (cold) {
38802                 info("found a '%s' in cold state, will try to load a firmware", desc->name);
38803                 ret = dvb_usb_download_firmware(udev, props);
38804                 if (!props->no_reconnect || ret != 0)
38805 -                       return ret;
38806 +                       goto error;
38807         }
38809         info("found a '%s' in warm state.", desc->name);
38810 -       d = kzalloc(sizeof(struct dvb_usb_device), GFP_KERNEL);
38811 -       if (d == NULL) {
38812 -               err("no memory for 'struct dvb_usb_device'");
38813 -               return -ENOMEM;
38814 -       }
38816         d->udev = udev;
38817 -       memcpy(&d->props, props, sizeof(struct dvb_usb_device_properties));
38818         d->desc = desc;
38819         d->owner = owner;
38821         usb_set_intfdata(intf, d);
38823 -       if (du != NULL)
38824 +       ret = dvb_usb_init(d, adapter_nums);
38825 +       if (ret) {
38826 +               info("%s error while loading driver (%d)", desc->name, ret);
38827 +               goto error;
38828 +       }
38830 +       if (du)
38831                 *du = d;
38833 -       ret = dvb_usb_init(d, adapter_nums);
38834 +       info("%s successfully initialized and connected.", desc->name);
38835 +       return 0;
38837 -       if (ret == 0)
38838 -               info("%s successfully initialized and connected.", desc->name);
38839 -       else
38840 -               info("%s error while loading driver (%d)", desc->name, ret);
38841 + error:
38842 +       usb_set_intfdata(intf, NULL);
38843 +       kfree(d);
38844         return ret;
38845  }
38846  EXPORT_SYMBOL(dvb_usb_device_init);
38847 diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
38848 index 741be0e69447..2b8ad2bde8a4 100644
38849 --- a/drivers/media/usb/dvb-usb/dvb-usb.h
38850 +++ b/drivers/media/usb/dvb-usb/dvb-usb.h
38851 @@ -487,7 +487,7 @@ extern int __must_check
38852  dvb_usb_generic_write(struct dvb_usb_device *, u8 *, u16);
38854  /* commonly used remote control parsing */
38855 -extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[], u32 *, int *);
38856 +extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[5], u32 *, int *);
38858  /* commonly used firmware download types and function */
38859  struct hexline {
38860 diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
38861 index 526424279637..471bd74667e3 100644
38862 --- a/drivers/media/usb/em28xx/em28xx-dvb.c
38863 +++ b/drivers/media/usb/em28xx/em28xx-dvb.c
38864 @@ -2010,6 +2010,7 @@ static int em28xx_dvb_init(struct em28xx *dev)
38865         return result;
38867  out_free:
38868 +       em28xx_uninit_usb_xfer(dev, EM28XX_DIGITAL_MODE);
38869         kfree(dvb);
38870         dev->dvb = NULL;
38871         goto ret;
38872 diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
38873 index 158c8e28ed2c..47d8f28bfdfc 100644
38874 --- a/drivers/media/usb/gspca/gspca.c
38875 +++ b/drivers/media/usb/gspca/gspca.c
38876 @@ -1576,6 +1576,8 @@ int gspca_dev_probe2(struct usb_interface *intf,
38877  #endif
38878         v4l2_ctrl_handler_free(gspca_dev->vdev.ctrl_handler);
38879         v4l2_device_unregister(&gspca_dev->v4l2_dev);
38880 +       if (sd_desc->probe_error)
38881 +               sd_desc->probe_error(gspca_dev);
38882         kfree(gspca_dev->usb_buf);
38883         kfree(gspca_dev);
38884         return ret;
38885 diff --git a/drivers/media/usb/gspca/gspca.h b/drivers/media/usb/gspca/gspca.h
38886 index b0ced2e14006..a6554d5e9e1a 100644
38887 --- a/drivers/media/usb/gspca/gspca.h
38888 +++ b/drivers/media/usb/gspca/gspca.h
38889 @@ -105,6 +105,7 @@ struct sd_desc {
38890         cam_cf_op config;       /* called on probe */
38891         cam_op init;            /* called on probe and resume */
38892         cam_op init_controls;   /* called on probe */
38893 +       cam_v_op probe_error;   /* called if probe failed, do cleanup here */
38894         cam_op start;           /* called on stream on after URBs creation */
38895         cam_pkt_op pkt_scan;
38896  /* optional operations */
38897 diff --git a/drivers/media/usb/gspca/sq905.c b/drivers/media/usb/gspca/sq905.c
38898 index 97799cfb832e..949111070971 100644
38899 --- a/drivers/media/usb/gspca/sq905.c
38900 +++ b/drivers/media/usb/gspca/sq905.c
38901 @@ -158,7 +158,7 @@ static int
38902  sq905_read_data(struct gspca_dev *gspca_dev, u8 *data, int size, int need_lock)
38904         int ret;
38905 -       int act_len;
38906 +       int act_len = 0;
38908         gspca_dev->usb_buf[0] = '\0';
38909         if (need_lock)
38910 diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c
38911 index 95673fc0a99c..d9bc2aacc885 100644
38912 --- a/drivers/media/usb/gspca/stv06xx/stv06xx.c
38913 +++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c
38914 @@ -529,12 +529,21 @@ static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
38915  static int stv06xx_config(struct gspca_dev *gspca_dev,
38916                           const struct usb_device_id *id);
38918 +static void stv06xx_probe_error(struct gspca_dev *gspca_dev)
38919 +{
38920 +       struct sd *sd = (struct sd *)gspca_dev;
38922 +       kfree(sd->sensor_priv);
38923 +       sd->sensor_priv = NULL;
38924 +}
38926  /* sub-driver description */
38927  static const struct sd_desc sd_desc = {
38928         .name = MODULE_NAME,
38929         .config = stv06xx_config,
38930         .init = stv06xx_init,
38931         .init_controls = stv06xx_init_controls,
38932 +       .probe_error = stv06xx_probe_error,
38933         .start = stv06xx_start,
38934         .stopN = stv06xx_stopN,
38935         .pkt_scan = stv06xx_pkt_scan,
38936 diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
38937 index 30ef2a3110f7..9a791d8ef200 100644
38938 --- a/drivers/media/usb/uvc/uvc_driver.c
38939 +++ b/drivers/media/usb/uvc/uvc_driver.c
38940 @@ -1712,10 +1712,35 @@ static int uvc_scan_chain_forward(struct uvc_video_chain *chain,
38941                         if (forward->bNrInPins != 1) {
38942                                 uvc_dbg(chain->dev, DESCR,
38943                                         "Extension unit %d has more than 1 input pin\n",
38944 -                                       entity->id);
38945 +                                       forward->id);
38946                                 return -EINVAL;
38947                         }
38949 +                       /*
38950 +                        * Some devices reference an output terminal as the
38951 +                        * source of extension units. This is incorrect, as
38952 +                        * output terminals only have an input pin, and thus
38953 +                        * can't be connected to any entity in the forward
38954 +                        * direction. The resulting topology would cause issues
38955 +                        * when registering the media controller graph. To
38956 +                        * avoid this problem, connect the extension unit to
38957 +                        * the source of the output terminal instead.
38958 +                        */
38959 +                       if (UVC_ENTITY_IS_OTERM(entity)) {
38960 +                               struct uvc_entity *source;
38962 +                               source = uvc_entity_by_id(chain->dev,
38963 +                                                         entity->baSourceID[0]);
38964 +                               if (!source) {
38965 +                                       uvc_dbg(chain->dev, DESCR,
38966 +                                               "Can't connect extension unit %u in chain\n",
38967 +                                               forward->id);
38968 +                                       break;
38969 +                               }
38971 +                               forward->baSourceID[0] = source->id;
38972 +                       }
38974                         list_add_tail(&forward->chain, &chain->entities);
38975                         if (!found)
38976                                 uvc_dbg_cont(PROBE, " (->");
38977 @@ -1735,6 +1760,13 @@ static int uvc_scan_chain_forward(struct uvc_video_chain *chain,
38978                                 return -EINVAL;
38979                         }
38981 +                       if (UVC_ENTITY_IS_OTERM(entity)) {
38982 +                               uvc_dbg(chain->dev, DESCR,
38983 +                                       "Unsupported connection between output terminals %u and %u\n",
38984 +                                       entity->id, forward->id);
38985 +                               break;
38986 +                       }
38988                         list_add_tail(&forward->chain, &chain->entities);
38989                         if (!found)
38990                                 uvc_dbg_cont(PROBE, " (->");
38991 diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
38992 index d29b861367ea..1ef611e08323 100644
38993 --- a/drivers/media/usb/zr364xx/zr364xx.c
38994 +++ b/drivers/media/usb/zr364xx/zr364xx.c
38995 @@ -1430,7 +1430,7 @@ static int zr364xx_probe(struct usb_interface *intf,
38996         if (hdl->error) {
38997                 err = hdl->error;
38998                 dev_err(&udev->dev, "couldn't register control\n");
38999 -               goto unregister;
39000 +               goto free_hdlr_and_unreg_dev;
39001         }
39002         /* save the init method used by this camera */
39003         cam->method = id->driver_info;
39004 @@ -1503,7 +1503,7 @@ static int zr364xx_probe(struct usb_interface *intf,
39005         if (!cam->read_endpoint) {
39006                 err = -ENOMEM;
39007                 dev_err(&intf->dev, "Could not find bulk-in endpoint\n");
39008 -               goto unregister;
39009 +               goto free_hdlr_and_unreg_dev;
39010         }
39012         /* v4l */
39013 @@ -1515,7 +1515,7 @@ static int zr364xx_probe(struct usb_interface *intf,
39014         /* load zr364xx board specific */
39015         err = zr364xx_board_init(cam);
39016         if (err)
39017 -               goto unregister;
39018 +               goto free_hdlr_and_unreg_dev;
39019         err = v4l2_ctrl_handler_setup(hdl);
39020         if (err)
39021                 goto board_uninit;
39022 @@ -1533,7 +1533,7 @@ static int zr364xx_probe(struct usb_interface *intf,
39023         err = video_register_device(&cam->vdev, VFL_TYPE_VIDEO, -1);
39024         if (err) {
39025                 dev_err(&udev->dev, "video_register_device failed\n");
39026 -               goto free_handler;
39027 +               goto board_uninit;
39028         }
39029         cam->v4l2_dev.release = zr364xx_release;
39031 @@ -1541,11 +1541,10 @@ static int zr364xx_probe(struct usb_interface *intf,
39032                  video_device_node_name(&cam->vdev));
39033         return 0;
39035 -free_handler:
39036 -       v4l2_ctrl_handler_free(hdl);
39037  board_uninit:
39038         zr364xx_board_uninit(cam);
39039 -unregister:
39040 +free_hdlr_and_unreg_dev:
39041 +       v4l2_ctrl_handler_free(hdl);
39042         v4l2_device_unregister(&cam->v4l2_dev);
39043  free_cam:
39044         kfree(cam);
39045 diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
39046 index 016cf6204cbb..6219c8185782 100644
39047 --- a/drivers/media/v4l2-core/v4l2-ctrls.c
39048 +++ b/drivers/media/v4l2-core/v4l2-ctrls.c
39049 @@ -1675,6 +1675,8 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
39050                 p_fwht_params->version = V4L2_FWHT_VERSION;
39051                 p_fwht_params->width = 1280;
39052                 p_fwht_params->height = 720;
39053 +               p_fwht_params->flags = V4L2_FWHT_FL_PIXENC_YUV |
39054 +                       (2 << V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET);
39055                 break;
39056         }
39058 @@ -2395,7 +2397,16 @@ static void new_to_req(struct v4l2_ctrl_ref *ref)
39059         if (!ref)
39060                 return;
39061         ptr_to_ptr(ref->ctrl, ref->ctrl->p_new, ref->p_req);
39062 -       ref->req = ref;
39063 +       ref->valid_p_req = true;
39064 +}
39066 +/* Copy the current value to the request value */
39067 +static void cur_to_req(struct v4l2_ctrl_ref *ref)
39068 +{
39069 +       if (!ref)
39070 +               return;
39071 +       ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->p_req);
39072 +       ref->valid_p_req = true;
39073  }
39075  /* Copy the request value to the new value */
39076 @@ -2403,8 +2414,8 @@ static void req_to_new(struct v4l2_ctrl_ref *ref)
39078         if (!ref)
39079                 return;
39080 -       if (ref->req)
39081 -               ptr_to_ptr(ref->ctrl, ref->req->p_req, ref->ctrl->p_new);
39082 +       if (ref->valid_p_req)
39083 +               ptr_to_ptr(ref->ctrl, ref->p_req, ref->ctrl->p_new);
39084         else
39085                 ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->ctrl->p_new);
39086  }
39087 @@ -2541,7 +2552,15 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
39088         if (hdl == NULL || hdl->buckets == NULL)
39089                 return;
39091 -       if (!hdl->req_obj.req && !list_empty(&hdl->requests)) {
39092 +       /*
39093 +        * If the main handler is freed and it is used by handler objects in
39094 +        * outstanding requests, then unbind and put those objects before
39095 +        * freeing the main handler.
39096 +        *
39097 +        * The main handler can be identified by having a NULL ops pointer in
39098 +        * the request object.
39099 +        */
39100 +       if (!hdl->req_obj.ops && !list_empty(&hdl->requests)) {
39101                 struct v4l2_ctrl_handler *req, *next_req;
39103                 list_for_each_entry_safe(req, next_req, &hdl->requests, requests) {
39104 @@ -3571,39 +3590,8 @@ static void v4l2_ctrl_request_queue(struct media_request_object *obj)
39105         struct v4l2_ctrl_handler *hdl =
39106                 container_of(obj, struct v4l2_ctrl_handler, req_obj);
39107         struct v4l2_ctrl_handler *main_hdl = obj->priv;
39108 -       struct v4l2_ctrl_handler *prev_hdl = NULL;
39109 -       struct v4l2_ctrl_ref *ref_ctrl, *ref_ctrl_prev = NULL;
39111         mutex_lock(main_hdl->lock);
39112 -       if (list_empty(&main_hdl->requests_queued))
39113 -               goto queue;
39115 -       prev_hdl = list_last_entry(&main_hdl->requests_queued,
39116 -                                  struct v4l2_ctrl_handler, requests_queued);
39117 -       /*
39118 -        * Note: prev_hdl and hdl must contain the same list of control
39119 -        * references, so if any differences are detected then that is a
39120 -        * driver bug and the WARN_ON is triggered.
39121 -        */
39122 -       mutex_lock(prev_hdl->lock);
39123 -       ref_ctrl_prev = list_first_entry(&prev_hdl->ctrl_refs,
39124 -                                        struct v4l2_ctrl_ref, node);
39125 -       list_for_each_entry(ref_ctrl, &hdl->ctrl_refs, node) {
39126 -               if (ref_ctrl->req)
39127 -                       continue;
39128 -               while (ref_ctrl_prev->ctrl->id < ref_ctrl->ctrl->id) {
39129 -                       /* Should never happen, but just in case... */
39130 -                       if (list_is_last(&ref_ctrl_prev->node,
39131 -                                        &prev_hdl->ctrl_refs))
39132 -                               break;
39133 -                       ref_ctrl_prev = list_next_entry(ref_ctrl_prev, node);
39134 -               }
39135 -               if (WARN_ON(ref_ctrl_prev->ctrl->id != ref_ctrl->ctrl->id))
39136 -                       break;
39137 -               ref_ctrl->req = ref_ctrl_prev->req;
39138 -       }
39139 -       mutex_unlock(prev_hdl->lock);
39140 -queue:
39141         list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued);
39142         hdl->request_is_queued = true;
39143         mutex_unlock(main_hdl->lock);
39144 @@ -3615,8 +3603,8 @@ static void v4l2_ctrl_request_unbind(struct media_request_object *obj)
39145                 container_of(obj, struct v4l2_ctrl_handler, req_obj);
39146         struct v4l2_ctrl_handler *main_hdl = obj->priv;
39148 -       list_del_init(&hdl->requests);
39149         mutex_lock(main_hdl->lock);
39150 +       list_del_init(&hdl->requests);
39151         if (hdl->request_is_queued) {
39152                 list_del_init(&hdl->requests_queued);
39153                 hdl->request_is_queued = false;
39154 @@ -3660,7 +3648,7 @@ v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id)
39156         struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id);
39158 -       return (ref && ref->req == ref) ? ref->ctrl : NULL;
39159 +       return (ref && ref->valid_p_req) ? ref->ctrl : NULL;
39160  }
39161  EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_ctrl_find);
39163 @@ -3675,8 +3663,11 @@ static int v4l2_ctrl_request_bind(struct media_request *req,
39164         if (!ret) {
39165                 ret = media_request_object_bind(req, &req_ops,
39166                                                 from, false, &hdl->req_obj);
39167 -               if (!ret)
39168 +               if (!ret) {
39169 +                       mutex_lock(from->lock);
39170                         list_add_tail(&hdl->requests, &from->requests);
39171 +                       mutex_unlock(from->lock);
39172 +               }
39173         }
39174         return ret;
39175  }
39176 @@ -3846,7 +3837,13 @@ static int class_check(struct v4l2_ctrl_handler *hdl, u32 which)
39177         return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL;
39178  }
39180 -/* Get extended controls. Allocates the helpers array if needed. */
39181 +/*
39182 + * Get extended controls. Allocates the helpers array if needed.
39183 + *
39184 + * Note that v4l2_g_ext_ctrls_common() with 'which' set to
39185 + * V4L2_CTRL_WHICH_REQUEST_VAL is only called if the request was
39186 + * completed, and in that case valid_p_req is true for all controls.
39187 + */
39188  static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
39189                                    struct v4l2_ext_controls *cs,
39190                                    struct video_device *vdev)
39191 @@ -3855,9 +3852,10 @@ static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
39192         struct v4l2_ctrl_helper *helpers = helper;
39193         int ret;
39194         int i, j;
39195 -       bool def_value;
39196 +       bool is_default, is_request;
39198 -       def_value = (cs->which == V4L2_CTRL_WHICH_DEF_VAL);
39199 +       is_default = (cs->which == V4L2_CTRL_WHICH_DEF_VAL);
39200 +       is_request = (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL);
39202         cs->error_idx = cs->count;
39203         cs->which = V4L2_CTRL_ID2WHICH(cs->which);
39204 @@ -3883,11 +3881,9 @@ static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
39205                         ret = -EACCES;
39207         for (i = 0; !ret && i < cs->count; i++) {
39208 -               int (*ctrl_to_user)(struct v4l2_ext_control *c,
39209 -                                   struct v4l2_ctrl *ctrl);
39210                 struct v4l2_ctrl *master;
39212 -               ctrl_to_user = def_value ? def_to_user : cur_to_user;
39213 +               bool is_volatile = false;
39214 +               u32 idx = i;
39216                 if (helpers[i].mref == NULL)
39217                         continue;
39218 @@ -3897,31 +3893,48 @@ static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
39220                 v4l2_ctrl_lock(master);
39222 -               /* g_volatile_ctrl will update the new control values */
39223 -               if (!def_value &&
39224 +               /*
39225 +                * g_volatile_ctrl will update the new control values.
39226 +                * This makes no sense for V4L2_CTRL_WHICH_DEF_VAL and
39227 +                * V4L2_CTRL_WHICH_REQUEST_VAL. In the case of requests
39228 +                * it is v4l2_ctrl_request_complete() that copies the
39229 +                * volatile controls at the time of request completion
39230 +                * to the request, so you don't want to do that again.
39231 +                */
39232 +               if (!is_default && !is_request &&
39233                     ((master->flags & V4L2_CTRL_FLAG_VOLATILE) ||
39234                     (master->has_volatiles && !is_cur_manual(master)))) {
39235                         for (j = 0; j < master->ncontrols; j++)
39236                                 cur_to_new(master->cluster[j]);
39237                         ret = call_op(master, g_volatile_ctrl);
39238 -                       ctrl_to_user = new_to_user;
39239 +                       is_volatile = true;
39240                 }
39241 -               /* If OK, then copy the current (for non-volatile controls)
39242 -                  or the new (for volatile controls) control values to the
39243 -                  caller */
39244 -               if (!ret) {
39245 -                       u32 idx = i;
39247 -                       do {
39248 -                               if (helpers[idx].ref->req)
39249 -                                       ret = req_to_user(cs->controls + idx,
39250 -                                               helpers[idx].ref->req);
39251 -                               else
39252 -                                       ret = ctrl_to_user(cs->controls + idx,
39253 -                                               helpers[idx].ref->ctrl);
39254 -                               idx = helpers[idx].next;
39255 -                       } while (!ret && idx);
39256 +               if (ret) {
39257 +                       v4l2_ctrl_unlock(master);
39258 +                       break;
39259                 }
39261 +               /*
39262 +                * Copy the default value (if is_default is true), the
39263 +                * request value (if is_request is true and p_req is valid),
39264 +                * the new volatile value (if is_volatile is true) or the
39265 +                * current value.
39266 +                */
39267 +               do {
39268 +                       struct v4l2_ctrl_ref *ref = helpers[idx].ref;
39270 +                       if (is_default)
39271 +                               ret = def_to_user(cs->controls + idx, ref->ctrl);
39272 +                       else if (is_request && ref->valid_p_req)
39273 +                               ret = req_to_user(cs->controls + idx, ref);
39274 +                       else if (is_volatile)
39275 +                               ret = new_to_user(cs->controls + idx, ref->ctrl);
39276 +                       else
39277 +                               ret = cur_to_user(cs->controls + idx, ref->ctrl);
39278 +                       idx = helpers[idx].next;
39279 +               } while (!ret && idx);
39281                 v4l2_ctrl_unlock(master);
39282         }
39284 @@ -4564,8 +4577,6 @@ void v4l2_ctrl_request_complete(struct media_request *req,
39285                 unsigned int i;
39287                 if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
39288 -                       ref->req = ref;
39290                         v4l2_ctrl_lock(master);
39291                         /* g_volatile_ctrl will update the current control values */
39292                         for (i = 0; i < master->ncontrols; i++)
39293 @@ -4575,21 +4586,12 @@ void v4l2_ctrl_request_complete(struct media_request *req,
39294                         v4l2_ctrl_unlock(master);
39295                         continue;
39296                 }
39297 -               if (ref->req == ref)
39298 +               if (ref->valid_p_req)
39299                         continue;
39301 +               /* Copy the current control value into the request */
39302                 v4l2_ctrl_lock(ctrl);
39303 -               if (ref->req) {
39304 -                       ptr_to_ptr(ctrl, ref->req->p_req, ref->p_req);
39305 -               } else {
39306 -                       ptr_to_ptr(ctrl, ctrl->p_cur, ref->p_req);
39307 -                       /*
39308 -                        * Set ref->req to ensure that when userspace wants to
39309 -                        * obtain the controls of this request it will take
39310 -                        * this value and not the current value of the control.
39311 -                        */
39312 -                       ref->req = ref;
39313 -               }
39314 +               cur_to_req(ref);
39315                 v4l2_ctrl_unlock(ctrl);
39316         }
39318 @@ -4653,7 +4655,7 @@ int v4l2_ctrl_request_setup(struct media_request *req,
39319                                 struct v4l2_ctrl_ref *r =
39320                                         find_ref(hdl, master->cluster[i]->id);
39322 -                               if (r->req && r == r->req) {
39323 +                               if (r->valid_p_req) {
39324                                         have_new_data = true;
39325                                         break;
39326                                 }
39327 diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
39328 index cfa730cfd145..f80c2ea39ca4 100644
39329 --- a/drivers/memory/omap-gpmc.c
39330 +++ b/drivers/memory/omap-gpmc.c
39331 @@ -1009,8 +1009,8 @@ EXPORT_SYMBOL(gpmc_cs_request);
39333  void gpmc_cs_free(int cs)
39334  {
39335 -       struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
39336 -       struct resource *res = &gpmc->mem;
39337 +       struct gpmc_cs_data *gpmc;
39338 +       struct resource *res;
39340         spin_lock(&gpmc_mem_lock);
39341         if (cs >= gpmc_cs_num || cs < 0 || !gpmc_cs_reserved(cs)) {
39342 @@ -1018,6 +1018,9 @@ void gpmc_cs_free(int cs)
39343                 spin_unlock(&gpmc_mem_lock);
39344                 return;
39345         }
39346 +       gpmc = &gpmc_cs[cs];
39347 +       res = &gpmc->mem;
39349         gpmc_cs_disable_mem(cs);
39350         if (res->flags)
39351                 release_resource(res);
39352 diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c
39353 index 3b5b1045edd9..9c0a28416777 100644
39354 --- a/drivers/memory/pl353-smc.c
39355 +++ b/drivers/memory/pl353-smc.c
39356 @@ -63,7 +63,7 @@
39357  /* ECC memory config register specific constants */
39358  #define PL353_SMC_ECC_MEMCFG_MODE_MASK 0xC
39359  #define PL353_SMC_ECC_MEMCFG_MODE_SHIFT        2
39360 -#define PL353_SMC_ECC_MEMCFG_PGSIZE_MASK       0xC
39361 +#define PL353_SMC_ECC_MEMCFG_PGSIZE_MASK       0x3
39363  #define PL353_SMC_DC_UPT_NAND_REGS     ((4 << 23) |    /* CS: NAND chip */ \
39364                                  (2 << 21))     /* UpdateRegs operation */
39365 diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
39366 index 8d36e221def1..45eed659b0c6 100644
39367 --- a/drivers/memory/renesas-rpc-if.c
39368 +++ b/drivers/memory/renesas-rpc-if.c
39369 @@ -192,10 +192,10 @@ int rpcif_sw_init(struct rpcif *rpc, struct device *dev)
39370         }
39372         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap");
39373 -       rpc->size = resource_size(res);
39374         rpc->dirmap = devm_ioremap_resource(&pdev->dev, res);
39375         if (IS_ERR(rpc->dirmap))
39376                 rpc->dirmap = NULL;
39377 +       rpc->size = resource_size(res);
39379         rpc->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
39381 diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
39382 index 1dabb509dec3..dee503640e12 100644
39383 --- a/drivers/memory/samsung/exynos5422-dmc.c
39384 +++ b/drivers/memory/samsung/exynos5422-dmc.c
39385 @@ -1298,7 +1298,9 @@ static int exynos5_dmc_init_clks(struct exynos5_dmc *dmc)
39387         dmc->curr_volt = target_volt;
39389 -       clk_set_parent(dmc->mout_mx_mspll_ccore, dmc->mout_spll);
39390 +       ret = clk_set_parent(dmc->mout_mx_mspll_ccore, dmc->mout_spll);
39391 +       if (ret)
39392 +               return ret;
39394         clk_prepare_enable(dmc->fout_bpll);
39395         clk_prepare_enable(dmc->mout_bpll);
39396 diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
39397 index 077d9ab112b7..d919ae9691e2 100644
39398 --- a/drivers/mfd/arizona-irq.c
39399 +++ b/drivers/mfd/arizona-irq.c
39400 @@ -100,7 +100,7 @@ static irqreturn_t arizona_irq_thread(int irq, void *data)
39401         unsigned int val;
39402         int ret;
39404 -       ret = pm_runtime_get_sync(arizona->dev);
39405 +       ret = pm_runtime_resume_and_get(arizona->dev);
39406         if (ret < 0) {
39407                 dev_err(arizona->dev, "Failed to resume device: %d\n", ret);
39408                 return IRQ_NONE;
39409 diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
39410 index 3781d0bb7786..783a14af18e2 100644
39411 --- a/drivers/mfd/da9063-i2c.c
39412 +++ b/drivers/mfd/da9063-i2c.c
39413 @@ -442,6 +442,16 @@ static int da9063_i2c_probe(struct i2c_client *i2c,
39414                 return ret;
39415         }
39417 +       /* If SMBus is not available and only I2C is possible, enter I2C mode */
39418 +       if (i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C)) {
39419 +               ret = regmap_clear_bits(da9063->regmap, DA9063_REG_CONFIG_J,
39420 +                                       DA9063_TWOWIRE_TO);
39421 +               if (ret < 0) {
39422 +                       dev_err(da9063->dev, "Failed to set Two-Wire Bus Mode.\n");
39423 +                       return -EIO;
39424 +               }
39425 +       }
39427         return da9063_device_init(da9063, i2c->irq);
39428  }
39430 diff --git a/drivers/mfd/intel_pmt.c b/drivers/mfd/intel_pmt.c
39431 index 744b230cdcca..65da2b17a204 100644
39432 --- a/drivers/mfd/intel_pmt.c
39433 +++ b/drivers/mfd/intel_pmt.c
39434 @@ -79,19 +79,18 @@ static int pmt_add_dev(struct pci_dev *pdev, struct intel_dvsec_header *header,
39435         case DVSEC_INTEL_ID_WATCHER:
39436                 if (quirks & PMT_QUIRK_NO_WATCHER) {
39437                         dev_info(dev, "Watcher not supported\n");
39438 -                       return 0;
39439 +                       return -EINVAL;
39440                 }
39441                 name = "pmt_watcher";
39442                 break;
39443         case DVSEC_INTEL_ID_CRASHLOG:
39444                 if (quirks & PMT_QUIRK_NO_CRASHLOG) {
39445                         dev_info(dev, "Crashlog not supported\n");
39446 -                       return 0;
39447 +                       return -EINVAL;
39448                 }
39449                 name = "pmt_crashlog";
39450                 break;
39451         default:
39452 -               dev_err(dev, "Unrecognized PMT capability: %d\n", id);
39453                 return -EINVAL;
39454         }
39456 @@ -174,12 +173,8 @@ static int pmt_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
39457                 header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
39459                 ret = pmt_add_dev(pdev, &header, quirks);
39460 -               if (ret) {
39461 -                       dev_warn(&pdev->dev,
39462 -                                "Failed to add device for DVSEC id %d\n",
39463 -                                header.id);
39464 +               if (ret)
39465                         continue;
39466 -               }
39468                 found_devices = true;
39469         } while (true);
39470 diff --git a/drivers/mfd/stm32-timers.c b/drivers/mfd/stm32-timers.c
39471 index add603359124..44ed2fce0319 100644
39472 --- a/drivers/mfd/stm32-timers.c
39473 +++ b/drivers/mfd/stm32-timers.c
39474 @@ -158,13 +158,18 @@ static const struct regmap_config stm32_timers_regmap_cfg = {
39476  static void stm32_timers_get_arr_size(struct stm32_timers *ddata)
39477  {
39478 +       u32 arr;
39480 +       /* Backup ARR to restore it after getting the maximum value */
39481 +       regmap_read(ddata->regmap, TIM_ARR, &arr);
39483         /*
39484          * Only the available bits will be written so when readback
39485          * we get the maximum value of auto reload register
39486          */
39487         regmap_write(ddata->regmap, TIM_ARR, ~0L);
39488         regmap_read(ddata->regmap, TIM_ARR, &ddata->max_arr);
39489 -       regmap_write(ddata->regmap, TIM_ARR, 0x0);
39490 +       regmap_write(ddata->regmap, TIM_ARR, arr);
39491  }
39493  static int stm32_timers_dma_probe(struct device *dev,
39494 diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
39495 index 90f3292230c9..1dd39483e7c1 100644
39496 --- a/drivers/mfd/stmpe.c
39497 +++ b/drivers/mfd/stmpe.c
39498 @@ -312,7 +312,7 @@ EXPORT_SYMBOL_GPL(stmpe_set_altfunc);
39499   * GPIO (all variants)
39500   */
39502 -static const struct resource stmpe_gpio_resources[] = {
39503 +static struct resource stmpe_gpio_resources[] = {
39504         /* Start and end filled dynamically */
39505         {
39506                 .flags  = IORESOURCE_IRQ,
39507 @@ -336,7 +336,8 @@ static const struct mfd_cell stmpe_gpio_cell_noirq = {
39508   * Keypad (1601, 2401, 2403)
39509   */
39511 -static const struct resource stmpe_keypad_resources[] = {
39512 +static struct resource stmpe_keypad_resources[] = {
39513 +       /* Start and end filled dynamically */
39514         {
39515                 .name   = "KEYPAD",
39516                 .flags  = IORESOURCE_IRQ,
39517 @@ -357,7 +358,8 @@ static const struct mfd_cell stmpe_keypad_cell = {
39518  /*
39519   * PWM (1601, 2401, 2403)
39520   */
39521 -static const struct resource stmpe_pwm_resources[] = {
39522 +static struct resource stmpe_pwm_resources[] = {
39523 +       /* Start and end filled dynamically */
39524         {
39525                 .name   = "PWM0",
39526                 .flags  = IORESOURCE_IRQ,
39527 @@ -445,7 +447,8 @@ static struct stmpe_variant_info stmpe801_noirq = {
39528   * Touchscreen (STMPE811 or STMPE610)
39529   */
39531 -static const struct resource stmpe_ts_resources[] = {
39532 +static struct resource stmpe_ts_resources[] = {
39533 +       /* Start and end filled dynamically */
39534         {
39535                 .name   = "TOUCH_DET",
39536                 .flags  = IORESOURCE_IRQ,
39537 @@ -467,7 +470,8 @@ static const struct mfd_cell stmpe_ts_cell = {
39538   * ADC (STMPE811)
39539   */
39541 -static const struct resource stmpe_adc_resources[] = {
39542 +static struct resource stmpe_adc_resources[] = {
39543 +       /* Start and end filled dynamically */
39544         {
39545                 .name   = "STMPE_TEMP_SENS",
39546                 .flags  = IORESOURCE_IRQ,
39547 diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
39548 index b690796d24d4..448b13da62b4 100644
39549 --- a/drivers/mfd/ucb1x00-core.c
39550 +++ b/drivers/mfd/ucb1x00-core.c
39551 @@ -250,7 +250,7 @@ unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
39552                         break;
39553                 /* yield to other processes */
39554                 set_current_state(TASK_INTERRUPTIBLE);
39555 -               schedule_timeout(1);
39556 +               schedule_min_hrtimeout();
39557         }
39559         return UCB_ADC_DAT(val);
39560 diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
39561 index 926408b41270..7a6f01ace78a 100644
39562 --- a/drivers/misc/eeprom/at24.c
39563 +++ b/drivers/misc/eeprom/at24.c
39564 @@ -763,7 +763,8 @@ static int at24_probe(struct i2c_client *client)
39565         at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
39566         if (IS_ERR(at24->nvmem)) {
39567                 pm_runtime_disable(dev);
39568 -               regulator_disable(at24->vcc_reg);
39569 +               if (!pm_runtime_status_suspended(dev))
39570 +                       regulator_disable(at24->vcc_reg);
39571                 return PTR_ERR(at24->nvmem);
39572         }
39574 @@ -774,7 +775,8 @@ static int at24_probe(struct i2c_client *client)
39575         err = at24_read(at24, 0, &test_byte, 1);
39576         if (err) {
39577                 pm_runtime_disable(dev);
39578 -               regulator_disable(at24->vcc_reg);
39579 +               if (!pm_runtime_status_suspended(dev))
39580 +                       regulator_disable(at24->vcc_reg);
39581                 return -ENODEV;
39582         }
39584 diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
39585 index 9152242778f5..ecdedd87f8cc 100644
39586 --- a/drivers/misc/habanalabs/gaudi/gaudi.c
39587 +++ b/drivers/misc/habanalabs/gaudi/gaudi.c
39588 @@ -5546,6 +5546,7 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
39589         struct hl_cs_job *job;
39590         u32 cb_size, ctl, err_cause;
39591         struct hl_cb *cb;
39592 +       u64 id;
39593         int rc;
39595         cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
39596 @@ -5612,8 +5613,9 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
39597         }
39599  release_cb:
39600 +       id = cb->id;
39601         hl_cb_put(cb);
39602 -       hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
39603 +       hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, id << PAGE_SHIFT);
39605         return rc;
39606  }
39607 diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c
39608 index 2bdf560ee681..0f9ea75b0b18 100644
39609 --- a/drivers/misc/ics932s401.c
39610 +++ b/drivers/misc/ics932s401.c
39611 @@ -134,7 +134,7 @@ static struct ics932s401_data *ics932s401_update_device(struct device *dev)
39612         for (i = 0; i < NUM_MIRRORED_REGS; i++) {
39613                 temp = i2c_smbus_read_word_data(client, regs_to_copy[i]);
39614                 if (temp < 0)
39615 -                       data->regs[regs_to_copy[i]] = 0;
39616 +                       temp = 0;
39617                 data->regs[regs_to_copy[i]] = temp >> 8;
39618         }
39620 diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
39621 index 945701bce553..2e081a58da6c 100644
39622 --- a/drivers/misc/kgdbts.c
39623 +++ b/drivers/misc/kgdbts.c
39624 @@ -95,19 +95,19 @@
39626  #include <asm/sections.h>
39628 -#define v1printk(a...) do { \
39629 -       if (verbose) \
39630 -               printk(KERN_INFO a); \
39631 -       } while (0)
39632 -#define v2printk(a...) do { \
39633 -       if (verbose > 1) \
39634 -               printk(KERN_INFO a); \
39635 -               touch_nmi_watchdog();   \
39636 -       } while (0)
39637 -#define eprintk(a...) do { \
39638 -               printk(KERN_ERR a); \
39639 -               WARN_ON(1); \
39640 -       } while (0)
39641 +#define v1printk(a...) do {            \
39642 +       if (verbose)                    \
39643 +               printk(KERN_INFO a);    \
39644 +} while (0)
39645 +#define v2printk(a...) do {            \
39646 +       if (verbose > 1)                \
39647 +               printk(KERN_INFO a);    \
39648 +       touch_nmi_watchdog();           \
39649 +} while (0)
39650 +#define eprintk(a...) do {             \
39651 +       printk(KERN_ERR a);             \
39652 +       WARN_ON(1);                     \
39653 +} while (0)
39654  #define MAX_CONFIG_LEN         40
39656  static struct kgdb_io kgdbts_io_ops;
39657 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
39658 index dd65cedf3b12..9d14bf444481 100644
39659 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
39660 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
39661 @@ -208,7 +208,7 @@ static int lis3_3dc_rates[16] = {0, 1, 10, 25, 50, 100, 200, 400, 1600, 5000};
39662  static int lis3_3dlh_rates[4] = {50, 100, 400, 1000};
39664  /* ODR is Output Data Rate */
39665 -static int lis3lv02d_get_odr(struct lis3lv02d *lis3)
39666 +static int lis3lv02d_get_odr_index(struct lis3lv02d *lis3)
39667  {
39668         u8 ctrl;
39669         int shift;
39670 @@ -216,15 +216,23 @@ static int lis3lv02d_get_odr(struct lis3lv02d *lis3)
39671         lis3->read(lis3, CTRL_REG1, &ctrl);
39672         ctrl &= lis3->odr_mask;
39673         shift = ffs(lis3->odr_mask) - 1;
39674 -       return lis3->odrs[(ctrl >> shift)];
39675 +       return (ctrl >> shift);
39676  }
39678  static int lis3lv02d_get_pwron_wait(struct lis3lv02d *lis3)
39679  {
39680 -       int div = lis3lv02d_get_odr(lis3);
39681 +       int odr_idx = lis3lv02d_get_odr_index(lis3);
39682 +       int div = lis3->odrs[odr_idx];
39684 -       if (WARN_ONCE(div == 0, "device returned spurious data"))
39685 +       if (div == 0) {
39686 +               if (odr_idx == 0) {
39687 +                       /* Power-down mode, not sampling no need to sleep */
39688 +                       return 0;
39689 +               }
39691 +               dev_err(&lis3->pdev->dev, "Error unknown odrs-index: %d\n", odr_idx);
39692                 return -ENXIO;
39693 +       }
39695         /* LIS3 power on delay is quite long */
39696         msleep(lis3->pwron_delay / div);
39697 @@ -816,9 +824,12 @@ static ssize_t lis3lv02d_rate_show(struct device *dev,
39698                         struct device_attribute *attr, char *buf)
39699  {
39700         struct lis3lv02d *lis3 = dev_get_drvdata(dev);
39701 +       int odr_idx;
39703         lis3lv02d_sysfs_poweron(lis3);
39704 -       return sprintf(buf, "%d\n", lis3lv02d_get_odr(lis3));
39706 +       odr_idx = lis3lv02d_get_odr_index(lis3);
39707 +       return sprintf(buf, "%d\n", lis3->odrs[odr_idx]);
39708  }
39710  static ssize_t lis3lv02d_rate_set(struct device *dev,
39711 diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
39712 index 14be76d4c2e6..cb34925e10f1 100644
39713 --- a/drivers/misc/mei/hw-me-regs.h
39714 +++ b/drivers/misc/mei/hw-me-regs.h
39715 @@ -105,6 +105,7 @@
39717  #define MEI_DEV_ID_ADP_S      0x7AE8  /* Alder Lake Point S */
39718  #define MEI_DEV_ID_ADP_LP     0x7A60  /* Alder Lake Point LP */
39719 +#define MEI_DEV_ID_ADP_P      0x51E0  /* Alder Lake Point P */
39721  /*
39722   * MEI HW Section
39723 diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
39724 index a7e179626b63..c3393b383e59 100644
39725 --- a/drivers/misc/mei/pci-me.c
39726 +++ b/drivers/misc/mei/pci-me.c
39727 @@ -111,6 +111,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
39729         {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_S, MEI_ME_PCH15_CFG)},
39730         {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_LP, MEI_ME_PCH15_CFG)},
39731 +       {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
39733         /* required last entry */
39734         {0, }
39735 diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
39736 index 8e6607fc8a67..b9ab770bbdb5 100644
39737 --- a/drivers/misc/sgi-xp/xpc_channel.c
39738 +++ b/drivers/misc/sgi-xp/xpc_channel.c
39739 @@ -834,7 +834,7 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
39741         atomic_inc(&ch->n_on_msg_allocate_wq);
39742         prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE);
39743 -       ret = schedule_timeout(1);
39744 +       ret = schedule_min_hrtimeout();
39745         finish_wait(&ch->msg_allocate_wq, &wait);
39746         atomic_dec(&ch->n_on_msg_allocate_wq);
39748 diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
39749 index 345addd9306d..fa8a7fce4481 100644
39750 --- a/drivers/misc/vmw_vmci/vmci_doorbell.c
39751 +++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
39752 @@ -326,7 +326,7 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
39753  bool vmci_dbell_register_notification_bitmap(u64 bitmap_ppn)
39755         int result;
39756 -       struct vmci_notify_bm_set_msg bitmap_set_msg;
39757 +       struct vmci_notify_bm_set_msg bitmap_set_msg = { };
39759         bitmap_set_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
39760                                                   VMCI_SET_NOTIFY_BITMAP);
39761 diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
39762 index cc8eeb361fcd..1018dc77269d 100644
39763 --- a/drivers/misc/vmw_vmci/vmci_guest.c
39764 +++ b/drivers/misc/vmw_vmci/vmci_guest.c
39765 @@ -168,7 +168,7 @@ static int vmci_check_host_caps(struct pci_dev *pdev)
39766                                 VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
39767         struct vmci_datagram *check_msg;
39769 -       check_msg = kmalloc(msg_size, GFP_KERNEL);
39770 +       check_msg = kzalloc(msg_size, GFP_KERNEL);
39771         if (!check_msg) {
39772                 dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
39773                 return -ENOMEM;
39774 diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
39775 index d666e24fbe0e..a4c06ef67394 100644
39776 --- a/drivers/mmc/core/block.c
39777 +++ b/drivers/mmc/core/block.c
39778 @@ -572,6 +572,18 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
39779                 main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK;
39780         }
39782 +       /*
39783 +        * Make sure to update CACHE_CTRL in case it was changed. The cache
39784 +        * will get turned back on if the card is re-initialized, e.g.
39785 +        * suspend/resume or hw reset in recovery.
39786 +        */
39787 +       if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_CACHE_CTRL) &&
39788 +           (cmd.opcode == MMC_SWITCH)) {
39789 +               u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg) & 1;
39791 +               card->ext_csd.cache_ctrl = value;
39792 +       }
39794         /*
39795          * According to the SD specs, some commands require a delay after
39796          * issuing the command.
39797 @@ -2224,6 +2236,10 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
39798         case MMC_ISSUE_ASYNC:
39799                 switch (req_op(req)) {
39800                 case REQ_OP_FLUSH:
39801 +                       if (!mmc_cache_enabled(host)) {
39802 +                               blk_mq_end_request(req, BLK_STS_OK);
39803 +                               return MMC_REQ_FINISHED;
39804 +                       }
39805                         ret = mmc_blk_cqe_issue_flush(mq, req);
39806                         break;
39807                 case REQ_OP_READ:
39808 diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
39809 index 1136b859ddd8..e30c4e88e404 100644
39810 --- a/drivers/mmc/core/core.c
39811 +++ b/drivers/mmc/core/core.c
39812 @@ -1207,7 +1207,7 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
39814         err = mmc_wait_for_cmd(host, &cmd, 0);
39815         if (err)
39816 -               return err;
39817 +               goto power_cycle;
39819         if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
39820                 return -EIO;
39821 @@ -2369,80 +2369,6 @@ void mmc_stop_host(struct mmc_host *host)
39822         mmc_release_host(host);
39823  }
39825 -#ifdef CONFIG_PM_SLEEP
39826 -/* Do the card removal on suspend if card is assumed removeable
39827 - * Do that in pm notifier while userspace isn't yet frozen, so we will be able
39828 -   to sync the card.
39829 -*/
39830 -static int mmc_pm_notify(struct notifier_block *notify_block,
39831 -                       unsigned long mode, void *unused)
39832 -{
39833 -       struct mmc_host *host = container_of(
39834 -               notify_block, struct mmc_host, pm_notify);
39835 -       unsigned long flags;
39836 -       int err = 0;
39838 -       switch (mode) {
39839 -       case PM_HIBERNATION_PREPARE:
39840 -       case PM_SUSPEND_PREPARE:
39841 -       case PM_RESTORE_PREPARE:
39842 -               spin_lock_irqsave(&host->lock, flags);
39843 -               host->rescan_disable = 1;
39844 -               spin_unlock_irqrestore(&host->lock, flags);
39845 -               cancel_delayed_work_sync(&host->detect);
39847 -               if (!host->bus_ops)
39848 -                       break;
39850 -               /* Validate prerequisites for suspend */
39851 -               if (host->bus_ops->pre_suspend)
39852 -                       err = host->bus_ops->pre_suspend(host);
39853 -               if (!err)
39854 -                       break;
39856 -               if (!mmc_card_is_removable(host)) {
39857 -                       dev_warn(mmc_dev(host),
39858 -                                "pre_suspend failed for non-removable host: "
39859 -                                "%d\n", err);
39860 -                       /* Avoid removing non-removable hosts */
39861 -                       break;
39862 -               }
39864 -               /* Calling bus_ops->remove() with a claimed host can deadlock */
39865 -               host->bus_ops->remove(host);
39866 -               mmc_claim_host(host);
39867 -               mmc_detach_bus(host);
39868 -               mmc_power_off(host);
39869 -               mmc_release_host(host);
39870 -               host->pm_flags = 0;
39871 -               break;
39873 -       case PM_POST_SUSPEND:
39874 -       case PM_POST_HIBERNATION:
39875 -       case PM_POST_RESTORE:
39877 -               spin_lock_irqsave(&host->lock, flags);
39878 -               host->rescan_disable = 0;
39879 -               spin_unlock_irqrestore(&host->lock, flags);
39880 -               _mmc_detect_change(host, 0, false);
39882 -       }
39884 -       return 0;
39887 -void mmc_register_pm_notifier(struct mmc_host *host)
39889 -       host->pm_notify.notifier_call = mmc_pm_notify;
39890 -       register_pm_notifier(&host->pm_notify);
39893 -void mmc_unregister_pm_notifier(struct mmc_host *host)
39895 -       unregister_pm_notifier(&host->pm_notify);
39897 -#endif
39899  static int __init mmc_init(void)
39901         int ret;
39902 diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
39903 index 575ac0257af2..db3c9c68875d 100644
39904 --- a/drivers/mmc/core/core.h
39905 +++ b/drivers/mmc/core/core.h
39906 @@ -29,6 +29,7 @@ struct mmc_bus_ops {
39907         int (*shutdown)(struct mmc_host *);
39908         int (*hw_reset)(struct mmc_host *);
39909         int (*sw_reset)(struct mmc_host *);
39910 +       bool (*cache_enabled)(struct mmc_host *);
39911  };
39913  void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
39914 @@ -93,14 +94,6 @@ int mmc_execute_tuning(struct mmc_card *card);
39915  int mmc_hs200_to_hs400(struct mmc_card *card);
39916  int mmc_hs400_to_hs200(struct mmc_card *card);
39918 -#ifdef CONFIG_PM_SLEEP
39919 -void mmc_register_pm_notifier(struct mmc_host *host);
39920 -void mmc_unregister_pm_notifier(struct mmc_host *host);
39921 -#else
39922 -static inline void mmc_register_pm_notifier(struct mmc_host *host) { }
39923 -static inline void mmc_unregister_pm_notifier(struct mmc_host *host) { }
39924 -#endif
39926  void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq);
39927  bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
39929 @@ -171,4 +164,12 @@ static inline void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
39930                 host->ops->post_req(host, mrq, err);
39933 +static inline bool mmc_cache_enabled(struct mmc_host *host)
39935 +       if (host->bus_ops->cache_enabled)
39936 +               return host->bus_ops->cache_enabled(host);
39938 +       return false;
39941  #endif
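mmc_cache_enabled() above is the usual "optional ops hook with a safe default" idiom: ask the bus driver if it supplies a .cache_enabled callback, otherwise report false. A compilable sketch of the idiom, with invented types:

#include <stdbool.h>
#include <stdio.h>

struct bus;                             /* opaque, illustrative */

struct bus_ops {
        /* Optional: drivers without a cache simply leave this NULL. */
        bool (*cache_enabled)(struct bus *b);
};

struct bus {
        const struct bus_ops *ops;
};

/* One wrapper gives every caller a single call site and a single default. */
static inline bool bus_cache_enabled(struct bus *b)
{
        if (b->ops && b->ops->cache_enabled)
                return b->ops->cache_enabled(b);
        return false;                   /* safe default: no cache */
}

static bool emmc_cache_enabled(struct bus *b) { (void)b; return true; }

static const struct bus_ops emmc_ops = { .cache_enabled = emmc_cache_enabled };
static const struct bus_ops sd_ops   = { 0 };   /* no cache hook */

int main(void)
{
        struct bus emmc = { &emmc_ops }, sd = { &sd_ops };

        printf("emmc: %d, sd: %d\n",
               bus_cache_enabled(&emmc), bus_cache_enabled(&sd));
        return 0;
}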
39942 diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
39943 index 9b89a91b6b47..fe05b3645fe9 100644
39944 --- a/drivers/mmc/core/host.c
39945 +++ b/drivers/mmc/core/host.c
39946 @@ -35,6 +35,42 @@
39948  static DEFINE_IDA(mmc_host_ida);
39950 +#ifdef CONFIG_PM_SLEEP
39951 +static int mmc_host_class_prepare(struct device *dev)
39953 +       struct mmc_host *host = cls_dev_to_mmc_host(dev);
39955 +       /*
39956 +        * It's safe to access the bus_ops pointer, as both userspace and the
39957 +        * workqueue for detecting cards are frozen at this point.
39958 +        */
39959 +       if (!host->bus_ops)
39960 +               return 0;
39962 +       /* Validate conditions for system suspend. */
39963 +       if (host->bus_ops->pre_suspend)
39964 +               return host->bus_ops->pre_suspend(host);
39966 +       return 0;
39969 +static void mmc_host_class_complete(struct device *dev)
39971 +       struct mmc_host *host = cls_dev_to_mmc_host(dev);
39973 +       _mmc_detect_change(host, 0, false);
39976 +static const struct dev_pm_ops mmc_host_class_dev_pm_ops = {
39977 +       .prepare = mmc_host_class_prepare,
39978 +       .complete = mmc_host_class_complete,
39981 +#define MMC_HOST_CLASS_DEV_PM_OPS (&mmc_host_class_dev_pm_ops)
39982 +#else
39983 +#define MMC_HOST_CLASS_DEV_PM_OPS NULL
39984 +#endif
39986  static void mmc_host_classdev_release(struct device *dev)
39988         struct mmc_host *host = cls_dev_to_mmc_host(dev);
39989 @@ -46,6 +82,7 @@ static void mmc_host_classdev_release(struct device *dev)
39990  static struct class mmc_host_class = {
39991         .name           = "mmc_host",
39992         .dev_release    = mmc_host_classdev_release,
39993 +       .pm             = MMC_HOST_CLASS_DEV_PM_OPS,
39994  };
39996  int mmc_register_host_class(void)
39997 @@ -538,8 +575,6 @@ int mmc_add_host(struct mmc_host *host)
39998  #endif
40000         mmc_start_host(host);
40001 -       mmc_register_pm_notifier(host);
40003         return 0;
40006 @@ -555,7 +590,6 @@ EXPORT_SYMBOL(mmc_add_host);
40007   */
40008  void mmc_remove_host(struct mmc_host *host)
40010 -       mmc_unregister_pm_notifier(host);
40011         mmc_stop_host(host);
40013  #ifdef CONFIG_DEBUG_FS
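The host.c change replaces the hand-rolled PM notifier with class-level dev_pm_ops, whose .prepare callback runs before devices suspend and whose .complete runs after resume; the #ifdef keeps the ops pointer NULL when sleep support is compiled out. A rough userspace sketch of that wiring, with cut-down stand-ins for the kernel structures (everything here is illustrative, including the WITH_PM_SLEEP macro standing in for CONFIG_PM_SLEEP):

#include <stddef.h>
#include <stdio.h>

struct device;                          /* opaque, illustrative */

/* Cut-down analogue of the kernel's dev_pm_ops. */
struct pm_ops {
        int  (*prepare)(struct device *dev);    /* before suspend */
        void (*complete)(struct device *dev);   /* after resume */
};

#ifdef WITH_PM_SLEEP
static int  demo_prepare(struct device *dev)  { (void)dev; puts("prepare");  return 0; }
static void demo_complete(struct device *dev) { (void)dev; puts("complete"); }

static const struct pm_ops demo_pm_ops = {
        .prepare  = demo_prepare,
        .complete = demo_complete,
};
#define DEMO_PM_OPS (&demo_pm_ops)
#else
#define DEMO_PM_OPS NULL                /* PM support compiled out */
#endif

struct class {
        const char *name;
        const struct pm_ops *pm;        /* inherited by devices in the class */
};

static struct class demo_class = {
        .name = "demo",
        .pm   = DEMO_PM_OPS,
};

int main(void)
{
        if (demo_class.pm && demo_class.pm->prepare)
                demo_class.pm->prepare(NULL);
        if (demo_class.pm && demo_class.pm->complete)
                demo_class.pm->complete(NULL);
        return 0;
}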
40014 diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
40015 index 8741271d3971..4d2b4b0da93c 100644
40016 --- a/drivers/mmc/core/mmc.c
40017 +++ b/drivers/mmc/core/mmc.c
40018 @@ -2029,6 +2029,12 @@ static void mmc_detect(struct mmc_host *host)
40019         }
40022 +static bool _mmc_cache_enabled(struct mmc_host *host)
40024 +       return host->card->ext_csd.cache_size > 0 &&
40025 +              host->card->ext_csd.cache_ctrl & 1;
40028  static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
40030         int err = 0;
40031 @@ -2208,6 +2214,7 @@ static const struct mmc_bus_ops mmc_ops = {
40032         .alive = mmc_alive,
40033         .shutdown = mmc_shutdown,
40034         .hw_reset = _mmc_hw_reset,
40035 +       .cache_enabled = _mmc_cache_enabled,
40036  };
40038  /*
40039 diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
40040 index 265d95ec82ce..c458f6b626a2 100644
40041 --- a/drivers/mmc/core/mmc_ops.c
40042 +++ b/drivers/mmc/core/mmc_ops.c
40043 @@ -988,9 +988,7 @@ int mmc_flush_cache(struct mmc_card *card)
40045         int err = 0;
40047 -       if (mmc_card_mmc(card) &&
40048 -                       (card->ext_csd.cache_size > 0) &&
40049 -                       (card->ext_csd.cache_ctrl & 1)) {
40050 +       if (mmc_cache_enabled(card->host)) {
40051                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
40052                                  EXT_CSD_FLUSH_CACHE, 1,
40053                                  MMC_CACHE_FLUSH_TIMEOUT_MS);
40054 diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
40055 index 6fa51a6ed058..2c48d6504101 100644
40056 --- a/drivers/mmc/core/sd.c
40057 +++ b/drivers/mmc/core/sd.c
40058 @@ -135,6 +135,9 @@ static int mmc_decode_csd(struct mmc_card *card)
40059                         csd->erase_size = UNSTUFF_BITS(resp, 39, 7) + 1;
40060                         csd->erase_size <<= csd->write_blkbits - 9;
40061                 }
40063 +               if (UNSTUFF_BITS(resp, 13, 1))
40064 +                       mmc_card_set_readonly(card);
40065                 break;
40066         case 1:
40067                 /*
40068 @@ -169,6 +172,9 @@ static int mmc_decode_csd(struct mmc_card *card)
40069                 csd->write_blkbits = 9;
40070                 csd->write_partial = 0;
40071                 csd->erase_size = 1;
40073 +               if (UNSTUFF_BITS(resp, 13, 1))
40074 +                       mmc_card_set_readonly(card);
40075                 break;
40076         default:
40077                 pr_err("%s: unrecognised CSD structure version %d\n",
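Both CSD-version branches above test a single bit, bit 13 of the 128-bit CSD (the card's permanent write-protect flag), via UNSTUFF_BITS. The following standalone function is modeled on how that macro behaves, with resp[0] holding the most significant 32 bits as in the kernel; it is an illustration, not the macro itself:

#include <stdint.h>
#include <stdio.h>

/*
 * Extract 'size' bits starting at bit 'start' from a 128-bit response
 * stored as four 32-bit words, resp[0] = most significant word.
 */
static uint32_t unstuff_bits(const uint32_t resp[4], unsigned start, unsigned size)
{
        uint32_t mask = (size < 32 ? 1u << size : 0u) - 1u;
        unsigned off  = 3 - (start / 32);       /* word holding bit 'start' */
        unsigned shft = start % 32;
        uint32_t res  = resp[off] >> shft;

        if (size + shft > 32)                   /* field straddles two words */
                res |= resp[off - 1] << ((32 - shft) % 32);

        return res & mask;
}

int main(void)
{
        /* Least significant word has bit 13 set: permanently read-only. */
        uint32_t resp[4] = { 0, 0, 0, 1u << 13 };

        if (unstuff_bits(resp, 13, 1))
                puts("card is permanently write-protected");
        return 0;
}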
40078 diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
40079 index 0fda7784cab2..3eb94ac2712e 100644
40080 --- a/drivers/mmc/core/sdio.c
40081 +++ b/drivers/mmc/core/sdio.c
40082 @@ -985,21 +985,37 @@ static void mmc_sdio_detect(struct mmc_host *host)
40083   */
40084  static int mmc_sdio_pre_suspend(struct mmc_host *host)
40086 -       int i, err = 0;
40087 +       int i;
40089         for (i = 0; i < host->card->sdio_funcs; i++) {
40090                 struct sdio_func *func = host->card->sdio_func[i];
40091                 if (func && sdio_func_present(func) && func->dev.driver) {
40092                         const struct dev_pm_ops *pmops = func->dev.driver->pm;
40093 -                       if (!pmops || !pmops->suspend || !pmops->resume) {
40094 +                       if (!pmops || !pmops->suspend || !pmops->resume)
40095                                 /* force removal of entire card in that case */
40096 -                               err = -ENOSYS;
40097 -                               break;
40098 -                       }
40099 +                               goto remove;
40100                 }
40101         }
40103 -       return err;
40104 +       return 0;
40106 +remove:
40107 +       if (!mmc_card_is_removable(host)) {
40108 +               dev_warn(mmc_dev(host),
40109 +                        "missing suspend/resume ops for non-removable SDIO card\n");
40110 +               /* Don't remove a non-removable card - we can't re-detect it. */
40111 +               return 0;
40112 +       }
40114 +       /* Remove the SDIO card and let it be re-detected later on. */
40115 +       mmc_sdio_remove(host);
40116 +       mmc_claim_host(host);
40117 +       mmc_detach_bus(host);
40118 +       mmc_power_off(host);
40119 +       mmc_release_host(host);
40120 +       host->pm_flags = 0;
40122 +       return 0;
40125  /*
40126 diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
40127 index b8b771b643cc..016a6106151a 100644
40128 --- a/drivers/mmc/host/meson-gx-mmc.c
40129 +++ b/drivers/mmc/host/meson-gx-mmc.c
40130 @@ -236,7 +236,8 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
40131         if (host->dram_access_quirk)
40132                 return;
40134 -       if (data->blocks > 1) {
40135 +       /* SD_IO_RW_EXTENDED (CMD53) can also use block mode under the hood */
40136 +       if (data->blocks > 1 || mrq->cmd->opcode == SD_IO_RW_EXTENDED) {
40137                 /*
40138                  * In block mode DMA descriptor format, "length" field indicates
40139                  * number of blocks and there is no way to pass DMA size that
40140 @@ -258,7 +259,9 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
40141         for_each_sg(data->sg, sg, data->sg_len, i) {
40142                 /* check for 8 byte alignment */
40143                 if (sg->offset % 8) {
40144 -                       WARN_ONCE(1, "unaligned scatterlist buffer\n");
40145 +                       dev_warn_once(mmc_dev(mmc),
40146 +                                     "unaligned sg offset %u, disabling descriptor DMA for transfer\n",
40147 +                                     sg->offset);
40148                         return;
40149                 }
40150         }
40151 diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
40152 index f9780c65ebe9..f24623aac2db 100644
40153 --- a/drivers/mmc/host/sdhci-brcmstb.c
40154 +++ b/drivers/mmc/host/sdhci-brcmstb.c
40155 @@ -199,7 +199,6 @@ static int sdhci_brcmstb_add_host(struct sdhci_host *host,
40156         if (dma64) {
40157                 dev_dbg(mmc_dev(host->mmc), "Using 64 bit DMA\n");
40158                 cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
40159 -               cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
40160         }
40162         ret = cqhci_init(cq_host, host->mmc, dma64);
40163 diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
40164 index a20459744d21..94327988da91 100644
40165 --- a/drivers/mmc/host/sdhci-esdhc-imx.c
40166 +++ b/drivers/mmc/host/sdhci-esdhc-imx.c
40167 @@ -1488,7 +1488,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
40169         mmc_of_parse_voltage(np, &host->ocr_mask);
40171 -       if (esdhc_is_usdhc(imx_data)) {
40172 +       if (esdhc_is_usdhc(imx_data) && !IS_ERR(imx_data->pinctrl)) {
40173                 imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
40174                                                 ESDHC_PINCTRL_STATE_100MHZ);
40175                 imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
40176 diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
40177 index 9552708846ca..bf04a08eeba1 100644
40178 --- a/drivers/mmc/host/sdhci-pci-core.c
40179 +++ b/drivers/mmc/host/sdhci-pci-core.c
40180 @@ -516,6 +516,7 @@ struct intel_host {
40181         int     drv_strength;
40182         bool    d3_retune;
40183         bool    rpm_retune_ok;
40184 +       bool    needs_pwr_off;
40185         u32     glk_rx_ctrl1;
40186         u32     glk_tun_val;
40187         u32     active_ltr;
40188 @@ -643,9 +644,25 @@ static int bxt_get_cd(struct mmc_host *mmc)
40189  static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
40190                                   unsigned short vdd)
40192 +       struct sdhci_pci_slot *slot = sdhci_priv(host);
40193 +       struct intel_host *intel_host = sdhci_pci_priv(slot);
40194         int cntr;
40195         u8 reg;
40197 +       /*
40198 +        * Bus power may control card power, but a full reset still may not
40199 +        * reset the power, whereas a direct write to SDHCI_POWER_CONTROL can.
40200 +        * That might be needed to initialize correctly, if the card was left
40201 +        * powered on previously.
40202 +        */
40203 +       if (intel_host->needs_pwr_off) {
40204 +               intel_host->needs_pwr_off = false;
40205 +               if (mode != MMC_POWER_OFF) {
40206 +                       sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
40207 +                       usleep_range(10000, 12500);
40208 +               }
40209 +       }
40211         sdhci_set_power(host, mode, vdd);
40213         if (mode == MMC_POWER_OFF)
40214 @@ -1135,6 +1152,14 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
40215         return 0;
40218 +static void byt_needs_pwr_off(struct sdhci_pci_slot *slot)
40220 +       struct intel_host *intel_host = sdhci_pci_priv(slot);
40221 +       u8 reg = sdhci_readb(slot->host, SDHCI_POWER_CONTROL);
40223 +       intel_host->needs_pwr_off = reg & SDHCI_POWER_ON;
40226  static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
40228         byt_probe_slot(slot);
40229 @@ -1152,6 +1177,8 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
40230             slot->chip->pdev->subsystem_device == PCI_SUBDEVICE_ID_NI_78E3)
40231                 slot->host->mmc->caps2 |= MMC_CAP2_AVOID_3_3V;
40233 +       byt_needs_pwr_off(slot);
40235         return 0;
40238 @@ -1903,6 +1930,8 @@ static const struct pci_device_id pci_ids[] = {
40239         SDHCI_PCI_DEVICE(INTEL, CMLH_SD,   intel_byt_sd),
40240         SDHCI_PCI_DEVICE(INTEL, JSL_EMMC,  intel_glk_emmc),
40241         SDHCI_PCI_DEVICE(INTEL, JSL_SD,    intel_byt_sd),
40242 +       SDHCI_PCI_DEVICE(INTEL, LKF_EMMC,  intel_glk_emmc),
40243 +       SDHCI_PCI_DEVICE(INTEL, LKF_SD,    intel_byt_sd),
40244         SDHCI_PCI_DEVICE(O2, 8120,     o2),
40245         SDHCI_PCI_DEVICE(O2, 8220,     o2),
40246         SDHCI_PCI_DEVICE(O2, 8221,     o2),
40247 diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
40248 index 4a0f69b97a78..757211922506 100644
40249 --- a/drivers/mmc/host/sdhci-pci-gli.c
40250 +++ b/drivers/mmc/host/sdhci-pci-gli.c
40251 @@ -587,8 +587,13 @@ static void sdhci_gli_voltage_switch(struct sdhci_host *host)
40252          *
40253          * Wait 5ms after set 1.8V signal enable in Host Control 2 register
40254          * to ensure 1.8V signal enable bit is set by GL9750/GL9755.
40255 +        *
40256 +        * ...however, the controller in the NUC10i3FNK4 (a 9755) requires
40257 +        * slightly longer than 5ms before the control register reports that
40258 +        * 1.8V is ready, and far longer still before the card will actually
40259 +        * work reliably.
40260          */
40261 -       usleep_range(5000, 5500);
40262 +       usleep_range(100000, 110000);
40265  static void sdhci_gl9750_reset(struct sdhci_host *host, u8 mask)
40266 diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
40267 index d0ed232af0eb..8f90c4163bb5 100644
40268 --- a/drivers/mmc/host/sdhci-pci.h
40269 +++ b/drivers/mmc/host/sdhci-pci.h
40270 @@ -57,6 +57,8 @@
40271  #define PCI_DEVICE_ID_INTEL_CMLH_SD    0x06f5
40272  #define PCI_DEVICE_ID_INTEL_JSL_EMMC   0x4dc4
40273  #define PCI_DEVICE_ID_INTEL_JSL_SD     0x4df8
40274 +#define PCI_DEVICE_ID_INTEL_LKF_EMMC   0x98c4
40275 +#define PCI_DEVICE_ID_INTEL_LKF_SD     0x98f8
40277  #define PCI_DEVICE_ID_SYSKONNECT_8000  0x8000
40278  #define PCI_DEVICE_ID_VIA_95D0         0x95d0
40279 diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
40280 index 41d193fa77bb..8ea9132ebca4 100644
40281 --- a/drivers/mmc/host/sdhci-tegra.c
40282 +++ b/drivers/mmc/host/sdhci-tegra.c
40283 @@ -119,6 +119,10 @@
40284  /* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
40285  #define SDHCI_TEGRA_CQE_BASE_ADDR                      0xF000
40287 +#define SDHCI_TEGRA_CQE_TRNS_MODE      (SDHCI_TRNS_MULTI | \
40288 +                                        SDHCI_TRNS_BLK_CNT_EN | \
40289 +                                        SDHCI_TRNS_DMA)
40291  struct sdhci_tegra_soc_data {
40292         const struct sdhci_pltfm_data *pdata;
40293         u64 dma_mask;
40294 @@ -1156,6 +1160,7 @@ static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
40295  static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
40297         struct mmc_host *mmc = cq_host->mmc;
40298 +       struct sdhci_host *host = mmc_priv(mmc);
40299         u8 ctrl;
40300         ktime_t timeout;
40301         bool timed_out;
40302 @@ -1170,6 +1175,7 @@ static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
40303          */
40304         if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
40305             cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
40306 +               sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
40307                 sdhci_cqe_enable(mmc);
40308                 writel(val, cq_host->mmio + reg);
40309                 timeout = ktime_add_us(ktime_get(), 50);
40310 @@ -1205,6 +1211,7 @@ static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
40311  static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
40313         struct cqhci_host *cq_host = mmc->cqe_private;
40314 +       struct sdhci_host *host = mmc_priv(mmc);
40315         u32 val;
40317         /*
40318 @@ -1218,6 +1225,7 @@ static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
40319                 if (val & CQHCI_ENABLE)
40320                         cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
40321                                      CQHCI_CFG);
40322 +               sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
40323                 sdhci_cqe_enable(mmc);
40324                 if (val & CQHCI_ENABLE)
40325                         cqhci_writel(cq_host, val, CQHCI_CFG);
40326 @@ -1281,12 +1289,36 @@ static void tegra_sdhci_set_timeout(struct sdhci_host *host,
40327         __sdhci_set_timeout(host, cmd);
40330 +static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc)
40332 +       struct cqhci_host *cq_host = mmc->cqe_private;
40333 +       u32 reg;
40335 +       reg = cqhci_readl(cq_host, CQHCI_CFG);
40336 +       reg |= CQHCI_ENABLE;
40337 +       cqhci_writel(cq_host, reg, CQHCI_CFG);
40340 +static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc)
40342 +       struct cqhci_host *cq_host = mmc->cqe_private;
40343 +       struct sdhci_host *host = mmc_priv(mmc);
40344 +       u32 reg;
40346 +       reg = cqhci_readl(cq_host, CQHCI_CFG);
40347 +       reg &= ~CQHCI_ENABLE;
40348 +       cqhci_writel(cq_host, reg, CQHCI_CFG);
40349 +       sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
40352  static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
40353         .write_l    = tegra_cqhci_writel,
40354         .enable = sdhci_tegra_cqe_enable,
40355         .disable = sdhci_cqe_disable,
40356         .dumpregs = sdhci_tegra_dumpregs,
40357         .update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
40358 +       .pre_enable = sdhci_tegra_cqe_pre_enable,
40359 +       .post_disable = sdhci_tegra_cqe_post_disable,
40360  };
40362  static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
40363 diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
40364 index 2d73407ee52e..a9e20818ff3a 100644
40365 --- a/drivers/mmc/host/sdhci.c
40366 +++ b/drivers/mmc/host/sdhci.c
40367 @@ -2996,6 +2996,37 @@ static bool sdhci_request_done(struct sdhci_host *host)
40368                 return true;
40369         }
40371 +       /*
40372 +        * The controller needs a reset of internal state machines
40373 +        * upon error conditions.
40374 +        */
40375 +       if (sdhci_needs_reset(host, mrq)) {
40376 +               /*
40377 +                * Do not finish until command and data lines are available for
40378 +                * reset. Note there can only be one other mrq, so it cannot
40379 +                * also be in mrqs_done, otherwise host->cmd and host->data_cmd
40380 +                * would both be null.
40381 +                */
40382 +               if (host->cmd || host->data_cmd) {
40383 +                       spin_unlock_irqrestore(&host->lock, flags);
40384 +                       return true;
40385 +               }
40387 +               /* Some controllers need this kick or reset won't work here */
40388 +               if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
40389 +                       /* This is to force an update */
40390 +                       host->ops->set_clock(host, host->clock);
40392 +               /*
40393 +                * Spec says we should do both at the same time, but Ricoh
40394 +                * controllers do not like that.
40395 +                */
40396 +               sdhci_do_reset(host, SDHCI_RESET_CMD);
40397 +               sdhci_do_reset(host, SDHCI_RESET_DATA);
40399 +               host->pending_reset = false;
40400 +       }
40402         /*
40403          * Always unmap the data buffers if they were mapped by
40404          * sdhci_prepare_data() whenever we finish with a request.
40405 @@ -3059,35 +3090,6 @@ static bool sdhci_request_done(struct sdhci_host *host)
40406                 }
40407         }
40409 -       /*
40410 -        * The controller needs a reset of internal state machines
40411 -        * upon error conditions.
40412 -        */
40413 -       if (sdhci_needs_reset(host, mrq)) {
40414 -               /*
40415 -                * Do not finish until command and data lines are available for
40416 -                * reset. Note there can only be one other mrq, so it cannot
40417 -                * also be in mrqs_done, otherwise host->cmd and host->data_cmd
40418 -                * would both be null.
40419 -                */
40420 -               if (host->cmd || host->data_cmd) {
40421 -                       spin_unlock_irqrestore(&host->lock, flags);
40422 -                       return true;
40423 -               }
40425 -               /* Some controllers need this kick or reset won't work here */
40426 -               if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
40427 -                       /* This is to force an update */
40428 -                       host->ops->set_clock(host, host->clock);
40430 -               /* Spec says we should do both at the same time, but Ricoh
40431 -                  controllers do not like that. */
40432 -               sdhci_do_reset(host, SDHCI_RESET_CMD);
40433 -               sdhci_do_reset(host, SDHCI_RESET_DATA);
40435 -               host->pending_reset = false;
40436 -       }
40438         host->mrqs_done[i] = NULL;
40440         spin_unlock_irqrestore(&host->lock, flags);
40441 diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c
40442 index 2413b6750cec..ccbf9885a52b 100644
40443 --- a/drivers/mmc/host/uniphier-sd.c
40444 +++ b/drivers/mmc/host/uniphier-sd.c
40445 @@ -635,7 +635,7 @@ static int uniphier_sd_probe(struct platform_device *pdev)
40447         ret = tmio_mmc_host_probe(host);
40448         if (ret)
40449 -               goto free_host;
40450 +               goto disable_clk;
40452         ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED,
40453                                dev_name(dev), host);
40454 @@ -646,6 +646,8 @@ static int uniphier_sd_probe(struct platform_device *pdev)
40456  remove_host:
40457         tmio_mmc_host_remove(host);
40458 +disable_clk:
40459 +       uniphier_sd_clk_disable(host);
40460  free_host:
40461         tmio_mmc_host_free(host);
40463 @@ -658,6 +660,7 @@ static int uniphier_sd_remove(struct platform_device *pdev)
40465         tmio_mmc_host_remove(host);
40466         uniphier_sd_clk_disable(host);
40467 +       tmio_mmc_host_free(host);
40469         return 0;
40471 diff --git a/drivers/mtd/maps/physmap-bt1-rom.c b/drivers/mtd/maps/physmap-bt1-rom.c
40472 index a35450002284..58782cfaf71c 100644
40473 --- a/drivers/mtd/maps/physmap-bt1-rom.c
40474 +++ b/drivers/mtd/maps/physmap-bt1-rom.c
40475 @@ -79,7 +79,7 @@ static void __xipram bt1_rom_map_copy_from(struct map_info *map,
40476         if (shift) {
40477                 chunk = min_t(ssize_t, 4 - shift, len);
40478                 data = readl_relaxed(src - shift);
40479 -               memcpy(to, &data + shift, chunk);
40480 +               memcpy(to, (char *)&data + shift, chunk);
40481                 src += chunk;
40482                 to += chunk;
40483                 len -= chunk;
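The physmap-bt1-rom fix above looks cosmetic but is not: in C, `&data + shift` advances by `shift * sizeof(data)` bytes because pointer arithmetic scales by the pointee type, so the old memcpy() source pointed past `data` for any non-zero shift. Casting to a byte pointer first makes the offset a byte offset, which is what the copy loop intends. A small demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t data = 0x44332211;     /* little-endian byte view: 11 22 33 44 */
        unsigned shift = 1;

        /*
         * Scaled arithmetic: steps in units of sizeof(uint32_t) == 4 bytes.
         * "&data + 1" is one-past-the-object; reading through it, as the
         * old memcpy() source effectively did, is undefined behaviour.
         */
        printf("&data     = %p\n", (void *)&data);
        printf("&data + 1 = %p  (4 bytes away)\n", (void *)(&data + 1));

        /* Byte arithmetic: the intended "skip 'shift' bytes into data". */
        unsigned char b = *((unsigned char *)&data + shift);
        printf("byte at offset %u = 0x%02x\n", shift, b);
        return 0;
}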
40484 diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
40485 index 001ed5deb622..4f63b8430c71 100644
40486 --- a/drivers/mtd/maps/physmap-core.c
40487 +++ b/drivers/mtd/maps/physmap-core.c
40488 @@ -69,8 +69,10 @@ static int physmap_flash_remove(struct platform_device *dev)
40489         int i, err = 0;
40491         info = platform_get_drvdata(dev);
40492 -       if (!info)
40493 +       if (!info) {
40494 +               err = -EINVAL;
40495                 goto out;
40496 +       }
40498         if (info->cmtd) {
40499                 err = mtd_device_unregister(info->cmtd);
40500 diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
40501 index 323035d4f2d0..688de663cabf 100644
40502 --- a/drivers/mtd/mtdchar.c
40503 +++ b/drivers/mtd/mtdchar.c
40504 @@ -651,16 +651,12 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
40505         case MEMGETINFO:
40506         case MEMREADOOB:
40507         case MEMREADOOB64:
40508 -       case MEMLOCK:
40509 -       case MEMUNLOCK:
40510         case MEMISLOCKED:
40511         case MEMGETOOBSEL:
40512         case MEMGETBADBLOCK:
40513 -       case MEMSETBADBLOCK:
40514         case OTPSELECT:
40515         case OTPGETREGIONCOUNT:
40516         case OTPGETREGIONINFO:
40517 -       case OTPLOCK:
40518         case ECCGETLAYOUT:
40519         case ECCGETSTATS:
40520         case MTDFILEMODE:
40521 @@ -671,9 +667,13 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
40522         /* "dangerous" commands */
40523         case MEMERASE:
40524         case MEMERASE64:
40525 +       case MEMLOCK:
40526 +       case MEMUNLOCK:
40527 +       case MEMSETBADBLOCK:
40528         case MEMWRITEOOB:
40529         case MEMWRITEOOB64:
40530         case MEMWRITE:
40531 +       case OTPLOCK:
40532                 if (!(file->f_mode & FMODE_WRITE))
40533                         return -EPERM;
40534                 break;
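The mtdchar hunks reclassify MEMLOCK, MEMUNLOCK, MEMSETBADBLOCK and OTPLOCK as state-changing ioctls, so they are now refused on descriptors opened without write access. A sketch of the gating idiom, with invented command names and numbers rather than the real MTD ioctl values:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative command numbers, not the real MTD ioctls. */
enum { CMD_GETINFO = 1, CMD_ERASE, CMD_LOCK, CMD_UNLOCK };

static int demo_ioctl(bool opened_for_write, int cmd)
{
        switch (cmd) {
        /* Read-only queries: always allowed. */
        case CMD_GETINFO:
                break;

        /*
         * State-changing ("dangerous") commands require write access.
         * Locking and unlocking mutate device state just like an erase,
         * which is why the patch moves them into this bucket.
         */
        case CMD_ERASE:
        case CMD_LOCK:
        case CMD_UNLOCK:
                if (!opened_for_write)
                        return -EPERM;
                break;

        default:
                return -ENOTTY;
        }
        printf("cmd %d accepted\n", cmd);
        return 0;
}

int main(void)
{
        printf("lock on O_RDONLY fd -> %d\n", demo_ioctl(false, CMD_LOCK));
        printf("lock on O_RDWR   fd -> %d\n", demo_ioctl(true,  CMD_LOCK));
        return 0;
}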
40535 diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
40536 index 2d6423d89a17..d97ddc65b5d4 100644
40537 --- a/drivers/mtd/mtdcore.c
40538 +++ b/drivers/mtd/mtdcore.c
40539 @@ -820,6 +820,9 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
40541         /* Prefer parsed partitions over driver-provided fallback */
40542         ret = parse_mtd_partitions(mtd, types, parser_data);
40543 +       if (ret == -EPROBE_DEFER)
40544 +               goto out;
40546         if (ret > 0)
40547                 ret = 0;
40548         else if (nr_parts)
40549 diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
40550 index 12ca4f19cb14..665fd9020b76 100644
40551 --- a/drivers/mtd/mtdpart.c
40552 +++ b/drivers/mtd/mtdpart.c
40553 @@ -331,7 +331,7 @@ static int __del_mtd_partitions(struct mtd_info *mtd)
40555         list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
40556                 if (mtd_has_partitions(child))
40557 -                       del_mtd_partitions(child);
40558 +                       __del_mtd_partitions(child);
40560                 pr_info("Deleting %s MTD partition\n", child->name);
40561                 ret = del_mtd_device(child);
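The mtdpart fix makes the recursion call the already-locked worker __del_mtd_partitions() instead of the public entry point, which takes the partition mutex itself and would self-deadlock when re-entered. This locked-wrapper/unlocked-worker split is a stock pattern; a compilable sketch using a pthread mutex (names invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

struct node {
        struct node *child;
        const char *name;
};

/* Worker: caller must hold tree_lock; recurses on itself, never the wrapper. */
static void __delete_subtree(struct node *n)
{
        if (!n)
                return;
        __delete_subtree(n->child);     /* recursion stays lock-free */
        printf("deleting %s\n", n->name);
}

/* Public entry point: takes the lock exactly once. */
static void delete_subtree(struct node *n)
{
        pthread_mutex_lock(&tree_lock);
        __delete_subtree(n);
        pthread_mutex_unlock(&tree_lock);
}

int main(void)
{
        struct node leaf = { NULL, "leaf" };
        struct node root = { &leaf, "root" };

        delete_subtree(&root);
        return 0;
}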
40562 diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
40563 index e6ceec8f50dc..8aab1017b460 100644
40564 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c
40565 +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
40566 @@ -883,10 +883,12 @@ static int atmel_nand_pmecc_correct_data(struct nand_chip *chip, void *buf,
40567                                                           NULL, 0,
40568                                                           chip->ecc.strength);
40570 -               if (ret >= 0)
40571 +               if (ret >= 0) {
40572 +                       mtd->ecc_stats.corrected += ret;
40573                         max_bitflips = max(ret, max_bitflips);
40574 -               else
40575 +               } else {
40576                         mtd->ecc_stats.failed++;
40577 +               }
40579                 databuf += chip->ecc.size;
40580                 eccbuf += chip->ecc.bytes;
40581 diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
40582 index 659eaa6f0980..5ff4291380c5 100644
40583 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
40584 +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
40585 @@ -2688,6 +2688,12 @@ static int brcmnand_attach_chip(struct nand_chip *chip)
40587         ret = brcmstb_choose_ecc_layout(host);
40589 +       /* If OOB is written with ECC enabled it will cause ECC errors */
40590 +       if (is_hamming_ecc(host->ctrl, &host->hwcfg)) {
40591 +               chip->ecc.write_oob = brcmnand_write_oob_raw;
40592 +               chip->ecc.read_oob = brcmnand_read_oob_raw;
40593 +       }
40595         return ret;
40598 diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
40599 index 0101c0fab50a..a24e2f57fa68 100644
40600 --- a/drivers/mtd/nand/raw/fsmc_nand.c
40601 +++ b/drivers/mtd/nand/raw/fsmc_nand.c
40602 @@ -1077,11 +1077,13 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
40603                 host->read_dma_chan = dma_request_channel(mask, filter, NULL);
40604                 if (!host->read_dma_chan) {
40605                         dev_err(&pdev->dev, "Unable to get read dma channel\n");
40606 +                       ret = -ENODEV;
40607                         goto disable_clk;
40608                 }
40609                 host->write_dma_chan = dma_request_channel(mask, filter, NULL);
40610                 if (!host->write_dma_chan) {
40611                         dev_err(&pdev->dev, "Unable to get write dma channel\n");
40612 +                       ret = -ENODEV;
40613                         goto release_dma_read_chan;
40614                 }
40615         }
40616 diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
40617 index 3fa8c22d3f36..4d08e4ab5c1b 100644
40618 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
40619 +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
40620 @@ -2449,7 +2449,7 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
40621         this->bch_geometry.auxiliary_size = 128;
40622         ret = gpmi_alloc_dma_buffer(this);
40623         if (ret)
40624 -               goto err_out;
40625 +               return ret;
40627         nand_controller_init(&this->base);
40628         this->base.ops = &gpmi_nand_controller_ops;
40629 diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
40630 index fd4c318b520f..87c23bb320bf 100644
40631 --- a/drivers/mtd/nand/raw/qcom_nandc.c
40632 +++ b/drivers/mtd/nand/raw/qcom_nandc.c
40633 @@ -2898,7 +2898,7 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
40634         struct device *dev = nandc->dev;
40635         struct device_node *dn = dev->of_node, *child;
40636         struct qcom_nand_host *host;
40637 -       int ret;
40638 +       int ret = -ENODEV;
40640         for_each_available_child_of_node(dn, child) {
40641                 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
40642 @@ -2916,10 +2916,7 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
40643                 list_add_tail(&host->node, &nandc->host_list);
40644         }
40646 -       if (list_empty(&nandc->host_list))
40647 -               return -ENODEV;
40649 -       return 0;
40650 +       return ret;
40653  /* parse custom DT properties here */
40654 diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
40655 index 61d932c1b718..17f63f95f4a2 100644
40656 --- a/drivers/mtd/nand/spi/core.c
40657 +++ b/drivers/mtd/nand/spi/core.c
40658 @@ -1263,12 +1263,14 @@ static const struct spi_device_id spinand_ids[] = {
40659         { .name = "spi-nand" },
40660         { /* sentinel */ },
40661  };
40662 +MODULE_DEVICE_TABLE(spi, spinand_ids);
40664  #ifdef CONFIG_OF
40665  static const struct of_device_id spinand_of_ids[] = {
40666         { .compatible = "spi-nand" },
40667         { /* sentinel */ },
40668  };
40669 +MODULE_DEVICE_TABLE(of, spinand_of_ids);
40670  #endif
40672  static struct spi_mem_driver spinand_drv = {
40673 diff --git a/drivers/mtd/parsers/qcomsmempart.c b/drivers/mtd/parsers/qcomsmempart.c
40674 index 808cb33d71f8..d9083308f6ba 100644
40675 --- a/drivers/mtd/parsers/qcomsmempart.c
40676 +++ b/drivers/mtd/parsers/qcomsmempart.c
40677 @@ -65,6 +65,13 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
40678         int ret, i, numparts;
40679         char *name, *c;
40681 +       if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS)
40682 +                       && mtd->type == MTD_NORFLASH) {
40683 +               pr_err("%s: SMEM partition parser is incompatible with 4K sectors\n",
40684 +                               mtd->name);
40685 +               return -EINVAL;
40686 +       }
40688         pr_debug("Parsing partition table info from SMEM\n");
40689         ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len);
40690         if (IS_ERR(ptable)) {
40691 @@ -104,7 +111,7 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
40692          * complete partition table
40693          */
40694         ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len);
40695 -       if (IS_ERR_OR_NULL(ptable)) {
40696 +       if (IS_ERR(ptable)) {
40697                 pr_err("Error reading partition table\n");
40698                 return PTR_ERR(ptable);
40699         }
40700 diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
40701 index 0522304f52fa..72bc1342c3ff 100644
40702 --- a/drivers/mtd/spi-nor/core.c
40703 +++ b/drivers/mtd/spi-nor/core.c
40704 @@ -3301,6 +3301,37 @@ static void spi_nor_resume(struct mtd_info *mtd)
40705                 dev_err(dev, "resume() failed\n");
40708 +static int spi_nor_get_device(struct mtd_info *mtd)
40710 +       struct mtd_info *master = mtd_get_master(mtd);
40711 +       struct spi_nor *nor = mtd_to_spi_nor(master);
40712 +       struct device *dev;
40714 +       if (nor->spimem)
40715 +               dev = nor->spimem->spi->controller->dev.parent;
40716 +       else
40717 +               dev = nor->dev;
40719 +       if (!try_module_get(dev->driver->owner))
40720 +               return -ENODEV;
40722 +       return 0;
40725 +static void spi_nor_put_device(struct mtd_info *mtd)
40727 +       struct mtd_info *master = mtd_get_master(mtd);
40728 +       struct spi_nor *nor = mtd_to_spi_nor(master);
40729 +       struct device *dev;
40731 +       if (nor->spimem)
40732 +               dev = nor->spimem->spi->controller->dev.parent;
40733 +       else
40734 +               dev = nor->dev;
40736 +       module_put(dev->driver->owner);
40739  void spi_nor_restore(struct spi_nor *nor)
40741         /* restore the addressing mode */
40742 @@ -3495,6 +3526,8 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
40743         mtd->_read = spi_nor_read;
40744         mtd->_suspend = spi_nor_suspend;
40745         mtd->_resume = spi_nor_resume;
40746 +       mtd->_get_device = spi_nor_get_device;
40747 +       mtd->_put_device = spi_nor_put_device;
40749         if (nor->params->locking_ops) {
40750                 mtd->_lock = spi_nor_lock;
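The new _get_device/_put_device hooks pin the module that drives the underlying SPI controller (falling back to the flash's own device) for as long as the MTD is in use, so that driver cannot be unloaded mid-I/O. A generic sketch of get/put pinning with a refusal path, using plain C stand-ins for try_module_get()/module_put():

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a module refcount; the kernel's is per-module and atomic. */
struct module {
        int refs;
        bool unloading;
};

static bool try_get(struct module *m)
{
        if (m->unloading)               /* too late: refuse new users */
                return false;
        m->refs++;
        return true;
}

static void put(struct module *m) { m->refs--; }

static int open_device(struct module *owner)
{
        /* Mirrors the shape of spi_nor_get_device(): fail the open. */
        if (!try_get(owner))
                return -1;
        printf("device opened, owner refs=%d\n", owner->refs);
        return 0;
}

static void close_device(struct module *owner)
{
        put(owner);                     /* mirrors spi_nor_put_device() */
        printf("device closed, owner refs=%d\n", owner->refs);
}

int main(void)
{
        struct module ctrl = { 0, false };

        if (open_device(&ctrl) == 0)
                close_device(&ctrl);

        ctrl.unloading = true;
        if (open_device(&ctrl) != 0)
                printf("open refused: controller going away\n");
        return 0;
}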
40751 diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
40752 index 9203abaac229..662b212787d4 100644
40753 --- a/drivers/mtd/spi-nor/macronix.c
40754 +++ b/drivers/mtd/spi-nor/macronix.c
40755 @@ -73,9 +73,6 @@ static const struct flash_info macronix_parts[] = {
40756                               SECT_4K | SPI_NOR_DUAL_READ |
40757                               SPI_NOR_QUAD_READ) },
40758         { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
40759 -       { "mx25l51245g", INFO(0xc2201a, 0, 64 * 1024, 1024,
40760 -                             SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
40761 -                             SPI_NOR_4B_OPCODES) },
40762         { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024,
40763                               SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
40764                               SPI_NOR_4B_OPCODES) },
40765 diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
40766 index 3d63b15bbaa1..164071e9d457 100644
40767 --- a/drivers/net/caif/caif_hsi.c
40768 +++ b/drivers/net/caif/caif_hsi.c
40769 @@ -924,7 +924,7 @@ static void cfhsi_wake_down(struct work_struct *work)
40770                         break;
40772                 set_current_state(TASK_INTERRUPTIBLE);
40773 -               schedule_timeout(1);
40774 +               schedule_min_hrtimeout();
40775                 retry--;
40776         }
40778 diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
40779 index 6a64fe410987..c3508109263e 100644
40780 --- a/drivers/net/can/dev/skb.c
40781 +++ b/drivers/net/can/dev/skb.c
40782 @@ -151,7 +151,11 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx)
40784         struct can_priv *priv = netdev_priv(dev);
40786 -       BUG_ON(idx >= priv->echo_skb_max);
40787 +       if (idx >= priv->echo_skb_max) {
40788 +               netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
40789 +                          __func__, idx, priv->echo_skb_max);
40790 +               return;
40791 +       }
40793         if (priv->echo_skb[idx]) {
40794                 dev_kfree_skb_any(priv->echo_skb[idx]);
40795 diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
40796 index 0c8d36bc668c..f71127229caf 100644
40797 --- a/drivers/net/can/m_can/m_can.c
40798 +++ b/drivers/net/can/m_can/m_can.c
40799 @@ -1455,6 +1455,8 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
40800         int i;
40801         int putidx;
40803 +       cdev->tx_skb = NULL;
40805         /* Generate ID field for TX buffer Element */
40806         /* Common to all supported M_CAN versions */
40807         if (cf->can_id & CAN_EFF_FLAG) {
40808 @@ -1571,7 +1573,6 @@ static void m_can_tx_work_queue(struct work_struct *ws)
40809                                                    tx_work);
40811         m_can_tx_handler(cdev);
40812 -       cdev->tx_skb = NULL;
40815  static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
40816 diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
40817 index a57da43680d8..bd7d0251be10 100644
40818 --- a/drivers/net/can/spi/mcp251x.c
40819 +++ b/drivers/net/can/spi/mcp251x.c
40820 @@ -956,8 +956,6 @@ static int mcp251x_stop(struct net_device *net)
40822         priv->force_quit = 1;
40823         free_irq(spi->irq, priv);
40824 -       destroy_workqueue(priv->wq);
40825 -       priv->wq = NULL;
40827         mutex_lock(&priv->mcp_lock);
40829 @@ -1224,24 +1222,15 @@ static int mcp251x_open(struct net_device *net)
40830                 goto out_close;
40831         }
40833 -       priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
40834 -                                  0);
40835 -       if (!priv->wq) {
40836 -               ret = -ENOMEM;
40837 -               goto out_clean;
40838 -       }
40839 -       INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
40840 -       INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
40842         ret = mcp251x_hw_wake(spi);
40843         if (ret)
40844 -               goto out_free_wq;
40845 +               goto out_free_irq;
40846         ret = mcp251x_setup(net, spi);
40847         if (ret)
40848 -               goto out_free_wq;
40849 +               goto out_free_irq;
40850         ret = mcp251x_set_normal_mode(spi);
40851         if (ret)
40852 -               goto out_free_wq;
40853 +               goto out_free_irq;
40855         can_led_event(net, CAN_LED_EVENT_OPEN);
40857 @@ -1250,9 +1239,7 @@ static int mcp251x_open(struct net_device *net)
40859         return 0;
40861 -out_free_wq:
40862 -       destroy_workqueue(priv->wq);
40863 -out_clean:
40864 +out_free_irq:
40865         free_irq(spi->irq, priv);
40866         mcp251x_hw_sleep(spi);
40867  out_close:
40868 @@ -1373,6 +1360,15 @@ static int mcp251x_can_probe(struct spi_device *spi)
40869         if (ret)
40870                 goto out_clk;
40872 +       priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
40873 +                                  0);
40874 +       if (!priv->wq) {
40875 +               ret = -ENOMEM;
40876 +               goto out_clk;
40877 +       }
40878 +       INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
40879 +       INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
40881         priv->spi = spi;
40882         mutex_init(&priv->mcp_lock);
40884 @@ -1417,6 +1413,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
40885         return 0;
40887  error_probe:
40888 +       destroy_workqueue(priv->wq);
40889 +       priv->wq = NULL;
40890         mcp251x_power_enable(priv->power, 0);
40892  out_clk:
40893 @@ -1438,6 +1436,9 @@ static int mcp251x_can_remove(struct spi_device *spi)
40895         mcp251x_power_enable(priv->power, 0);
40897 +       destroy_workqueue(priv->wq);
40898 +       priv->wq = NULL;
40900         clk_disable_unprepare(priv->clk);
40902         free_candev(net);
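The mcp251x hunks move the workqueue from open()/stop() to probe()/remove(): a resource whose users (restart work, queued transmits) can outlive a single open belongs to the device's lifetime, not to the interface's up/down cycle. Skeleton of that lifecycle split, with illustrative names throughout:

#include <stdio.h>
#include <stdlib.h>

struct workqueue { const char *name; };
struct priv { struct workqueue *wq; };

static struct workqueue *alloc_wq(const char *n)
{
        struct workqueue *w = malloc(sizeof(*w));
        if (w)
                w->name = n;
        return w;
}

static void destroy_wq(struct workqueue *w) { free(w); }

/* probe(): allocate once for the whole device lifetime. */
static int probe(struct priv *p)
{
        p->wq = alloc_wq("demo_wq");
        return p->wq ? 0 : -1;
}

/*
 * open()/stop(): only start and quiesce users of the queue. They no
 * longer create or destroy it, so a stop/open cycle cannot race with
 * work items that are still queued.
 */
static void open_if(struct priv *p) { printf("using %s\n", p->wq->name); }
static void stop_if(struct priv *p) { (void)p; printf("quiesced\n"); }

/* remove(): tear down after all users are gone. */
static void remove_dev(struct priv *p)
{
        destroy_wq(p->wq);
        p->wq = NULL;
}

int main(void)
{
        struct priv p;

        if (probe(&p))
                return 1;
        open_if(&p); stop_if(&p);       /* interface may cycle many times */
        open_if(&p); stop_if(&p);
        remove_dev(&p);
        return 0;
}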
40903 diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
40904 index 799e9d5d3481..4a742aa5c417 100644
40905 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
40906 +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
40907 @@ -2856,8 +2856,8 @@ static int mcp251xfd_probe(struct spi_device *spi)
40909         clk = devm_clk_get(&spi->dev, NULL);
40910         if (IS_ERR(clk))
40911 -               dev_err_probe(&spi->dev, PTR_ERR(clk),
40912 -                             "Failed to get Oscillator (clock)!\n");
40913 +               return dev_err_probe(&spi->dev, PTR_ERR(clk),
40914 +                                    "Failed to get Oscillator (clock)!\n");
40915         freq = clk_get_rate(clk);
40917         /* Sanity check */
40918 @@ -2957,10 +2957,12 @@ static int mcp251xfd_probe(struct spi_device *spi)
40920         err = mcp251xfd_register(priv);
40921         if (err)
40922 -               goto out_free_candev;
40923 +               goto out_can_rx_offload_del;
40925         return 0;
40927 + out_can_rx_offload_del:
40928 +       can_rx_offload_del(&priv->offload);
40929   out_free_candev:
40930         spi->max_speed_hz = priv->spi_max_speed_hz_orig;
40932 diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
40933 index e393e8457d77..4274f78682d9 100644
40934 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c
40935 +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
40936 @@ -288,7 +288,7 @@ static int pcan_usb_write_mode(struct peak_usb_device *dev, u8 onoff)
40937         } else {
40938                 /* the PCAN-USB needs time to init */
40939                 set_current_state(TASK_INTERRUPTIBLE);
40940 -               schedule_timeout(msecs_to_jiffies(PCAN_USB_STARTUP_TIMEOUT));
40941 +               schedule_msec_hrtimeout((PCAN_USB_STARTUP_TIMEOUT));
40942         }
40944         return err;
40945 diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
40946 index ba5d546d06aa..9c86cacc4a72 100644
40947 --- a/drivers/net/dsa/bcm_sf2.c
40948 +++ b/drivers/net/dsa/bcm_sf2.c
40949 @@ -32,6 +32,36 @@
40950  #include "b53/b53_priv.h"
40951  #include "b53/b53_regs.h"
40953 +static u16 bcm_sf2_reg_rgmii_cntrl(struct bcm_sf2_priv *priv, int port)
40955 +       switch (priv->type) {
40956 +       case BCM4908_DEVICE_ID:
40957 +               switch (port) {
40958 +               case 7:
40959 +                       return REG_RGMII_11_CNTRL;
40960 +               default:
40961 +                       break;
40962 +               }
40963 +               break;
40964 +       default:
40965 +               switch (port) {
40966 +               case 0:
40967 +                       return REG_RGMII_0_CNTRL;
40968 +               case 1:
40969 +                       return REG_RGMII_1_CNTRL;
40970 +               case 2:
40971 +                       return REG_RGMII_2_CNTRL;
40972 +               default:
40973 +                       break;
40974 +               }
40975 +       }
40977 +       WARN_ONCE(1, "Unsupported port %d\n", port);
40979 +       /* RO fallback reg */
40980 +       return REG_SWITCH_STATUS;
40983  /* Return the number of active ports, not counting the IMP (CPU) port */
40984  static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
40986 @@ -647,6 +677,7 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
40988         struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
40989         u32 id_mode_dis = 0, port_mode;
40990 +       u32 reg_rgmii_ctrl;
40991         u32 reg;
40993         if (port == core_readl(priv, CORE_IMP0_PRT_ID))
40994 @@ -670,10 +701,12 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
40995                 return;
40996         }
40998 +       reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
41000         /* Clear id_mode_dis bit, and the existing port mode, let
41001          * RGMII_MODE_EN be set by mac_link_{up,down}
41002          */
41003 -       reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
41004 +       reg = reg_readl(priv, reg_rgmii_ctrl);
41005         reg &= ~ID_MODE_DIS;
41006         reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
41008 @@ -681,13 +714,14 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
41009         if (id_mode_dis)
41010                 reg |= ID_MODE_DIS;
41012 -       reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
41013 +       reg_writel(priv, reg, reg_rgmii_ctrl);
41016  static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
41017                                     phy_interface_t interface, bool link)
41019         struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
41020 +       u32 reg_rgmii_ctrl;
41021         u32 reg;
41023         if (!phy_interface_mode_is_rgmii(interface) &&
41024 @@ -695,13 +729,15 @@ static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
41025             interface != PHY_INTERFACE_MODE_REVMII)
41026                 return;
41028 +       reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
41030         /* If the link is down, just disable the interface to conserve power */
41031 -       reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
41032 +       reg = reg_readl(priv, reg_rgmii_ctrl);
41033         if (link)
41034                 reg |= RGMII_MODE_EN;
41035         else
41036                 reg &= ~RGMII_MODE_EN;
41037 -       reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
41038 +       reg_writel(priv, reg, reg_rgmii_ctrl);
41041  static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
41042 @@ -735,11 +771,15 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
41044         struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
41045         struct ethtool_eee *p = &priv->dev->ports[port].eee;
41046 -       u32 reg, offset;
41048         bcm_sf2_sw_mac_link_set(ds, port, interface, true);
41050         if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
41051 +               u32 reg_rgmii_ctrl;
41052 +               u32 reg, offset;
41054 +               reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
41056                 if (priv->type == BCM4908_DEVICE_ID ||
41057                     priv->type == BCM7445_DEVICE_ID)
41058                         offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
41059 @@ -750,7 +790,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
41060                     interface == PHY_INTERFACE_MODE_RGMII_TXID ||
41061                     interface == PHY_INTERFACE_MODE_MII ||
41062                     interface == PHY_INTERFACE_MODE_REVMII) {
41063 -                       reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
41064 +                       reg = reg_readl(priv, reg_rgmii_ctrl);
41065                         reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
41067                         if (tx_pause)
41068 @@ -758,7 +798,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
41069                         if (rx_pause)
41070                                 reg |= RX_PAUSE_EN;
41072 -                       reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
41073 +                       reg_writel(priv, reg, reg_rgmii_ctrl);
41074                 }
41076                 reg = SW_OVERRIDE | LINK_STS;
41077 @@ -1144,9 +1184,7 @@ static const u16 bcm_sf2_4908_reg_offsets[] = {
41078         [REG_PHY_REVISION]      = 0x14,
41079         [REG_SPHY_CNTRL]        = 0x24,
41080         [REG_CROSSBAR]          = 0xc8,
41081 -       [REG_RGMII_0_CNTRL]     = 0xe0,
41082 -       [REG_RGMII_1_CNTRL]     = 0xec,
41083 -       [REG_RGMII_2_CNTRL]     = 0xf8,
41084 +       [REG_RGMII_11_CNTRL]    = 0x014c,
41085         [REG_LED_0_CNTRL]       = 0x40,
41086         [REG_LED_1_CNTRL]       = 0x4c,
41087         [REG_LED_2_CNTRL]       = 0x58,
41088 diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
41089 index 1d2d55c9f8aa..9e141d1a0b07 100644
41090 --- a/drivers/net/dsa/bcm_sf2_regs.h
41091 +++ b/drivers/net/dsa/bcm_sf2_regs.h
41092 @@ -21,6 +21,7 @@ enum bcm_sf2_reg_offs {
41093         REG_RGMII_0_CNTRL,
41094         REG_RGMII_1_CNTRL,
41095         REG_RGMII_2_CNTRL,
41096 +       REG_RGMII_11_CNTRL,
41097         REG_LED_0_CNTRL,
41098         REG_LED_1_CNTRL,
41099         REG_LED_2_CNTRL,
41100 @@ -48,8 +49,6 @@ enum bcm_sf2_reg_offs {
41101  #define  PHY_PHYAD_SHIFT               8
41102  #define  PHY_PHYAD_MASK                        0x1F
41104 -#define REG_RGMII_CNTRL_P(x)           (REG_RGMII_0_CNTRL + (x))
41106  /* Relative to REG_RGMII_CNTRL */
41107  #define  RGMII_MODE_EN                 (1 << 0)
41108  #define  ID_MODE_DIS                   (1 << 1)
41109 diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
41110 index 21953d6d484c..ada7a38d4d31 100644
41111 --- a/drivers/net/dsa/mv88e6xxx/devlink.c
41112 +++ b/drivers/net/dsa/mv88e6xxx/devlink.c
41113 @@ -678,7 +678,7 @@ static int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds,
41114                                 sizeof(struct mv88e6xxx_devlink_atu_entry);
41115                         break;
41116                 case MV88E6XXX_REGION_VTU:
41117 -                       size = mv88e6xxx_max_vid(chip) *
41118 +                       size = (mv88e6xxx_max_vid(chip) + 1) *
41119                                 sizeof(struct mv88e6xxx_devlink_vtu_entry);
41120                         break;
41121                 }
41122 diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
41123 index 3195936dc5be..2ce04fef698d 100644
41124 --- a/drivers/net/dsa/mv88e6xxx/serdes.c
41125 +++ b/drivers/net/dsa/mv88e6xxx/serdes.c
41126 @@ -443,15 +443,15 @@ int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
41127  u8 mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
41129         /* There are no configurable serdes lanes on this switch chip but we
41130 -        * need to return non-zero so that callers of
41131 +        * need to return a non-negative lane number so that callers of
41132          * mv88e6xxx_serdes_get_lane() know this is a serdes port.
41133          */
41134         switch (chip->ports[port].cmode) {
41135         case MV88E6185_PORT_STS_CMODE_SERDES:
41136         case MV88E6185_PORT_STS_CMODE_1000BASE_X:
41137 -               return 0xff;
41138 -       default:
41139                 return 0;
41140 +       default:
41141 +               return -ENODEV;
41142         }
41145 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
41146 index b53a0d87371a..cf4249d59383 100644
41147 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
41148 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
41149 @@ -122,7 +122,10 @@ enum board_idx {
41150         NETXTREME_E_VF,
41151         NETXTREME_C_VF,
41152         NETXTREME_S_VF,
41153 +       NETXTREME_C_VF_HV,
41154 +       NETXTREME_E_VF_HV,
41155         NETXTREME_E_P5_VF,
41156 +       NETXTREME_E_P5_VF_HV,
41157  };
41159  /* indexed by enum above */
41160 @@ -170,7 +173,10 @@ static const struct {
41161         [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
41162         [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
41163         [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
41164 +       [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
41165 +       [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
41166         [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
41167 +       [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
41168  };
41170  static const struct pci_device_id bnxt_pci_tbl[] = {
41171 @@ -222,15 +228,25 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
41172         { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
41173  #ifdef CONFIG_BNXT_SRIOV
41174         { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
41175 +       { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
41176 +       { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
41177         { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
41178 +       { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
41179         { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
41180 +       { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
41181 +       { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
41182 +       { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
41183 +       { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
41184         { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
41185         { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
41186         { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
41187         { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
41188         { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
41189 +       { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
41190         { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
41191         { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
41192 +       { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
41193 +       { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
41194         { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
41195  #endif
41196         { 0 }
41197 @@ -265,7 +281,8 @@ static struct workqueue_struct *bnxt_pf_wq;
41198  static bool bnxt_vf_pciid(enum board_idx idx)
41200         return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
41201 -               idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
41202 +               idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
41203 +               idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF);
41206  #define DB_CP_REARM_FLAGS      (DB_KEY_CP | DB_IDX_VALID)
41207 @@ -1732,14 +1749,16 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
41209         cons = rxcmp->rx_cmp_opaque;
41210         if (unlikely(cons != rxr->rx_next_cons)) {
41211 -               int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
41212 +               int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
41214                 /* 0xffff is forced error, don't print it */
41215                 if (rxr->rx_next_cons != 0xffff)
41216                         netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
41217                                     cons, rxr->rx_next_cons);
41218                 bnxt_sched_reset(bp, rxr);
41219 -               return rc1;
41220 +               if (rc1)
41221 +                       return rc1;
41222 +               goto next_rx_no_prod_no_len;
41223         }
41224         rx_buf = &rxr->rx_buf_ring[cons];
41225         data = rx_buf->data;
41226 @@ -9736,7 +9755,9 @@ static ssize_t bnxt_show_temp(struct device *dev,
41227         if (!rc)
41228                 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
41229         mutex_unlock(&bp->hwrm_cmd_lock);
41230 -       return rc ?: len;
41231 +       if (rc)
41232 +               return rc;
41233 +       return len;
41235  static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
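
The last bnxt hunk expands GCC's binary `?:` shorthand into an explicit branch; the behaviour is unchanged, it only makes the error-before-length precedence obvious. A compact sketch of the equivalence (hypothetical values):

    #include <stdio.h>

    /* GNU 'rc ?: len' evaluates rc once and yields it when non-zero,
     * otherwise yields len -- exactly this function. */
    static long pick(long rc, long len)
    {
            if (rc)
                    return rc;      /* error code takes precedence */
            return len;             /* success: report the byte count */
    }

    int main(void)
    {
            printf("%ld\n", pick(0, 42));   /* 42 */
            printf("%ld\n", pick(-5, 42));  /* -5 */
            return 0;
    }
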
41237 diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
41238 index e6d4ad99cc38..3f1c189646f4 100644
41239 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
41240 +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
41241 @@ -521,7 +521,7 @@
41242  #define    CN23XX_BAR1_INDEX_OFFSET                3
41244  #define    CN23XX_PEM_BAR1_INDEX_REG(port, idx)                \
41245 -               (CN23XX_PEM_BAR1_INDEX_START + ((port) << CN23XX_PEM_OFFSET) + \
41246 +               (CN23XX_PEM_BAR1_INDEX_START + (((u64)port) << CN23XX_PEM_OFFSET) + \
41247                  ((idx) << CN23XX_BAR1_INDEX_OFFSET))
41249  /*############################ DPI #########################*/
41250 diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
41251 index f782e6af45e9..50bbe79fb93d 100644
41252 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
41253 +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
41254 @@ -776,7 +776,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
41255         mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
41256         mbx.rq.qs_num = qs->vnic_id;
41257         mbx.rq.rq_num = qidx;
41258 -       mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
41259 +       mbx.rq.cfg = ((u64)rq->caching << 26) | (rq->cq_qs << 19) |
41260                           (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
41261                           (rq->cont_qs_rbdr_idx << 8) |
41262                           (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
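
The liquidio and nicvf casts above, and the cxgb4 ones below, all fix the same C pitfall: a u8 or small bitfield operand is promoted to signed int before the shift, so once the shift reaches bit 31 the value sign-extends when it is widened into a 64-bit register field. Casting one operand to u64 first keeps the upper 32 bits clear. A self-contained illustration (userspace fixed-width types standing in for u8/u64):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t byte = 0x80;    /* high bit set, as in nat_lip[12] */

            /* What 'byte << 24' does on typical targets: the shift happens
             * as 32-bit signed int and the widening then sign-extends.
             * Simulated here with well-defined casts: */
            uint64_t bad = (int32_t)((uint32_t)byte << 24);

            /* Widen first, then shift: the upper 32 bits stay zero. */
            uint64_t good = (uint64_t)byte << 24;

            printf("bad  = 0x%016llx\n", (unsigned long long)bad);  /* ffffffff80000000 */
            printf("good = 0x%016llx\n", (unsigned long long)good); /* 0000000080000000 */
            return 0;
    }
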
41263 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
41264 index 83b46440408b..bde8494215c4 100644
41265 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
41266 +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
41267 @@ -174,31 +174,31 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
41268                                       WORD_MASK, f->fs.nat_lip[15] |
41269                                       f->fs.nat_lip[14] << 8 |
41270                                       f->fs.nat_lip[13] << 16 |
41271 -                                     f->fs.nat_lip[12] << 24, 1);
41272 +                                     (u64)f->fs.nat_lip[12] << 24, 1);
41274                         set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 1,
41275                                       WORD_MASK, f->fs.nat_lip[11] |
41276                                       f->fs.nat_lip[10] << 8 |
41277                                       f->fs.nat_lip[9] << 16 |
41278 -                                     f->fs.nat_lip[8] << 24, 1);
41279 +                                     (u64)f->fs.nat_lip[8] << 24, 1);
41281                         set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 2,
41282                                       WORD_MASK, f->fs.nat_lip[7] |
41283                                       f->fs.nat_lip[6] << 8 |
41284                                       f->fs.nat_lip[5] << 16 |
41285 -                                     f->fs.nat_lip[4] << 24, 1);
41286 +                                     (u64)f->fs.nat_lip[4] << 24, 1);
41288                         set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 3,
41289                                       WORD_MASK, f->fs.nat_lip[3] |
41290                                       f->fs.nat_lip[2] << 8 |
41291                                       f->fs.nat_lip[1] << 16 |
41292 -                                     f->fs.nat_lip[0] << 24, 1);
41293 +                                     (u64)f->fs.nat_lip[0] << 24, 1);
41294                 } else {
41295                         set_tcb_field(adap, f, tid, TCB_RX_FRAG3_LEN_RAW_W,
41296                                       WORD_MASK, f->fs.nat_lip[3] |
41297                                       f->fs.nat_lip[2] << 8 |
41298                                       f->fs.nat_lip[1] << 16 |
41299 -                                     f->fs.nat_lip[0] << 24, 1);
41300 +                                     (u64)f->fs.nat_lip[0] << 24, 1);
41301                 }
41302         }
41304 @@ -208,25 +208,25 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
41305                                       WORD_MASK, f->fs.nat_fip[15] |
41306                                       f->fs.nat_fip[14] << 8 |
41307                                       f->fs.nat_fip[13] << 16 |
41308 -                                     f->fs.nat_fip[12] << 24, 1);
41309 +                                     (u64)f->fs.nat_fip[12] << 24, 1);
41311                         set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 1,
41312                                       WORD_MASK, f->fs.nat_fip[11] |
41313                                       f->fs.nat_fip[10] << 8 |
41314                                       f->fs.nat_fip[9] << 16 |
41315 -                                     f->fs.nat_fip[8] << 24, 1);
41316 +                                     (u64)f->fs.nat_fip[8] << 24, 1);
41318                         set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 2,
41319                                       WORD_MASK, f->fs.nat_fip[7] |
41320                                       f->fs.nat_fip[6] << 8 |
41321                                       f->fs.nat_fip[5] << 16 |
41322 -                                     f->fs.nat_fip[4] << 24, 1);
41323 +                                     (u64)f->fs.nat_fip[4] << 24, 1);
41325                         set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 3,
41326                                       WORD_MASK, f->fs.nat_fip[3] |
41327                                       f->fs.nat_fip[2] << 8 |
41328                                       f->fs.nat_fip[1] << 16 |
41329 -                                     f->fs.nat_fip[0] << 24, 1);
41330 +                                     (u64)f->fs.nat_fip[0] << 24, 1);
41332                 } else {
41333                         set_tcb_field(adap, f, tid,
41334 @@ -234,13 +234,13 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
41335                                       WORD_MASK, f->fs.nat_fip[3] |
41336                                       f->fs.nat_fip[2] << 8 |
41337                                       f->fs.nat_fip[1] << 16 |
41338 -                                     f->fs.nat_fip[0] << 24, 1);
41339 +                                     (u64)f->fs.nat_fip[0] << 24, 1);
41340                 }
41341         }
41343         set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK,
41344                       (dp ? (nat_lp[1] | nat_lp[0] << 8) : 0) |
41345 -                     (sp ? (nat_fp[1] << 16 | nat_fp[0] << 24) : 0),
41346 +                     (sp ? (nat_fp[1] << 16 | (u64)nat_fp[0] << 24) : 0),
41347                       1);
41350 diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
41351 index 256fae15e032..1e5f2edb70cf 100644
41352 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
41353 +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
41354 @@ -2563,12 +2563,12 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
41355         spin_lock_bh(&eosw_txq->lock);
41356         if (tc != FW_SCHED_CLS_NONE) {
41357                 if (eosw_txq->state != CXGB4_EO_STATE_CLOSED)
41358 -                       goto out_unlock;
41359 +                       goto out_free_skb;
41361                 next_state = CXGB4_EO_STATE_FLOWC_OPEN_SEND;
41362         } else {
41363                 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
41364 -                       goto out_unlock;
41365 +                       goto out_free_skb;
41367                 next_state = CXGB4_EO_STATE_FLOWC_CLOSE_SEND;
41368         }
41369 @@ -2604,17 +2604,19 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
41370                 eosw_txq_flush_pending_skbs(eosw_txq);
41372         ret = eosw_txq_enqueue(eosw_txq, skb);
41373 -       if (ret) {
41374 -               dev_consume_skb_any(skb);
41375 -               goto out_unlock;
41376 -       }
41377 +       if (ret)
41378 +               goto out_free_skb;
41380         eosw_txq->state = next_state;
41381         eosw_txq->flowc_idx = eosw_txq->pidx;
41382         eosw_txq_advance(eosw_txq, 1);
41383         ethofld_xmit(dev, eosw_txq);
41385 -out_unlock:
41386 +       spin_unlock_bh(&eosw_txq->lock);
41387 +       return 0;
41389 +out_free_skb:
41390 +       dev_consume_skb_any(skb);
41391         spin_unlock_bh(&eosw_txq->lock);
41392         return ret;
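
The reworked exits above follow the common kernel shape: one success path that only drops the lock, and one error label that frees the skb before unlocking, so no early return can leak the buffer or leave the lock held. A skeletal userspace model of that shape (the lock, types and names are stand-ins, not the driver's):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct buf { int data; };

    static struct buf *queue;               /* stand-in for the hw queue */
    static int locked;                      /* stand-in for the spinlock */

    static void lock(void)   { locked = 1; }
    static void unlock(void) { locked = 0; }

    static int send_one(struct buf *b, int state_ok, int enqueue_ok)
    {
            int ret = 0;

            lock();
            if (!state_ok) {                /* early bail-out still frees+unlocks */
                    ret = -EBUSY;
                    goto out_free;
            }
            if (!enqueue_ok) {              /* enqueue failure: same cleanup path */
                    ret = -ENOMEM;
                    goto out_free;
            }
            queue = b;                      /* success: the queue owns the buffer */
            unlock();
            return 0;

    out_free:
            free(b);                        /* error: we still own it, so free it */
            unlock();
            return ret;
    }

    int main(void)
    {
            printf("%d\n", send_one(malloc(sizeof(struct buf)), 1, 1)); /* 0 */
            printf("%d\n", send_one(malloc(sizeof(struct buf)), 0, 1)); /* -16 */
            free(queue);
            return 0;
    }
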
41394 diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
41395 index f04ec53544ae..b1443ff439de 100644
41396 --- a/drivers/net/ethernet/cisco/enic/enic_main.c
41397 +++ b/drivers/net/ethernet/cisco/enic/enic_main.c
41398 @@ -768,7 +768,7 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
41399         return err;
41402 -static inline void enic_queue_wq_skb(struct enic *enic,
41403 +static inline int enic_queue_wq_skb(struct enic *enic,
41404         struct vnic_wq *wq, struct sk_buff *skb)
41406         unsigned int mss = skb_shinfo(skb)->gso_size;
41407 @@ -814,6 +814,7 @@ static inline void enic_queue_wq_skb(struct enic *enic,
41408                 wq->to_use = buf->next;
41409                 dev_kfree_skb(skb);
41410         }
41411 +       return err;
41414  /* netif_tx_lock held, process context with BHs disabled, or BH */
41415 @@ -857,7 +858,8 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
41416                 return NETDEV_TX_BUSY;
41417         }
41419 -       enic_queue_wq_skb(enic, wq, skb);
41420 +       if (enic_queue_wq_skb(enic, wq, skb))
41421 +               goto error;
41423         if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
41424                 netif_tx_stop_queue(txq);
41425 @@ -865,6 +867,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
41426         if (!netdev_xmit_more() || netif_xmit_stopped(txq))
41427                 vnic_wq_doorbell(wq);
41429 +error:
41430         spin_unlock(&enic->wq_lock[txq_map]);
41432         return NETDEV_TX_OK;
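
enic_queue_wq_skb() used to swallow its mapping failure, so the caller still did the stop-queue bookkeeping and rang the doorbell for a packet that was never posted. Returning the error lets the caller jump straight to the unlock. The general shape, reduced to a sketch (names invented; the NETDEV_TX_OK convention is the kernel's):

    #include <errno.h>
    #include <stdio.h>

    /* Before: the helper freed the packet on DMA failure but returned void,
     * so the caller always updated the queue and rang the doorbell. */
    static int queue_packet(int dma_ok)
    {
            if (!dma_ok)
                    return -ENOMEM; /* packet already freed, nothing posted */
            return 0;
    }

    static int xmit(int dma_ok)
    {
            if (queue_packet(dma_ok))
                    goto out;       /* skip stop-queue check and doorbell */

            printf("doorbell rung\n");
    out:
            return 0;               /* TX_OK either way: the skb is consumed */
    }

    int main(void)
    {
            xmit(1);                /* doorbell rung */
            xmit(0);                /* silently dropped */
            return 0;
    }
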
41433 diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
41434 index 67c436400352..de7b31842233 100644
41435 --- a/drivers/net/ethernet/freescale/Makefile
41436 +++ b/drivers/net/ethernet/freescale/Makefile
41437 @@ -24,6 +24,4 @@ obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/
41439  obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/
41441 -obj-$(CONFIG_FSL_ENETC) += enetc/
41442 -obj-$(CONFIG_FSL_ENETC_MDIO) += enetc/
41443 -obj-$(CONFIG_FSL_ENETC_VF) += enetc/
41444 +obj-y += enetc/
41445 diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
41446 index 3db882322b2b..70aea9c274fe 100644
41447 --- a/drivers/net/ethernet/freescale/fec_main.c
41448 +++ b/drivers/net/ethernet/freescale/fec_main.c
41449 @@ -2048,6 +2048,8 @@ static int fec_enet_mii_probe(struct net_device *ndev)
41450         fep->link = 0;
41451         fep->full_duplex = 0;
41453 +       phy_dev->mac_managed_pm = 1;
41455         phy_attached_info(phy_dev);
41457         return 0;
41458 @@ -3864,6 +3866,7 @@ static int __maybe_unused fec_resume(struct device *dev)
41459                 netif_device_attach(ndev);
41460                 netif_tx_unlock_bh(ndev);
41461                 napi_enable(&fep->napi);
41462 +               phy_init_hw(ndev->phydev);
41463                 phy_start(ndev->phydev);
41464         }
41465         rtnl_unlock();
41466 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
41467 index bf4302a5cf95..0f70158c2551 100644
41468 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
41469 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
41470 @@ -576,8 +576,8 @@ static int hns3_nic_net_stop(struct net_device *netdev)
41471         if (h->ae_algo->ops->set_timer_task)
41472                 h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
41474 -       netif_tx_stop_all_queues(netdev);
41475         netif_carrier_off(netdev);
41476 +       netif_tx_disable(netdev);
41478         hns3_nic_net_down(netdev);
41480 @@ -823,7 +823,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
41481   * and it is udp packet, which has a dest port as the IANA assigned.
41482   * the hardware is expected to do the checksum offload, but the
41483   * hardware will not do the checksum offload when udp dest port is
41484 - * 4789 or 6081.
41485 + * 4789, 4790 or 6081.
41486   */
41487  static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
41489 @@ -841,7 +841,8 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
41491         if (!(!skb->encapsulation &&
41492               (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
41493 -             l4.udp->dest == htons(GENEVE_UDP_PORT))))
41494 +             l4.udp->dest == htons(GENEVE_UDP_PORT) ||
41495 +             l4.udp->dest == htons(4790))))
41496                 return false;
41498         skb_checksum_help(skb);
41499 @@ -1277,23 +1278,21 @@ static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
41502  static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
41503 -                                  u8 max_non_tso_bd_num)
41504 +                                  u8 max_non_tso_bd_num, unsigned int bd_num,
41505 +                                  unsigned int recursion_level)
41507 +#define HNS3_MAX_RECURSION_LEVEL       24
41509         struct sk_buff *frag_skb;
41510 -       unsigned int bd_num = 0;
41512         /* If the total len is within the max bd limit */
41513 -       if (likely(skb->len <= HNS3_MAX_BD_SIZE && !skb_has_frag_list(skb) &&
41514 +       if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level &&
41515 +                  !skb_has_frag_list(skb) &&
41516                    skb_shinfo(skb)->nr_frags < max_non_tso_bd_num))
41517                 return skb_shinfo(skb)->nr_frags + 1U;
41519 -       /* The below case will always be linearized, return
41520 -        * HNS3_MAX_BD_NUM_TSO + 1U to make sure it is linearized.
41521 -        */
41522 -       if (unlikely(skb->len > HNS3_MAX_TSO_SIZE ||
41523 -                    (!skb_is_gso(skb) && skb->len >
41524 -                     HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))))
41525 -               return HNS3_MAX_TSO_BD_NUM + 1U;
41526 +       if (unlikely(recursion_level >= HNS3_MAX_RECURSION_LEVEL))
41527 +               return UINT_MAX;
41529         bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);
41531 @@ -1301,7 +1300,8 @@ static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
41532                 return bd_num;
41534         skb_walk_frags(skb, frag_skb) {
41535 -               bd_num = hns3_skb_bd_num(frag_skb, bd_size, bd_num);
41536 +               bd_num = hns3_tx_bd_num(frag_skb, bd_size, max_non_tso_bd_num,
41537 +                                       bd_num, recursion_level + 1);
41538                 if (bd_num > HNS3_MAX_TSO_BD_NUM)
41539                         return bd_num;
41540         }
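
The reworked hns3_tx_bd_num() recurses into nested fraglists but caps the depth at HNS3_MAX_RECURSION_LEVEL and reports an impossible BD count (UINT_MAX) once the cap is hit, so a pathologically deep skb chain can no longer exhaust the kernel stack. A minimal model of that bounded walk (the node type is a simplification, not the real sk_buff layout):

    #include <limits.h>
    #include <stdio.h>

    #define MAX_RECURSION_LEVEL 24

    struct node {
            unsigned int bds;       /* descriptors this fragment needs */
            struct node *frag_list; /* first nested fragment, if any */
            struct node *next;      /* next fragment at the same level */
    };

    static unsigned int count_bds(const struct node *n, unsigned int total,
                                  unsigned int level)
    {
            if (level >= MAX_RECURSION_LEVEL)
                    return UINT_MAX;        /* too deep: caller drops the packet */

            total += n->bds;
            for (const struct node *f = n->frag_list; f; f = f->next) {
                    total = count_bds(f, total, level + 1);
                    if (total == UINT_MAX)
                            return UINT_MAX;
            }
            return total;
    }

    int main(void)
    {
            struct node b    = { 2, NULL, NULL };
            struct node a    = { 2, NULL, &b };
            struct node root = { 1, &a,   NULL };

            printf("%u\n", count_bds(&root, 0, 0)); /* 1 + 2 + 2 = 5 */
            return 0;
    }
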
41541 @@ -1361,6 +1361,43 @@ void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
41542                 size[i] = skb_frag_size(&shinfo->frags[i]);
41545 +static int hns3_skb_linearize(struct hns3_enet_ring *ring,
41546 +                             struct sk_buff *skb,
41547 +                             u8 max_non_tso_bd_num,
41548 +                             unsigned int bd_num)
41550 +       /* 'bd_num == UINT_MAX' means the skb's fraglist has a
41551 +        * recursion level of over HNS3_MAX_RECURSION_LEVEL.
41552 +        */
41553 +       if (bd_num == UINT_MAX) {
41554 +               u64_stats_update_begin(&ring->syncp);
41555 +               ring->stats.over_max_recursion++;
41556 +               u64_stats_update_end(&ring->syncp);
41557 +               return -ENOMEM;
41558 +       }
41560 +       /* The skb->len has exceeded the hw limitation, linearization
41561 +        * will not help.
41562 +        */
41563 +       if (skb->len > HNS3_MAX_TSO_SIZE ||
41564 +           (!skb_is_gso(skb) && skb->len >
41565 +            HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))) {
41566 +               u64_stats_update_begin(&ring->syncp);
41567 +               ring->stats.hw_limitation++;
41568 +               u64_stats_update_end(&ring->syncp);
41569 +               return -ENOMEM;
41570 +       }
41572 +       if (__skb_linearize(skb)) {
41573 +               u64_stats_update_begin(&ring->syncp);
41574 +               ring->stats.sw_err_cnt++;
41575 +               u64_stats_update_end(&ring->syncp);
41576 +               return -ENOMEM;
41577 +       }
41579 +       return 0;
41582  static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
41583                                   struct net_device *netdev,
41584                                   struct sk_buff *skb)
41585 @@ -1370,7 +1407,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
41586         unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
41587         unsigned int bd_num;
41589 -       bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num);
41590 +       bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num, 0, 0);
41591         if (unlikely(bd_num > max_non_tso_bd_num)) {
41592                 if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
41593                     !hns3_skb_need_linearized(skb, bd_size, bd_num,
41594 @@ -1379,16 +1416,11 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
41595                         goto out;
41596                 }
41598 -               if (__skb_linearize(skb))
41599 +               if (hns3_skb_linearize(ring, skb, max_non_tso_bd_num,
41600 +                                      bd_num))
41601                         return -ENOMEM;
41603                 bd_num = hns3_tx_bd_count(skb->len);
41604 -               if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
41605 -                   (!skb_is_gso(skb) &&
41606 -                    bd_num > max_non_tso_bd_num)) {
41607 -                       trace_hns3_over_max_bd(skb);
41608 -                       return -ENOMEM;
41609 -               }
41611                 u64_stats_update_begin(&ring->syncp);
41612                 ring->stats.tx_copy++;
41613 @@ -1412,6 +1444,10 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
41614                 return bd_num;
41615         }
41617 +       u64_stats_update_begin(&ring->syncp);
41618 +       ring->stats.tx_busy++;
41619 +       u64_stats_update_end(&ring->syncp);
41621         return -EBUSY;
41624 @@ -1459,6 +1495,7 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
41625                                  struct sk_buff *skb, enum hns_desc_type type)
41627         unsigned int size = skb_headlen(skb);
41628 +       struct sk_buff *frag_skb;
41629         int i, ret, bd_num = 0;
41631         if (size) {
41632 @@ -1483,6 +1520,15 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
41633                 bd_num += ret;
41634         }
41636 +       skb_walk_frags(skb, frag_skb) {
41637 +               ret = hns3_fill_skb_to_desc(ring, frag_skb,
41638 +                                           DESC_TYPE_FRAGLIST_SKB);
41639 +               if (unlikely(ret < 0))
41640 +                       return ret;
41642 +               bd_num += ret;
41643 +       }
41645         return bd_num;
41648 @@ -1513,8 +1559,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
41649         struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
41650         struct netdev_queue *dev_queue;
41651         int pre_ntu, next_to_use_head;
41652 -       struct sk_buff *frag_skb;
41653 -       int bd_num = 0;
41654         bool doorbell;
41655         int ret;
41657 @@ -1530,15 +1574,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
41658         ret = hns3_nic_maybe_stop_tx(ring, netdev, skb);
41659         if (unlikely(ret <= 0)) {
41660                 if (ret == -EBUSY) {
41661 -                       u64_stats_update_begin(&ring->syncp);
41662 -                       ring->stats.tx_busy++;
41663 -                       u64_stats_update_end(&ring->syncp);
41664                         hns3_tx_doorbell(ring, 0, true);
41665                         return NETDEV_TX_BUSY;
41666 -               } else if (ret == -ENOMEM) {
41667 -                       u64_stats_update_begin(&ring->syncp);
41668 -                       ring->stats.sw_err_cnt++;
41669 -                       u64_stats_update_end(&ring->syncp);
41670                 }
41672                 hns3_rl_err(netdev, "xmit error: %d!\n", ret);
41673 @@ -1551,21 +1588,14 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
41674         if (unlikely(ret < 0))
41675                 goto fill_err;
41677 +       /* 'ret < 0' means filling error, 'ret == 0' means skb->len is
41678 +        * zero, which is unlikely, and 'ret > 0' is the number of tx descs
41679 +        * that need to be notified to the hw.
41680 +        */
41681         ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
41682 -       if (unlikely(ret < 0))
41683 +       if (unlikely(ret <= 0))
41684                 goto fill_err;
41686 -       bd_num += ret;
41688 -       skb_walk_frags(skb, frag_skb) {
41689 -               ret = hns3_fill_skb_to_desc(ring, frag_skb,
41690 -                                           DESC_TYPE_FRAGLIST_SKB);
41691 -               if (unlikely(ret < 0))
41692 -                       goto fill_err;
41694 -               bd_num += ret;
41695 -       }
41697         pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
41698                                         (ring->desc_num - 1);
41699         ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
41700 @@ -1576,7 +1606,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
41701         dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
41702         doorbell = __netdev_tx_sent_queue(dev_queue, skb->len,
41703                                           netdev_xmit_more());
41704 -       hns3_tx_doorbell(ring, bd_num, doorbell);
41705 +       hns3_tx_doorbell(ring, ret, doorbell);
41707         return NETDEV_TX_OK;
41709 @@ -1748,11 +1778,15 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
41710                         tx_drop += ring->stats.tx_l4_proto_err;
41711                         tx_drop += ring->stats.tx_l2l3l4_err;
41712                         tx_drop += ring->stats.tx_tso_err;
41713 +                       tx_drop += ring->stats.over_max_recursion;
41714 +                       tx_drop += ring->stats.hw_limitation;
41715                         tx_errors += ring->stats.sw_err_cnt;
41716                         tx_errors += ring->stats.tx_vlan_err;
41717                         tx_errors += ring->stats.tx_l4_proto_err;
41718                         tx_errors += ring->stats.tx_l2l3l4_err;
41719                         tx_errors += ring->stats.tx_tso_err;
41720 +                       tx_errors += ring->stats.over_max_recursion;
41721 +                       tx_errors += ring->stats.hw_limitation;
41722                 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
41724                 /* fetch the rx stats */
41725 @@ -3704,7 +3738,6 @@ static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
41727  static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
41729 -       struct hnae3_ring_chain_node vector_ring_chain;
41730         struct hnae3_handle *h = priv->ae_handle;
41731         struct hns3_enet_tqp_vector *tqp_vector;
41732         int ret;
41733 @@ -3736,6 +3769,8 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
41734         }
41736         for (i = 0; i < priv->vector_num; i++) {
41737 +               struct hnae3_ring_chain_node vector_ring_chain;
41739                 tqp_vector = &priv->tqp_vector[i];
41741                 tqp_vector->rx_group.total_bytes = 0;
41742 @@ -4554,6 +4589,11 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
41743         struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
41744         int ret = 0;
41746 +       if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
41747 +               netdev_err(kinfo->netdev, "device is not initialized yet\n");
41748 +               return -EFAULT;
41749 +       }
41751         clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
41753         if (netif_running(kinfo->netdev)) {
41754 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
41755 index d069b04ee587..e44224e23315 100644
41756 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
41757 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
41758 @@ -376,6 +376,8 @@ struct ring_stats {
41759                         u64 tx_l4_proto_err;
41760                         u64 tx_l2l3l4_err;
41761                         u64 tx_tso_err;
41762 +                       u64 over_max_recursion;
41763 +                       u64 hw_limitation;
41764                 };
41765                 struct {
41766                         u64 rx_pkts;
41767 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
41768 index adcec4ea7cb9..d20f2e246017 100644
41769 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
41770 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
41771 @@ -44,6 +44,8 @@ static const struct hns3_stats hns3_txq_stats[] = {
41772         HNS3_TQP_STAT("l4_proto_err", tx_l4_proto_err),
41773         HNS3_TQP_STAT("l2l3l4_err", tx_l2l3l4_err),
41774         HNS3_TQP_STAT("tso_err", tx_tso_err),
41775 +       HNS3_TQP_STAT("over_max_recursion", over_max_recursion),
41776 +       HNS3_TQP_STAT("hw_limitation", hw_limitation),
41777  };
41779  #define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
41780 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
41781 index 0ca7f1b984bf..78d3eb142df8 100644
41782 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
41783 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
41784 @@ -753,8 +753,9 @@ static int hclge_config_igu_egu_hw_err_int(struct hclge_dev *hdev, bool en)
41786         /* configure IGU,EGU error interrupts */
41787         hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false);
41788 +       desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_TYPE);
41789         if (en)
41790 -               desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
41791 +               desc.data[0] |= cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
41793         desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK);
41795 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
41796 index 608fe26fc3fe..d647f3c84134 100644
41797 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
41798 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
41799 @@ -32,7 +32,8 @@
41800  #define HCLGE_TQP_ECC_ERR_INT_EN_MASK  0x0FFF
41801  #define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK    0x0F000000
41802  #define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN 0x0F000000
41803 -#define HCLGE_IGU_ERR_INT_EN   0x0000066F
41804 +#define HCLGE_IGU_ERR_INT_EN   0x0000000F
41805 +#define HCLGE_IGU_ERR_INT_TYPE 0x00000660
41806  #define HCLGE_IGU_ERR_INT_EN_MASK      0x000F
41807  #define HCLGE_IGU_TNL_ERR_INT_EN    0x0002AABF
41808  #define HCLGE_IGU_TNL_ERR_INT_EN_MASK  0x003F
41809 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
41810 index b0dbe6dcaa7b..7a560d0e19b9 100644
41811 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
41812 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
41813 @@ -11379,7 +11379,6 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
41814  #define REG_LEN_PER_LINE       (REG_NUM_PER_LINE * sizeof(u32))
41815  #define REG_SEPARATOR_LINE     1
41816  #define REG_NUM_REMAIN_MASK    3
41817 -#define BD_LIST_MAX_NUM                30
41819  int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
41821 @@ -11473,15 +11472,19 @@ static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
41823         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
41824         int data_len_per_desc, bd_num, i;
41825 -       int bd_num_list[BD_LIST_MAX_NUM];
41826 +       int *bd_num_list;
41827         u32 data_len;
41828         int ret;
41830 +       bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
41831 +       if (!bd_num_list)
41832 +               return -ENOMEM;
41834         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
41835         if (ret) {
41836                 dev_err(&hdev->pdev->dev,
41837                         "Get dfx reg bd num fail, status is %d.\n", ret);
41838 -               return ret;
41839 +               goto out;
41840         }
41842         data_len_per_desc = sizeof_field(struct hclge_desc, data);
41843 @@ -11492,6 +11495,8 @@ static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
41844                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
41845         }
41847 +out:
41848 +       kfree(bd_num_list);
41849         return ret;
41852 @@ -11499,16 +11504,20 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
41854         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
41855         int bd_num, bd_num_max, buf_len, i;
41856 -       int bd_num_list[BD_LIST_MAX_NUM];
41857         struct hclge_desc *desc_src;
41858 +       int *bd_num_list;
41859         u32 *reg = data;
41860         int ret;
41862 +       bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
41863 +       if (!bd_num_list)
41864 +               return -ENOMEM;
41866         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
41867         if (ret) {
41868                 dev_err(&hdev->pdev->dev,
41869                         "Get dfx reg bd num fail, status is %d.\n", ret);
41870 -               return ret;
41871 +               goto out;
41872         }
41874         bd_num_max = bd_num_list[0];
41875 @@ -11517,8 +11526,10 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
41877         buf_len = sizeof(*desc_src) * bd_num_max;
41878         desc_src = kzalloc(buf_len, GFP_KERNEL);
41879 -       if (!desc_src)
41880 -               return -ENOMEM;
41881 +       if (!desc_src) {
41882 +               ret = -ENOMEM;
41883 +               goto out;
41884 +       }
41886         for (i = 0; i < dfx_reg_type_num; i++) {
41887                 bd_num = bd_num_list[i];
41888 @@ -11534,6 +11545,8 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
41889         }
41891         kfree(desc_src);
41892 +out:
41893 +       kfree(bd_num_list);
41894         return ret;
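
Both hclge_get_dfx_reg_len() and hclge_get_dfx_reg() previously sized bd_num_list with the hard-coded BD_LIST_MAX_NUM, which overflows silently if the dfx table ever grows past 30 entries; sizing the allocation from dfx_reg_type_num removes that risk at the cost of an allocation-failure path and a shared 'out:' cleanup. The same refactor in a userspace sketch (made-up names, calloc standing in for kcalloc):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int fill(int *list, size_t n)
    {
            for (size_t i = 0; i < n; i++)  /* always bounded by the allocation */
                    list[i] = (int)i + 1;
            return 0;
    }

    static int get_reg_len(size_t type_num, int *len)
    {
            int *bd_num_list;
            int ret;

            /* was: int bd_num_list[30]; -- breaks silently once type_num > 30 */
            bd_num_list = calloc(type_num, sizeof(*bd_num_list));
            if (!bd_num_list)
                    return -ENOMEM;

            ret = fill(bd_num_list, type_num);
            if (ret)
                    goto out;

            *len = 0;
            for (size_t i = 0; i < type_num; i++)
                    *len += bd_num_list[i];
    out:
            free(bd_num_list);              /* single exit frees on both paths */
            return ret;
    }

    int main(void)
    {
            int len = 0;

            printf("%d, len=%d\n", get_reg_len(4, &len), len);      /* 0, len=10 */
            return 0;
    }
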
41897 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
41898 index 51a36e74f088..c3bb16b1f060 100644
41899 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
41900 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
41901 @@ -535,7 +535,7 @@ static void hclge_get_link_mode(struct hclge_vport *vport,
41902         unsigned long advertising;
41903         unsigned long supported;
41904         unsigned long send_data;
41905 -       u8 msg_data[10];
41906 +       u8 msg_data[10] = {};
41907         u8 dest_vfid;
41909         advertising = hdev->hw.mac.advertising[0];
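
The '= {}' initializer zero-fills msg_data, so whatever tail bytes hclge_get_link_mode() does not explicitly write go out to the VF as zeroes rather than as stale kernel stack contents. The idiom in standard C (the kernel spells the initializer '{}'; '{ 0 }' is the portable equivalent):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Every element not explicitly written is guaranteed to be zero;
             * without the initializer the tail bytes are indeterminate. */
            uint8_t msg_data[10] = { 0 };

            msg_data[0] = 0x2a;     /* only the head is filled in */

            for (int i = 0; i < 10; i++)
                    printf("%02x ", msg_data[i]);
            printf("\n");           /* 2a 00 00 00 00 00 00 00 00 00 */
            return 0;
    }
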
41910 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
41911 index e89820702540..c194bba187d6 100644
41912 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
41913 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
41914 @@ -255,6 +255,8 @@ void hclge_mac_start_phy(struct hclge_dev *hdev)
41915         if (!phydev)
41916                 return;
41918 +       phy_loopback(phydev, false);
41920         phy_start(phydev);
41923 diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
41924 index 15f93b355099..5069f690cf0b 100644
41925 --- a/drivers/net/ethernet/intel/i40e/i40e.h
41926 +++ b/drivers/net/ethernet/intel/i40e/i40e.h
41927 @@ -1142,7 +1142,6 @@ static inline bool i40e_is_sw_dcb(struct i40e_pf *pf)
41928         return !!(pf->flags & I40E_FLAG_DISABLE_FW_LLDP);
41931 -void i40e_set_lldp_forwarding(struct i40e_pf *pf, bool enable);
41932  #ifdef CONFIG_I40E_DCB
41933  void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
41934                            struct i40e_dcbx_config *old_cfg,
41935 diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
41936 index ce626eace692..140b677f114d 100644
41937 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
41938 +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
41939 @@ -1566,8 +1566,10 @@ enum i40e_aq_phy_type {
41940         I40E_PHY_TYPE_25GBASE_LR                = 0x22,
41941         I40E_PHY_TYPE_25GBASE_AOC               = 0x23,
41942         I40E_PHY_TYPE_25GBASE_ACC               = 0x24,
41943 -       I40E_PHY_TYPE_2_5GBASE_T                = 0x30,
41944 -       I40E_PHY_TYPE_5GBASE_T                  = 0x31,
41945 +       I40E_PHY_TYPE_2_5GBASE_T                = 0x26,
41946 +       I40E_PHY_TYPE_5GBASE_T                  = 0x27,
41947 +       I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS    = 0x30,
41948 +       I40E_PHY_TYPE_5GBASE_T_LINK_STATUS      = 0x31,
41949         I40E_PHY_TYPE_MAX,
41950         I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP   = 0xFD,
41951         I40E_PHY_TYPE_EMPTY                     = 0xFE,
41952 diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
41953 index a2dba32383f6..32f3facbed1a 100644
41954 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c
41955 +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
41956 @@ -375,6 +375,7 @@ void i40e_client_subtask(struct i40e_pf *pf)
41957                                 clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
41958                                           &cdev->state);
41959                                 i40e_client_del_instance(pf);
41960 +                               return;
41961                         }
41962                 }
41963         }
41964 diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
41965 index ec19e18305ec..ce35e064cf60 100644
41966 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c
41967 +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
41968 @@ -1154,8 +1154,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
41969                 break;
41970         case I40E_PHY_TYPE_100BASE_TX:
41971         case I40E_PHY_TYPE_1000BASE_T:
41972 -       case I40E_PHY_TYPE_2_5GBASE_T:
41973 -       case I40E_PHY_TYPE_5GBASE_T:
41974 +       case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
41975 +       case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
41976         case I40E_PHY_TYPE_10GBASE_T:
41977                 media = I40E_MEDIA_TYPE_BASET;
41978                 break;
41979 diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
41980 index 0e92668012e3..93dd58fda272 100644
41981 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
41982 +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
41983 @@ -841,8 +841,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
41984                                                              10000baseT_Full);
41985                 break;
41986         case I40E_PHY_TYPE_10GBASE_T:
41987 -       case I40E_PHY_TYPE_5GBASE_T:
41988 -       case I40E_PHY_TYPE_2_5GBASE_T:
41989 +       case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
41990 +       case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
41991         case I40E_PHY_TYPE_1000BASE_T:
41992         case I40E_PHY_TYPE_100BASE_TX:
41993                 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
41994 @@ -1409,7 +1409,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
41996                 memset(&config, 0, sizeof(config));
41997                 config.phy_type = abilities.phy_type;
41998 -               config.abilities = abilities.abilities;
41999 +               config.abilities = abilities.abilities |
42000 +                                  I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
42001                 config.phy_type_ext = abilities.phy_type_ext;
42002                 config.link_speed = abilities.link_speed;
42003                 config.eee_capability = abilities.eee_capability;
42004 @@ -5287,7 +5288,6 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
42005                         i40e_aq_cfg_lldp_mib_change_event(&pf->hw, false, NULL);
42006                         i40e_aq_stop_lldp(&pf->hw, true, false, NULL);
42007                 } else {
42008 -                       i40e_set_lldp_forwarding(pf, false);
42009                         status = i40e_aq_start_lldp(&pf->hw, false, NULL);
42010                         if (status) {
42011                                 adq_err = pf->hw.aq.asq_last_status;
42012 diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
42013 index 527023ee4c07..ac4b44fc19f1 100644
42014 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c
42015 +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
42016 @@ -6878,40 +6878,6 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
42018  #endif /* CONFIG_I40E_DCB */
42020 -/**
42021 - * i40e_set_lldp_forwarding - set forwarding of lldp frames
42022 - * @pf: PF being configured
42023 - * @enable: if forwarding to OS shall be enabled
42024 - *
42025 - * Toggle forwarding of lldp frames behavior,
42026 - * When passing DCB control from firmware to software
42027 - * lldp frames must be forwarded to the software based
42028 - * lldp agent.
42029 - */
42030 -void i40e_set_lldp_forwarding(struct i40e_pf *pf, bool enable)
42032 -       if (pf->lan_vsi == I40E_NO_VSI)
42033 -               return;
42035 -       if (!pf->vsi[pf->lan_vsi])
42036 -               return;
42038 -       /* No need to check the outcome, commands may fail
42039 -        * if desired value is already set
42040 -        */
42041 -       i40e_aq_add_rem_control_packet_filter(&pf->hw, NULL, ETH_P_LLDP,
42042 -                                             I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX |
42043 -                                             I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC,
42044 -                                             pf->vsi[pf->lan_vsi]->seid, 0,
42045 -                                             enable, NULL, NULL);
42047 -       i40e_aq_add_rem_control_packet_filter(&pf->hw, NULL, ETH_P_LLDP,
42048 -                                             I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX |
42049 -                                             I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC,
42050 -                                             pf->vsi[pf->lan_vsi]->seid, 0,
42051 -                                             enable, NULL, NULL);
42054  /**
42055   * i40e_print_link_message - print link up or down
42056   * @vsi: the VSI for which link needs a message
42057 @@ -10735,10 +10701,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
42058          */
42059         i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
42060                                                        pf->main_vsi_seid);
42061 -#ifdef CONFIG_I40E_DCB
42062 -       if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)
42063 -               i40e_set_lldp_forwarding(pf, true);
42064 -#endif /* CONFIG_I40E_DCB */
42066         /* restart the VSIs that were rebuilt and running before the reset */
42067         i40e_pf_unquiesce_all_vsi(pf);
42068 @@ -15753,10 +15715,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
42069          */
42070         i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
42071                                                        pf->main_vsi_seid);
42072 -#ifdef CONFIG_I40E_DCB
42073 -       if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)
42074 -               i40e_set_lldp_forwarding(pf, true);
42075 -#endif /* CONFIG_I40E_DCB */
42077         if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
42078                 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
42079 diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
42080 index 06b4271219b1..70b515049540 100644
42081 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
42082 +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
42083 @@ -1961,10 +1961,6 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
42084                                  union i40e_rx_desc *rx_desc)
42087 -       /* XDP packets use error pointer so abort at this point */
42088 -       if (IS_ERR(skb))
42089 -               return true;
42091         /* ERR_MASK will only have valid bits if EOP set, and
42092          * what we are doing here is actually checking
42093          * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
42094 @@ -2534,7 +2530,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
42095                 }
42097                 /* exit if we failed to retrieve a buffer */
42098 -               if (!skb) {
42099 +               if (!xdp_res && !skb) {
42100                         rx_ring->rx_stats.alloc_buff_failed++;
42101                         rx_buffer->pagecnt_bias++;
42102                         break;
42103 @@ -2547,7 +2543,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
42104                 if (i40e_is_non_eop(rx_ring, rx_desc))
42105                         continue;
42107 -               if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
42108 +               if (xdp_res || i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
42109                         skb = NULL;
42110                         continue;
42111                 }
42112 diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
42113 index 5c10faaca790..c81109a63e90 100644
42114 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h
42115 +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
42116 @@ -239,11 +239,8 @@ struct i40e_phy_info {
42117  #define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \
42118                                              I40E_PHY_TYPE_OFFSET)
42119  /* Offset for 2.5G/5G PHY Types value to bit number conversion */
42120 -#define I40E_PHY_TYPE_OFFSET2 (-10)
42121 -#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T + \
42122 -                                            I40E_PHY_TYPE_OFFSET2)
42123 -#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T + \
42124 -                                            I40E_PHY_TYPE_OFFSET2)
42125 +#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T)
42126 +#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T)
42127  #define I40E_HW_CAP_MAX_GPIO                   30
42128  /* Capabilities of a PF or a VF or the whole device */
42129  struct i40e_hw_capabilities {
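
With the 2.5G/5G PHY types moved back to 0x26/0x27, the capability bit is once again just BIT_ULL(phy_type) and the negative OFFSET2 fudge disappears. How that enum-to-bitmask mapping works, using the values from the hunks above (the macro is written out so the sketch builds standalone):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT_ULL(n) (1ULL << (n))

    enum {
            PHY_TYPE_2_5GBASE_T = 0x26,     /* bit 38 of the capability word */
            PHY_TYPE_5GBASE_T   = 0x27,     /* bit 39 */
    };

    int main(void)
    {
            uint64_t caps = BIT_ULL(PHY_TYPE_2_5GBASE_T);   /* device: 2.5G only */

            if (caps & BIT_ULL(PHY_TYPE_2_5GBASE_T))
                    printf("2.5GBASE-T supported\n");
            if (!(caps & BIT_ULL(PHY_TYPE_5GBASE_T)))
                    printf("5GBASE-T not supported\n");
            return 0;
    }
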
42130 diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
42131 index dc5b3c06d1e0..ebd08543791b 100644
42132 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c
42133 +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
42134 @@ -3899,8 +3899,6 @@ static void iavf_remove(struct pci_dev *pdev)
42136         iounmap(hw->hw_addr);
42137         pci_release_regions(pdev);
42138 -       iavf_free_all_tx_resources(adapter);
42139 -       iavf_free_all_rx_resources(adapter);
42140         iavf_free_queues(adapter);
42141         kfree(adapter->vf_res);
42142         spin_lock_bh(&adapter->mac_vlan_list_lock);
42143 diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
42144 index d13c7fc8fb0a..195d122c9cb2 100644
42145 --- a/drivers/net/ethernet/intel/ice/ice_lib.c
42146 +++ b/drivers/net/ethernet/intel/ice/ice_lib.c
42147 @@ -2818,38 +2818,46 @@ int ice_vsi_release(struct ice_vsi *vsi)
42150  /**
42151 - * ice_vsi_rebuild_update_coalesce - set coalesce for a q_vector
42152 + * ice_vsi_rebuild_update_coalesce_intrl - set interrupt rate limit for a q_vector
42153   * @q_vector: pointer to q_vector which is being updated
42154 - * @coalesce: pointer to array of struct with stored coalesce
42155 + * @stored_intrl_setting: original INTRL setting
42156   *
42157   * Set coalesce param in q_vector and update these parameters in HW.
42158   */
42159  static void
42160 -ice_vsi_rebuild_update_coalesce(struct ice_q_vector *q_vector,
42161 -                               struct ice_coalesce_stored *coalesce)
42162 +ice_vsi_rebuild_update_coalesce_intrl(struct ice_q_vector *q_vector,
42163 +                                     u16 stored_intrl_setting)
42165 -       struct ice_ring_container *rx_rc = &q_vector->rx;
42166 -       struct ice_ring_container *tx_rc = &q_vector->tx;
42167         struct ice_hw *hw = &q_vector->vsi->back->hw;
42169 -       tx_rc->itr_setting = coalesce->itr_tx;
42170 -       rx_rc->itr_setting = coalesce->itr_rx;
42172 -       /* dynamic ITR values will be updated during Tx/Rx */
42173 -       if (!ITR_IS_DYNAMIC(tx_rc->itr_setting))
42174 -               wr32(hw, GLINT_ITR(tx_rc->itr_idx, q_vector->reg_idx),
42175 -                    ITR_REG_ALIGN(tx_rc->itr_setting) >>
42176 -                    ICE_ITR_GRAN_S);
42177 -       if (!ITR_IS_DYNAMIC(rx_rc->itr_setting))
42178 -               wr32(hw, GLINT_ITR(rx_rc->itr_idx, q_vector->reg_idx),
42179 -                    ITR_REG_ALIGN(rx_rc->itr_setting) >>
42180 -                    ICE_ITR_GRAN_S);
42182 -       q_vector->intrl = coalesce->intrl;
42183 +       q_vector->intrl = stored_intrl_setting;
42184         wr32(hw, GLINT_RATE(q_vector->reg_idx),
42185              ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
42188 +/**
42189 + * ice_vsi_rebuild_update_coalesce_itr - set coalesce for a q_vector
42190 + * @q_vector: pointer to q_vector which is being updated
42191 + * @rc: pointer to ring container
42192 + * @stored_itr_setting: original ITR setting
42193 + *
42194 + * Set coalesce param in q_vector and update these parameters in HW.
42195 + */
42196 +static void
42197 +ice_vsi_rebuild_update_coalesce_itr(struct ice_q_vector *q_vector,
42198 +                                   struct ice_ring_container *rc,
42199 +                                   u16 stored_itr_setting)
42201 +       struct ice_hw *hw = &q_vector->vsi->back->hw;
42203 +       rc->itr_setting = stored_itr_setting;
42205 +       /* dynamic ITR values will be updated during Tx/Rx */
42206 +       if (!ITR_IS_DYNAMIC(rc->itr_setting))
42207 +               wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
42208 +                    ITR_REG_ALIGN(rc->itr_setting) >> ICE_ITR_GRAN_S);
42211  /**
42212   * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
42213   * @vsi: VSI connected with q_vectors
42214 @@ -2869,6 +2877,11 @@ ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
42215                 coalesce[i].itr_tx = q_vector->tx.itr_setting;
42216                 coalesce[i].itr_rx = q_vector->rx.itr_setting;
42217                 coalesce[i].intrl = q_vector->intrl;
42219 +               if (i < vsi->num_txq)
42220 +                       coalesce[i].tx_valid = true;
42221 +               if (i < vsi->num_rxq)
42222 +                       coalesce[i].rx_valid = true;
42223         }
42225         return vsi->num_q_vectors;
42226 @@ -2893,17 +2906,59 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
42227         if ((size && !coalesce) || !vsi)
42228                 return;
42230 -       for (i = 0; i < size && i < vsi->num_q_vectors; i++)
42231 -               ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
42232 -                                               &coalesce[i]);
42234 -       /* number of q_vectors increased, so assume coalesce settings were
42235 -        * changed globally (i.e. ethtool -C eth0 instead of per-queue) and use
42236 -        * the previous settings from q_vector 0 for all of the new q_vectors
42237 +       /* There are a couple of cases that have to be handled here:
42238 +        *   1. The case where the number of queue vectors stays the same, but
42239 +        *      the number of Tx or Rx rings changes (the first for loop)
42240 +        *   2. The case where the number of queue vectors increased (the
42241 +        *      second for loop)
42242          */
42243 -       for (; i < vsi->num_q_vectors; i++)
42244 -               ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
42245 -                                               &coalesce[0]);
42246 +       for (i = 0; i < size && i < vsi->num_q_vectors; i++) {
42247 +               /* There are 2 cases to handle here and they are the same for
42248 +                * both Tx and Rx:
42249 +                *   if the entry was valid previously (coalesce[i].[tr]x_valid)
42250 +                *   and the loop variable is less than the number of rings
42251 +                *   allocated, then write the previous values
42252 +                *
42253 +                *   if the entry was not valid previously, but the number of
42254 +                *   rings is less than are allocated (this means the number of
42255 +                *   rings increased from previously), then write out the
42256 +                *   values in the first element
42257 +                */
42258 +               if (i < vsi->alloc_rxq && coalesce[i].rx_valid)
42259 +                       ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
42260 +                                                           &vsi->q_vectors[i]->rx,
42261 +                                                           coalesce[i].itr_rx);
42262 +               else if (i < vsi->alloc_rxq)
42263 +                       ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
42264 +                                                           &vsi->q_vectors[i]->rx,
42265 +                                                           coalesce[0].itr_rx);
42267 +               if (i < vsi->alloc_txq && coalesce[i].tx_valid)
42268 +                       ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
42269 +                                                           &vsi->q_vectors[i]->tx,
42270 +                                                           coalesce[i].itr_tx);
42271 +               else if (i < vsi->alloc_txq)
42272 +                       ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
42273 +                                                           &vsi->q_vectors[i]->tx,
42274 +                                                           coalesce[0].itr_tx);
42276 +               ice_vsi_rebuild_update_coalesce_intrl(vsi->q_vectors[i],
42277 +                                                     coalesce[i].intrl);
42278 +       }
42280 +       /* the number of queue vectors increased so write whatever is in
42281 +        * the first element
42282 +        */
42283 +       for (; i < vsi->num_q_vectors; i++) {
42284 +               ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
42285 +                                                   &vsi->q_vectors[i]->tx,
42286 +                                                   coalesce[0].itr_tx);
42287 +               ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
42288 +                                                   &vsi->q_vectors[i]->rx,
42289 +                                                   coalesce[0].itr_rx);
42290 +               ice_vsi_rebuild_update_coalesce_intrl(vsi->q_vectors[i],
42291 +                                                     coalesce[0].intrl);
42292 +       }
42295  /**
42296 @@ -2932,9 +2987,11 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
42298         coalesce = kcalloc(vsi->num_q_vectors,
42299                            sizeof(struct ice_coalesce_stored), GFP_KERNEL);
42300 -       if (coalesce)
42301 -               prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi,
42302 -                                                                 coalesce);
42303 +       if (!coalesce)
42304 +               return -ENOMEM;
42306 +       prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
42308         ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
42309         ice_vsi_free_q_vectors(vsi);
42311 diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
42312 index 5dab77504fa5..672a7ff0ee36 100644
42313 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h
42314 +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
42315 @@ -351,6 +351,8 @@ struct ice_coalesce_stored {
42316         u16 itr_tx;
42317         u16 itr_rx;
42318         u8 intrl;
42319 +       u8 tx_valid;
42320 +       u8 rx_valid;
42321  };
42323  /* iterator for handling rings in ring container */
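
The rebuild logic above reduces to one per-ring decision: reuse the saved per-queue setting when one was recorded and the ring still exists, fall back to queue 0's setting for rings that are new, and skip rings that went away. Condensed into an illustrative helper (not the driver's code; the arrays are invented):

    #include <stdio.h>

    /* Pick the ITR value to program for queue i after a rebuild. */
    static int restore_itr(int i, int alloc_q, int saved_n,
                           const int *saved, const char *valid)
    {
            if (i >= alloc_q)
                    return -1;              /* the ring no longer exists */
            if (i < saved_n && valid[i])
                    return saved[i];        /* per-queue setting survives */
            return saved[0];                /* new ring inherits queue 0 */
    }

    int main(void)
    {
            int  itr[]   = { 100, 200, 300 };
            char valid[] = { 1, 1, 0 };     /* queue 2 had no ring before */

            for (int i = 0; i < 4; i++)
                    printf("q%d -> %d\n", i, restore_itr(i, 4, 3, itr, valid));
            return 0;
    }
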
42324 diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
42325 index 25dd903a3e92..d849b0f65de2 100644
42326 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
42327 +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
42328 @@ -431,7 +431,8 @@ static void prestera_port_handle_event(struct prestera_switch *sw,
42329                         netif_carrier_on(port->dev);
42330                         if (!delayed_work_pending(caching_dw))
42331                                 queue_delayed_work(prestera_wq, caching_dw, 0);
42332 -               } else {
42333 +               } else if (netif_running(port->dev) &&
42334 +                          netif_carrier_ok(port->dev)) {
42335                         netif_carrier_off(port->dev);
42336                         if (delayed_work_pending(caching_dw))
42337                                 cancel_delayed_work(caching_dw);
42338 diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
42339 index 01d3ee4b5829..bcd5e7ae8482 100644
42340 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
42341 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
42342 @@ -1319,7 +1319,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
42343                 skb->protocol = eth_type_trans(skb, netdev);
42345                 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
42346 -                   RX_DMA_VID(trxd.rxd3))
42347 +                   (trxd.rxd2 & RX_DMA_VTAG))
42348                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
42349                                                RX_DMA_VID(trxd.rxd3));
42350                 skb_record_rx_queue(skb, 0);
42351 diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
42352 index fd3cec8f06ba..c47272100615 100644
42353 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
42354 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
42355 @@ -296,6 +296,7 @@
42356  #define RX_DMA_LSO             BIT(30)
42357  #define RX_DMA_PLEN0(_x)       (((_x) & 0x3fff) << 16)
42358  #define RX_DMA_GET_PLEN0(_x)   (((_x) >> 16) & 0x3fff)
42359 +#define RX_DMA_VTAG            BIT(15)
42361  /* QDMA descriptor rxd3 */
42362  #define RX_DMA_VID(_x)         ((_x) & 0xfff)
42363 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
42364 index bdbffe484fce..d2efe2455955 100644
42365 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
42366 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
42367 @@ -576,7 +576,7 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
42369         pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
42370         wqe = MLX5E_TX_FETCH_WQE(sq, pi);
42371 -       prefetchw(wqe->data);
42372 +       net_prefetchw(wqe->data);
42374         *session = (struct mlx5e_tx_mpwqe) {
42375                 .wqe = wqe,
42376 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
42377 index 22bee4990232..bb61f52d782d 100644
42378 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
42379 +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
42380 @@ -850,7 +850,7 @@ mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
42381                 return;
42382         }
42384 -       if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action &
42385 +       if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action ==
42386             MLX5_ACCEL_ESP_ACTION_DECRYPT)
42387                 ida_simple_remove(&fipsec->halloc, sa_ctx->sa_handle);
42389 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
42390 index 9143ec326ebf..f146c618a78e 100644
42391 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
42392 +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
42393 @@ -1532,6 +1532,7 @@ static void dr_ste_v1_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *val
42395         DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_gvmi, misc_mask, source_port);
42396         DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_qp, misc_mask, source_sqn);
42397 +       misc_mask->source_eswitch_owner_vhca_id = 0;
42400  static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
42401 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
42402 index 7846a21555ef..1f6bc0c7e91d 100644
42403 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
42404 +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
42405 @@ -535,6 +535,16 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
42406         u16 erif_index = 0;
42407         int err;
42409 +       /* Add the eRIF */
42410 +       if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
42411 +               erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
42412 +               err = mr->mr_ops->route_erif_add(mlxsw_sp,
42413 +                                                rve->mr_route->route_priv,
42414 +                                                erif_index);
42415 +               if (err)
42416 +                       return err;
42417 +       }
42419         /* Update the route action, as the new eVIF can be a tunnel or a pimreg
42420          * device which will require updating the action.
42421          */
42422 @@ -544,17 +554,7 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
42423                                                       rve->mr_route->route_priv,
42424                                                       route_action);
42425                 if (err)
42426 -                       return err;
42427 -       }
42429 -       /* Add the eRIF */
42430 -       if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
42431 -               erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
42432 -               err = mr->mr_ops->route_erif_add(mlxsw_sp,
42433 -                                                rve->mr_route->route_priv,
42434 -                                                erif_index);
42435 -               if (err)
42436 -                       goto err_route_erif_add;
42437 +                       goto err_route_action_update;
42438         }
42440         /* Update the minimum MTU */
42441 @@ -572,14 +572,14 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
42442         return 0;
42444  err_route_min_mtu_update:
42445 -       if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
42446 -               mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
42447 -                                          erif_index);
42448 -err_route_erif_add:
42449         if (route_action != rve->mr_route->route_action)
42450                 mr->mr_ops->route_action_update(mlxsw_sp,
42451                                                 rve->mr_route->route_priv,
42452                                                 rve->mr_route->route_action);
42453 +err_route_action_update:
42454 +       if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
42455 +               mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
42456 +                                          erif_index);
42457         return err;
42460 diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
42461 index 713ee3041d49..bea978df7713 100644
42462 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
42463 +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
42464 @@ -364,6 +364,7 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
42466         attrs.split = eth_port.is_split;
42467         attrs.splittable = !attrs.split;
42468 +       attrs.lanes = eth_port.port_lanes;
42469         attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
42470         attrs.phys.port_number = eth_port.label_port;
42471         attrs.phys.split_subport_number = eth_port.label_subport;
42472 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
42473 index d8a3ecaed3fc..d8f0863b3934 100644
42474 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
42475 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
42476 @@ -1048,7 +1048,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
42477         for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
42478                 skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
42479                 if (!skb)
42480 -                       break;
42481 +                       goto error;
42482                 qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
42483                 skb_put(skb, QLCNIC_ILB_PKT_SIZE);
42484                 adapter->ahw->diag_cnt = 0;
42485 @@ -1072,6 +1072,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
42486                         cnt++;
42487         }
42488         if (cnt != i) {
42489 +error:
42490                 dev_err(&adapter->pdev->dev,
42491                         "LB Test: failed, TX[%d], RX[%d]\n", i, cnt);
42492                 if (mode != QLCNIC_ILB_MODE)
42493 diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
42494 index 117188e3c7de..87b8c032195d 100644
42495 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
42496 +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
42497 @@ -1437,6 +1437,7 @@ netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
42499         struct emac_tpd tpd;
42500         u32 prod_idx;
42501 +       int len;
42503         memset(&tpd, 0, sizeof(tpd));
42505 @@ -1456,9 +1457,10 @@ netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
42506         if (skb_network_offset(skb) != ETH_HLEN)
42507                 TPD_TYP_SET(&tpd, 1);
42509 +       len = skb->len;
42510         emac_tx_fill_tpd(adpt, tx_q, skb, &tpd);
42512 -       netdev_sent_queue(adpt->netdev, skb->len);
42513 +       netdev_sent_queue(adpt->netdev, len);
42515         /* Make sure there are enough free descriptors to hold one
42516          * maximum-sized SKB.  We need one desc for each fragment,
42517 diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
42518 index eb0c03bdb12d..cad57d58d764 100644
42519 --- a/drivers/net/ethernet/renesas/ravb_main.c
42520 +++ b/drivers/net/ethernet/renesas/ravb_main.c
42521 @@ -911,31 +911,20 @@ static int ravb_poll(struct napi_struct *napi, int budget)
42522         int q = napi - priv->napi;
42523         int mask = BIT(q);
42524         int quota = budget;
42525 -       u32 ris0, tis;
42527 -       for (;;) {
42528 -               tis = ravb_read(ndev, TIS);
42529 -               ris0 = ravb_read(ndev, RIS0);
42530 -               if (!((ris0 & mask) || (tis & mask)))
42531 -                       break;
42532 +       /* Processing RX Descriptor Ring */
42533 +       /* Clear RX interrupt */
42534 +       ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
42535 +       if (ravb_rx(ndev, &quota, q))
42536 +               goto out;
42538 -               /* Processing RX Descriptor Ring */
42539 -               if (ris0 & mask) {
42540 -                       /* Clear RX interrupt */
42541 -                       ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
42542 -                       if (ravb_rx(ndev, &quota, q))
42543 -                               goto out;
42544 -               }
42545 -               /* Processing TX Descriptor Ring */
42546 -               if (tis & mask) {
42547 -                       spin_lock_irqsave(&priv->lock, flags);
42548 -                       /* Clear TX interrupt */
42549 -                       ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
42550 -                       ravb_tx_free(ndev, q, true);
42551 -                       netif_wake_subqueue(ndev, q);
42552 -                       spin_unlock_irqrestore(&priv->lock, flags);
42553 -               }
42554 -       }
42555 +       /* Processing TX Descriptor Ring */
42556 +       spin_lock_irqsave(&priv->lock, flags);
42557 +       /* Clear TX interrupt */
42558 +       ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
42559 +       ravb_tx_free(ndev, q, true);
42560 +       netif_wake_subqueue(ndev, q);
42561 +       spin_unlock_irqrestore(&priv->lock, flags);
42563         napi_complete(napi);
42565 diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
42566 index da6886dcac37..4fa72b573c17 100644
42567 --- a/drivers/net/ethernet/sfc/ef10.c
42568 +++ b/drivers/net/ethernet/sfc/ef10.c
42569 @@ -2928,8 +2928,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
42571         /* Get the transmit queue */
42572         tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
42573 -       tx_queue = efx_channel_get_tx_queue(channel,
42574 -                                           tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
42575 +       tx_queue = channel->tx_queue + (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
42577         if (!tx_queue->timestamping) {
42578                 /* Transmit completion */
42579 diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
42580 index 1bfeee283ea9..a3ca406a3561 100644
42581 --- a/drivers/net/ethernet/sfc/efx_channels.c
42582 +++ b/drivers/net/ethernet/sfc/efx_channels.c
42583 @@ -914,6 +914,8 @@ int efx_set_channels(struct efx_nic *efx)
42584                         }
42585                 }
42586         }
42587 +       if (xdp_queue_number)
42588 +               efx->xdp_tx_queue_count = xdp_queue_number;
42590         rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
42591         if (rc)
42592 diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
42593 index d75cf5ff5686..49df02ecee91 100644
42594 --- a/drivers/net/ethernet/sfc/farch.c
42595 +++ b/drivers/net/ethernet/sfc/farch.c
42596 @@ -835,14 +835,14 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
42597                 /* Transmit completion */
42598                 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
42599                 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
42600 -               tx_queue = efx_channel_get_tx_queue(
42601 -                       channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
42602 +               tx_queue = channel->tx_queue +
42603 +                               (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
42604                 efx_xmit_done(tx_queue, tx_ev_desc_ptr);
42605         } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
42606                 /* Rewrite the FIFO write pointer */
42607                 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
42608 -               tx_queue = efx_channel_get_tx_queue(
42609 -                       channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
42610 +               tx_queue = channel->tx_queue +
42611 +                               (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
42613                 netif_tx_lock(efx->net_dev);
42614                 efx_farch_notify_tx_desc(tx_queue);
42615 @@ -1081,16 +1081,16 @@ static void
42616  efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
42618         struct efx_tx_queue *tx_queue;
42619 +       struct efx_channel *channel;
42620         int qid;
42622         qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
42623         if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
42624 -               tx_queue = efx_get_tx_queue(efx, qid / EFX_MAX_TXQ_PER_CHANNEL,
42625 -                                           qid % EFX_MAX_TXQ_PER_CHANNEL);
42626 -               if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
42627 +               channel = efx_get_tx_channel(efx, qid / EFX_MAX_TXQ_PER_CHANNEL);
42628 +               tx_queue = channel->tx_queue + (qid % EFX_MAX_TXQ_PER_CHANNEL);
42629 +               if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0))
42630                         efx_farch_magic_event(tx_queue->channel,
42631                                               EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
42632 -               }
42633         }
42636 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
42637 index bf3250e0e59c..749585fe6fc9 100644
42638 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
42639 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
42640 @@ -352,6 +352,8 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
42641         plat_dat->bsp_priv = gmac;
42642         plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
42643         plat_dat->multicast_filter_bins = 0;
42644 +       plat_dat->tx_fifo_size = 8192;
42645 +       plat_dat->rx_fifo_size = 8192;
42647         err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
42648         if (err)
42649 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
42650 index 0e1ca2cba3c7..e18dee7fe687 100644
42651 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
42652 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
42653 @@ -30,7 +30,7 @@ struct sunxi_priv_data {
42654  static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
42656         struct sunxi_priv_data *gmac = priv;
42657 -       int ret;
42658 +       int ret = 0;
42660         if (gmac->regulator) {
42661                 ret = regulator_enable(gmac->regulator);
42662 @@ -51,11 +51,11 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
42663         } else {
42664                 clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
42665                 ret = clk_prepare(gmac->tx_clk);
42666 -               if (ret)
42667 -                       return ret;
42668 +               if (ret && gmac->regulator)
42669 +                       regulator_disable(gmac->regulator);
42670         }
42672 -       return 0;
42673 +       return ret;
42676  static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
42677 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
42678 index 29f765a246a0..aaf37598cbd3 100644
42679 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
42680 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
42681 @@ -638,6 +638,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
42682         value &= ~GMAC_PACKET_FILTER_PCF;
42683         value &= ~GMAC_PACKET_FILTER_PM;
42684         value &= ~GMAC_PACKET_FILTER_PR;
42685 +       value &= ~GMAC_PACKET_FILTER_RA;
42686         if (dev->flags & IFF_PROMISC) {
42687                 /* VLAN Tag Filter Fail Packets Queuing */
42688                 if (hw->vlan_fail_q_en) {
42689 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
42690 index 62aa0e95beb7..a7249e4071f1 100644
42691 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
42692 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
42693 @@ -222,7 +222,7 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
42694                                        u32 channel, int fifosz, u8 qmode)
42696         unsigned int rqs = fifosz / 256 - 1;
42697 -       u32 mtl_rx_op, mtl_rx_int;
42698 +       u32 mtl_rx_op;
42700         mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
42702 @@ -283,11 +283,6 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
42703         }
42705         writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
42707 -       /* Enable MTL RX overflow */
42708 -       mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
42709 -       writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
42710 -              ioaddr + MTL_CHAN_INT_CTRL(channel));
42713  static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
42714 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
42715 index 4749bd0af160..369d7cde3993 100644
42716 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
42717 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
42718 @@ -2757,8 +2757,15 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
42720         /* Enable TSO */
42721         if (priv->tso) {
42722 -               for (chan = 0; chan < tx_cnt; chan++)
42723 +               for (chan = 0; chan < tx_cnt; chan++) {
42724 +                       struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
42726 +                       /* TSO and TBS cannot co-exist */
42727 +                       if (tx_q->tbs & STMMAC_TBS_AVAIL)
42728 +                               continue;
42730                         stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
42731 +               }
42732         }
42734         /* Enable Split Header */
42735 @@ -2850,9 +2857,8 @@ static int stmmac_open(struct net_device *dev)
42736                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
42737                 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
42739 +               /* Setup per-TXQ tbs flag before TX descriptor alloc */
42740                 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
42741 -               if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
42742 -                       tx_q->tbs &= ~STMMAC_TBS_AVAIL;
42743         }
42745         ret = alloc_dma_desc_resources(priv);
42746 @@ -4162,7 +4168,6 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
42747         /* To handle GMAC own interrupts */
42748         if ((priv->plat->has_gmac) || xmac) {
42749                 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
42750 -               int mtl_status;
42752                 if (unlikely(status)) {
42753                         /* For LPI we need to save the tx status */
42754 @@ -4173,17 +4178,8 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
42755                 }
42757                 for (queue = 0; queue < queues_count; queue++) {
42758 -                       struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
42760 -                       mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
42761 -                                                               queue);
42762 -                       if (mtl_status != -EINVAL)
42763 -                               status |= mtl_status;
42765 -                       if (status & CORE_IRQ_MTL_RX_OVERFLOW)
42766 -                               stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
42767 -                                                      rx_q->rx_tail_addr,
42768 -                                                      queue);
42769 +                       status = stmmac_host_mtl_irq_status(priv, priv->hw,
42770 +                                                           queue);
42771                 }
42773                 /* PCS link status */
42774 diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
42775 index 707ccdd03b19..74e748662ec0 100644
42776 --- a/drivers/net/ethernet/sun/niu.c
42777 +++ b/drivers/net/ethernet/sun/niu.c
42778 @@ -8144,10 +8144,10 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
42779                                      "VPD_SCAN: Reading in property [%s] len[%d]\n",
42780                                      namebuf, prop_len);
42781                         for (i = 0; i < prop_len; i++) {
42782 -                               err = niu_pci_eeprom_read(np, off + i);
42783 -                               if (err >= 0)
42784 -                                       *prop_buf = err;
42785 -                               ++prop_buf;
42786 +                               err =  niu_pci_eeprom_read(np, off + i);
42787 +                               if (err < 0)
42788 +                                       return err;
42789 +                               *prop_buf++ = err;
42790                         }
42791                 }
42793 @@ -8158,14 +8158,14 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
42796  /* ESPC_PIO_EN_ENABLE must be set */
42797 -static void niu_pci_vpd_fetch(struct niu *np, u32 start)
42798 +static int niu_pci_vpd_fetch(struct niu *np, u32 start)
42800         u32 offset;
42801         int err;
42803         err = niu_pci_eeprom_read16_swp(np, start + 1);
42804         if (err < 0)
42805 -               return;
42806 +               return err;
42808         offset = err + 3;
42810 @@ -8174,12 +8174,14 @@ static void niu_pci_vpd_fetch(struct niu *np, u32 start)
42811                 u32 end;
42813                 err = niu_pci_eeprom_read(np, here);
42814 +               if (err < 0)
42815 +                       return err;
42816                 if (err != 0x90)
42817 -                       return;
42818 +                       return -EINVAL;
42820                 err = niu_pci_eeprom_read16_swp(np, here + 1);
42821                 if (err < 0)
42822 -                       return;
42823 +                       return err;
42825                 here = start + offset + 3;
42826                 end = start + offset + err;
42827 @@ -8187,9 +8189,12 @@ static void niu_pci_vpd_fetch(struct niu *np, u32 start)
42828                 offset += err;
42830                 err = niu_pci_vpd_scan_props(np, here, end);
42831 -               if (err < 0 || err == 1)
42832 -                       return;
42833 +               if (err < 0)
42834 +                       return err;
42835 +               if (err == 1)
42836 +                       return -EINVAL;
42837         }
42838 +       return 0;
42841  /* ESPC_PIO_EN_ENABLE must be set */
42842 @@ -9280,8 +9285,11 @@ static int niu_get_invariants(struct niu *np)
42843                 offset = niu_pci_vpd_offset(np);
42844                 netif_printk(np, probe, KERN_DEBUG, np->dev,
42845                              "%s() VPD offset [%08x]\n", __func__, offset);
42846 -               if (offset)
42847 -                       niu_pci_vpd_fetch(np, offset);
42848 +               if (offset) {
42849 +                       err = niu_pci_vpd_fetch(np, offset);
42850 +                       if (err < 0)
42851 +                               return err;
42852 +               }
42853                 nw64(ESPC_PIO_EN, 0);
42855                 if (np->flags & NIU_FLAGS_VPD_VALID) {
42856 diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
42857 index c7031e1960d4..03055c96f076 100644
42858 --- a/drivers/net/ethernet/ti/davinci_emac.c
42859 +++ b/drivers/net/ethernet/ti/davinci_emac.c
42860 @@ -169,11 +169,11 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
42861  /* EMAC mac_status register */
42862  #define EMAC_MACSTATUS_TXERRCODE_MASK  (0xF00000)
42863  #define EMAC_MACSTATUS_TXERRCODE_SHIFT (20)
42864 -#define EMAC_MACSTATUS_TXERRCH_MASK    (0x7)
42865 +#define EMAC_MACSTATUS_TXERRCH_MASK    (0x70000)
42866  #define EMAC_MACSTATUS_TXERRCH_SHIFT   (16)
42867  #define EMAC_MACSTATUS_RXERRCODE_MASK  (0xF000)
42868  #define EMAC_MACSTATUS_RXERRCODE_SHIFT (12)
42869 -#define EMAC_MACSTATUS_RXERRCH_MASK    (0x7)
42870 +#define EMAC_MACSTATUS_RXERRCH_MASK    (0x700)
42871  #define EMAC_MACSTATUS_RXERRCH_SHIFT   (8)
42873  /* EMAC RX register masks */
42874 diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
42875 index c6eb7f2368aa..911b5ef9e680 100644
42876 --- a/drivers/net/ethernet/xilinx/Kconfig
42877 +++ b/drivers/net/ethernet/xilinx/Kconfig
42878 @@ -18,12 +18,14 @@ if NET_VENDOR_XILINX
42880  config XILINX_EMACLITE
42881         tristate "Xilinx 10/100 Ethernet Lite support"
42882 +       depends on HAS_IOMEM
42883         select PHYLIB
42884         help
42885           This driver supports the 10/100 Ethernet Lite from Xilinx.
42887  config XILINX_AXI_EMAC
42888         tristate "Xilinx 10/100/1000 AXI Ethernet support"
42889 +       depends on HAS_IOMEM
42890         select PHYLINK
42891         help
42892           This driver supports the 10/100/1000 Ethernet from Xilinx for the
42893 @@ -31,6 +33,7 @@ config XILINX_AXI_EMAC
42895  config XILINX_LL_TEMAC
42896         tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
42897 +       depends on HAS_IOMEM
42898         select PHYLIB
42899         help
42900           This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
42901 diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
42902 index 0152f1e70783..9defaa21a1a9 100644
42903 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
42904 +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
42905 @@ -1085,7 +1085,7 @@ static int init_queues(struct port *port)
42906         int i;
42908         if (!ports_open) {
42909 -               dma_pool = dma_pool_create(DRV_NAME, port->netdev->dev.parent,
42910 +               dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
42911                                            POOL_ALLOC_SIZE, 32, 0);
42912                 if (!dma_pool)
42913                         return -ENOMEM;
42914 @@ -1435,6 +1435,9 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
42915         ndev->netdev_ops = &ixp4xx_netdev_ops;
42916         ndev->ethtool_ops = &ixp4xx_ethtool_ops;
42917         ndev->tx_queue_len = 100;
42918 +       /* Inherit the DMA masks from the platform device */
42919 +       ndev->dev.dma_mask = dev->dma_mask;
42920 +       ndev->dev.coherent_dma_mask = dev->coherent_dma_mask;
42922         netif_napi_add(ndev, &port->napi, eth_poll, NAPI_WEIGHT);
42924 diff --git a/drivers/net/fddi/Kconfig b/drivers/net/fddi/Kconfig
42925 index f722079dfb6a..f99c1048c97e 100644
42926 --- a/drivers/net/fddi/Kconfig
42927 +++ b/drivers/net/fddi/Kconfig
42928 @@ -40,17 +40,20 @@ config DEFXX
42930  config DEFXX_MMIO
42931         bool
42932 -       prompt "Use MMIO instead of PIO" if PCI || EISA
42933 +       prompt "Use MMIO instead of IOP" if PCI || EISA
42934         depends on DEFXX
42935 -       default n if PCI || EISA
42936 +       default n if EISA
42937         default y
42938         help
42939           This instructs the driver to use EISA or PCI memory-mapped I/O
42940 -         (MMIO) as appropriate instead of programmed I/O ports (PIO).
42941 +         (MMIO) as appropriate instead of programmed I/O ports (IOP).
42942           Enabling this gives an improvement in processing time in parts
42943 -         of the driver, but it may cause problems with EISA (DEFEA)
42944 -         adapters.  TURBOchannel does not have the concept of I/O ports,
42945 -         so MMIO is always used for these (DEFTA) adapters.
42946 +         of the driver, but it requires a memory window to be configured
42947 +         for EISA (DEFEA) adapters that may not always be available.
42948 +         Conversely some PCIe host bridges do not support IOP, so MMIO
42949 +         may be required to access PCI (DEFPA) adapters on downstream PCI
42950 +         buses with some systems.  TURBOchannel does not have the concept
42951 +         of I/O ports, so MMIO is always used for these (DEFTA) adapters.
42953           If unsure, say N.
42955 diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
42956 index 077c68498f04..c7ce6d5491af 100644
42957 --- a/drivers/net/fddi/defxx.c
42958 +++ b/drivers/net/fddi/defxx.c
42959 @@ -495,6 +495,25 @@ static const struct net_device_ops dfx_netdev_ops = {
42960         .ndo_set_mac_address    = dfx_ctl_set_mac_address,
42961  };
42963 +static void dfx_register_res_alloc_err(const char *print_name, bool mmio,
42964 +                                      bool eisa)
42966 +       pr_err("%s: Cannot use %s, no address set, aborting\n",
42967 +              print_name, mmio ? "MMIO" : "I/O");
42968 +       pr_err("%s: Recompile driver with \"CONFIG_DEFXX_MMIO=%c\"\n",
42969 +              print_name, mmio ? 'n' : 'y');
42970 +       if (eisa && mmio)
42971 +               pr_err("%s: Or run ECU and set adapter's MMIO location\n",
42972 +                      print_name);
42975 +static void dfx_register_res_err(const char *print_name, bool mmio,
42976 +                                unsigned long start, unsigned long len)
42978 +       pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, aborting\n",
42979 +              print_name, mmio ? "MMIO" : "I/O", len, start);
42982  /*
42983   * ================
42984   * = dfx_register =
42985 @@ -568,15 +587,12 @@ static int dfx_register(struct device *bdev)
42986         dev_set_drvdata(bdev, dev);
42988         dfx_get_bars(bdev, bar_start, bar_len);
42989 -       if (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0) {
42990 -               pr_err("%s: Cannot use MMIO, no address set, aborting\n",
42991 -                      print_name);
42992 -               pr_err("%s: Run ECU and set adapter's MMIO location\n",
42993 -                      print_name);
42994 -               pr_err("%s: Or recompile driver with \"CONFIG_DEFXX_MMIO=n\""
42995 -                      "\n", print_name);
42996 +       if (bar_len[0] == 0 ||
42997 +           (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0)) {
42998 +               dfx_register_res_alloc_err(print_name, dfx_use_mmio,
42999 +                                          dfx_bus_eisa);
43000                 err = -ENXIO;
43001 -               goto err_out;
43002 +               goto err_out_disable;
43003         }
43005         if (dfx_use_mmio)
43006 @@ -585,18 +601,16 @@ static int dfx_register(struct device *bdev)
43007         else
43008                 region = request_region(bar_start[0], bar_len[0], print_name);
43009         if (!region) {
43010 -               pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, "
43011 -                      "aborting\n", dfx_use_mmio ? "MMIO" : "I/O", print_name,
43012 -                      (long)bar_len[0], (long)bar_start[0]);
43013 +               dfx_register_res_err(print_name, dfx_use_mmio,
43014 +                                    bar_start[0], bar_len[0]);
43015                 err = -EBUSY;
43016                 goto err_out_disable;
43017         }
43018         if (bar_start[1] != 0) {
43019                 region = request_region(bar_start[1], bar_len[1], print_name);
43020                 if (!region) {
43021 -                       pr_err("%s: Cannot reserve I/O resource "
43022 -                              "0x%lx @ 0x%lx, aborting\n", print_name,
43023 -                              (long)bar_len[1], (long)bar_start[1]);
43024 +                       dfx_register_res_err(print_name, 0,
43025 +                                            bar_start[1], bar_len[1]);
43026                         err = -EBUSY;
43027                         goto err_out_csr_region;
43028                 }
43029 @@ -604,9 +618,8 @@ static int dfx_register(struct device *bdev)
43030         if (bar_start[2] != 0) {
43031                 region = request_region(bar_start[2], bar_len[2], print_name);
43032                 if (!region) {
43033 -                       pr_err("%s: Cannot reserve I/O resource "
43034 -                              "0x%lx @ 0x%lx, aborting\n", print_name,
43035 -                              (long)bar_len[2], (long)bar_start[2]);
43036 +                       dfx_register_res_err(print_name, 0,
43037 +                                            bar_start[2], bar_len[2]);
43038                         err = -EBUSY;
43039                         goto err_out_bh_region;
43040                 }
43041 diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
43042 index 42f31c681846..61cd3dd4deab 100644
43043 --- a/drivers/net/geneve.c
43044 +++ b/drivers/net/geneve.c
43045 @@ -891,7 +891,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
43046         __be16 sport;
43047         int err;
43049 -       if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
43050 +       if (!pskb_inet_may_pull(skb))
43051                 return -EINVAL;
43053         sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
43054 @@ -988,7 +988,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
43055         __be16 sport;
43056         int err;
43058 -       if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
43059 +       if (!pskb_inet_may_pull(skb))
43060                 return -EINVAL;
43062         sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
43063 diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
43064 index 390d3403386a..144892060718 100644
43065 --- a/drivers/net/ipa/gsi.c
43066 +++ b/drivers/net/ipa/gsi.c
43067 @@ -211,8 +211,8 @@ static void gsi_irq_setup(struct gsi *gsi)
43068         iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
43070         /* The inter-EE registers are in the non-adjusted address range */
43071 -       iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
43072 -       iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);
43073 +       iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET);
43074 +       iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET);
43076         iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
43078 diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
43079 index 1622d8cf8dea..48ef04afab79 100644
43080 --- a/drivers/net/ipa/gsi_reg.h
43081 +++ b/drivers/net/ipa/gsi_reg.h
43082 @@ -53,15 +53,15 @@
43083  #define GSI_EE_REG_ADJUST                      0x0000d000      /* IPA v4.5+ */
43085  /* The two inter-EE IRQ register offsets are relative to gsi->virt_raw */
43086 -#define GSI_INTER_EE_SRC_CH_IRQ_OFFSET \
43087 -                       GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
43088 -#define GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(ee) \
43089 -                       (0x0000c018 + 0x1000 * (ee))
43091 -#define GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET \
43092 -                       GSI_INTER_EE_N_SRC_EV_CH_IRQ_OFFSET(GSI_EE_AP)
43093 -#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_OFFSET(ee) \
43094 -                       (0x0000c01c + 0x1000 * (ee))
43095 +#define GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET \
43096 +                       GSI_INTER_EE_N_SRC_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
43097 +#define GSI_INTER_EE_N_SRC_CH_IRQ_MSK_OFFSET(ee) \
43098 +                       (0x0000c020 + 0x1000 * (ee))
43100 +#define GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET \
43101 +                       GSI_INTER_EE_N_SRC_EV_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
43102 +#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_MSK_OFFSET(ee) \
43103 +                       (0x0000c024 + 0x1000 * (ee))
43105  /* All other register offsets are relative to gsi->virt */
43106  #define GSI_CH_C_CNTXT_0_OFFSET(ch) \
43107 diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
43108 index 6eac50d4b42f..d453ec016168 100644
43109 --- a/drivers/net/phy/intel-xway.c
43110 +++ b/drivers/net/phy/intel-xway.c
43111 @@ -11,6 +11,18 @@
43113  #define XWAY_MDIO_IMASK                        0x19    /* interrupt mask */
43114  #define XWAY_MDIO_ISTAT                        0x1A    /* interrupt status */
43115 +#define XWAY_MDIO_LED                  0x1B    /* led control */
43117 +/* bit 15:12 are reserved */
43118 +#define XWAY_MDIO_LED_LED3_EN          BIT(11) /* Enable the integrated function of LED3 */
43119 +#define XWAY_MDIO_LED_LED2_EN          BIT(10) /* Enable the integrated function of LED2 */
43120 +#define XWAY_MDIO_LED_LED1_EN          BIT(9)  /* Enable the integrated function of LED1 */
43121 +#define XWAY_MDIO_LED_LED0_EN          BIT(8)  /* Enable the integrated function of LED0 */
43122 +/* bit 7:4 are reserved */
43123 +#define XWAY_MDIO_LED_LED3_DA          BIT(3)  /* Direct Access to LED3 */
43124 +#define XWAY_MDIO_LED_LED2_DA          BIT(2)  /* Direct Access to LED2 */
43125 +#define XWAY_MDIO_LED_LED1_DA          BIT(1)  /* Direct Access to LED1 */
43126 +#define XWAY_MDIO_LED_LED0_DA          BIT(0)  /* Direct Access to LED0 */
43128  #define XWAY_MDIO_INIT_WOL             BIT(15) /* Wake-On-LAN */
43129  #define XWAY_MDIO_INIT_MSRE            BIT(14)
43130 @@ -159,6 +171,15 @@ static int xway_gphy_config_init(struct phy_device *phydev)
43131         /* Clear all pending interrupts */
43132         phy_read(phydev, XWAY_MDIO_ISTAT);
43134 +       /* Ensure that integrated led function is enabled for all leds */
43135 +       err = phy_write(phydev, XWAY_MDIO_LED,
43136 +                       XWAY_MDIO_LED_LED0_EN |
43137 +                       XWAY_MDIO_LED_LED1_EN |
43138 +                       XWAY_MDIO_LED_LED2_EN |
43139 +                       XWAY_MDIO_LED_LED3_EN);
43140 +       if (err)
43141 +               return err;
43143         phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LEDCH,
43144                       XWAY_MMD_LEDCH_NACS_NONE |
43145                       XWAY_MMD_LEDCH_SBF_F02HZ |
43146 diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
43147 index 8018ddf7f316..f86c9ddc609e 100644
43148 --- a/drivers/net/phy/marvell.c
43149 +++ b/drivers/net/phy/marvell.c
43150 @@ -967,22 +967,28 @@ static int m88e1111_get_downshift(struct phy_device *phydev, u8 *data)
43152  static int m88e1111_set_downshift(struct phy_device *phydev, u8 cnt)
43154 -       int val;
43155 +       int val, err;
43157         if (cnt > MII_M1111_PHY_EXT_CR_DOWNSHIFT_MAX)
43158                 return -E2BIG;
43160 -       if (!cnt)
43161 -               return phy_clear_bits(phydev, MII_M1111_PHY_EXT_CR,
43162 -                                     MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN);
43163 +       if (!cnt) {
43164 +               err = phy_clear_bits(phydev, MII_M1111_PHY_EXT_CR,
43165 +                                    MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN);
43166 +       } else {
43167 +               val = MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN;
43168 +               val |= FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1);
43170 -       val = MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN;
43171 -       val |= FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1);
43172 +               err = phy_modify(phydev, MII_M1111_PHY_EXT_CR,
43173 +                                MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN |
43174 +                                MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK,
43175 +                                val);
43176 +       }
43178 -       return phy_modify(phydev, MII_M1111_PHY_EXT_CR,
43179 -                         MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN |
43180 -                         MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK,
43181 -                         val);
43182 +       if (err < 0)
43183 +               return err;
43185 +       return genphy_soft_reset(phydev);
43188  static int m88e1111_get_tunable(struct phy_device *phydev,
43189 @@ -1025,22 +1031,28 @@ static int m88e1011_get_downshift(struct phy_device *phydev, u8 *data)
43191  static int m88e1011_set_downshift(struct phy_device *phydev, u8 cnt)
43193 -       int val;
43194 +       int val, err;
43196         if (cnt > MII_M1011_PHY_SCR_DOWNSHIFT_MAX)
43197                 return -E2BIG;
43199 -       if (!cnt)
43200 -               return phy_clear_bits(phydev, MII_M1011_PHY_SCR,
43201 -                                     MII_M1011_PHY_SCR_DOWNSHIFT_EN);
43202 +       if (!cnt) {
43203 +               err = phy_clear_bits(phydev, MII_M1011_PHY_SCR,
43204 +                                    MII_M1011_PHY_SCR_DOWNSHIFT_EN);
43205 +       } else {
43206 +               val = MII_M1011_PHY_SCR_DOWNSHIFT_EN;
43207 +               val |= FIELD_PREP(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, cnt - 1);
43209 -       val = MII_M1011_PHY_SCR_DOWNSHIFT_EN;
43210 -       val |= FIELD_PREP(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, cnt - 1);
43211 +               err = phy_modify(phydev, MII_M1011_PHY_SCR,
43212 +                                MII_M1011_PHY_SCR_DOWNSHIFT_EN |
43213 +                                MII_M1011_PHY_SCR_DOWNSHIFT_MASK,
43214 +                                val);
43215 +       }
43217 -       return phy_modify(phydev, MII_M1011_PHY_SCR,
43218 -                         MII_M1011_PHY_SCR_DOWNSHIFT_EN |
43219 -                         MII_M1011_PHY_SCR_DOWNSHIFT_MASK,
43220 -                         val);
43221 +       if (err < 0)
43222 +               return err;
43224 +       return genphy_soft_reset(phydev);
43227  static int m88e1011_get_tunable(struct phy_device *phydev,
43228 diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
43229 index cc38e326405a..af2e1759b523 100644
43230 --- a/drivers/net/phy/phy_device.c
43231 +++ b/drivers/net/phy/phy_device.c
43232 @@ -273,6 +273,9 @@ static __maybe_unused int mdio_bus_phy_suspend(struct device *dev)
43234         struct phy_device *phydev = to_phy_device(dev);
43236 +       if (phydev->mac_managed_pm)
43237 +               return 0;
43239         /* We must stop the state machine manually, otherwise it stops out of
43240          * control, possibly with the phydev->lock held. Upon resume, netdev
43241          * may call phy routines that try to grab the same lock, and that may
43242 @@ -294,6 +297,9 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
43243         struct phy_device *phydev = to_phy_device(dev);
43244         int ret;
43246 +       if (phydev->mac_managed_pm)
43247 +               return 0;
43249         if (!phydev->suspended_by_mdio_bus)
43250                 goto no_resume;
43252 diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
43253 index ddb78fb4d6dc..d8cac02a79b9 100644
43254 --- a/drivers/net/phy/smsc.c
43255 +++ b/drivers/net/phy/smsc.c
43256 @@ -185,10 +185,13 @@ static int lan87xx_config_aneg(struct phy_device *phydev)
43257         return genphy_config_aneg(phydev);
43260 -static int lan87xx_config_aneg_ext(struct phy_device *phydev)
43261 +static int lan95xx_config_aneg_ext(struct phy_device *phydev)
43263         int rc;
43265 +       if (phydev->phy_id != 0x0007c0f0) /* not (LAN9500A or LAN9505A) */
43266 +               return lan87xx_config_aneg(phydev);
43268         /* Extend Manual AutoMDIX timer */
43269         rc = phy_read(phydev, PHY_EDPD_CONFIG);
43270         if (rc < 0)
43271 @@ -441,7 +444,7 @@ static struct phy_driver smsc_phy_driver[] = {
43272         .read_status    = lan87xx_read_status,
43273         .config_init    = smsc_phy_config_init,
43274         .soft_reset     = smsc_phy_reset,
43275 -       .config_aneg    = lan87xx_config_aneg_ext,
43276 +       .config_aneg    = lan95xx_config_aneg_ext,
43278         /* IRQ related */
43279         .config_intr    = smsc_phy_config_intr,
43280 diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
43281 index d650b39b6e5d..c1316718304d 100644
43282 --- a/drivers/net/usb/ax88179_178a.c
43283 +++ b/drivers/net/usb/ax88179_178a.c
43284 @@ -296,12 +296,12 @@ static int ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
43285         int ret;
43287         if (2 == size) {
43288 -               u16 buf;
43289 +               u16 buf = 0;
43290                 ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
43291                 le16_to_cpus(&buf);
43292                 *((u16 *)data) = buf;
43293         } else if (4 == size) {
43294 -               u32 buf;
43295 +               u32 buf = 0;
43296                 ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
43297                 le32_to_cpus(&buf);
43298                 *((u32 *)data) = buf;
43299 @@ -1296,6 +1296,8 @@ static void ax88179_get_mac_addr(struct usbnet *dev)
43301         u8 mac[ETH_ALEN];
43303 +       memset(mac, 0, sizeof(mac));
43305         /* Maybe the boot loader passed the MAC address via device tree */
43306         if (!eth_platform_get_mac_address(&dev->udev->dev, mac)) {
43307                 netif_dbg(dev, ifup, dev->net,
43308 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
43309 index 9bc58e64b5b7..3ef4b2841402 100644
43310 --- a/drivers/net/usb/hso.c
43311 +++ b/drivers/net/usb/hso.c
43312 @@ -3104,7 +3104,7 @@ static void hso_free_interface(struct usb_interface *interface)
43313                         cancel_work_sync(&serial_table[i]->async_put_intf);
43314                         cancel_work_sync(&serial_table[i]->async_get_intf);
43315                         hso_serial_tty_unregister(serial);
43316 -                       kref_put(&serial_table[i]->ref, hso_serial_ref_free);
43317 +                       kref_put(&serial->parent->ref, hso_serial_ref_free);
43318                 }
43319         }
43321 diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
43322 index e81c5699c952..d2b360cfb402 100644
43323 --- a/drivers/net/usb/lan78xx.c
43324 +++ b/drivers/net/usb/lan78xx.c
43325 @@ -2655,7 +2655,7 @@ static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
43326         while (!skb_queue_empty(&dev->rxq) &&
43327                !skb_queue_empty(&dev->txq) &&
43328                !skb_queue_empty(&dev->done)) {
43329 -               schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
43330 +               schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS));
43331                 set_current_state(TASK_UNINTERRUPTIBLE);
43332                 netif_dbg(dev, ifdown, dev->net,
43333                           "waited for %d urb completions\n", temp);
43334 diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
43335 index f4f37ecfed58..36647378e016 100644
43336 --- a/drivers/net/usb/usbnet.c
43337 +++ b/drivers/net/usb/usbnet.c
43338 @@ -764,7 +764,7 @@ static void wait_skb_queue_empty(struct sk_buff_head *q)
43339         spin_lock_irqsave(&q->lock, flags);
43340         while (!skb_queue_empty(q)) {
43341                 spin_unlock_irqrestore(&q->lock, flags);
43342 -               schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
43343 +               schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS));
43344                 set_current_state(TASK_UNINTERRUPTIBLE);
43345                 spin_lock_irqsave(&q->lock, flags);
43346         }
43347 diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
43348 index 4d9dc7d15908..0720f5f92caa 100644
43349 --- a/drivers/net/wan/hdlc_fr.c
43350 +++ b/drivers/net/wan/hdlc_fr.c
43351 @@ -415,7 +415,7 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
43353                 if (pad > 0) { /* Pad the frame with zeros */
43354                         if (__skb_pad(skb, pad, false))
43355 -                               goto out;
43356 +                               goto drop;
43357                         skb_put(skb, pad);
43358                 }
43359         }
43360 @@ -448,9 +448,8 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
43361         return NETDEV_TX_OK;
43363  drop:
43364 -       kfree_skb(skb);
43365 -out:
43366         dev->stats.tx_dropped++;
43367 +       kfree_skb(skb);
43368         return NETDEV_TX_OK;
43371 diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
43372 index c3372498f4f1..8fda0446ff71 100644
43373 --- a/drivers/net/wan/lapbether.c
43374 +++ b/drivers/net/wan/lapbether.c
43375 @@ -51,6 +51,8 @@ struct lapbethdev {
43376         struct list_head        node;
43377         struct net_device       *ethdev;        /* link to ethernet device */
43378         struct net_device       *axdev;         /* lapbeth device (lapb#) */
43379 +       bool                    up;
43380 +       spinlock_t              up_lock;        /* Protects "up" */
43381  };
43383  static LIST_HEAD(lapbeth_devices);
43384 @@ -101,8 +103,9 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
43385         rcu_read_lock();
43386         lapbeth = lapbeth_get_x25_dev(dev);
43387         if (!lapbeth)
43388 -               goto drop_unlock;
43389 -       if (!netif_running(lapbeth->axdev))
43390 +               goto drop_unlock_rcu;
43391 +       spin_lock_bh(&lapbeth->up_lock);
43392 +       if (!lapbeth->up)
43393                 goto drop_unlock;
43395         len = skb->data[0] + skb->data[1] * 256;
43396 @@ -117,11 +120,14 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
43397                 goto drop_unlock;
43398         }
43399  out:
43400 +       spin_unlock_bh(&lapbeth->up_lock);
43401         rcu_read_unlock();
43402         return 0;
43403  drop_unlock:
43404         kfree_skb(skb);
43405         goto out;
43406 +drop_unlock_rcu:
43407 +       rcu_read_unlock();
43408  drop:
43409         kfree_skb(skb);
43410         return 0;
43411 @@ -151,13 +157,11 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
43412  static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
43413                                       struct net_device *dev)
43415 +       struct lapbethdev *lapbeth = netdev_priv(dev);
43416         int err;
43418 -       /*
43419 -        * Just to be *really* sure not to send anything if the interface
43420 -        * is down, the ethernet device may have gone.
43421 -        */
43422 -       if (!netif_running(dev))
43423 +       spin_lock_bh(&lapbeth->up_lock);
43424 +       if (!lapbeth->up)
43425                 goto drop;
43427         /* There should be a pseudo header of 1 byte added by upper layers.
43428 @@ -194,6 +198,7 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
43429                 goto drop;
43430         }
43431  out:
43432 +       spin_unlock_bh(&lapbeth->up_lock);
43433         return NETDEV_TX_OK;
43434  drop:
43435         kfree_skb(skb);
43436 @@ -285,6 +290,7 @@ static const struct lapb_register_struct lapbeth_callbacks = {
43437   */
43438  static int lapbeth_open(struct net_device *dev)
43440 +       struct lapbethdev *lapbeth = netdev_priv(dev);
43441         int err;
43443         if ((err = lapb_register(dev, &lapbeth_callbacks)) != LAPB_OK) {
43444 @@ -292,13 +298,22 @@ static int lapbeth_open(struct net_device *dev)
43445                 return -ENODEV;
43446         }
43448 +       spin_lock_bh(&lapbeth->up_lock);
43449 +       lapbeth->up = true;
43450 +       spin_unlock_bh(&lapbeth->up_lock);
43452         return 0;
43455  static int lapbeth_close(struct net_device *dev)
43457 +       struct lapbethdev *lapbeth = netdev_priv(dev);
43458         int err;
43460 +       spin_lock_bh(&lapbeth->up_lock);
43461 +       lapbeth->up = false;
43462 +       spin_unlock_bh(&lapbeth->up_lock);
43464         if ((err = lapb_unregister(dev)) != LAPB_OK)
43465                 pr_err("lapb_unregister error: %d\n", err);
43467 @@ -356,6 +371,9 @@ static int lapbeth_new_device(struct net_device *dev)
43468         dev_hold(dev);
43469         lapbeth->ethdev = dev;
43471 +       lapbeth->up = false;
43472 +       spin_lock_init(&lapbeth->up_lock);
43474         rc = -EIO;
43475         if (register_netdevice(ndev))
43476                 goto fail;
43477 diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
43478 index 0a37be6a7d33..fab398046a3f 100644
43479 --- a/drivers/net/wireless/ath/ath10k/htc.c
43480 +++ b/drivers/net/wireless/ath/ath10k/htc.c
43481 @@ -669,7 +669,7 @@ static int ath10k_htc_send_bundle(struct ath10k_htc_ep *ep,
43483         ath10k_dbg(ar, ATH10K_DBG_HTC,
43484                    "bundle tx status %d eid %d req count %d count %d len %d\n",
43485 -                  ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, bundle_skb->len);
43486 +                  ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, skb_len);
43487         return ret;
43490 diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
43491 index d97b33f789e4..7efbe03fbca8 100644
43492 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
43493 +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
43494 @@ -592,6 +592,9 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
43495                                         GFP_ATOMIC
43496                                         );
43497                 break;
43498 +       default:
43499 +               kfree(tb);
43500 +               return;
43501         }
43503  exit:
43504 diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
43505 index cccfd3bd4d27..ca5cda890d58 100644
43506 --- a/drivers/net/wireless/ath/ath11k/wmi.c
43507 +++ b/drivers/net/wireless/ath/ath11k/wmi.c
43508 @@ -5417,31 +5417,6 @@ int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
43509         return 0;
43512 -static int
43513 -ath11k_pull_pdev_temp_ev(struct ath11k_base *ab, u8 *evt_buf,
43514 -                        u32 len, const struct wmi_pdev_temperature_event *ev)
43516 -       const void **tb;
43517 -       int ret;
43519 -       tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
43520 -       if (IS_ERR(tb)) {
43521 -               ret = PTR_ERR(tb);
43522 -               ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
43523 -               return ret;
43524 -       }
43526 -       ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
43527 -       if (!ev) {
43528 -               ath11k_warn(ab, "failed to fetch pdev temp ev");
43529 -               kfree(tb);
43530 -               return -EPROTO;
43531 -       }
43533 -       kfree(tb);
43534 -       return 0;
43537  size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head)
43539         struct ath11k_fw_stats_vdev *i;
43540 @@ -6849,23 +6824,37 @@ ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
43541                                   struct sk_buff *skb)
43543         struct ath11k *ar;
43544 -       struct wmi_pdev_temperature_event ev = {0};
43545 +       const void **tb;
43546 +       const struct wmi_pdev_temperature_event *ev;
43547 +       int ret;
43549 +       tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
43550 +       if (IS_ERR(tb)) {
43551 +               ret = PTR_ERR(tb);
43552 +               ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
43553 +               return;
43554 +       }
43556 -       if (ath11k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) {
43557 -               ath11k_warn(ab, "failed to extract pdev temperature event");
43558 +       ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
43559 +       if (!ev) {
43560 +               ath11k_warn(ab, "failed to fetch pdev temp ev");
43561 +               kfree(tb);
43562                 return;
43563         }
43565         ath11k_dbg(ab, ATH11K_DBG_WMI,
43566 -                  "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);
43567 +                  "pdev temperature ev temp %d pdev_id %d\n", ev->temp, ev->pdev_id);
43569 -       ar = ath11k_mac_get_ar_by_pdev_id(ab, ev.pdev_id);
43570 +       ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
43571         if (!ar) {
43572 -               ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
43573 +               ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id);
43574 +               kfree(tb);
43575                 return;
43576         }
43578 -       ath11k_thermal_event_temperature(ar, ev.temp);
43579 +       ath11k_thermal_event_temperature(ar, ev->temp);
43581 +       kfree(tb);
43584  static void ath11k_fils_discovery_event(struct ath11k_base *ab,
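The ath11k rework above folds the single-use `ath11k_pull_pdev_temp_ev()` helper into its only caller, so the event handler owns the parsed TLV buffer directly and can free it on every exit, including the error branches where the old split made that awkward. A sketch of that ownership shape, with a hypothetical `parse_temp_event()` standing in for the TLV parser:

#include <stdio.h>
#include <stdlib.h>

struct temp_event { int temp; int pdev_id; };

/* Hypothetical parse step: returns a heap buffer the caller must free. */
static struct temp_event *parse_temp_event(const char *raw)
{
    struct temp_event *ev = malloc(sizeof(*ev));

    if (!ev)
        return NULL;
    ev->temp = 42;      /* pretend the raw TLV decoded to these values */
    ev->pdev_id = 0;
    (void)raw;
    return ev;
}

static void temperature_event(const char *raw)
{
    struct temp_event *ev = parse_temp_event(raw);

    if (!ev) {
        fprintf(stderr, "failed to parse event\n");
        return;                 /* nothing allocated, nothing to free */
    }
    if (ev->pdev_id != 0) {
        fprintf(stderr, "invalid pdev id %d\n", ev->pdev_id);
        free(ev);               /* every early exit releases the buffer */
        return;
    }
    printf("temp %d pdev %d\n", ev->temp, ev->pdev_id);
    free(ev);                   /* normal exit releases it too */
}

int main(void)
{
    temperature_event("raw-tlv-bytes");
    return 0;
}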
43585 diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
43586 index db0c6fa9c9dc..ff61ae34ecdf 100644
43587 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
43588 +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
43589 @@ -246,7 +246,7 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
43590         if (unlikely(r)) {
43591                 ath_dbg(common, WMI, "REGISTER READ FAILED: (0x%04x, %d)\n",
43592                         reg_offset, r);
43593 -               return -EIO;
43594 +               return -1;
43595         }
43597         return be32_to_cpu(val);
43598 diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
43599 index 5abc2a5526ec..2ca3b86714a9 100644
43600 --- a/drivers/net/wireless/ath/ath9k/hw.c
43601 +++ b/drivers/net/wireless/ath/ath9k/hw.c
43602 @@ -286,7 +286,7 @@ static bool ath9k_hw_read_revisions(struct ath_hw *ah)
43604         srev = REG_READ(ah, AR_SREV);
43606 -       if (srev == -EIO) {
43607 +       if (srev == -1) {
43608                 ath_err(ath9k_hw_common(ah),
43609                         "Failed to read SREV register");
43610                 return false;
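The paired ath9k changes swap `-EIO` for `-1` as the register-read failure value. The read path returns an unsigned 32-bit quantity, so the natural "bus error" sentinel is all-ones, which the caller can compare against reliably; `-EIO` squeezed into that type is just another plausible register value. A sketch of the sentinel convention, with the failing read simulated:

#include <stdio.h>

#define REG_READ_FAIL 0xffffffffu /* all-ones: what a dead bus returns */

/* Hypothetical MMIO read: yields the register value, or all-ones on a
 * failed transaction. An errno such as -EIO (-5, i.e. 0xfffffffb here)
 * would be indistinguishable from a legitimate register value. */
static unsigned int reg_read(int fail)
{
    if (fail)
        return REG_READ_FAIL;
    return 0x1234abcd;
}

int main(void)
{
    unsigned int srev = reg_read(1);

    if (srev == REG_READ_FAIL) { /* compare the sentinel, not -EIO */
        fprintf(stderr, "failed to read SREV register\n");
        return 1;
    }
    printf("srev=0x%x\n", srev);
    return 0;
}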
43611 diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
43612 index 60db38c38960..fd37d4d2983b 100644
43613 --- a/drivers/net/wireless/cisco/airo.c
43614 +++ b/drivers/net/wireless/cisco/airo.c
43615 @@ -3817,6 +3817,68 @@ static inline void set_auth_type(struct airo_info *local, int auth_type)
43616                 local->last_auth = auth_type;
43619 +static int noinline_for_stack airo_readconfig(struct airo_info *ai, u8 *mac, int lock)
43621 +       int i, status;
43622 +       /* large variables, so don't inline this function,
43623 +        * maybe change to kmalloc
43624 +        */
43625 +       tdsRssiRid rssi_rid;
43626 +       CapabilityRid cap_rid;
43628 +       kfree(ai->SSID);
43629 +       ai->SSID = NULL;
43630 +       // general configuration (read/modify/write)
43631 +       status = readConfigRid(ai, lock);
43632 +       if (status != SUCCESS) return ERROR;
43634 +       status = readCapabilityRid(ai, &cap_rid, lock);
43635 +       if (status != SUCCESS) return ERROR;
43637 +       status = PC4500_readrid(ai, RID_RSSI, &rssi_rid, sizeof(rssi_rid), lock);
43638 +       if (status == SUCCESS) {
43639 +               if (ai->rssi || (ai->rssi = kmalloc(512, GFP_KERNEL)) != NULL)
43640 +                       memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */
43641 +       }
43642 +       else {
43643 +               kfree(ai->rssi);
43644 +               ai->rssi = NULL;
43645 +               if (cap_rid.softCap & cpu_to_le16(8))
43646 +                       ai->config.rmode |= RXMODE_NORMALIZED_RSSI;
43647 +               else
43648 +                       airo_print_warn(ai->dev->name, "unknown received signal "
43649 +                                       "level scale");
43650 +       }
43651 +       ai->config.opmode = adhoc ? MODE_STA_IBSS : MODE_STA_ESS;
43652 +       set_auth_type(ai, AUTH_OPEN);
43653 +       ai->config.modulation = MOD_CCK;
43655 +       if (le16_to_cpu(cap_rid.len) >= sizeof(cap_rid) &&
43656 +           (cap_rid.extSoftCap & cpu_to_le16(1)) &&
43657 +           micsetup(ai) == SUCCESS) {
43658 +               ai->config.opmode |= MODE_MIC;
43659 +               set_bit(FLAG_MIC_CAPABLE, &ai->flags);
43660 +       }
43662 +       /* Save off the MAC */
43663 +       for (i = 0; i < ETH_ALEN; i++) {
43664 +               mac[i] = ai->config.macAddr[i];
43665 +       }
43667 +       /* Check to see if there are any insmod configured
43668 +          rates to add */
43669 +       if (rates[0]) {
43670 +               memset(ai->config.rates, 0, sizeof(ai->config.rates));
43671 +               for (i = 0; i < 8 && rates[i]; i++) {
43672 +                       ai->config.rates[i] = rates[i];
43673 +               }
43674 +       }
43675 +       set_bit (FLAG_COMMIT, &ai->flags);
43677 +       return SUCCESS;
43681  static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
43683         Cmd cmd;
43684 @@ -3863,58 +3925,9 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
43685         if (lock)
43686                 up(&ai->sem);
43687         if (ai->config.len == 0) {
43688 -               int i;
43689 -               tdsRssiRid rssi_rid;
43690 -               CapabilityRid cap_rid;
43692 -               kfree(ai->SSID);
43693 -               ai->SSID = NULL;
43694 -               // general configuration (read/modify/write)
43695 -               status = readConfigRid(ai, lock);
43696 -               if (status != SUCCESS) return ERROR;
43698 -               status = readCapabilityRid(ai, &cap_rid, lock);
43699 -               if (status != SUCCESS) return ERROR;
43701 -               status = PC4500_readrid(ai, RID_RSSI,&rssi_rid, sizeof(rssi_rid), lock);
43702 -               if (status == SUCCESS) {
43703 -                       if (ai->rssi || (ai->rssi = kmalloc(512, GFP_KERNEL)) != NULL)
43704 -                               memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */
43705 -               }
43706 -               else {
43707 -                       kfree(ai->rssi);
43708 -                       ai->rssi = NULL;
43709 -                       if (cap_rid.softCap & cpu_to_le16(8))
43710 -                               ai->config.rmode |= RXMODE_NORMALIZED_RSSI;
43711 -                       else
43712 -                               airo_print_warn(ai->dev->name, "unknown received signal "
43713 -                                               "level scale");
43714 -               }
43715 -               ai->config.opmode = adhoc ? MODE_STA_IBSS : MODE_STA_ESS;
43716 -               set_auth_type(ai, AUTH_OPEN);
43717 -               ai->config.modulation = MOD_CCK;
43719 -               if (le16_to_cpu(cap_rid.len) >= sizeof(cap_rid) &&
43720 -                   (cap_rid.extSoftCap & cpu_to_le16(1)) &&
43721 -                   micsetup(ai) == SUCCESS) {
43722 -                       ai->config.opmode |= MODE_MIC;
43723 -                       set_bit(FLAG_MIC_CAPABLE, &ai->flags);
43724 -               }
43726 -               /* Save off the MAC */
43727 -               for (i = 0; i < ETH_ALEN; i++) {
43728 -                       mac[i] = ai->config.macAddr[i];
43729 -               }
43731 -               /* Check to see if there are any insmod configured
43732 -                  rates to add */
43733 -               if (rates[0]) {
43734 -                       memset(ai->config.rates, 0, sizeof(ai->config.rates));
43735 -                       for (i = 0; i < 8 && rates[i]; i++) {
43736 -                               ai->config.rates[i] = rates[i];
43737 -                       }
43738 -               }
43739 -               set_bit (FLAG_COMMIT, &ai->flags);
43740 +               status = airo_readconfig(ai, mac, lock);
43741 +               if (status != SUCCESS)
43742 +                       return ERROR;
43743         }
43745         /* Setup the SSIDs if present */
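The airo refactor above moves the large on-stack RID buffers out of `setup_card()` into a helper marked `noinline_for_stack`, so the several hundred bytes of scratch space exist only while the helper's frame is live rather than inflating the caller's frame for its whole duration. A userspace sketch using GCC's plain `noinline` attribute, with sizes and names invented for illustration:

#include <stdio.h>
#include <string.h>

/* Large scratch buffers live only in this frame; noinline keeps the
 * compiler from merging them into the caller's stack frame. */
__attribute__((noinline))
static int read_config(unsigned char *mac)
{
    unsigned char rssi_rid[512];   /* stand-ins for tdsRssiRid and */
    unsigned char cap_rid[128];    /* CapabilityRid in the driver  */

    memset(rssi_rid, 0, sizeof(rssi_rid));
    memset(cap_rid, 0, sizeof(cap_rid));
    memcpy(mac, "\x02\x00\x00\x00\x00\x01", 6);
    return 0;
}

static int setup_card(void)
{
    unsigned char mac[6];

    /* setup_card() itself stays lean: the 600+ bytes of RID scratch
     * space are on the stack only while read_config() runs. */
    if (read_config(mac))
        return -1;
    printf("mac starts %02x:%02x\n", mac[0], mac[1]);
    return 0;
}

int main(void) { return setup_card(); }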
43746 diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
43747 index 23fbddd0c1f8..534ab3b894e2 100644
43748 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
43749 +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
43750 @@ -815,7 +815,7 @@ static int ipw2100_hw_send_command(struct ipw2100_priv *priv,
43751          * doesn't seem to have as many firmware restart cycles...
43752          *
43753          * As a test, we're sticking in a 1/100s delay here */
43754 -       schedule_timeout_uninterruptible(msecs_to_jiffies(10));
43755 +       schedule_msec_hrtimeout_uninterruptible((10));
43757         return 0;
43759 @@ -1266,7 +1266,7 @@ static int ipw2100_start_adapter(struct ipw2100_priv *priv)
43760         IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n");
43761         i = 5000;
43762         do {
43763 -               schedule_timeout_uninterruptible(msecs_to_jiffies(40));
43764 +               schedule_msec_hrtimeout_uninterruptible((40));
43765                 /* Todo... wait for sync command ... */
43767                 read_register(priv->net_dev, IPW_REG_INTA, &inta);
43768 diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
43769 index a0cf78c418ac..903de34028ef 100644
43770 --- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
43771 +++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
43772 @@ -633,8 +633,10 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee,
43773         }
43775         if (ext->alg != IW_ENCODE_ALG_NONE) {
43776 -               memcpy(sec.keys[idx], ext->key, ext->key_len);
43777 -               sec.key_sizes[idx] = ext->key_len;
43778 +               int key_len = clamp_val(ext->key_len, 0, SCM_KEY_LEN);
43780 +               memcpy(sec.keys[idx], ext->key, key_len);
43781 +               sec.key_sizes[idx] = key_len;
43782                 sec.flags |= (1 << idx);
43783                 if (ext->alg == IW_ENCODE_ALG_WEP) {
43784                         sec.encode_alg[idx] = SEC_ALG_WEP;
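The libipw fix above is a classic bounds clamp: `ext->key_len` arrives from userspace, and copying it unclamped into the fixed-size `sec.keys[idx]` slot could overflow the buffer, so the patch clamps it to `SCM_KEY_LEN` first. A standalone sketch of the same guard; `clamp_int` imitates the kernel's clamp_val, and SCM_KEY_LEN's value matches lib80211's definition as far as I know:

#include <stdio.h>
#include <string.h>

#define SCM_KEY_LEN 32 /* capacity of each key slot */

struct sec_info {
    unsigned char keys[4][SCM_KEY_LEN];
    int key_sizes[4];
};

static int clamp_int(int v, int lo, int hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

/* key_len is caller-controlled; clamp before the copy, and record the
 * clamped size so readers never index past what was written. */
static void set_key(struct sec_info *sec, int idx,
                    const unsigned char *key, int key_len)
{
    int len = clamp_int(key_len, 0, SCM_KEY_LEN);

    memcpy(sec->keys[idx], key, (size_t)len);
    sec->key_sizes[idx] = len;
}

int main(void)
{
    struct sec_info sec = {0};
    unsigned char key[64] = {0};

    set_key(&sec, 0, key, 64); /* oversized request truncated to 32 */
    printf("stored %d bytes\n", sec.key_sizes[0]);
    return 0;
}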
43785 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
43786 index 579bc81cc0ae..4cd8c39cc3e9 100644
43787 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
43788 +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
43789 @@ -1,6 +1,6 @@
43790  // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
43791  /*
43792 - * Copyright (C) 2018-2020 Intel Corporation
43793 + * Copyright (C) 2018-2021 Intel Corporation
43794   */
43795  #include <linux/firmware.h>
43796  #include "iwl-drv.h"
43797 @@ -426,7 +426,8 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
43798         const struct firmware *fw;
43799         int res;
43801 -       if (!iwlwifi_mod_params.enable_ini)
43802 +       if (!iwlwifi_mod_params.enable_ini ||
43803 +           trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_9000)
43804                 return;
43806         res = firmware_request_nowarn(&fw, "iwl-debug-yoyo.bin", dev);
43807 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
43808 index 60e0db4a5e20..9236f9106826 100644
43809 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
43810 +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
43811 @@ -2,7 +2,7 @@
43812  /*
43813   * Copyright (C) 2015 Intel Mobile Communications GmbH
43814   * Copyright (C) 2016-2017 Intel Deutschland GmbH
43815 - * Copyright (C) 2019-2020 Intel Corporation
43816 + * Copyright (C) 2019-2021 Intel Corporation
43817   */
43818  #include <linux/kernel.h>
43819  #include <linux/bsearch.h>
43820 @@ -21,7 +21,6 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
43821                                   const struct iwl_cfg_trans_params *cfg_trans)
43823         struct iwl_trans *trans;
43824 -       int txcmd_size, txcmd_align;
43825  #ifdef CONFIG_LOCKDEP
43826         static struct lock_class_key __key;
43827  #endif
43828 @@ -31,10 +30,40 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
43829                 return NULL;
43831         trans->trans_cfg = cfg_trans;
43832 -       if (!cfg_trans->gen2) {
43834 +#ifdef CONFIG_LOCKDEP
43835 +       lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
43836 +                        &__key, 0);
43837 +#endif
43839 +       trans->dev = dev;
43840 +       trans->ops = ops;
43841 +       trans->num_rx_queues = 1;
43843 +       WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);
43845 +       if (trans->trans_cfg->use_tfh) {
43846 +               trans->txqs.tfd.addr_size = 64;
43847 +               trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
43848 +               trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
43849 +       } else {
43850 +               trans->txqs.tfd.addr_size = 36;
43851 +               trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
43852 +               trans->txqs.tfd.size = sizeof(struct iwl_tfd);
43853 +       }
43854 +       trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans);
43856 +       return trans;
43859 +int iwl_trans_init(struct iwl_trans *trans)
43861 +       int txcmd_size, txcmd_align;
43863 +       if (!trans->trans_cfg->gen2) {
43864                 txcmd_size = sizeof(struct iwl_tx_cmd);
43865                 txcmd_align = sizeof(void *);
43866 -       } else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) {
43867 +       } else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
43868                 txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
43869                 txcmd_align = 64;
43870         } else {
43871 @@ -46,17 +75,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
43872         txcmd_size += 36; /* biggest possible 802.11 header */
43874         /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
43875 -       if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align))
43876 -               return ERR_PTR(-EINVAL);
43878 -#ifdef CONFIG_LOCKDEP
43879 -       lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
43880 -                        &__key, 0);
43881 -#endif
43883 -       trans->dev = dev;
43884 -       trans->ops = ops;
43885 -       trans->num_rx_queues = 1;
43886 +       if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align))
43887 +               return -EINVAL;
43889         if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
43890                 trans->txqs.bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl);
43891 @@ -68,23 +88,16 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
43892          * allocate here.
43893          */
43894         if (trans->trans_cfg->gen2) {
43895 -               trans->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", dev,
43896 +               trans->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", trans->dev,
43897                                                        trans->txqs.bc_tbl_size,
43898                                                        256, 0);
43899                 if (!trans->txqs.bc_pool)
43900 -                       return NULL;
43901 +                       return -ENOMEM;
43902         }
43904 -       if (trans->trans_cfg->use_tfh) {
43905 -               trans->txqs.tfd.addr_size = 64;
43906 -               trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
43907 -               trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
43908 -       } else {
43909 -               trans->txqs.tfd.addr_size = 36;
43910 -               trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
43911 -               trans->txqs.tfd.size = sizeof(struct iwl_tfd);
43912 -       }
43913 -       trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans);
43914 +       /* Some things must not change even if the config does */
43915 +       WARN_ON(trans->txqs.tfd.addr_size !=
43916 +               (trans->trans_cfg->use_tfh ? 64 : 36));
43918         snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
43919                  "iwl_cmd_pool:%s", dev_name(trans->dev));
43920 @@ -93,35 +106,35 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
43921                                   txcmd_size, txcmd_align,
43922                                   SLAB_HWCACHE_ALIGN, NULL);
43923         if (!trans->dev_cmd_pool)
43924 -               return NULL;
43926 -       WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);
43927 +               return -ENOMEM;
43929         trans->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
43930         if (!trans->txqs.tso_hdr_page) {
43931                 kmem_cache_destroy(trans->dev_cmd_pool);
43932 -               return NULL;
43933 +               return -ENOMEM;
43934         }
43936         /* Initialize the wait queue for commands */
43937         init_waitqueue_head(&trans->wait_command_queue);
43939 -       return trans;
43940 +       return 0;
43943  void iwl_trans_free(struct iwl_trans *trans)
43945         int i;
43947 -       for_each_possible_cpu(i) {
43948 -               struct iwl_tso_hdr_page *p =
43949 -                       per_cpu_ptr(trans->txqs.tso_hdr_page, i);
43950 +       if (trans->txqs.tso_hdr_page) {
43951 +               for_each_possible_cpu(i) {
43952 +                       struct iwl_tso_hdr_page *p =
43953 +                               per_cpu_ptr(trans->txqs.tso_hdr_page, i);
43955 -               if (p->page)
43956 -                       __free_page(p->page);
43957 -       }
43958 +                       if (p && p->page)
43959 +                               __free_page(p->page);
43960 +               }
43962 -       free_percpu(trans->txqs.tso_hdr_page);
43963 +               free_percpu(trans->txqs.tso_hdr_page);
43964 +       }
43966         kmem_cache_destroy(trans->dev_cmd_pool);
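The iwl-trans restructuring above splits construction in two: `iwl_trans_alloc()` keeps only the allocation and config-independent setup, while the new `iwl_trans_init()` creates the pools and caches that depend on the final configuration and reports failures as error codes rather than NULL. A toy two-phase constructor showing the shape, with fields and sizes invented:

#include <stdio.h>
#include <stdlib.h>

struct trans {
    int family;      /* invariant: known at alloc time */
    void *cmd_pool;  /* config-dependent: created at init time */
};

/* Phase 1: allocate and record only what can never change. */
static struct trans *trans_alloc(int family)
{
    struct trans *t = calloc(1, sizeof(*t));

    if (t)
        t->family = family;
    return t;
}

/* Phase 2: after the caller has settled the configuration, create the
 * resources that depend on it; report errors as codes, not NULL. */
static int trans_init(struct trans *t, size_t cmd_size)
{
    t->cmd_pool = malloc(cmd_size);
    return t->cmd_pool ? 0 : -12 /* -ENOMEM */;
}

int main(void)
{
    struct trans *t = trans_alloc(9000);

    if (!t)
        return 1;
    /* ... probe logic may adjust the config between the phases ... */
    if (trans_init(t, 128)) {
        free(t);
        return 1;
    }
    puts("transport ready");
    free(t->cmd_pool);
    free(t);
    return 0;
}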
43968 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
43969 index 4a5822c1be13..3e0df6fbb642 100644
43970 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
43971 +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
43972 @@ -1438,6 +1438,7 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
43973                           struct device *dev,
43974                           const struct iwl_trans_ops *ops,
43975                           const struct iwl_cfg_trans_params *cfg_trans);
43976 +int iwl_trans_init(struct iwl_trans *trans);
43977  void iwl_trans_free(struct iwl_trans *trans);
43979  /*****************************************************
43980 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
43981 index 8772b65c9dab..2d58cb969918 100644
43982 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
43983 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
43984 @@ -1,7 +1,7 @@
43985  // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
43986  /*
43987   * Copyright (C) 2017 Intel Deutschland GmbH
43988 - * Copyright (C) 2018-2020 Intel Corporation
43989 + * Copyright (C) 2018-2021 Intel Corporation
43990   */
43991  #include "rs.h"
43992  #include "fw-api.h"
43993 @@ -72,19 +72,15 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
43994         bool vht_ena = vht_cap->vht_supported;
43995         u16 flags = 0;
43997 +       /* get STBC flags */
43998         if (mvm->cfg->ht_params->stbc &&
43999             (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1)) {
44000 -               if (he_cap->has_he) {
44001 -                       if (he_cap->he_cap_elem.phy_cap_info[2] &
44002 -                           IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
44003 -                               flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
44005 -                       if (he_cap->he_cap_elem.phy_cap_info[7] &
44006 -                           IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ)
44007 -                               flags |= IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK;
44008 -               } else if ((ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) ||
44009 -                          (vht_ena &&
44010 -                           (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)))
44011 +               if (he_cap->has_he && he_cap->he_cap_elem.phy_cap_info[2] &
44012 +                                     IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
44013 +                       flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
44014 +               else if (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
44015 +                       flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
44016 +               else if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)
44017                         flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
44018         }
44020 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
44021 index 558a0b2ef0fc..66faf7914bd8 100644
44022 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
44023 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
44024 @@ -17,10 +17,20 @@
44025  #include "iwl-prph.h"
44026  #include "internal.h"
44028 +#define TRANS_CFG_MARKER BIT(0)
44029 +#define _IS_A(cfg, _struct) __builtin_types_compatible_p(typeof(cfg),  \
44030 +                                                        struct _struct)
44031 +extern int _invalid_type;
44032 +#define _TRANS_CFG_MARKER(cfg)                                         \
44033 +       (__builtin_choose_expr(_IS_A(cfg, iwl_cfg_trans_params),        \
44034 +                              TRANS_CFG_MARKER,                        \
44035 +        __builtin_choose_expr(_IS_A(cfg, iwl_cfg), 0, _invalid_type)))
44036 +#define _ASSIGN_CFG(cfg) (_TRANS_CFG_MARKER(cfg) + (kernel_ulong_t)&(cfg))
44038  #define IWL_PCI_DEVICE(dev, subdev, cfg) \
44039         .vendor = PCI_VENDOR_ID_INTEL,  .device = (dev), \
44040         .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
44041 -       .driver_data = (kernel_ulong_t)&(cfg)
44042 +       .driver_data = _ASSIGN_CFG(cfg)
44044  /* Hardware specific file defines the PCI IDs table for that hardware module */
44045  static const struct pci_device_id iwl_hw_card_ids[] = {
44046 @@ -1075,19 +1085,22 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
44048  static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
44050 -       const struct iwl_cfg_trans_params *trans =
44051 -               (struct iwl_cfg_trans_params *)(ent->driver_data);
44052 +       const struct iwl_cfg_trans_params *trans;
44053         const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
44054         struct iwl_trans *iwl_trans;
44055         struct iwl_trans_pcie *trans_pcie;
44056         int i, ret;
44057 +       const struct iwl_cfg *cfg;
44059 +       trans = (void *)(ent->driver_data & ~TRANS_CFG_MARKER);
44061         /*
44062          * This is needed for backwards compatibility with the old
44063          * tables, so we don't need to change all the config structs
44064          * at the same time.  The cfg is used to compare with the old
44065          * full cfg structs.
44066          */
44067 -       const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
44068 +       cfg = (void *)(ent->driver_data & ~TRANS_CFG_MARKER);
44070         /* make sure trans is the first element in iwl_cfg */
44071         BUILD_BUG_ON(offsetof(struct iwl_cfg, trans));
44072 @@ -1202,11 +1215,19 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
44074  #endif
44075         /*
44076 -        * If we didn't set the cfg yet, assume the trans is actually
44077 -        * a full cfg from the old tables.
44078 +        * If we didn't set the cfg yet, the PCI ID table entry should have
44079 +        * been a full config - if yes, use it, otherwise fail.
44080          */
44081 -       if (!iwl_trans->cfg)
44082 +       if (!iwl_trans->cfg) {
44083 +               if (ent->driver_data & TRANS_CFG_MARKER) {
44084 +                       pr_err("No config found for PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n",
44085 +                              pdev->device, pdev->subsystem_device,
44086 +                              iwl_trans->hw_rev, iwl_trans->hw_rf_id);
44087 +                       ret = -EINVAL;
44088 +                       goto out_free_trans;
44089 +               }
44090                 iwl_trans->cfg = cfg;
44091 +       }
44093         /* if we don't have a name yet, copy name from the old cfg */
44094         if (!iwl_trans->name)
44095 @@ -1222,6 +1243,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
44096                 trans_pcie->num_rx_bufs = RX_QUEUE_SIZE;
44097         }
44099 +       ret = iwl_trans_init(iwl_trans);
44100 +       if (ret)
44101 +               goto out_free_trans;
44103         pci_set_drvdata(pdev, iwl_trans);
44104         iwl_trans->drv = iwl_drv_start(iwl_trans);
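The pcie/drv.c macros above tag `driver_data` with its type: bit 0 of the stored pointer marks a `struct iwl_cfg_trans_params` entry, and the probe path strips the marker before dereferencing, treating untagged entries as full configs. This works because the config structs are at least word-aligned, so bit 0 of their address is always free. A sketch of low-bit pointer tagging with invented types:

#include <stdint.h>
#include <stdio.h>

#define TRANS_CFG_MARKER ((uintptr_t)1) /* low bit marks "trans params" */

struct trans_params { int gen; };
struct full_cfg     { struct trans_params trans; const char *name; };

static struct full_cfg cfg0 = { { 2 }, "full-config" };

int main(void)
{
    /* Structs are word-aligned, so bit 0 of their address is zero and
     * can carry a type tag; the patch relies on the same property. */
    uintptr_t tagged = (uintptr_t)&cfg0;        /* full cfg: no marker */
    uintptr_t ptr = tagged & ~TRANS_CFG_MARKER; /* strip tag before use */

    if (tagged & TRANS_CFG_MARKER)
        printf("entry holds only trans params\n");
    else
        printf("entry holds a full cfg: %s\n",
               ((struct full_cfg *)ptr)->name);
    return 0;
}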
44106 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
44107 index 94ffc1ae484d..af9412bd697e 100644
44108 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
44109 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
44110 @@ -1,7 +1,7 @@
44111  // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
44112  /*
44113   * Copyright (C) 2017 Intel Deutschland GmbH
44114 - * Copyright (C) 2018-2020 Intel Corporation
44115 + * Copyright (C) 2018-2021 Intel Corporation
44116   */
44117  #include "iwl-trans.h"
44118  #include "iwl-prph.h"
44119 @@ -143,7 +143,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
44120         if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
44121                 IWL_DEBUG_INFO(trans,
44122                                "DEVICE_ENABLED bit was set and is now cleared\n");
44123 -               iwl_txq_gen2_tx_stop(trans);
44124 +               iwl_txq_gen2_tx_free(trans);
44125                 iwl_pcie_rx_stop(trans);
44126         }
44128 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
44129 index 4456abb9a074..34bde8c87324 100644
44130 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
44131 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
44132 @@ -40,6 +40,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
44133         const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
44134         u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
44135         struct iwl_tfh_tfd *tfd;
44136 +       unsigned long flags;
44138         copy_size = sizeof(struct iwl_cmd_header_wide);
44139         cmd_size = sizeof(struct iwl_cmd_header_wide);
44140 @@ -108,14 +109,14 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
44141                 goto free_dup_buf;
44142         }
44144 -       spin_lock_bh(&txq->lock);
44145 +       spin_lock_irqsave(&txq->lock, flags);
44147         idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
44148         tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
44149         memset(tfd, 0, sizeof(*tfd));
44151         if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
44152 -               spin_unlock_bh(&txq->lock);
44153 +               spin_unlock_irqrestore(&txq->lock, flags);
44155                 IWL_ERR(trans, "No space in command queue\n");
44156                 iwl_op_mode_cmd_queue_full(trans->op_mode);
44157 @@ -250,7 +251,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
44158         spin_unlock(&trans_pcie->reg_lock);
44160  out:
44161 -       spin_unlock_bh(&txq->lock);
44162 +       spin_unlock_irqrestore(&txq->lock, flags);
44163  free_dup_buf:
44164         if (idx < 0)
44165                 kfree(dup_buf);
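The tx-gen2 hunk upgrades the command-queue lock from `spin_lock_bh()` to `spin_lock_irqsave()`: the queue can now be entered from contexts where interrupts must be masked, and irqsave/irqrestore preserves whatever interrupt state the caller had. As a loose userspace analogue only, blocking async signals around the critical section and restoring the previous mask plays the role of saving and restoring IRQ flags:

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void enqueue_cmd(void)
{
    sigset_t all, saved;

    sigfillset(&all);
    pthread_sigmask(SIG_BLOCK, &all, &saved); /* "disable interrupts" */
    pthread_mutex_lock(&lock);

    /* ... touch the command queue; no handler can reenter here ... */
    puts("command queued");

    pthread_mutex_unlock(&lock);
    pthread_sigmask(SIG_SETMASK, &saved, NULL); /* "restore flags" */
}

int main(void)
{
    enqueue_cmd();
    return 0;
}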
44166 diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
44167 index 833f43d1ca7a..810dcb3df242 100644
44168 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
44169 +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
44170 @@ -13,30 +13,6 @@
44171  #include "iwl-scd.h"
44172  #include <linux/dmapool.h>
44175 - * iwl_txq_gen2_tx_stop - Stop all Tx DMA channels
44176 - */
44177 -void iwl_txq_gen2_tx_stop(struct iwl_trans *trans)
44179 -       int txq_id;
44181 -       /*
44182 -        * This function can be called before the op_mode disabled the
44183 -        * queues. This happens when we have an rfkill interrupt.
44184 -        * Since we stop Tx altogether - mark the queues as stopped.
44185 -        */
44186 -       memset(trans->txqs.queue_stopped, 0,
44187 -              sizeof(trans->txqs.queue_stopped));
44188 -       memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
44190 -       /* Unmap DMA from host system and free skb's */
44191 -       for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) {
44192 -               if (!trans->txqs.txq[txq_id])
44193 -                       continue;
44194 -               iwl_txq_gen2_unmap(trans, txq_id);
44195 -       }
44198  /*
44199   * iwl_txq_update_byte_tbl - Set up entry in Tx byte-count array
44200   */
44201 @@ -1189,6 +1165,12 @@ static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
44202                 goto error_free_resp;
44203         }
44205 +       if (WARN_ONCE(trans->txqs.txq[qid],
44206 +                     "queue %d already allocated\n", qid)) {
44207 +               ret = -EIO;
44208 +               goto error_free_resp;
44209 +       }
44211         txq->id = qid;
44212         trans->txqs.txq[qid] = txq;
44213         wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
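The queue/tx.c addition is a defensive guard: before installing a freshly allocated queue into `trans->txqs.txq[qid]`, it checks the slot is empty and fails with `-EIO` instead of silently overwriting, and thereby leaking, an existing queue. The same slot-install guard in miniature; array size is illustrative, though -EIO matches the patch:

#include <stdio.h>

#define MAX_QUEUES 8

static void *txq_slot[MAX_QUEUES];

static int install_txq(int qid, void *txq)
{
    if (qid < 0 || qid >= MAX_QUEUES)
        return -22; /* -EINVAL */
    if (txq_slot[qid]) {
        fprintf(stderr, "queue %d already allocated\n", qid);
        return -5;  /* -EIO: refuse rather than clobber */
    }
    txq_slot[qid] = txq;
    return 0;
}

int main(void)
{
    int dummy1, dummy2;

    printf("first:  %d\n", install_txq(3, &dummy1)); /* 0  */
    printf("second: %d\n", install_txq(3, &dummy2)); /* -5 */
    return 0;
}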
44214 diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
44215 index af1dbdf5617a..20efc62acf13 100644
44216 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h
44217 +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
44218 @@ -1,6 +1,6 @@
44219  /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
44220  /*
44221 - * Copyright (C) 2020 Intel Corporation
44222 + * Copyright (C) 2020-2021 Intel Corporation
44223   */
44224  #ifndef __iwl_trans_queue_tx_h__
44225  #define __iwl_trans_queue_tx_h__
44226 @@ -123,7 +123,6 @@ int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
44227  void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
44228  void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
44229  void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
44230 -void iwl_txq_gen2_tx_stop(struct iwl_trans *trans);
44231  void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
44232  int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
44233                  bool cmd_queue);
44234 diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
44235 index c9f8c056aa51..84b32a5f01ee 100644
44236 --- a/drivers/net/wireless/marvell/mwl8k.c
44237 +++ b/drivers/net/wireless/marvell/mwl8k.c
44238 @@ -1473,6 +1473,7 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
44239         if (txq->skb == NULL) {
44240                 dma_free_coherent(&priv->pdev->dev, size, txq->txd,
44241                                   txq->txd_dma);
44242 +               txq->txd = NULL;
44243                 return -ENOMEM;
44244         }
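The mwl8k one-liner clears `txq->txd` after freeing the descriptor ring on the init error path; later teardown frees `txd` whenever it is non-NULL, so leaving the stale pointer behind risked a double free. The pattern in miniature, with plain malloc/free in place of dma_alloc_coherent and a `fail_later` flag simulating the subsequent allocation failure:

#include <stdio.h>
#include <stdlib.h>

struct txq { void *txd; };

static void txq_destroy(struct txq *q)
{
    free(q->txd);   /* runs on generic teardown; NULL is a no-op */
    q->txd = NULL;
}

static int txq_init(struct txq *q, int fail_later)
{
    q->txd = malloc(256);
    if (!q->txd)
        return -1;
    if (fail_later) {
        free(q->txd);
        q->txd = NULL;  /* the fix: a stale pointer would be freed again */
        return -1;
    }
    return 0;
}

int main(void)
{
    struct txq q = { NULL };

    if (txq_init(&q, 1))
        fprintf(stderr, "init failed\n");
    txq_destroy(&q);    /* harmless now; double free before the fix */
    return 0;
}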
44246 diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
44247 index 2f27c43ad76d..7196fa9047e6 100644
44248 --- a/drivers/net/wireless/mediatek/mt76/dma.c
44249 +++ b/drivers/net/wireless/mediatek/mt76/dma.c
44250 @@ -309,7 +309,7 @@ static int
44251  mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
44252                           struct sk_buff *skb, u32 tx_info)
44254 -       struct mt76_queue_buf buf;
44255 +       struct mt76_queue_buf buf = {};
44256         dma_addr_t addr;
44258         if (q->queued + 1 >= q->ndesc - 1)
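The mt76 dma.c one-liner zero-initializes the on-stack descriptor (`struct mt76_queue_buf buf = {};`, the GNU spelling of `= {0}`) so any member the function does not assign explicitly reaches the hardware as zero rather than stack garbage. In miniature, with an invented struct:

#include <stdio.h>

struct queue_buf {
    unsigned long addr;
    unsigned short len;
    unsigned short flags;
};

int main(void)
{
    struct queue_buf buf = {0};  /* every member starts at zero */

    buf.addr = 0x1000;
    buf.len = 64;
    /* flags was never assigned, yet is deterministically 0 */
    printf("addr=%#lx len=%u flags=%u\n", buf.addr, buf.len, buf.flags);
    return 0;
}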
44259 diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
44260 index 8bf45497cfca..36a430f09f64 100644
44261 --- a/drivers/net/wireless/mediatek/mt76/mt76.h
44262 +++ b/drivers/net/wireless/mediatek/mt76/mt76.h
44263 @@ -222,6 +222,7 @@ struct mt76_wcid {
44265         u16 idx;
44266         u8 hw_key_idx;
44267 +       u8 hw_key_idx2;
44269         u8 sta:1;
44270         u8 ext_phy:1;
44271 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
44272 index 2eab23898c77..6dbaaf95ee38 100644
44273 --- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
44274 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
44275 @@ -86,6 +86,7 @@ static int mt7615_check_eeprom(struct mt76_dev *dev)
44276         switch (val) {
44277         case 0x7615:
44278         case 0x7622:
44279 +       case 0x7663:
44280                 return 0;
44281         default:
44282                 return -EINVAL;
44283 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
44284 index 59fdd0fc2ad4..8dccb589b756 100644
44285 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
44286 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
44287 @@ -690,7 +690,7 @@ mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp)
44289         int i;
44291 -       for (i = 1; i < txp->nbuf; i++)
44292 +       for (i = 0; i < txp->nbuf; i++)
44293                 dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
44294                                  le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
44296 @@ -966,6 +966,7 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
44297         struct mt7615_dev *dev = phy->dev;
44298         struct mt7615_rate_desc rd;
44299         u32 w5, w27, addr;
44300 +       u16 idx = sta->vif->mt76.omac_idx;
44302         if (!mt76_is_mmio(&dev->mt76)) {
44303                 mt7615_mac_queue_rate_update(phy, sta, probe_rate, rates);
44304 @@ -1017,7 +1018,10 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
44306         mt76_wr(dev, addr + 27 * 4, w27);
44308 -       mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
44309 +       idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
44310 +       addr = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx);
44312 +       mt76_set(dev, addr, MT_LPON_TCR_MODE); /* TSF read */
44313         sta->rate_set_tsf = mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0);
44314         sta->rate_set_tsf |= rd.rateset;
44316 @@ -1033,7 +1037,7 @@ EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);
44317  static int
44318  mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
44319                            struct ieee80211_key_conf *key,
44320 -                          enum mt7615_cipher_type cipher,
44321 +                          enum mt7615_cipher_type cipher, u16 cipher_mask,
44322                            enum set_key_cmd cmd)
44324         u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
44325 @@ -1050,22 +1054,22 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
44326                         memcpy(data + 16, key->key + 24, 8);
44327                         memcpy(data + 24, key->key + 16, 8);
44328                 } else {
44329 -                       if (cipher != MT_CIPHER_BIP_CMAC_128 && wcid->cipher)
44330 -                               memmove(data + 16, data, 16);
44331 -                       if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
44332 +                       if (cipher_mask == BIT(cipher))
44333                                 memcpy(data, key->key, key->keylen);
44334 -                       else if (cipher == MT_CIPHER_BIP_CMAC_128)
44335 +                       else if (cipher != MT_CIPHER_BIP_CMAC_128)
44336 +                               memcpy(data, key->key, 16);
44337 +                       if (cipher == MT_CIPHER_BIP_CMAC_128)
44338                                 memcpy(data + 16, key->key, 16);
44339                 }
44340         } else {
44341 -               if (wcid->cipher & ~BIT(cipher)) {
44342 -                       if (cipher != MT_CIPHER_BIP_CMAC_128)
44343 -                               memmove(data, data + 16, 16);
44344 +               if (cipher == MT_CIPHER_BIP_CMAC_128)
44345                         memset(data + 16, 0, 16);
44346 -               } else {
44347 +               else if (cipher_mask)
44348 +                       memset(data, 0, 16);
44349 +               if (!cipher_mask)
44350                         memset(data, 0, sizeof(data));
44351 -               }
44352         }
44354         mt76_wr_copy(dev, addr, data, sizeof(data));
44356         return 0;
44357 @@ -1073,7 +1077,7 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
44359  static int
44360  mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
44361 -                         enum mt7615_cipher_type cipher,
44362 +                         enum mt7615_cipher_type cipher, u16 cipher_mask,
44363                           int keyidx, enum set_key_cmd cmd)
44365         u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;
44366 @@ -1083,20 +1087,23 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
44368         w0 = mt76_rr(dev, addr);
44369         w1 = mt76_rr(dev, addr + 4);
44370 -       if (cmd == SET_KEY) {
44371 -               w0 |= MT_WTBL_W0_RX_KEY_VALID |
44372 -                     FIELD_PREP(MT_WTBL_W0_RX_IK_VALID,
44373 -                                cipher == MT_CIPHER_BIP_CMAC_128);
44374 -               if (cipher != MT_CIPHER_BIP_CMAC_128 ||
44375 -                   !wcid->cipher)
44376 -                       w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
44377 -       }  else {
44378 -               if (!(wcid->cipher & ~BIT(cipher)))
44379 -                       w0 &= ~(MT_WTBL_W0_RX_KEY_VALID |
44380 -                               MT_WTBL_W0_KEY_IDX);
44381 -               if (cipher == MT_CIPHER_BIP_CMAC_128)
44382 -                       w0 &= ~MT_WTBL_W0_RX_IK_VALID;
44384 +       if (cipher_mask)
44385 +               w0 |= MT_WTBL_W0_RX_KEY_VALID;
44386 +       else
44387 +               w0 &= ~(MT_WTBL_W0_RX_KEY_VALID | MT_WTBL_W0_KEY_IDX);
44388 +       if (cipher_mask & BIT(MT_CIPHER_BIP_CMAC_128))
44389 +               w0 |= MT_WTBL_W0_RX_IK_VALID;
44390 +       else
44391 +               w0 &= ~MT_WTBL_W0_RX_IK_VALID;
44393 +       if (cmd == SET_KEY &&
44394 +           (cipher != MT_CIPHER_BIP_CMAC_128 ||
44395 +            cipher_mask == BIT(cipher))) {
44396 +               w0 &= ~MT_WTBL_W0_KEY_IDX;
44397 +               w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
44398         }
44400         mt76_wr(dev, MT_WTBL_RICR0, w0);
44401         mt76_wr(dev, MT_WTBL_RICR1, w1);
44403 @@ -1109,24 +1116,25 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
44405  static void
44406  mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
44407 -                             enum mt7615_cipher_type cipher,
44408 +                             enum mt7615_cipher_type cipher, u16 cipher_mask,
44409                               enum set_key_cmd cmd)
44411         u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);
44413 -       if (cmd == SET_KEY) {
44414 -               if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
44415 -                       mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
44416 -                                FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher));
44417 -       } else {
44418 -               if (cipher != MT_CIPHER_BIP_CMAC_128 &&
44419 -                   wcid->cipher & BIT(MT_CIPHER_BIP_CMAC_128))
44420 -                       mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
44421 -                                FIELD_PREP(MT_WTBL_W2_KEY_TYPE,
44422 -                                           MT_CIPHER_BIP_CMAC_128));
44423 -               else if (!(wcid->cipher & ~BIT(cipher)))
44424 -                       mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
44425 +       if (!cipher_mask) {
44426 +               mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
44427 +               return;
44428         }
44430 +       if (cmd != SET_KEY)
44431 +               return;
44433 +       if (cipher == MT_CIPHER_BIP_CMAC_128 &&
44434 +           cipher_mask & ~BIT(MT_CIPHER_BIP_CMAC_128))
44435 +               return;
44437 +       mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
44438 +                FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher));
44441  int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
44442 @@ -1135,25 +1143,30 @@ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
44443                               enum set_key_cmd cmd)
44445         enum mt7615_cipher_type cipher;
44446 +       u16 cipher_mask = wcid->cipher;
44447         int err;
44449         cipher = mt7615_mac_get_cipher(key->cipher);
44450         if (cipher == MT_CIPHER_NONE)
44451                 return -EOPNOTSUPP;
44453 -       mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cmd);
44454 -       err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cmd);
44455 +       if (cmd == SET_KEY)
44456 +               cipher_mask |= BIT(cipher);
44457 +       else
44458 +               cipher_mask &= ~BIT(cipher);
44460 +       mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask, cmd);
44461 +       err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask,
44462 +                                        cmd);
44463         if (err < 0)
44464                 return err;
44466 -       err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx, cmd);
44467 +       err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, cipher_mask,
44468 +                                       key->keyidx, cmd);
44469         if (err < 0)
44470                 return err;
44472 -       if (cmd == SET_KEY)
44473 -               wcid->cipher |= BIT(cipher);
44474 -       else
44475 -               wcid->cipher &= ~BIT(cipher);
44476 +       wcid->cipher = cipher_mask;
44478         return 0;
44480 @@ -1821,10 +1834,8 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
44481         int i, aggr;
44482         u32 val, val2;
44484 -       memset(mib, 0, sizeof(*mib));
44486 -       mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
44487 -                                         MT_MIB_SDR3_FCS_ERR_MASK);
44488 +       mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
44489 +                                          MT_MIB_SDR3_FCS_ERR_MASK);
44491         val = mt76_get_field(dev, MT_MIB_SDR14(ext_phy),
44492                              MT_MIB_AMPDU_MPDU_COUNT);
44493 @@ -1837,24 +1848,16 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
44494         aggr = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
44495         for (i = 0; i < 4; i++) {
44496                 val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
44498 -               val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
44499 -               if (val2 > mib->ack_fail_cnt)
44500 -                       mib->ack_fail_cnt = val2;
44502 -               val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
44503 -               if (val2 > mib->ba_miss_cnt)
44504 -                       mib->ba_miss_cnt = val2;
44505 +               mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
44506 +               mib->ack_fail_cnt += FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK,
44507 +                                              val);
44509                 val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
44510 -               val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
44511 -               if (val2 > mib->rts_retries_cnt) {
44512 -                       mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
44513 -                       mib->rts_retries_cnt = val2;
44514 -               }
44515 +               mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
44516 +               mib->rts_retries_cnt += FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK,
44517 +                                                 val);
44519                 val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
44521                 dev->mt76.aggr_stats[aggr++] += val & 0xffff;
44522                 dev->mt76.aggr_stats[aggr++] += val >> 16;
44523         }
44524 @@ -1976,15 +1979,17 @@ void mt7615_dma_reset(struct mt7615_dev *dev)
44525         mt76_clear(dev, MT_WPDMA_GLO_CFG,
44526                    MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
44527                    MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
44529         usleep_range(1000, 2000);
44531 -       mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
44532         for (i = 0; i < __MT_TXQ_MAX; i++)
44533                 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
44535 -       mt76_for_each_q_rx(&dev->mt76, i) {
44536 +       for (i = 0; i < __MT_MCUQ_MAX; i++)
44537 +               mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);
44539 +       mt76_for_each_q_rx(&dev->mt76, i)
44540                 mt76_queue_rx_reset(dev, i);
44541 -       }
44543         mt76_set(dev, MT_WPDMA_GLO_CFG,
44544                  MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
44545 @@ -2000,8 +2005,12 @@ void mt7615_tx_token_put(struct mt7615_dev *dev)
44546         spin_lock_bh(&dev->token_lock);
44547         idr_for_each_entry(&dev->token, txwi, id) {
44548                 mt7615_txp_skb_unmap(&dev->mt76, txwi);
44549 -               if (txwi->skb)
44550 -                       dev_kfree_skb_any(txwi->skb);
44551 +               if (txwi->skb) {
44552 +                       struct ieee80211_hw *hw;
44554 +                       hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
44555 +                       ieee80211_free_txskb(hw, txwi->skb);
44556 +               }
44557                 mt76_put_txwi(&dev->mt76, txwi);
44558         }
44559         spin_unlock_bh(&dev->token_lock);
44560 @@ -2304,8 +2313,10 @@ void mt7615_coredump_work(struct work_struct *work)
44561                         break;
44563                 skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
44564 -               if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ)
44565 -                       break;
44566 +               if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
44567 +                       dev_kfree_skb(skb);
44568 +                       continue;
44569 +               }
44571                 memcpy(data, skb->data, skb->len);
44572                 data += skb->len;
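Among the mt7615/mac.c changes above, `mt7615_mac_update_mib_stats()` switches from overwriting the software MIB counters to accumulating into them with `+=`, which pairs with widening the fields to u32 in mt7615.h further down: the hardware registers report per-interval deltas, so plain assignment discarded everything but the last poll. A sketch of delta accumulation, with a fake hardware read returning canned deltas:

#include <stdio.h>

struct mib_stats { unsigned int fcs_err_cnt; };

/* Pretend hardware counter: clear-on-read, so each call yields the
 * delta since the previous poll. */
static unsigned int read_hw_fcs_err(void)
{
    static unsigned int deltas[] = { 3, 0, 7 };
    static int i;

    return deltas[i++ % 3];
}

int main(void)
{
    struct mib_stats mib = {0};

    for (int poll = 0; poll < 3; poll++) {
        mib.fcs_err_cnt += read_hw_fcs_err(); /* += not =, as in the fix */
        printf("after poll %d: fcs_err=%u\n", poll, mib.fcs_err_cnt);
    }
    return 0;
}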
44573 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
44574 index 25faf486d279..d334491667a4 100644
44575 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
44576 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
44577 @@ -217,8 +217,6 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
44578         ret = mt7615_mcu_add_dev_info(phy, vif, true);
44579         if (ret)
44580                 goto out;
44582 -       mt7615_mac_set_beacon_filter(phy, vif, true);
44583  out:
44584         mt7615_mutex_release(dev);
44586 @@ -244,7 +242,6 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
44588         mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
44590 -       mt7615_mac_set_beacon_filter(phy, vif, false);
44591         mt7615_mcu_add_dev_info(phy, vif, false);
44593         rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
44594 @@ -337,7 +334,8 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
44595         struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv :
44596                                   &mvif->sta;
44597         struct mt76_wcid *wcid = &msta->wcid;
44598 -       int idx = key->keyidx, err;
44599 +       int idx = key->keyidx, err = 0;
44600 +       u8 *wcid_keyidx = &wcid->hw_key_idx;
44602         /* The hardware does not support per-STA RX GTK, fallback
44603          * to software mode for these.
44604 @@ -352,6 +350,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
44605         /* fall back to sw encryption for unsupported ciphers */
44606         switch (key->cipher) {
44607         case WLAN_CIPHER_SUITE_AES_CMAC:
44608 +               wcid_keyidx = &wcid->hw_key_idx2;
44609                 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
44610                 break;
44611         case WLAN_CIPHER_SUITE_TKIP:
44612 @@ -369,12 +368,13 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
44614         mt7615_mutex_acquire(dev);
44616 -       if (cmd == SET_KEY) {
44617 -               key->hw_key_idx = wcid->idx;
44618 -               wcid->hw_key_idx = idx;
44619 -       } else if (idx == wcid->hw_key_idx) {
44620 -               wcid->hw_key_idx = -1;
44621 -       }
44622 +       if (cmd == SET_KEY)
44623 +               *wcid_keyidx = idx;
44624 +       else if (idx == *wcid_keyidx)
44625 +               *wcid_keyidx = -1;
44626 +       else
44627 +               goto out;
44629         mt76_wcid_key_setup(&dev->mt76, wcid,
44630                             cmd == SET_KEY ? key : NULL);
44632 @@ -383,6 +383,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
44633         else
44634                 err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
44636 +out:
44637         mt7615_mutex_release(dev);
44639         return err;
44640 @@ -544,6 +545,9 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
44641         if (changed & BSS_CHANGED_ARP_FILTER)
44642                 mt7615_mcu_update_arp_filter(hw, vif, info);
44644 +       if (changed & BSS_CHANGED_ASSOC)
44645 +               mt7615_mac_set_beacon_filter(phy, vif, info->assoc);
44647         mt7615_mutex_release(dev);
44650 @@ -803,26 +807,38 @@ mt7615_get_stats(struct ieee80211_hw *hw,
44651         struct mt7615_phy *phy = mt7615_hw_phy(hw);
44652         struct mib_stats *mib = &phy->mib;
44654 +       mt7615_mutex_acquire(phy->dev);
44656         stats->dot11RTSSuccessCount = mib->rts_cnt;
44657         stats->dot11RTSFailureCount = mib->rts_retries_cnt;
44658         stats->dot11FCSErrorCount = mib->fcs_err_cnt;
44659         stats->dot11ACKFailureCount = mib->ack_fail_cnt;
44661 +       memset(mib, 0, sizeof(*mib));
44663 +       mt7615_mutex_release(phy->dev);
44665         return 0;
44668  static u64
44669  mt7615_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
44671 +       struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
44672         struct mt7615_dev *dev = mt7615_hw_dev(hw);
44673         union {
44674                 u64 t64;
44675                 u32 t32[2];
44676         } tsf;
44677 +       u16 idx = mvif->mt76.omac_idx;
44678 +       u32 reg;
44680 +       idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
44681 +       reg = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx);
44683         mt7615_mutex_acquire(dev);
44685 -       mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
44686 +       mt76_set(dev, reg, MT_LPON_TCR_MODE); /* TSF read */
44687         tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0);
44688         tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1);
44690 @@ -835,18 +851,24 @@ static void
44691  mt7615_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
44692                u64 timestamp)
44694 +       struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
44695         struct mt7615_dev *dev = mt7615_hw_dev(hw);
44696         union {
44697                 u64 t64;
44698                 u32 t32[2];
44699         } tsf = { .t64 = timestamp, };
44700 +       u16 idx = mvif->mt76.omac_idx;
44701 +       u32 reg;
44703 +       idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
44704 +       reg = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx);
44706         mt7615_mutex_acquire(dev);
44708         mt76_wr(dev, MT_LPON_UTTR0, tsf.t32[0]);
44709         mt76_wr(dev, MT_LPON_UTTR1, tsf.t32[1]);
44710         /* TSF software overwrite */
44711 -       mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_WRITE);
44712 +       mt76_set(dev, reg, MT_LPON_TCR_WRITE);
44714         mt7615_mutex_release(dev);
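The `mt7615_get_stats()` hunk earlier in this file now takes the device mutex, copies the accumulated counters out, and zeroes them, turning the read into an atomic read-and-reset that matches the accumulate-on-poll side. The same idea in self-contained form, with a pthread mutex in place of mt7615_mutex_acquire/release:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mib_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int fcs_err_cnt;

static void mib_update(unsigned int delta)
{
    pthread_mutex_lock(&mib_lock);
    fcs_err_cnt += delta;               /* the poll side accumulates */
    pthread_mutex_unlock(&mib_lock);
}

static unsigned int mib_read_and_reset(void)
{
    unsigned int snapshot;

    pthread_mutex_lock(&mib_lock);
    snapshot = fcs_err_cnt;
    fcs_err_cnt = 0;        /* next read reports a fresh interval */
    pthread_mutex_unlock(&mib_lock);
    return snapshot;
}

int main(void)
{
    mib_update(5);
    mib_update(2);
    printf("stats: %u\n", mib_read_and_reset()); /* 7 */
    printf("stats: %u\n", mib_read_and_reset()); /* 0 */
    return 0;
}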
44716 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
44717 index 631596fc2f36..198e9025b681 100644
44718 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
44719 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
44720 @@ -291,12 +291,20 @@ static int mt7615_mcu_drv_pmctrl(struct mt7615_dev *dev)
44721         u32 addr;
44722         int err;
44724 -       addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST;
44725 +       if (is_mt7663(mdev)) {
44726 +               /* Clear firmware own via N9 eint */
44727 +               mt76_wr(dev, MT_PCIE_DOORBELL_PUSH, MT_CFG_LPCR_HOST_DRV_OWN);
44728 +               mt76_poll(dev, MT_CONN_ON_MISC, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000);
44730 +               addr = MT_CONN_HIF_ON_LPCTL;
44731 +       } else {
44732 +               addr = MT_CFG_LPCR_HOST;
44733 +       }
44735         mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
44737         mt7622_trigger_hif_int(dev, true);
44739 -       addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
44740         err = !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000);
44742         mt7622_trigger_hif_int(dev, false);
44743 @@ -1040,6 +1048,9 @@ mt7615_mcu_sta_ba(struct mt7615_dev *dev,
44745         wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
44746                                                   WTBL_SET, sta_wtbl, &skb);
44747 +       if (IS_ERR(wtbl_hdr))
44748 +               return PTR_ERR(wtbl_hdr);
44750         mt76_connac_mcu_wtbl_ba_tlv(&dev->mt76, skb, params, enable, tx,
44751                                     sta_wtbl, wtbl_hdr);
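The mcu.c fix adds the missing `IS_ERR()` check on `mt76_connac_mcu_alloc_wtbl_req()`'s return value before it is used: that helper returns an ERR_PTR-encoded errno rather than NULL on failure. A minimal userspace imitation of the ERR_PTR convention; the kernel's real macros live in linux/err.h, and these are simplified stand-ins:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095
#define IS_ERR(p)  ((uintptr_t)(p) >= (uintptr_t)-MAX_ERRNO)
#define PTR_ERR(p) ((long)(intptr_t)(p))
#define ERR_PTR(e) ((void *)(intptr_t)(e))

/* Hypothetical allocator: encodes errno in the pointer on failure. */
static void *alloc_wtbl_req(int fail)
{
    return fail ? ERR_PTR(-12) : malloc(16);
}

int main(void)
{
    void *hdr = alloc_wtbl_req(1);

    if (IS_ERR(hdr)) {            /* the guard the patch adds */
        printf("alloc failed: %ld\n", PTR_ERR(hdr));
        return 1;
    }
    free(hdr);
    return 0;
}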
44753 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
44754 index 491841bc6291..4bc0c379c579 100644
44755 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
44756 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
44757 @@ -133,11 +133,11 @@ struct mt7615_vif {
44758  };
44760  struct mib_stats {
44761 -       u16 ack_fail_cnt;
44762 -       u16 fcs_err_cnt;
44763 -       u16 rts_cnt;
44764 -       u16 rts_retries_cnt;
44765 -       u16 ba_miss_cnt;
44766 +       u32 ack_fail_cnt;
44767 +       u32 fcs_err_cnt;
44768 +       u32 rts_cnt;
44769 +       u32 rts_retries_cnt;
44770 +       u32 ba_miss_cnt;
44771         unsigned long aggr_per;
44772  };
44774 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
44775 index 72395925ddee..15b417d6d889 100644
44776 --- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
44777 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
44778 @@ -163,10 +163,9 @@ void mt7615_unregister_device(struct mt7615_dev *dev)
44779         mt76_unregister_device(&dev->mt76);
44780         if (mcu_running)
44781                 mt7615_mcu_exit(dev);
44782 -       mt7615_dma_cleanup(dev);
44784         mt7615_tx_token_put(dev);
44786 +       mt7615_dma_cleanup(dev);
44787         tasklet_disable(&dev->irq_tasklet);
44789         mt76_free_device(&dev->mt76);
44790 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
44791 index 6e5db015b32c..6e4710d3ddd3 100644
44792 --- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
44793 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
44794 @@ -447,9 +447,10 @@ enum mt7615_reg_base {
44796  #define MT_LPON(_n)                    ((dev)->reg_map[MT_LPON_BASE] + (_n))
44798 -#define MT_LPON_T0CR                   MT_LPON(0x010)
44799 -#define MT_LPON_T0CR_MODE              GENMASK(1, 0)
44800 -#define MT_LPON_T0CR_WRITE             BIT(0)
44801 +#define MT_LPON_TCR0(_n)               MT_LPON(0x010 + ((_n) * 4))
44802 +#define MT_LPON_TCR2(_n)               MT_LPON(0x0f8 + ((_n) - 2) * 4)
44803 +#define MT_LPON_TCR_MODE               GENMASK(1, 0)
44804 +#define MT_LPON_TCR_WRITE              BIT(0)
44806  #define MT_LPON_UTTR0                  MT_LPON(0x018)
44807  #define MT_LPON_UTTR1                  MT_LPON(0x01c)
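
The single MT_LPON_T0CR register becomes a per-BSSID timer control: slots 0-1 stay in the original block at 0x010, while slots 2 and up move to a second block at 0x0f8. A small sketch that prints the resulting offsets relative to the LPON base (assuming four hardware BSSID slots, as the clamp in the mac.c hunk suggests):

#include <stdio.h>

#define LPON_TCR0(n) (0x010 + ((n) * 4))
#define LPON_TCR2(n) (0x0f8 + ((n) - 2) * 4)

int main(void)
{
        for (int idx = 0; idx < 4; idx++) {
                unsigned int off = idx > 1 ? LPON_TCR2(idx) : LPON_TCR0(idx);

                printf("omac_idx %d -> TCR offset 0x%03x\n", idx, off);
        }
        return 0;
}
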
44808 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
44809 index 9fb506f2ace6..4393dd21ebbb 100644
44810 --- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
44811 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
44812 @@ -218,12 +218,15 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
44813         int qid, err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
44814         bool mcu = q == dev->q_mcu[MT_MCUQ_WM];
44815         struct mt76_sdio *sdio = &dev->sdio;
44816 +       u8 pad;
44818         qid = mcu ? ARRAY_SIZE(sdio->xmit_buf) - 1 : q->qid;
44819         while (q->first != q->head) {
44820                 struct mt76_queue_entry *e = &q->entry[q->first];
44821                 struct sk_buff *iter;
44823 +               smp_rmb();
44825                 if (!test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) {
44826                         __skb_put_zero(e->skb, 4);
44827                         err = __mt7663s_xmit_queue(dev, e->skb->data,
44828 @@ -234,7 +237,8 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
44829                         goto next;
44830                 }
44832 -               if (len + e->skb->len + 4 > MT76S_XMIT_BUF_SZ)
44833 +               pad = roundup(e->skb->len, 4) - e->skb->len;
44834 +               if (len + e->skb->len + pad + 4 > MT76S_XMIT_BUF_SZ)
44835                         break;
44837                 if (mt7663s_tx_pick_quota(sdio, mcu, e->buf_sz, &pse_sz,
44838 @@ -252,6 +256,11 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
44839                         len += iter->len;
44840                         nframes++;
44841                 }
44843 +               if (unlikely(pad)) {
44844 +                       memset(sdio->xmit_buf[qid] + len, 0, pad);
44845 +                       len += pad;
44846 +               }
44847  next:
44848                 q->first = (q->first + 1) % q->ndesc;
44849                 e->done = true;
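
Frames copied into xmit_buf must start 4-byte aligned; the hunk above reserves the padding in the length check and zero-fills it after the copy, so an unaligned frame can no longer push its successor past the buffer bound. The arithmetic, as a compilable user-space sketch (kernel roundup() reimplemented):

#include <stdio.h>

#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
        for (unsigned int len = 60; len <= 64; len++) {
                unsigned int pad = roundup(len, 4) - len;

                printf("skb len %u -> pad %u -> buffer use %u\n",
                       len, pad, len + pad);
        }
        return 0;
}
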
44850 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
44851 index 203256862dfd..f8d3673c2cae 100644
44852 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
44853 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
44854 @@ -67,6 +67,7 @@ static int mt7663_usb_sdio_set_rates(struct mt7615_dev *dev,
44855         struct mt7615_rate_desc *rate = &wrd->rate;
44856         struct mt7615_sta *sta = wrd->sta;
44857         u32 w5, w27, addr, val;
44858 +       u16 idx;
44860         lockdep_assert_held(&dev->mt76.mutex);
44862 @@ -118,7 +119,11 @@ static int mt7663_usb_sdio_set_rates(struct mt7615_dev *dev,
44864         sta->rate_probe = sta->rateset[rate->rateset].probe_rate.idx != -1;
44866 -       mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
44867 +       idx = sta->vif->mt76.omac_idx;
44868 +       idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
44869 +       addr = idx > 1 ? MT_LPON_TCR2(idx) : MT_LPON_TCR0(idx);
44871 +       mt76_set(dev, addr, MT_LPON_TCR_MODE); /* TSF read */
44872         val = mt76_rr(dev, MT_LPON_UTTR0);
44873         sta->rate_set_tsf = (val & ~BIT(0)) | rate->rateset;
44875 diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
44876 index 6cbccfb05f8b..cefd33b74a87 100644
44877 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
44878 +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
44879 @@ -833,6 +833,9 @@ int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
44880         wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, wcid,
44881                                                   WTBL_RESET_AND_SET,
44882                                                   sta_wtbl, &skb);
44883 +       if (IS_ERR(wtbl_hdr))
44884 +               return PTR_ERR(wtbl_hdr);
44886         if (enable) {
44887                 mt76_connac_mcu_wtbl_generic_tlv(dev, skb, vif, sta, sta_wtbl,
44888                                                  wtbl_hdr);
44889 @@ -946,6 +949,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
44891         switch (vif->type) {
44892         case NL80211_IFTYPE_MESH_POINT:
44893 +       case NL80211_IFTYPE_MONITOR:
44894         case NL80211_IFTYPE_AP:
44895                 basic_req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
44896                 break;
44897 @@ -1195,6 +1199,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
44898                         .center_chan = ieee80211_frequency_to_channel(freq1),
44899                         .center_chan2 = ieee80211_frequency_to_channel(freq2),
44900                         .tx_streams = hweight8(phy->antenna_mask),
44901 +                       .ht_op_info = 4, /* set HT 40M allowed */
44902                         .rx_streams = phy->chainmask,
44903                         .short_st = true,
44904                 },
44905 @@ -1287,6 +1292,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
44906         case NL80211_CHAN_WIDTH_20:
44907         default:
44908                 rlm_req.rlm.bw = CMD_CBW_20MHZ;
44909 +               rlm_req.rlm.ht_op_info = 0;
44910                 break;
44911         }
44913 @@ -1306,7 +1312,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
44915         struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
44916         struct cfg80211_scan_request *sreq = &scan_req->req;
44917 -       int n_ssids = 0, err, i, duration = MT76_CONNAC_SCAN_CHANNEL_TIME;
44918 +       int n_ssids = 0, err, i, duration;
44919         int ext_channels_num = max_t(int, sreq->n_channels - 32, 0);
44920         struct ieee80211_channel **scan_list = sreq->channels;
44921         struct mt76_dev *mdev = phy->dev;
44922 @@ -1343,6 +1349,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
44923         req->ssid_type_ext = n_ssids ? BIT(0) : 0;
44924         req->ssids_num = n_ssids;
44926 +       duration = is_mt7921(phy->dev) ? 0 : MT76_CONNAC_SCAN_CHANNEL_TIME;
44927         /* increase channel time for passive scan */
44928         if (!sreq->n_ssids)
44929                 duration *= 2;
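
A sketch of the resulting channel-time logic; passing 0 for mt7921 hands the choice to firmware (an assumption about MCU behaviour, not stated in the hunk), and the placeholder 60 stands in for MT76_CONNAC_SCAN_CHANNEL_TIME:

/* hypothetical helper mirroring the hunk above */
static int scan_channel_time(int is_mt7921, int n_ssids)
{
        int duration = is_mt7921 ? 0 : 60;      /* 60: placeholder */

        if (!n_ssids)           /* passive scan: dwell twice as long */
                duration *= 2;
        return duration;
}
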
44930 diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
44931 index ab671e21f882..02db5d66735d 100644
44932 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
44933 +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
44934 @@ -447,6 +447,10 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
44935             !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
44936                 return -EOPNOTSUPP;
44938 +       /* MT76x0 GTK offloading does not work with more than one VIF */
44939 +       if (is_mt76x0(dev) && !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
44940 +               return -EOPNOTSUPP;
44942         msta = sta ? (struct mt76x02_sta *)sta->drv_priv : NULL;
44943         wcid = msta ? &msta->wcid : &mvif->group_wcid;
44945 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
44946 index 77dcd71e49a5..2f706620686e 100644
44947 --- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
44948 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
44949 @@ -124,7 +124,7 @@ mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy,
44950                 range[i] = mt76_rr(dev, MT_MIB_ARNG(ext_phy, i));
44952         for (i = 0; i < ARRAY_SIZE(bound); i++)
44953 -               bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i) + 1;
44954 +               bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1;
44956         seq_printf(file, "\nPhy %d\n", ext_phy);
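
Each 32-bit AR range register packs four 8-bit boundaries, so range[i / 4] selects the register and i % 4 must select the field within it; the old code shifted by up to i * 8 = 56 bits and read garbage for i >= 4. A compilable sketch of the extraction (8-bit fields at n * 8, the layout the driver's MT_MIB_ARNCR_RANGE helper extracts):

#include <stdio.h>

#define ARNCR_RANGE(val, n) (((val) >> ((n) << 3)) & 0xff)

int main(void)
{
        unsigned int range[2] = { 0x44332211, 0x88776655 };

        for (int i = 0; i < 8; i++)
                printf("bound[%d] = 0x%02x\n", i,
                       ARNCR_RANGE(range[i / 4], i % 4));
        return 0;
}
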
44958 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
44959 index 660398ac53c2..738ecf8f4fa2 100644
44960 --- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
44961 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
44962 @@ -124,7 +124,7 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
44963                                    struct ieee80211_channel *chan,
44964                                    u8 chain_idx)
44966 -       int index;
44967 +       int index, target_power;
44968         bool tssi_on;
44970         if (chain_idx > 3)
44971 @@ -133,15 +133,22 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
44972         tssi_on = mt7915_tssi_enabled(dev, chan->band);
44974         if (chan->band == NL80211_BAND_2GHZ) {
44975 -               index = MT_EE_TX0_POWER_2G + chain_idx * 3 + !tssi_on;
44976 +               index = MT_EE_TX0_POWER_2G + chain_idx * 3;
44977 +               target_power = mt7915_eeprom_read(dev, index);
44979 +               if (!tssi_on)
44980 +                       target_power += mt7915_eeprom_read(dev, index + 1);
44981         } else {
44982 -               int group = tssi_on ?
44983 -                           mt7915_get_channel_group(chan->hw_value) : 8;
44984 +               int group = mt7915_get_channel_group(chan->hw_value);
44986 +               index = MT_EE_TX0_POWER_5G + chain_idx * 12;
44987 +               target_power = mt7915_eeprom_read(dev, index + group);
44989 -               index = MT_EE_TX0_POWER_5G + chain_idx * 12 + group;
44990 +               if (!tssi_on)
44991 +                       target_power += mt7915_eeprom_read(dev, index + 8);
44992         }
44994 -       return mt7915_eeprom_read(dev, index);
44995 +       return target_power;
44998  static const u8 sku_cck_delta_map[] = {
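
The old code baked !tssi_on into the byte index and returned whichever single EEPROM cell that landed on; now the base power is always read and, with TSSI off, the delta cell is added on top. A user-space model of the corrected 2 GHz path (ee stands in for the EEPROM image):

static int target_power_2g(const unsigned char *ee, unsigned int base,
                           unsigned int chain_idx, int tssi_on)
{
        unsigned int index = base + chain_idx * 3;      /* 3-byte stride */
        int power = ee[index];                          /* base power */

        if (!tssi_on)
                power += ee[index + 1];                 /* add delta */
        return power;
}
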
44999 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
45000 index ad4e5b95158b..c7d4268d860a 100644
45001 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
45002 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
45003 @@ -4,6 +4,7 @@
45004  #include <linux/etherdevice.h>
45005  #include "mt7915.h"
45006  #include "mac.h"
45007 +#include "mcu.h"
45008  #include "eeprom.h"
45010  #define CCK_RATE(_idx, _rate) {                                                \
45011 @@ -283,9 +284,50 @@ static void mt7915_init_work(struct work_struct *work)
45012         mt7915_register_ext_phy(dev);
45015 +static void mt7915_wfsys_reset(struct mt7915_dev *dev)
45017 +       u32 val = MT_TOP_PWR_KEY | MT_TOP_PWR_SW_PWR_ON | MT_TOP_PWR_PWR_ON;
45018 +       u32 reg = mt7915_reg_map_l1(dev, MT_TOP_MISC);
45020 +#define MT_MCU_DUMMY_RANDOM    GENMASK(15, 0)
45021 +#define MT_MCU_DUMMY_DEFAULT   GENMASK(31, 16)
45023 +       mt76_wr(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_RANDOM);
45025 +       /* change to software control */
45026 +       val |= MT_TOP_PWR_SW_RST;
45027 +       mt76_wr(dev, MT_TOP_PWR_CTRL, val);
45029 +       /* reset wfsys */
45030 +       val &= ~MT_TOP_PWR_SW_RST;
45031 +       mt76_wr(dev, MT_TOP_PWR_CTRL, val);
45033 +       /* release wfsys then mcu re-executes romcode */
45034 +       val |= MT_TOP_PWR_SW_RST;
45035 +       mt76_wr(dev, MT_TOP_PWR_CTRL, val);
45037 +       /* switch to hw control */
45038 +       val &= ~MT_TOP_PWR_SW_RST;
45039 +       val |= MT_TOP_PWR_HW_CTRL;
45040 +       mt76_wr(dev, MT_TOP_PWR_CTRL, val);
45042 +       /* check whether mcu resets to default */
45043 +       if (!mt76_poll_msec(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_DEFAULT,
45044 +                           MT_MCU_DUMMY_DEFAULT, 1000)) {
45045 +               dev_err(dev->mt76.dev, "wifi subsystem reset failure\n");
45046 +               return;
45047 +       }
45049 +       /* wfsys reset won't clear host registers */
45050 +       mt76_clear(dev, reg, MT_TOP_MISC_FW_STATE);
45052 +       msleep(100);
45055  static int mt7915_init_hardware(struct mt7915_dev *dev)
45057         int ret, idx;
45058 +       u32 val;
45060         mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
45062 @@ -295,6 +337,12 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
45064         dev->dbdc_support = !!(mt7915_l1_rr(dev, MT_HW_BOUND) & BIT(5));
45066 +       val = mt76_rr(dev, mt7915_reg_map_l1(dev, MT_TOP_MISC));
45068 +       /* If MCU was already running, it is likely in a bad state */
45069 +       if (FIELD_GET(MT_TOP_MISC_FW_STATE, val) > FW_STATE_FW_DOWNLOAD)
45070 +               mt7915_wfsys_reset(dev);
45072         ret = mt7915_dma_init(dev);
45073         if (ret)
45074                 return ret;
45075 @@ -308,8 +356,14 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
45076         mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
45078         ret = mt7915_mcu_init(dev);
45079 -       if (ret)
45080 -               return ret;
45081 +       if (ret) {
45082 +               /* Reset and try again */
45083 +               mt7915_wfsys_reset(dev);
45085 +               ret = mt7915_mcu_init(dev);
45086 +               if (ret)
45087 +                       return ret;
45088 +       }
45090         ret = mt7915_eeprom_init(dev);
45091         if (ret < 0)
45092 @@ -675,9 +729,8 @@ void mt7915_unregister_device(struct mt7915_dev *dev)
45093         mt7915_unregister_ext_phy(dev);
45094         mt76_unregister_device(&dev->mt76);
45095         mt7915_mcu_exit(dev);
45096 -       mt7915_dma_cleanup(dev);
45098         mt7915_tx_token_put(dev);
45099 +       mt7915_dma_cleanup(dev);
45101         mt76_free_device(&dev->mt76);
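
mt7915_wfsys_reset() proves the restart happened end-to-end: it scribbles a known pattern into a WFDMA scratch register, pulses MT_TOP_PWR_SW_RST, and then polls until the MCU's ROM code has put the register back to its default. A user-space sketch of the poll primitive this leans on (read_reg() is a hypothetical register accessor):

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

extern uint32_t read_reg(uint32_t addr);        /* hypothetical accessor */

/* wait up to timeout_ms for (reg & mask) == val, checking every 1 ms */
static bool poll_msec(uint32_t addr, uint32_t mask, uint32_t val,
                      int timeout_ms)
{
        while (timeout_ms-- > 0) {
                if ((read_reg(addr) & mask) == val)
                        return true;
                usleep(1000);
        }
        return false;
}
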
45103 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
45104 index e5a258958ac9..819670767521 100644
45105 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
45106 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
45107 @@ -1091,7 +1091,7 @@ void mt7915_txp_skb_unmap(struct mt76_dev *dev,
45108         int i;
45110         txp = mt7915_txwi_to_txp(dev, t);
45111 -       for (i = 1; i < txp->nbuf; i++)
45112 +       for (i = 0; i < txp->nbuf; i++)
45113                 dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
45114                                  le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
45116 @@ -1470,9 +1470,8 @@ mt7915_update_beacons(struct mt7915_dev *dev)
45119  static void
45120 -mt7915_dma_reset(struct mt7915_phy *phy)
45121 +mt7915_dma_reset(struct mt7915_dev *dev)
45123 -       struct mt7915_dev *dev = phy->dev;
45124         struct mt76_phy *mphy_ext = dev->mt76.phy2;
45125         u32 hif1_ofs = MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE;
45126         int i;
45127 @@ -1489,18 +1488,20 @@ mt7915_dma_reset(struct mt7915_phy *phy)
45128                            (MT_WFDMA1_GLO_CFG_TX_DMA_EN |
45129                             MT_WFDMA1_GLO_CFG_RX_DMA_EN));
45130         }
45132         usleep_range(1000, 2000);
45134 -       mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], true);
45135         for (i = 0; i < __MT_TXQ_MAX; i++) {
45136 -               mt76_queue_tx_cleanup(dev, phy->mt76->q_tx[i], true);
45137 +               mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
45138                 if (mphy_ext)
45139                         mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true);
45140         }
45142 -       mt76_for_each_q_rx(&dev->mt76, i) {
45143 +       for (i = 0; i < __MT_MCUQ_MAX; i++)
45144 +               mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);
45146 +       mt76_for_each_q_rx(&dev->mt76, i)
45147                 mt76_queue_rx_reset(dev, i);
45148 -       }
45150         /* re-init prefetch settings after reset */
45151         mt7915_dma_prefetch(dev);
45152 @@ -1584,7 +1585,7 @@ void mt7915_mac_reset_work(struct work_struct *work)
45153         idr_init(&dev->token);
45155         if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
45156 -               mt7915_dma_reset(&dev->phy);
45157 +               mt7915_dma_reset(dev);
45159                 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
45160                 mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
45161 @@ -1633,39 +1634,30 @@ mt7915_mac_update_mib_stats(struct mt7915_phy *phy)
45162         bool ext_phy = phy != &dev->phy;
45163         int i, aggr0, aggr1;
45165 -       memset(mib, 0, sizeof(*mib));
45167 -       mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
45168 -                                         MT_MIB_SDR3_FCS_ERR_MASK);
45169 +       mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
45170 +                                          MT_MIB_SDR3_FCS_ERR_MASK);
45172         aggr0 = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
45173         for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
45174 -               u32 val, val2;
45175 +               u32 val;
45177                 val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
45179 -               val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
45180 -               if (val2 > mib->ack_fail_cnt)
45181 -                       mib->ack_fail_cnt = val2;
45183 -               val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
45184 -               if (val2 > mib->ba_miss_cnt)
45185 -                       mib->ba_miss_cnt = val2;
45186 +               mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
45187 +               mib->ack_fail_cnt +=
45188 +                       FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
45190                 val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
45191 -               val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
45192 -               if (val2 > mib->rts_retries_cnt) {
45193 -                       mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
45194 -                       mib->rts_retries_cnt = val2;
45195 -               }
45196 +               mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
45197 +               mib->rts_retries_cnt +=
45198 +                       FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
45200                 val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
45201 -               val2 = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
45203                 dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
45204                 dev->mt76.aggr_stats[aggr0++] += val >> 16;
45205 -               dev->mt76.aggr_stats[aggr1++] += val2 & 0xffff;
45206 -               dev->mt76.aggr_stats[aggr1++] += val2 >> 16;
45208 +               val = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
45209 +               dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
45210 +               dev->mt76.aggr_stats[aggr1++] += val >> 16;
45211         }
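
The statistics switch from "largest value seen in any polling window" to a running total that get_stats() later reports and clears (see the main.c hunk below). A compilable sketch contrasting the two semantics on the same readings:

#include <stdio.h>

int main(void)
{
        unsigned int readings[] = { 120, 45, 300, 80 };
        unsigned int max = 0, sum = 0;

        for (int i = 0; i < 4; i++) {
                if (readings[i] > max)
                        max = readings[i];      /* old semantics */
                sum += readings[i];             /* new semantics */
        }
        printf("window max: %u, running total: %u\n", max, sum);
        return 0;
}
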
45214 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
45215 index d4969b2e1ffb..bf032d943f74 100644
45216 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
45217 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
45218 @@ -317,7 +317,9 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
45219         struct mt7915_sta *msta = sta ? (struct mt7915_sta *)sta->drv_priv :
45220                                   &mvif->sta;
45221         struct mt76_wcid *wcid = &msta->wcid;
45222 +       u8 *wcid_keyidx = &wcid->hw_key_idx;
45223         int idx = key->keyidx;
45224 +       int err = 0;
45226         /* The hardware does not support per-STA RX GTK, fallback
45227          * to software mode for these.
45228 @@ -332,6 +334,7 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
45229         /* fall back to sw encryption for unsupported ciphers */
45230         switch (key->cipher) {
45231         case WLAN_CIPHER_SUITE_AES_CMAC:
45232 +               wcid_keyidx = &wcid->hw_key_idx2;
45233                 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
45234                 break;
45235         case WLAN_CIPHER_SUITE_TKIP:
45236 @@ -347,16 +350,24 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
45237                 return -EOPNOTSUPP;
45238         }
45240 -       if (cmd == SET_KEY) {
45241 -               key->hw_key_idx = wcid->idx;
45242 -               wcid->hw_key_idx = idx;
45243 -       } else if (idx == wcid->hw_key_idx) {
45244 -               wcid->hw_key_idx = -1;
45245 -       }
45246 +       mutex_lock(&dev->mt76.mutex);
45248 +       if (cmd == SET_KEY)
45249 +               *wcid_keyidx = idx;
45250 +       else if (idx == *wcid_keyidx)
45251 +               *wcid_keyidx = -1;
45252 +       else
45253 +               goto out;
45255         mt76_wcid_key_setup(&dev->mt76, wcid,
45256                             cmd == SET_KEY ? key : NULL);
45258 -       return mt7915_mcu_add_key(dev, vif, msta, key, cmd);
45259 +       err = mt7915_mcu_add_key(dev, vif, msta, key, cmd);
45261 +out:
45262 +       mutex_unlock(&dev->mt76.mutex);
45264 +       return err;
45267  static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
45268 @@ -717,13 +728,19 @@ mt7915_get_stats(struct ieee80211_hw *hw,
45269                  struct ieee80211_low_level_stats *stats)
45271         struct mt7915_phy *phy = mt7915_hw_phy(hw);
45272 +       struct mt7915_dev *dev = mt7915_hw_dev(hw);
45273         struct mib_stats *mib = &phy->mib;
45275 +       mutex_lock(&dev->mt76.mutex);
45276         stats->dot11RTSSuccessCount = mib->rts_cnt;
45277         stats->dot11RTSFailureCount = mib->rts_retries_cnt;
45278         stats->dot11FCSErrorCount = mib->fcs_err_cnt;
45279         stats->dot11ACKFailureCount = mib->ack_fail_cnt;
45281 +       memset(mib, 0, sizeof(*mib));
45283 +       mutex_unlock(&dev->mt76.mutex);
45285         return 0;
45288 @@ -833,9 +850,12 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
45289         struct mt7915_phy *phy = mt7915_hw_phy(hw);
45290         struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
45291         struct mt7915_sta_stats *stats = &msta->stats;
45292 +       struct rate_info rxrate = {};
45294 -       if (mt7915_mcu_get_rx_rate(phy, vif, sta, &sinfo->rxrate) == 0)
45295 +       if (!mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
45296 +               sinfo->rxrate = rxrate;
45297                 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
45298 +       }
45300         if (!stats->tx_rate.legacy && !stats->tx_rate.flags)
45301                 return;
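
set_key() now updates the key index and calls into the MCU under dev->mt76.mutex, and get_stats() zeroes the accumulated counters after reporting them, so every call returns a fresh window. A user-space sketch of that snapshot-and-reset pattern with pthreads:

#include <pthread.h>
#include <string.h>

struct mib { unsigned long rts_cnt, fcs_err_cnt; };

static struct mib stats;
static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;

/* report totals since the previous call, then restart the window */
static void get_stats(struct mib *out)
{
        pthread_mutex_lock(&stats_lock);
        *out = stats;
        memset(&stats, 0, sizeof(stats));
        pthread_mutex_unlock(&stats_lock);
}
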
45302 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
45303 index 195929242b72..f069a5a03e14 100644
45304 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
45305 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
45306 @@ -351,54 +351,62 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
45307         dev->hw_pattern++;
45310 -static void
45311 +static int
45312  mt7915_mcu_tx_rate_parse(struct mt76_phy *mphy, struct mt7915_mcu_ra_info *ra,
45313                          struct rate_info *rate, u16 r)
45315         struct ieee80211_supported_band *sband;
45316         u16 ru_idx = le16_to_cpu(ra->ru_idx);
45317 -       u16 flags = 0;
45318 +       bool cck = false;
45320         rate->mcs = FIELD_GET(MT_RA_RATE_MCS, r);
45321         rate->nss = FIELD_GET(MT_RA_RATE_NSS, r) + 1;
45323         switch (FIELD_GET(MT_RA_RATE_TX_MODE, r)) {
45324         case MT_PHY_TYPE_CCK:
45325 +               cck = true;
45326 +               fallthrough;
45327         case MT_PHY_TYPE_OFDM:
45328                 if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
45329                         sband = &mphy->sband_5g.sband;
45330                 else
45331                         sband = &mphy->sband_2g.sband;
45333 +               rate->mcs = mt76_get_rate(mphy->dev, sband, rate->mcs, cck);
45334                 rate->legacy = sband->bitrates[rate->mcs].bitrate;
45335                 break;
45336         case MT_PHY_TYPE_HT:
45337         case MT_PHY_TYPE_HT_GF:
45338                 rate->mcs += (rate->nss - 1) * 8;
45339 -               flags |= RATE_INFO_FLAGS_MCS;
45340 +               if (rate->mcs > 31)
45341 +                       return -EINVAL;
45343 +               rate->flags = RATE_INFO_FLAGS_MCS;
45344                 if (ra->gi)
45345 -                       flags |= RATE_INFO_FLAGS_SHORT_GI;
45346 +                       rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
45347                 break;
45348         case MT_PHY_TYPE_VHT:
45349 -               flags |= RATE_INFO_FLAGS_VHT_MCS;
45350 +               if (rate->mcs > 9)
45351 +                       return -EINVAL;
45353 +               rate->flags = RATE_INFO_FLAGS_VHT_MCS;
45354                 if (ra->gi)
45355 -                       flags |= RATE_INFO_FLAGS_SHORT_GI;
45356 +                       rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
45357                 break;
45358         case MT_PHY_TYPE_HE_SU:
45359         case MT_PHY_TYPE_HE_EXT_SU:
45360         case MT_PHY_TYPE_HE_TB:
45361         case MT_PHY_TYPE_HE_MU:
45362 +               if (ra->gi > NL80211_RATE_INFO_HE_GI_3_2 || rate->mcs > 11)
45363 +                       return -EINVAL;
45365                 rate->he_gi = ra->gi;
45366                 rate->he_dcm = FIELD_GET(MT_RA_RATE_DCM_EN, r);
45368 -               flags |= RATE_INFO_FLAGS_HE_MCS;
45369 +               rate->flags = RATE_INFO_FLAGS_HE_MCS;
45370                 break;
45371         default:
45372 -               break;
45373 +               return -EINVAL;
45374         }
45375 -       rate->flags = flags;
45377         if (ru_idx) {
45378                 switch (ru_idx) {
45379 @@ -435,6 +443,8 @@ mt7915_mcu_tx_rate_parse(struct mt76_phy *mphy, struct mt7915_mcu_ra_info *ra,
45380                         break;
45381                 }
45382         }
45384 +       return 0;
45387  static void
45388 @@ -465,12 +475,12 @@ mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb)
45389                 mphy = dev->mt76.phy2;
45391         /* current rate */
45392 -       mt7915_mcu_tx_rate_parse(mphy, ra, &rate, curr);
45393 -       stats->tx_rate = rate;
45394 +       if (!mt7915_mcu_tx_rate_parse(mphy, ra, &rate, curr))
45395 +               stats->tx_rate = rate;
45397         /* probing rate */
45398 -       mt7915_mcu_tx_rate_parse(mphy, ra, &prob_rate, probe);
45399 -       stats->prob_rate = prob_rate;
45400 +       if (!mt7915_mcu_tx_rate_parse(mphy, ra, &prob_rate, probe))
45401 +               stats->prob_rate = prob_rate;
45403         if (attempts) {
45404                 u16 success = le16_to_cpu(ra->success);
45405 @@ -1188,6 +1198,9 @@ mt7915_mcu_sta_ba(struct mt7915_dev *dev,
45407         wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
45408                                              &skb);
45409 +       if (IS_ERR(wtbl_hdr))
45410 +               return PTR_ERR(wtbl_hdr);
45412         mt7915_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr);
45414         ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
45415 @@ -1704,6 +1717,9 @@ int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev,
45416                 return -ENOMEM;
45418         wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, NULL, &skb);
45419 +       if (IS_ERR(wtbl_hdr))
45420 +               return PTR_ERR(wtbl_hdr);
45422         mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, NULL, wtbl_hdr);
45424         return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD(WTBL_UPDATE),
45425 @@ -1728,6 +1744,9 @@ int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
45427         wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
45428                                              &skb);
45429 +       if (IS_ERR(wtbl_hdr))
45430 +               return PTR_ERR(wtbl_hdr);
45432         mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr);
45434         return mt76_mcu_skb_send_msg(&dev->mt76, skb,
45435 @@ -2253,6 +2272,9 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
45437         wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET,
45438                                              sta_wtbl, &skb);
45439 +       if (IS_ERR(wtbl_hdr))
45440 +               return PTR_ERR(wtbl_hdr);
45442         if (enable) {
45443                 mt7915_mcu_wtbl_generic_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr);
45444                 mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr);
45445 @@ -2742,21 +2764,8 @@ static int mt7915_load_ram(struct mt7915_dev *dev)
45447  static int mt7915_load_firmware(struct mt7915_dev *dev)
45449 +       u32 reg = mt7915_reg_map_l1(dev, MT_TOP_MISC);
45450         int ret;
45451 -       u32 val, reg = mt7915_reg_map_l1(dev, MT_TOP_MISC);
45453 -       val = FIELD_PREP(MT_TOP_MISC_FW_STATE, FW_STATE_FW_DOWNLOAD);
45455 -       if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE, val, 1000)) {
45456 -               /* restart firmware once */
45457 -               __mt76_mcu_restart(&dev->mt76);
45458 -               if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE,
45459 -                                   val, 1000)) {
45460 -                       dev_err(dev->mt76.dev,
45461 -                               "Firmware is not ready for download\n");
45462 -                       return -EIO;
45463 -               }
45464 -       }
45466         ret = mt7915_load_patch(dev);
45467         if (ret)
45468 @@ -3501,9 +3510,8 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
45469         struct ieee80211_supported_band *sband;
45470         struct mt7915_mcu_phy_rx_info *res;
45471         struct sk_buff *skb;
45472 -       u16 flags = 0;
45473         int ret;
45474 -       int i;
45475 +       bool cck = false;
45477         ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(PHY_STAT_INFO),
45478                                         &req, sizeof(req), true, &skb);
45479 @@ -3517,48 +3525,53 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
45481         switch (res->mode) {
45482         case MT_PHY_TYPE_CCK:
45483 +               cck = true;
45484 +               fallthrough;
45485         case MT_PHY_TYPE_OFDM:
45486                 if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
45487                         sband = &mphy->sband_5g.sband;
45488                 else
45489                         sband = &mphy->sband_2g.sband;
45491 -               for (i = 0; i < sband->n_bitrates; i++) {
45492 -                       if (rate->mcs != (sband->bitrates[i].hw_value & 0xf))
45493 -                               continue;
45495 -                       rate->legacy = sband->bitrates[i].bitrate;
45496 -                       break;
45497 -               }
45498 +               rate->mcs = mt76_get_rate(&dev->mt76, sband, rate->mcs, cck);
45499 +               rate->legacy = sband->bitrates[rate->mcs].bitrate;
45500                 break;
45501         case MT_PHY_TYPE_HT:
45502         case MT_PHY_TYPE_HT_GF:
45503 -               if (rate->mcs > 31)
45504 -                       return -EINVAL;
45506 -               flags |= RATE_INFO_FLAGS_MCS;
45507 +               if (rate->mcs > 31) {
45508 +                       ret = -EINVAL;
45509 +                       goto out;
45510 +               }
45512 +               rate->flags = RATE_INFO_FLAGS_MCS;
45513                 if (res->gi)
45514 -                       flags |= RATE_INFO_FLAGS_SHORT_GI;
45515 +                       rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
45516                 break;
45517         case MT_PHY_TYPE_VHT:
45518 -               flags |= RATE_INFO_FLAGS_VHT_MCS;
45519 +               if (rate->mcs > 9) {
45520 +                       ret = -EINVAL;
45521 +                       goto out;
45522 +               }
45524 +               rate->flags = RATE_INFO_FLAGS_VHT_MCS;
45525                 if (res->gi)
45526 -                       flags |= RATE_INFO_FLAGS_SHORT_GI;
45527 +                       rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
45528                 break;
45529         case MT_PHY_TYPE_HE_SU:
45530         case MT_PHY_TYPE_HE_EXT_SU:
45531         case MT_PHY_TYPE_HE_TB:
45532         case MT_PHY_TYPE_HE_MU:
45533 +               if (res->gi > NL80211_RATE_INFO_HE_GI_3_2 || rate->mcs > 11) {
45534 +                       ret = -EINVAL;
45535 +                       goto out;
45536 +               }
45537                 rate->he_gi = res->gi;
45539 -               flags |= RATE_INFO_FLAGS_HE_MCS;
45540 +               rate->flags = RATE_INFO_FLAGS_HE_MCS;
45541                 break;
45542         default:
45543 -               break;
45544 +               ret = -EINVAL;
45545 +               goto out;
45546         }
45547 -       rate->flags = flags;
45549         switch (res->bw) {
45550         case IEEE80211_STA_RX_BW_160:
45551 @@ -3575,7 +3588,8 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
45552                 break;
45553         }
45555 +out:
45556         dev_kfree_skb(skb);
45558 -       return 0;
45559 +       return ret;
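
Both the RA event parser and the rx-rate query now validate firmware-reported values before handing a rate_info to mac80211, returning -EINVAL instead of publishing nonsense. A sketch of the bounds enforced above:

#include <errno.h>

enum phy_mode { PHY_HT, PHY_VHT, PHY_HE };

static int check_mcs(enum phy_mode mode, unsigned int mcs)
{
        switch (mode) {
        case PHY_HT:            /* HT/HT-GF: MCS 0-31 */
                return mcs > 31 ? -EINVAL : 0;
        case PHY_VHT:           /* VHT: MCS 0-9 */
                return mcs > 9 ? -EINVAL : 0;
        case PHY_HE:            /* HE SU/MU/TB: MCS 0-11 */
                return mcs > 11 ? -EINVAL : 0;
        }
        return -EINVAL;
}
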
45561 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
45562 index 5c7eefdf2013..1160d1bf8a7c 100644
45563 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
45564 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
45565 @@ -108,11 +108,11 @@ struct mt7915_vif {
45566  };
45568  struct mib_stats {
45569 -       u16 ack_fail_cnt;
45570 -       u16 fcs_err_cnt;
45571 -       u16 rts_cnt;
45572 -       u16 rts_retries_cnt;
45573 -       u16 ba_miss_cnt;
45574 +       u32 ack_fail_cnt;
45575 +       u32 fcs_err_cnt;
45576 +       u32 rts_cnt;
45577 +       u32 rts_retries_cnt;
45578 +       u32 ba_miss_cnt;
45579  };
45581  struct mt7915_hif {
45582 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
45583 index ed0c9a24bb53..dfb8880657bf 100644
45584 --- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
45585 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
45586 @@ -4,6 +4,11 @@
45587  #ifndef __MT7915_REGS_H
45588  #define __MT7915_REGS_H
45590 +/* MCU WFDMA0 */
45591 +#define MT_MCU_WFDMA0_BASE             0x2000
45592 +#define MT_MCU_WFDMA0(ofs)             (MT_MCU_WFDMA0_BASE + (ofs))
45593 +#define MT_MCU_WFDMA0_DUMMY_CR         MT_MCU_WFDMA0(0x120)
45595  /* MCU WFDMA1 */
45596  #define MT_MCU_WFDMA1_BASE             0x3000
45597  #define MT_MCU_WFDMA1(ofs)             (MT_MCU_WFDMA1_BASE + (ofs))
45598 @@ -396,6 +401,14 @@
45599  #define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1      BIT(1)
45600  #define MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO       BIT(2)
45602 +#define MT_TOP_RGU_BASE                                0xf0000
45603 +#define MT_TOP_PWR_CTRL                                (MT_TOP_RGU_BASE + (0x0))
45604 +#define MT_TOP_PWR_KEY                         (0x5746 << 16)
45605 +#define MT_TOP_PWR_SW_RST                      BIT(0)
45606 +#define MT_TOP_PWR_SW_PWR_ON                   GENMASK(3, 2)
45607 +#define MT_TOP_PWR_HW_CTRL                     BIT(4)
45608 +#define MT_TOP_PWR_PWR_ON                      BIT(7)
45610  #define MT_INFRA_CFG_BASE              0xf1000
45611  #define MT_INFRA(ofs)                  (MT_INFRA_CFG_BASE + (ofs))
45613 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
45614 index 0dc8e25e18e4..87a7ea12f3b3 100644
45615 --- a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
45616 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
45617 @@ -9,10 +9,13 @@ mt7921_fw_debug_set(void *data, u64 val)
45619         struct mt7921_dev *dev = data;
45621 -       dev->fw_debug = (u8)val;
45622 +       mt7921_mutex_acquire(dev);
45624 +       dev->fw_debug = (u8)val;
45625         mt7921_mcu_fw_log_2_host(dev, dev->fw_debug);
45627 +       mt7921_mutex_release(dev);
45629         return 0;
45632 @@ -44,14 +47,13 @@ mt7921_ampdu_stat_read_phy(struct mt7921_phy *phy,
45633                 range[i] = mt76_rr(dev, MT_MIB_ARNG(0, i));
45635         for (i = 0; i < ARRAY_SIZE(bound); i++)
45636 -               bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i) + 1;
45637 +               bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1;
45639         seq_printf(file, "\nPhy0\n");
45641         seq_printf(file, "Length: %8d | ", bound[0]);
45642         for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
45643 -               seq_printf(file, "%3d -%3d | ",
45644 -                          bound[i] + 1, bound[i + 1]);
45645 +               seq_printf(file, "%3d  %3d | ", bound[i] + 1, bound[i + 1]);
45647         seq_puts(file, "\nCount:  ");
45648         for (i = 0; i < ARRAY_SIZE(bound); i++)
45649 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
45650 index 3f9097481a5e..a6d2a25b3495 100644
45651 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
45652 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
45653 @@ -400,7 +400,9 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
45655         /* RXD Group 3 - P-RXV */
45656         if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
45657 -               u32 v0, v1, v2;
45658 +               u8 stbc, gi;
45659 +               u32 v0, v1;
45660 +               bool cck;
45662                 rxv = rxd;
45663                 rxd += 2;
45664 @@ -409,7 +411,6 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
45666                 v0 = le32_to_cpu(rxv[0]);
45667                 v1 = le32_to_cpu(rxv[1]);
45668 -               v2 = le32_to_cpu(rxv[2]);
45670                 if (v0 & MT_PRXV_HT_AD_CODE)
45671                         status->enc_flags |= RX_ENC_FLAG_LDPC;
45672 @@ -429,87 +430,87 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
45673                                              status->chain_signal[i]);
45674                 }
45676 -               /* RXD Group 5 - C-RXV */
45677 -               if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
45678 -                       u8 stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
45679 -                       u8 gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
45680 -                       bool cck = false;
45681 +               stbc = FIELD_GET(MT_PRXV_STBC, v0);
45682 +               gi = FIELD_GET(MT_PRXV_SGI, v0);
45683 +               cck = false;
45685 -                       rxd += 18;
45686 -                       if ((u8 *)rxd - skb->data >= skb->len)
45687 -                               return -EINVAL;
45688 +               idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
45689 +               mode = FIELD_GET(MT_PRXV_TX_MODE, v0);
45691 -                       idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
45692 -                       mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
45694 -                       switch (mode) {
45695 -                       case MT_PHY_TYPE_CCK:
45696 -                               cck = true;
45697 -                               fallthrough;
45698 -                       case MT_PHY_TYPE_OFDM:
45699 -                               i = mt76_get_rate(&dev->mt76, sband, i, cck);
45700 -                               break;
45701 -                       case MT_PHY_TYPE_HT_GF:
45702 -                       case MT_PHY_TYPE_HT:
45703 -                               status->encoding = RX_ENC_HT;
45704 -                               if (i > 31)
45705 -                                       return -EINVAL;
45706 -                               break;
45707 -                       case MT_PHY_TYPE_VHT:
45708 -                               status->nss =
45709 -                                       FIELD_GET(MT_PRXV_NSTS, v0) + 1;
45710 -                               status->encoding = RX_ENC_VHT;
45711 -                               if (i > 9)
45712 -                                       return -EINVAL;
45713 -                               break;
45714 -                       case MT_PHY_TYPE_HE_MU:
45715 -                               status->flag |= RX_FLAG_RADIOTAP_HE_MU;
45716 -                               fallthrough;
45717 -                       case MT_PHY_TYPE_HE_SU:
45718 -                       case MT_PHY_TYPE_HE_EXT_SU:
45719 -                       case MT_PHY_TYPE_HE_TB:
45720 -                               status->nss =
45721 -                                       FIELD_GET(MT_PRXV_NSTS, v0) + 1;
45722 -                               status->encoding = RX_ENC_HE;
45723 -                               status->flag |= RX_FLAG_RADIOTAP_HE;
45724 -                               i &= GENMASK(3, 0);
45726 -                               if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
45727 -                                       status->he_gi = gi;
45729 -                               status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
45730 -                               break;
45731 -                       default:
45732 +               switch (mode) {
45733 +               case MT_PHY_TYPE_CCK:
45734 +                       cck = true;
45735 +                       fallthrough;
45736 +               case MT_PHY_TYPE_OFDM:
45737 +                       i = mt76_get_rate(&dev->mt76, sband, i, cck);
45738 +                       break;
45739 +               case MT_PHY_TYPE_HT_GF:
45740 +               case MT_PHY_TYPE_HT:
45741 +                       status->encoding = RX_ENC_HT;
45742 +                       if (i > 31)
45743                                 return -EINVAL;
45744 -                       }
45745 -                       status->rate_idx = i;
45747 -                       switch (FIELD_GET(MT_CRXV_FRAME_MODE, v2)) {
45748 -                       case IEEE80211_STA_RX_BW_20:
45749 -                               break;
45750 -                       case IEEE80211_STA_RX_BW_40:
45751 -                               if (mode & MT_PHY_TYPE_HE_EXT_SU &&
45752 -                                   (idx & MT_PRXV_TX_ER_SU_106T)) {
45753 -                                       status->bw = RATE_INFO_BW_HE_RU;
45754 -                                       status->he_ru =
45755 -                                               NL80211_RATE_INFO_HE_RU_ALLOC_106;
45756 -                               } else {
45757 -                                       status->bw = RATE_INFO_BW_40;
45758 -                               }
45759 -                               break;
45760 -                       case IEEE80211_STA_RX_BW_80:
45761 -                               status->bw = RATE_INFO_BW_80;
45762 -                               break;
45763 -                       case IEEE80211_STA_RX_BW_160:
45764 -                               status->bw = RATE_INFO_BW_160;
45765 -                               break;
45766 -                       default:
45767 +                       break;
45768 +               case MT_PHY_TYPE_VHT:
45769 +                       status->nss =
45770 +                               FIELD_GET(MT_PRXV_NSTS, v0) + 1;
45771 +                       status->encoding = RX_ENC_VHT;
45772 +                       if (i > 9)
45773                                 return -EINVAL;
45774 +                       break;
45775 +               case MT_PHY_TYPE_HE_MU:
45776 +                       status->flag |= RX_FLAG_RADIOTAP_HE_MU;
45777 +                       fallthrough;
45778 +               case MT_PHY_TYPE_HE_SU:
45779 +               case MT_PHY_TYPE_HE_EXT_SU:
45780 +               case MT_PHY_TYPE_HE_TB:
45781 +                       status->nss =
45782 +                               FIELD_GET(MT_PRXV_NSTS, v0) + 1;
45783 +                       status->encoding = RX_ENC_HE;
45784 +                       status->flag |= RX_FLAG_RADIOTAP_HE;
45785 +                       i &= GENMASK(3, 0);
45787 +                       if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
45788 +                               status->he_gi = gi;
45790 +                       status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
45791 +                       break;
45792 +               default:
45793 +                       return -EINVAL;
45794 +               }
45796 +               status->rate_idx = i;
45798 +               switch (FIELD_GET(MT_PRXV_FRAME_MODE, v0)) {
45799 +               case IEEE80211_STA_RX_BW_20:
45800 +                       break;
45801 +               case IEEE80211_STA_RX_BW_40:
45802 +                       if (mode & MT_PHY_TYPE_HE_EXT_SU &&
45803 +                           (idx & MT_PRXV_TX_ER_SU_106T)) {
45804 +                               status->bw = RATE_INFO_BW_HE_RU;
45805 +                               status->he_ru =
45806 +                                       NL80211_RATE_INFO_HE_RU_ALLOC_106;
45807 +                       } else {
45808 +                               status->bw = RATE_INFO_BW_40;
45809                         }
45810 +                       break;
45811 +               case IEEE80211_STA_RX_BW_80:
45812 +                       status->bw = RATE_INFO_BW_80;
45813 +                       break;
45814 +               case IEEE80211_STA_RX_BW_160:
45815 +                       status->bw = RATE_INFO_BW_160;
45816 +                       break;
45817 +               default:
45818 +                       return -EINVAL;
45819 +               }
45821 -                       status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
45822 -                       if (mode < MT_PHY_TYPE_HE_SU && gi)
45823 -                               status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
45824 +               status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
45825 +               if (mode < MT_PHY_TYPE_HE_SU && gi)
45826 +                       status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
45828 +               if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
45829 +                       rxd += 18;
45830 +                       if ((u8 *)rxd - skb->data >= skb->len)
45831 +                               return -EINVAL;
45832                 }
45833         }
45835 @@ -1317,31 +1318,20 @@ mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
45836         struct mib_stats *mib = &phy->mib;
45837         int i, aggr0 = 0, aggr1;
45839 -       memset(mib, 0, sizeof(*mib));
45841 -       mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(0),
45842 -                                         MT_MIB_SDR3_FCS_ERR_MASK);
45843 +       mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(0),
45844 +                                          MT_MIB_SDR3_FCS_ERR_MASK);
45845 +       mib->ack_fail_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR3(0),
45846 +                                           MT_MIB_ACK_FAIL_COUNT_MASK);
45847 +       mib->ba_miss_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR2(0),
45848 +                                          MT_MIB_BA_FAIL_COUNT_MASK);
45849 +       mib->rts_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR0(0),
45850 +                                      MT_MIB_RTS_COUNT_MASK);
45851 +       mib->rts_retries_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR1(0),
45852 +                                              MT_MIB_RTS_FAIL_COUNT_MASK);
45854         for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
45855                 u32 val, val2;
45857 -               val = mt76_rr(dev, MT_MIB_MB_SDR1(0, i));
45859 -               val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
45860 -               if (val2 > mib->ack_fail_cnt)
45861 -                       mib->ack_fail_cnt = val2;
45863 -               val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
45864 -               if (val2 > mib->ba_miss_cnt)
45865 -                       mib->ba_miss_cnt = val2;
45867 -               val = mt76_rr(dev, MT_MIB_MB_SDR0(0, i));
45868 -               val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
45869 -               if (val2 > mib->rts_retries_cnt) {
45870 -                       mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
45871 -                       mib->rts_retries_cnt = val2;
45872 -               }
45874                 val = mt76_rr(dev, MT_TX_AGG_CNT(0, i));
45875                 val2 = mt76_rr(dev, MT_TX_AGG_CNT2(0, i));
45877 @@ -1503,8 +1493,10 @@ void mt7921_coredump_work(struct work_struct *work)
45878                         break;
45880                 skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
45881 -               if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ)
45882 -                       break;
45883 +               if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
45884 +                       dev_kfree_skb(skb);
45885 +                       continue;
45886 +               }
45888                 memcpy(data, skb->data, skb->len);
45889                 data += skb->len;
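
Breaking out on the first oversized fragment leaked that skb and abandoned the rest of the queue; the fix frees the fragment and keeps draining. A compilable sketch of the control-flow difference, with made-up fragment sizes:

#include <stdio.h>
#include <string.h>

struct frag { const char *data; size_t len; };

int main(void)
{
        struct frag frags[] = {
                { "aaaa", 4 }, { "waytoolongfrag", 14 }, { "bb", 2 },
        };
        char dump[8];
        size_t used = 0;

        for (size_t i = 0; i < sizeof(frags) / sizeof(frags[0]); i++) {
                if (used + frags[i].len > sizeof(dump))
                        continue;       /* drop it, keep draining */
                memcpy(dump + used, frags[i].data, frags[i].len);
                used += frags[i].len;
        }
        printf("captured %zu bytes\n", used);   /* 6, not 4 */
        return 0;
}
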
45890 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
45891 index a0c1fa0f20e4..109c8849d106 100644
45892 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
45893 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
45894 @@ -97,18 +97,24 @@ enum rx_pkt_type {
45895  #define MT_RXD3_NORMAL_PF_MODE         BIT(29)
45896  #define MT_RXD3_NORMAL_PF_STS          GENMASK(31, 30)
45898 -/* P-RXV */
45899 +/* P-RXV DW0 */
45900  #define MT_PRXV_TX_RATE                        GENMASK(6, 0)
45901  #define MT_PRXV_TX_DCM                 BIT(4)
45902  #define MT_PRXV_TX_ER_SU_106T          BIT(5)
45903  #define MT_PRXV_NSTS                   GENMASK(9, 7)
45904  #define MT_PRXV_HT_AD_CODE             BIT(11)
45905 +#define MT_PRXV_FRAME_MODE             GENMASK(14, 12)
45906 +#define MT_PRXV_SGI                    GENMASK(16, 15)
45907 +#define MT_PRXV_STBC                   GENMASK(23, 22)
45908 +#define MT_PRXV_TX_MODE                        GENMASK(27, 24)
45909  #define MT_PRXV_HE_RU_ALLOC_L          GENMASK(31, 28)
45910 -#define MT_PRXV_HE_RU_ALLOC_H          GENMASK(3, 0)
45912 +/* P-RXV DW1 */
45913  #define MT_PRXV_RCPI3                  GENMASK(31, 24)
45914  #define MT_PRXV_RCPI2                  GENMASK(23, 16)
45915  #define MT_PRXV_RCPI1                  GENMASK(15, 8)
45916  #define MT_PRXV_RCPI0                  GENMASK(7, 0)
45917 +#define MT_PRXV_HE_RU_ALLOC_H          GENMASK(3, 0)
45919  /* C-RXV */
45920  #define MT_CRXV_HT_STBC                        GENMASK(1, 0)
45921 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
45922 index 729f6c42cdde..ada943c7a950 100644
45923 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
45924 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
45925 @@ -348,6 +348,7 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw,
45926         if (vif == phy->monitor_vif)
45927                 phy->monitor_vif = NULL;
45929 +       mt7921_mutex_acquire(dev);
45930         mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
45932         if (dev->pm.enable) {
45933 @@ -360,7 +361,6 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw,
45935         rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
45937 -       mt7921_mutex_acquire(dev);
45938         dev->mt76.vif_mask &= ~BIT(mvif->mt76.idx);
45939         phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
45940         mt7921_mutex_release(dev);
45941 @@ -413,7 +413,8 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
45942         struct mt7921_sta *msta = sta ? (struct mt7921_sta *)sta->drv_priv :
45943                                   &mvif->sta;
45944         struct mt76_wcid *wcid = &msta->wcid;
45945 -       int idx = key->keyidx;
45946 +       u8 *wcid_keyidx = &wcid->hw_key_idx;
45947 +       int idx = key->keyidx, err = 0;
45949         /* The hardware does not support per-STA RX GTK, fallback
45950          * to software mode for these.
45951 @@ -429,6 +430,7 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
45952         switch (key->cipher) {
45953         case WLAN_CIPHER_SUITE_AES_CMAC:
45954                 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
45955 +               wcid_keyidx = &wcid->hw_key_idx2;
45956                 break;
45957         case WLAN_CIPHER_SUITE_TKIP:
45958         case WLAN_CIPHER_SUITE_CCMP:
45959 @@ -443,16 +445,23 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
45960                 return -EOPNOTSUPP;
45961         }
45963 -       if (cmd == SET_KEY) {
45964 -               key->hw_key_idx = wcid->idx;
45965 -               wcid->hw_key_idx = idx;
45966 -       } else if (idx == wcid->hw_key_idx) {
45967 -               wcid->hw_key_idx = -1;
45968 -       }
45969 +       mt7921_mutex_acquire(dev);
45971 +       if (cmd == SET_KEY)
45972 +               *wcid_keyidx = idx;
45973 +       else if (idx == *wcid_keyidx)
45974 +               *wcid_keyidx = -1;
45975 +       else
45976 +               goto out;
45978         mt76_wcid_key_setup(&dev->mt76, wcid,
45979                             cmd == SET_KEY ? key : NULL);
45981 -       return mt7921_mcu_add_key(dev, vif, msta, key, cmd);
45982 +       err = mt7921_mcu_add_key(dev, vif, msta, key, cmd);
45983 +out:
45984 +       mt7921_mutex_release(dev);
45986 +       return err;
45989  static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
45990 @@ -587,6 +596,9 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
45991         if (changed & BSS_CHANGED_PS)
45992                 mt7921_mcu_uni_bss_ps(dev, vif);
45994 +       if (changed & BSS_CHANGED_ARP_FILTER)
45995 +               mt7921_mcu_update_arp_filter(hw, vif, info);
45997         mt7921_mutex_release(dev);
46000 @@ -814,11 +826,17 @@ mt7921_get_stats(struct ieee80211_hw *hw,
46001         struct mt7921_phy *phy = mt7921_hw_phy(hw);
46002         struct mib_stats *mib = &phy->mib;
46004 +       mt7921_mutex_acquire(phy->dev);
46006         stats->dot11RTSSuccessCount = mib->rts_cnt;
46007         stats->dot11RTSFailureCount = mib->rts_retries_cnt;
46008         stats->dot11FCSErrorCount = mib->fcs_err_cnt;
46009         stats->dot11ACKFailureCount = mib->ack_fail_cnt;
46011 +       memset(mib, 0, sizeof(*mib));
46013 +       mt7921_mutex_release(phy->dev);
46015         return 0;
46018 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
46019 index b5cc72e7e81c..62afbad77596 100644
46020 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
46021 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
46022 @@ -1304,3 +1304,47 @@ mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
46023                 mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
46024         }
46027 +int mt7921_mcu_update_arp_filter(struct ieee80211_hw *hw,
46028 +                                struct ieee80211_vif *vif,
46029 +                                struct ieee80211_bss_conf *info)
46031 +       struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
46032 +       struct mt7921_dev *dev = mt7921_hw_dev(hw);
46033 +       struct sk_buff *skb;
46034 +       int i, len = min_t(int, info->arp_addr_cnt,
46035 +                          IEEE80211_BSS_ARP_ADDR_LIST_LEN);
46036 +       struct {
46037 +               struct {
46038 +                       u8 bss_idx;
46039 +                       u8 pad[3];
46040 +               } __packed hdr;
46041 +               struct mt76_connac_arpns_tlv arp;
46042 +       } req_hdr = {
46043 +               .hdr = {
46044 +                       .bss_idx = mvif->mt76.idx,
46045 +               },
46046 +               .arp = {
46047 +                       .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP),
46048 +                       .len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)),
46049 +                       .ips_num = len,
46050 +                       .mode = 2,  /* update */
46051 +                       .option = 1,
46052 +               },
46053 +       };
46055 +       skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
46056 +                                sizeof(req_hdr) + len * sizeof(__be32));
46057 +       if (!skb)
46058 +               return -ENOMEM;
46060 +       skb_put_data(skb, &req_hdr, sizeof(req_hdr));
46061 +       for (i = 0; i < len; i++) {
46062 +               u8 *addr = (u8 *)skb_put(skb, sizeof(__be32));
46064 +               memcpy(addr, &info->arp_addr_list[i], sizeof(__be32));
46065 +       }
46067 +       return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_UNI_CMD_OFFLOAD,
46068 +                                    true);
46070 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
46071 index 46e6aeec35ae..25a1a6acb6ba 100644
46072 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
46073 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
46074 @@ -102,11 +102,11 @@ struct mt7921_vif {
46075  };
46077  struct mib_stats {
46078 -       u16 ack_fail_cnt;
46079 -       u16 fcs_err_cnt;
46080 -       u16 rts_cnt;
46081 -       u16 rts_retries_cnt;
46082 -       u16 ba_miss_cnt;
46083 +       u32 ack_fail_cnt;
46084 +       u32 fcs_err_cnt;
46085 +       u32 rts_cnt;
46086 +       u32 rts_retries_cnt;
46087 +       u32 ba_miss_cnt;
46088  };
46090  struct mt7921_phy {
46091 @@ -339,4 +339,7 @@ int mt7921_mac_set_beacon_filter(struct mt7921_phy *phy,
46092                                  bool enable);
46093  void mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif);
46094  void mt7921_coredump_work(struct work_struct *work);
46095 +int mt7921_mcu_update_arp_filter(struct ieee80211_hw *hw,
46096 +                                struct ieee80211_vif *vif,
46097 +                                struct ieee80211_bss_conf *info);
46098  #endif
46099 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
46100 index 5570b4a50531..80f6f29892a4 100644
46101 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
46102 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
46103 @@ -137,7 +137,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
46105         mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
46107 -       mt7921_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
46108 +       mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
46110         ret = devm_request_irq(mdev->dev, pdev->irq, mt7921_irq_handler,
46111                                IRQF_SHARED, KBUILD_MODNAME, dev);
46112 @@ -146,10 +146,12 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
46114         ret = mt7921_register_device(dev);
46115         if (ret)
46116 -               goto err_free_dev;
46117 +               goto err_free_irq;
46119         return 0;
46121 +err_free_irq:
46122 +       devm_free_irq(&pdev->dev, pdev->irq, dev);
46123  err_free_dev:
46124         mt76_free_device(&dev->mt76);
46125  err_free_pci_vec:
46126 @@ -193,7 +195,6 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
46127         mt76_for_each_q_rx(mdev, i) {
46128                 napi_disable(&mdev->napi[i]);
46129         }
46130 -       tasklet_kill(&dev->irq_tasklet);
46132         pci_enable_wake(pdev, pci_choose_state(pdev, state), true);
46134 @@ -208,13 +209,16 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
46136         /* disable interrupt */
46137         mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
46138 +       mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
46139 +       synchronize_irq(pdev->irq);
46140 +       tasklet_kill(&dev->irq_tasklet);
46142 -       pci_save_state(pdev);
46143 -       err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
46144 +       err = mt7921_mcu_fw_pmctrl(dev);
46145         if (err)
46146                 goto restore;
46148 -       err = mt7921_mcu_drv_pmctrl(dev);
46149 +       pci_save_state(pdev);
46150 +       err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
46151         if (err)
46152                 goto restore;
46154 @@ -237,18 +241,18 @@ static int mt7921_pci_resume(struct pci_dev *pdev)
46155         struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
46156         int i, err;
46158 -       err = mt7921_mcu_fw_pmctrl(dev);
46159 -       if (err < 0)
46160 -               return err;
46162         err = pci_set_power_state(pdev, PCI_D0);
46163         if (err)
46164                 return err;
46166         pci_restore_state(pdev);
46168 +       err = mt7921_mcu_drv_pmctrl(dev);
46169 +       if (err < 0)
46170 +               return err;
46172         /* enable interrupt */
46173 -       mt7921_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
46174 +       mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
46175         mt7921_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
46176                           MT_INT_MCU_CMD);
46178 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
46179 index 6dad7f6ab09d..73878d3e2495 100644
46180 --- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
46181 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
46182 @@ -96,8 +96,8 @@
46183  #define MT_WF_MIB_BASE(_band)          ((_band) ? 0xa4800 : 0x24800)
46184  #define MT_WF_MIB(_band, ofs)          (MT_WF_MIB_BASE(_band) + (ofs))
46186 -#define MT_MIB_SDR3(_band)             MT_WF_MIB(_band, 0x014)
46187 -#define MT_MIB_SDR3_FCS_ERR_MASK       GENMASK(15, 0)
46188 +#define MT_MIB_SDR3(_band)             MT_WF_MIB(_band, 0x698)
46189 +#define MT_MIB_SDR3_FCS_ERR_MASK       GENMASK(31, 16)
46191  #define MT_MIB_SDR9(_band)             MT_WF_MIB(_band, 0x02c)
46192  #define MT_MIB_SDR9_BUSY_MASK          GENMASK(23, 0)
46193 @@ -121,16 +121,21 @@
46194  #define MT_MIB_RTS_RETRIES_COUNT_MASK  GENMASK(31, 16)
46195  #define MT_MIB_RTS_COUNT_MASK          GENMASK(15, 0)
46197 -#define MT_MIB_MB_SDR1(_band, n)       MT_WF_MIB(_band, 0x104 + ((n) << 4))
46198 -#define MT_MIB_BA_MISS_COUNT_MASK      GENMASK(15, 0)
46199 -#define MT_MIB_ACK_FAIL_COUNT_MASK     GENMASK(31, 16)
46200 +#define MT_MIB_MB_BSDR0(_band)         MT_WF_MIB(_band, 0x688)
46201 +#define MT_MIB_RTS_COUNT_MASK          GENMASK(15, 0)
46202 +#define MT_MIB_MB_BSDR1(_band)         MT_WF_MIB(_band, 0x690)
46203 +#define MT_MIB_RTS_FAIL_COUNT_MASK     GENMASK(15, 0)
46204 +#define MT_MIB_MB_BSDR2(_band)         MT_WF_MIB(_band, 0x518)
46205 +#define MT_MIB_BA_FAIL_COUNT_MASK      GENMASK(15, 0)
46206 +#define MT_MIB_MB_BSDR3(_band)         MT_WF_MIB(_band, 0x520)
46207 +#define MT_MIB_ACK_FAIL_COUNT_MASK     GENMASK(15, 0)
46209  #define MT_MIB_MB_SDR2(_band, n)       MT_WF_MIB(_band, 0x108 + ((n) << 4))
46210  #define MT_MIB_FRAME_RETRIES_COUNT_MASK        GENMASK(15, 0)
46212 -#define MT_TX_AGG_CNT(_band, n)                MT_WF_MIB(_band, 0x0a8 + ((n) << 2))
46213 -#define MT_TX_AGG_CNT2(_band, n)       MT_WF_MIB(_band, 0x164 + ((n) << 2))
46214 -#define MT_MIB_ARNG(_band, n)          MT_WF_MIB(_band, 0x4b8 + ((n) << 2))
46215 +#define MT_TX_AGG_CNT(_band, n)                MT_WF_MIB(_band, 0x7dc + ((n) << 2))
46216 +#define MT_TX_AGG_CNT2(_band, n)       MT_WF_MIB(_band, 0x7ec + ((n) << 2))
46217 +#define MT_MIB_ARNG(_band, n)          MT_WF_MIB(_band, 0x0b0 + ((n) << 2))
46218  #define MT_MIB_ARNCR_RANGE(val, n)     (((val) >> ((n) << 3)) & GENMASK(7, 0))
46220  #define MT_WTBLON_TOP_BASE             0x34000
46221 @@ -357,11 +362,11 @@
46222  #define MT_INFRA_CFG_BASE              0xfe000
46223  #define MT_INFRA(ofs)                  (MT_INFRA_CFG_BASE + (ofs))
46225 -#define MT_HIF_REMAP_L1                        MT_INFRA(0x260)
46226 +#define MT_HIF_REMAP_L1                        MT_INFRA(0x24c)
46227  #define MT_HIF_REMAP_L1_MASK           GENMASK(15, 0)
46228  #define MT_HIF_REMAP_L1_OFFSET         GENMASK(15, 0)
46229  #define MT_HIF_REMAP_L1_BASE           GENMASK(31, 16)
46230 -#define MT_HIF_REMAP_BASE_L1           0xe0000
46231 +#define MT_HIF_REMAP_BASE_L1           0x40000
46233  #define MT_SWDEF_BASE                  0x41f200
46234  #define MT_SWDEF(ofs)                  (MT_SWDEF_BASE + (ofs))
46235 @@ -384,7 +389,7 @@
46236  #define MT_HW_CHIPID                   0x70010200
46237  #define MT_HW_REV                      0x70010204
46239 -#define MT_PCIE_MAC_BASE               0x74030000
46240 +#define MT_PCIE_MAC_BASE               0x10000
46241  #define MT_PCIE_MAC(ofs)               (MT_PCIE_MAC_BASE + (ofs))
46242  #define MT_PCIE_MAC_INT_ENABLE         MT_PCIE_MAC(0x188)
46244 diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
46245 index 0b6facb17ff7..a18d2896ee1f 100644
46246 --- a/drivers/net/wireless/mediatek/mt76/sdio.c
46247 +++ b/drivers/net/wireless/mediatek/mt76/sdio.c
46248 @@ -256,6 +256,9 @@ mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
46250         q->entry[q->head].skb = tx_info.skb;
46251         q->entry[q->head].buf_sz = len;
46253 +       smp_wmb();
46255         q->head = (q->head + 1) % q->ndesc;
46256         q->queued++;
46258 diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
46259 index b8fe8adc43a3..451ed60c6296 100644
46260 --- a/drivers/net/wireless/mediatek/mt76/tx.c
46261 +++ b/drivers/net/wireless/mediatek/mt76/tx.c
46262 @@ -461,11 +461,11 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
46263         int ret = 0;
46265         while (1) {
46266 +               int n_frames = 0;
46268                 if (test_bit(MT76_STATE_PM, &phy->state) ||
46269 -                   test_bit(MT76_RESET, &phy->state)) {
46270 -                       ret = -EBUSY;
46271 -                       break;
46272 -               }
46273 +                   test_bit(MT76_RESET, &phy->state))
46274 +                       return -EBUSY;
46276                 if (dev->queue_ops->tx_cleanup &&
46277                     q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
46278 @@ -497,11 +497,16 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
46279                 }
46281                 if (!mt76_txq_stopped(q))
46282 -                       ret += mt76_txq_send_burst(phy, q, mtxq);
46283 +                       n_frames = mt76_txq_send_burst(phy, q, mtxq);
46285                 spin_unlock_bh(&q->lock);
46287                 ieee80211_return_txq(phy->hw, txq, false);
46289 +               if (unlikely(n_frames < 0))
46290 +                       return n_frames;
46292 +               ret += n_frames;
46293         }
46295         return ret;
46296 diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
46297 index c868582c5d22..aa3b64902cf9 100644
46298 --- a/drivers/net/wireless/mediatek/mt7601u/eeprom.c
46299 +++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
46300 @@ -99,7 +99,7 @@ mt7601u_has_tssi(struct mt7601u_dev *dev, u8 *eeprom)
46302         u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
46304 -       return ~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN);
46305 +       return (u16)~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN);
46308  static void
46309 diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
46310 index 1b205e7d97a8..37f40039e4ca 100644
46311 --- a/drivers/net/wireless/microchip/wilc1000/netdev.c
46312 +++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
46313 @@ -575,7 +575,6 @@ static int wilc_mac_open(struct net_device *ndev)
46315         struct wilc_vif *vif = netdev_priv(ndev);
46316         struct wilc *wl = vif->wilc;
46317 -       unsigned char mac_add[ETH_ALEN] = {0};
46318         int ret = 0;
46319         struct mgmt_frame_regs mgmt_regs = {};
46321 @@ -598,9 +597,12 @@ static int wilc_mac_open(struct net_device *ndev)
46323         wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), vif->iftype,
46324                                 vif->idx);
46325 -       wilc_get_mac_address(vif, mac_add);
46326 -       netdev_dbg(ndev, "Mac address: %pM\n", mac_add);
46327 -       ether_addr_copy(ndev->dev_addr, mac_add);
46329 +       if (is_valid_ether_addr(ndev->dev_addr))
46330 +               wilc_set_mac_address(vif, ndev->dev_addr);
46331 +       else
46332 +               wilc_get_mac_address(vif, ndev->dev_addr);
46333 +       netdev_dbg(ndev, "Mac address: %pM\n", ndev->dev_addr);
46335         if (!is_valid_ether_addr(ndev->dev_addr)) {
46336                 netdev_err(ndev, "Wrong MAC address\n");
46337 @@ -639,7 +641,14 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
46338         int srcu_idx;
46340         if (!is_valid_ether_addr(addr->sa_data))
46341 -               return -EINVAL;
46342 +               return -EADDRNOTAVAIL;
46344 +       if (!vif->mac_opened) {
46345 +               eth_commit_mac_addr_change(dev, p);
46346 +               return 0;
46347 +       }
46349 +       /* Verify MAC Address is not already in use: */
46351         srcu_idx = srcu_read_lock(&wilc->srcu);
46352         list_for_each_entry_rcu(tmp_vif, &wilc->vif_list, list) {
46353 @@ -647,7 +656,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
46354                 if (ether_addr_equal(addr->sa_data, mac_addr)) {
46355                         if (vif != tmp_vif) {
46356                                 srcu_read_unlock(&wilc->srcu, srcu_idx);
46357 -                               return -EINVAL;
46358 +                               return -EADDRNOTAVAIL;
46359                         }
46360                         srcu_read_unlock(&wilc->srcu, srcu_idx);
46361                         return 0;
46362 @@ -659,9 +668,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
46363         if (result)
46364                 return result;
46366 -       ether_addr_copy(vif->bssid, addr->sa_data);
46367 -       ether_addr_copy(vif->ndev->dev_addr, addr->sa_data);
46369 +       eth_commit_mac_addr_change(dev, p);
46370         return result;
46373 diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
46374 index 351ff909ab1c..e14b9fc2c67a 100644
46375 --- a/drivers/net/wireless/microchip/wilc1000/sdio.c
46376 +++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
46377 @@ -947,7 +947,7 @@ static int wilc_sdio_sync_ext(struct wilc *wilc, int nint)
46378                         for (i = 0; (i < 3) && (nint > 0); i++, nint--)
46379                                 reg |= BIT(i);
46381 -                       ret = wilc_sdio_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
46382 +                       ret = wilc_sdio_write_reg(wilc, WILC_INTR2_ENABLE, reg);
46383                         if (ret) {
46384                                 dev_err(&func->dev,
46385                                         "Failed write reg (%08x)...\n",
46386 diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
46387 index c775c177933b..8dc80574d08d 100644
46388 --- a/drivers/net/wireless/quantenna/qtnfmac/event.c
46389 +++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
46390 @@ -570,8 +570,10 @@ qtnf_event_handle_external_auth(struct qtnf_vif *vif,
46391                 return 0;
46393         if (ev->ssid_len) {
46394 -               memcpy(auth.ssid.ssid, ev->ssid, ev->ssid_len);
46395 -               auth.ssid.ssid_len = ev->ssid_len;
46396 +               int len = clamp_val(ev->ssid_len, 0, IEEE80211_MAX_SSID_LEN);
46398 +               memcpy(auth.ssid.ssid, ev->ssid, len);
46399 +               auth.ssid.ssid_len = len;
46400         }
46402         auth.key_mgmt_suite = le32_to_cpu(ev->akm_suite);
46403 diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
46404 index 6e8bd99e8911..1866f6c2acab 100644
46405 --- a/drivers/net/wireless/realtek/rtlwifi/base.c
46406 +++ b/drivers/net/wireless/realtek/rtlwifi/base.c
46407 @@ -440,9 +440,14 @@ static void rtl_watchdog_wq_callback(struct work_struct *work);
46408  static void rtl_fwevt_wq_callback(struct work_struct *work);
46409  static void rtl_c2hcmd_wq_callback(struct work_struct *work);
46411 -static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
46412 +static int _rtl_init_deferred_work(struct ieee80211_hw *hw)
46414         struct rtl_priv *rtlpriv = rtl_priv(hw);
46415 +       struct workqueue_struct *wq;
46417 +       wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
46418 +       if (!wq)
46419 +               return -ENOMEM;
46421         /* <1> timer */
46422         timer_setup(&rtlpriv->works.watchdog_timer,
46423 @@ -451,11 +456,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
46424                     rtl_easy_concurrent_retrytimer_callback, 0);
46425         /* <2> work queue */
46426         rtlpriv->works.hw = hw;
46427 -       rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
46428 -       if (unlikely(!rtlpriv->works.rtl_wq)) {
46429 -               pr_err("Failed to allocate work queue\n");
46430 -               return;
46431 -       }
46432 +       rtlpriv->works.rtl_wq = wq;
46434         INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
46435                           rtl_watchdog_wq_callback);
46436 @@ -466,6 +467,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
46437                           rtl_swlps_rfon_wq_callback);
46438         INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq, rtl_fwevt_wq_callback);
46439         INIT_DELAYED_WORK(&rtlpriv->works.c2hcmd_wq, rtl_c2hcmd_wq_callback);
46440 +       return 0;
46443  void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq)
46444 @@ -565,9 +567,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
46445         rtlmac->link_state = MAC80211_NOLINK;
46447         /* <6> init deferred work */
46448 -       _rtl_init_deferred_work(hw);
46450 -       return 0;
46451 +       return _rtl_init_deferred_work(hw);
46453  EXPORT_SYMBOL_GPL(rtl_init_core);
46455 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
46456 index 27c8a5d96520..fcaaf664cbec 100644
46457 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
46458 +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
46459 @@ -249,7 +249,7 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = {
46460         0x824, 0x00030FE0,
46461         0x828, 0x00000000,
46462         0x82C, 0x002081DD,
46463 -       0x830, 0x2AAA8E24,
46464 +       0x830, 0x2AAAEEC8,
46465         0x834, 0x0037A706,
46466         0x838, 0x06489B44,
46467         0x83C, 0x0000095B,
46468 @@ -324,10 +324,10 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = {
46469         0x9D8, 0x00000000,
46470         0x9DC, 0x00000000,
46471         0x9E0, 0x00005D00,
46472 -       0x9E4, 0x00000002,
46473 +       0x9E4, 0x00000003,
46474         0x9E8, 0x00000001,
46475         0xA00, 0x00D047C8,
46476 -       0xA04, 0x01FF000C,
46477 +       0xA04, 0x01FF800C,
46478         0xA08, 0x8C8A8300,
46479         0xA0C, 0x2E68000F,
46480         0xA10, 0x9500BB78,
46481 @@ -1320,7 +1320,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46482                 0x083, 0x00021800,
46483                 0x084, 0x00028000,
46484                 0x085, 0x00048000,
46485 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
46486 +               0x086, 0x0009483A,
46487 +       0xA0000000,     0x00000000,
46488                 0x086, 0x00094838,
46489 +       0xB0000000,     0x00000000,
46490                 0x087, 0x00044980,
46491                 0x088, 0x00048000,
46492                 0x089, 0x0000D480,
46493 @@ -1409,36 +1413,32 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46494                 0x03C, 0x000CA000,
46495                 0x0EF, 0x00000000,
46496                 0x0EF, 0x00001100,
46497 -       0xFF0F0104, 0xABCD,
46498 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
46499                 0x034, 0x0004ADF3,
46500                 0x034, 0x00049DF0,
46501 -       0xFF0F0204, 0xCDEF,
46502 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
46503                 0x034, 0x0004ADF3,
46504                 0x034, 0x00049DF0,
46505 -       0xFF0F0404, 0xCDEF,
46506 -               0x034, 0x0004ADF3,
46507 -               0x034, 0x00049DF0,
46508 -       0xFF0F0200, 0xCDEF,
46509 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
46510                 0x034, 0x0004ADF5,
46511                 0x034, 0x00049DF2,
46512 -       0xFF0F02C0, 0xCDEF,
46513 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
46514 +               0x034, 0x0004A0F3,
46515 +               0x034, 0x000490B1,
46516 +               0x9000040c,     0x00000000,     0x40000000,     0x00000000,
46517                 0x034, 0x0004A0F3,
46518                 0x034, 0x000490B1,
46519 -       0xCDCDCDCD, 0xCDCD,
46520 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
46521 +               0x034, 0x0004ADF5,
46522 +               0x034, 0x00049DF2,
46523 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
46524 +               0x034, 0x0004ADF3,
46525 +               0x034, 0x00049DF0,
46526 +       0xA0000000,     0x00000000,
46527                 0x034, 0x0004ADF7,
46528                 0x034, 0x00049DF3,
46529 -       0xFF0F0104, 0xDEAD,
46530 -       0xFF0F0104, 0xABCD,
46531 -               0x034, 0x00048DED,
46532 -               0x034, 0x00047DEA,
46533 -               0x034, 0x00046DE7,
46534 -               0x034, 0x00045CE9,
46535 -               0x034, 0x00044CE6,
46536 -               0x034, 0x000438C6,
46537 -               0x034, 0x00042886,
46538 -               0x034, 0x00041486,
46539 -               0x034, 0x00040447,
46540 -       0xFF0F0204, 0xCDEF,
46541 +       0xB0000000,     0x00000000,
46542 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
46543                 0x034, 0x00048DED,
46544                 0x034, 0x00047DEA,
46545                 0x034, 0x00046DE7,
46546 @@ -1448,7 +1448,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46547                 0x034, 0x00042886,
46548                 0x034, 0x00041486,
46549                 0x034, 0x00040447,
46550 -       0xFF0F0404, 0xCDEF,
46551 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
46552                 0x034, 0x00048DED,
46553                 0x034, 0x00047DEA,
46554                 0x034, 0x00046DE7,
46555 @@ -1458,7 +1458,17 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46556                 0x034, 0x00042886,
46557                 0x034, 0x00041486,
46558                 0x034, 0x00040447,
46559 -       0xFF0F02C0, 0xCDEF,
46560 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
46561 +               0x034, 0x000480AE,
46562 +               0x034, 0x000470AB,
46563 +               0x034, 0x0004608B,
46564 +               0x034, 0x00045069,
46565 +               0x034, 0x00044048,
46566 +               0x034, 0x00043045,
46567 +               0x034, 0x00042026,
46568 +               0x034, 0x00041023,
46569 +               0x034, 0x00040002,
46570 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
46571                 0x034, 0x000480AE,
46572                 0x034, 0x000470AB,
46573                 0x034, 0x0004608B,
46574 @@ -1468,7 +1478,17 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46575                 0x034, 0x00042026,
46576                 0x034, 0x00041023,
46577                 0x034, 0x00040002,
46578 -       0xCDCDCDCD, 0xCDCD,
46579 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
46580 +               0x034, 0x00048DED,
46581 +               0x034, 0x00047DEA,
46582 +               0x034, 0x00046DE7,
46583 +               0x034, 0x00045CE9,
46584 +               0x034, 0x00044CE6,
46585 +               0x034, 0x000438C6,
46586 +               0x034, 0x00042886,
46587 +               0x034, 0x00041486,
46588 +               0x034, 0x00040447,
46589 +       0xA0000000,     0x00000000,
46590                 0x034, 0x00048DEF,
46591                 0x034, 0x00047DEC,
46592                 0x034, 0x00046DE9,
46593 @@ -1478,38 +1498,36 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46594                 0x034, 0x0004248A,
46595                 0x034, 0x0004108D,
46596                 0x034, 0x0004008A,
46597 -       0xFF0F0104, 0xDEAD,
46598 -       0xFF0F0200, 0xABCD,
46599 +       0xB0000000,     0x00000000,
46600 +       0x80000210,     0x00000000,     0x40000000,     0x00000000,
46601                 0x034, 0x0002ADF4,
46602 -       0xFF0F02C0, 0xCDEF,
46603 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
46604 +               0x034, 0x0002A0F3,
46605 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
46606                 0x034, 0x0002A0F3,
46607 -       0xCDCDCDCD, 0xCDCD,
46608 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
46609 +               0x034, 0x0002ADF4,
46610 +       0xA0000000,     0x00000000,
46611                 0x034, 0x0002ADF7,
46612 -       0xFF0F0200, 0xDEAD,
46613 -       0xFF0F0104, 0xABCD,
46614 -               0x034, 0x00029DF4,
46615 -       0xFF0F0204, 0xCDEF,
46616 +       0xB0000000,     0x00000000,
46617 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
46618                 0x034, 0x00029DF4,
46619 -       0xFF0F0404, 0xCDEF,
46620 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
46621                 0x034, 0x00029DF4,
46622 -       0xFF0F0200, 0xCDEF,
46623 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
46624                 0x034, 0x00029DF1,
46625 -       0xFF0F02C0, 0xCDEF,
46626 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
46627 +               0x034, 0x000290F0,
46628 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
46629                 0x034, 0x000290F0,
46630 -       0xCDCDCDCD, 0xCDCD,
46631 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
46632 +               0x034, 0x00029DF1,
46633 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
46634 +               0x034, 0x00029DF4,
46635 +       0xA0000000,     0x00000000,
46636                 0x034, 0x00029DF2,
46637 -       0xFF0F0104, 0xDEAD,
46638 -       0xFF0F0104, 0xABCD,
46639 -               0x034, 0x00028DF1,
46640 -               0x034, 0x00027DEE,
46641 -               0x034, 0x00026DEB,
46642 -               0x034, 0x00025CEC,
46643 -               0x034, 0x00024CE9,
46644 -               0x034, 0x000238CA,
46645 -               0x034, 0x00022889,
46646 -               0x034, 0x00021489,
46647 -               0x034, 0x0002044A,
46648 -       0xFF0F0204, 0xCDEF,
46649 +       0xB0000000,     0x00000000,
46650 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
46651                 0x034, 0x00028DF1,
46652                 0x034, 0x00027DEE,
46653                 0x034, 0x00026DEB,
46654 @@ -1519,7 +1537,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46655                 0x034, 0x00022889,
46656                 0x034, 0x00021489,
46657                 0x034, 0x0002044A,
46658 -       0xFF0F0404, 0xCDEF,
46659 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
46660                 0x034, 0x00028DF1,
46661                 0x034, 0x00027DEE,
46662                 0x034, 0x00026DEB,
46663 @@ -1529,7 +1547,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46664                 0x034, 0x00022889,
46665                 0x034, 0x00021489,
46666                 0x034, 0x0002044A,
46667 -       0xFF0F02C0, 0xCDEF,
46668 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
46669                 0x034, 0x000280AF,
46670                 0x034, 0x000270AC,
46671                 0x034, 0x0002608B,
46672 @@ -1539,7 +1557,27 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46673                 0x034, 0x00022026,
46674                 0x034, 0x00021023,
46675                 0x034, 0x00020002,
46676 -       0xCDCDCDCD, 0xCDCD,
46677 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
46678 +               0x034, 0x000280AF,
46679 +               0x034, 0x000270AC,
46680 +               0x034, 0x0002608B,
46681 +               0x034, 0x00025069,
46682 +               0x034, 0x00024048,
46683 +               0x034, 0x00023045,
46684 +               0x034, 0x00022026,
46685 +               0x034, 0x00021023,
46686 +               0x034, 0x00020002,
46687 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
46688 +               0x034, 0x00028DF1,
46689 +               0x034, 0x00027DEE,
46690 +               0x034, 0x00026DEB,
46691 +               0x034, 0x00025CEC,
46692 +               0x034, 0x00024CE9,
46693 +               0x034, 0x000238CA,
46694 +               0x034, 0x00022889,
46695 +               0x034, 0x00021489,
46696 +               0x034, 0x0002044A,
46697 +       0xA0000000,     0x00000000,
46698                 0x034, 0x00028DEE,
46699                 0x034, 0x00027DEB,
46700                 0x034, 0x00026CCD,
46701 @@ -1549,27 +1587,24 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46702                 0x034, 0x00022849,
46703                 0x034, 0x00021449,
46704                 0x034, 0x0002004D,
46705 -       0xFF0F0104, 0xDEAD,
46706 -       0xFF0F02C0, 0xABCD,
46707 +       0xB0000000,     0x00000000,
46708 +       0x8000020c,     0x00000000,     0x40000000,     0x00000000,
46709 +               0x034, 0x0000A0D7,
46710 +               0x034, 0x000090D3,
46711 +               0x034, 0x000080B1,
46712 +               0x034, 0x000070AE,
46713 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
46714                 0x034, 0x0000A0D7,
46715                 0x034, 0x000090D3,
46716                 0x034, 0x000080B1,
46717                 0x034, 0x000070AE,
46718 -       0xCDCDCDCD, 0xCDCD,
46719 +       0xA0000000,     0x00000000,
46720                 0x034, 0x0000ADF7,
46721                 0x034, 0x00009DF4,
46722                 0x034, 0x00008DF1,
46723                 0x034, 0x00007DEE,
46724 -       0xFF0F02C0, 0xDEAD,
46725 -       0xFF0F0104, 0xABCD,
46726 -               0x034, 0x00006DEB,
46727 -               0x034, 0x00005CEC,
46728 -               0x034, 0x00004CE9,
46729 -               0x034, 0x000038CA,
46730 -               0x034, 0x00002889,
46731 -               0x034, 0x00001489,
46732 -               0x034, 0x0000044A,
46733 -       0xFF0F0204, 0xCDEF,
46734 +       0xB0000000,     0x00000000,
46735 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
46736                 0x034, 0x00006DEB,
46737                 0x034, 0x00005CEC,
46738                 0x034, 0x00004CE9,
46739 @@ -1577,7 +1612,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46740                 0x034, 0x00002889,
46741                 0x034, 0x00001489,
46742                 0x034, 0x0000044A,
46743 -       0xFF0F0404, 0xCDEF,
46744 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
46745                 0x034, 0x00006DEB,
46746                 0x034, 0x00005CEC,
46747                 0x034, 0x00004CE9,
46748 @@ -1585,7 +1620,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46749                 0x034, 0x00002889,
46750                 0x034, 0x00001489,
46751                 0x034, 0x0000044A,
46752 -       0xFF0F02C0, 0xCDEF,
46753 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
46754                 0x034, 0x0000608D,
46755                 0x034, 0x0000506B,
46756                 0x034, 0x0000404A,
46757 @@ -1593,7 +1628,23 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46758                 0x034, 0x00002044,
46759                 0x034, 0x00001025,
46760                 0x034, 0x00000004,
46761 -       0xCDCDCDCD, 0xCDCD,
46762 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
46763 +               0x034, 0x0000608D,
46764 +               0x034, 0x0000506B,
46765 +               0x034, 0x0000404A,
46766 +               0x034, 0x00003047,
46767 +               0x034, 0x00002044,
46768 +               0x034, 0x00001025,
46769 +               0x034, 0x00000004,
46770 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
46771 +               0x034, 0x00006DEB,
46772 +               0x034, 0x00005CEC,
46773 +               0x034, 0x00004CE9,
46774 +               0x034, 0x000038CA,
46775 +               0x034, 0x00002889,
46776 +               0x034, 0x00001489,
46777 +               0x034, 0x0000044A,
46778 +       0xA0000000,     0x00000000,
46779                 0x034, 0x00006DCD,
46780                 0x034, 0x00005CCD,
46781                 0x034, 0x00004CCA,
46782 @@ -1601,11 +1652,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46783                 0x034, 0x00002888,
46784                 0x034, 0x00001488,
46785                 0x034, 0x00000486,
46786 -       0xFF0F0104, 0xDEAD,
46787 +       0xB0000000,     0x00000000,
46788                 0x0EF, 0x00000000,
46789                 0x018, 0x0001712A,
46790                 0x0EF, 0x00000040,
46791 -       0xFF0F0104, 0xABCD,
46792 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
46793                 0x035, 0x00000187,
46794                 0x035, 0x00008187,
46795                 0x035, 0x00010187,
46796 @@ -1615,7 +1666,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46797                 0x035, 0x00040188,
46798                 0x035, 0x00048188,
46799                 0x035, 0x00050188,
46800 -       0xFF0F0204, 0xCDEF,
46801 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
46802                 0x035, 0x00000187,
46803                 0x035, 0x00008187,
46804                 0x035, 0x00010187,
46805 @@ -1625,7 +1676,37 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46806                 0x035, 0x00040188,
46807                 0x035, 0x00048188,
46808                 0x035, 0x00050188,
46809 -       0xFF0F0404, 0xCDEF,
46810 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
46811 +               0x035, 0x00000128,
46812 +               0x035, 0x00008128,
46813 +               0x035, 0x00010128,
46814 +               0x035, 0x000201C8,
46815 +               0x035, 0x000281C8,
46816 +               0x035, 0x000301C8,
46817 +               0x035, 0x000401C8,
46818 +               0x035, 0x000481C8,
46819 +               0x035, 0x000501C8,
46820 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
46821 +               0x035, 0x00000145,
46822 +               0x035, 0x00008145,
46823 +               0x035, 0x00010145,
46824 +               0x035, 0x00020196,
46825 +               0x035, 0x00028196,
46826 +               0x035, 0x00030196,
46827 +               0x035, 0x000401C7,
46828 +               0x035, 0x000481C7,
46829 +               0x035, 0x000501C7,
46830 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
46831 +               0x035, 0x00000128,
46832 +               0x035, 0x00008128,
46833 +               0x035, 0x00010128,
46834 +               0x035, 0x000201C8,
46835 +               0x035, 0x000281C8,
46836 +               0x035, 0x000301C8,
46837 +               0x035, 0x000401C8,
46838 +               0x035, 0x000481C8,
46839 +               0x035, 0x000501C8,
46840 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
46841                 0x035, 0x00000187,
46842                 0x035, 0x00008187,
46843                 0x035, 0x00010187,
46844 @@ -1635,7 +1716,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46845                 0x035, 0x00040188,
46846                 0x035, 0x00048188,
46847                 0x035, 0x00050188,
46848 -       0xCDCDCDCD, 0xCDCD,
46849 +       0xA0000000,     0x00000000,
46850                 0x035, 0x00000145,
46851                 0x035, 0x00008145,
46852                 0x035, 0x00010145,
46853 @@ -1645,11 +1726,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46854                 0x035, 0x000401C7,
46855                 0x035, 0x000481C7,
46856                 0x035, 0x000501C7,
46857 -       0xFF0F0104, 0xDEAD,
46858 +       0xB0000000,     0x00000000,
46859                 0x0EF, 0x00000000,
46860                 0x018, 0x0001712A,
46861                 0x0EF, 0x00000010,
46862 -       0xFF0F0104, 0xABCD,
46863 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
46864                 0x036, 0x00085733,
46865                 0x036, 0x0008D733,
46866                 0x036, 0x00095733,
46867 @@ -1662,7 +1743,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46868                 0x036, 0x000CE4B4,
46869                 0x036, 0x000D64B4,
46870                 0x036, 0x000DE4B4,
46871 -       0xFF0F0204, 0xCDEF,
46872 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
46873                 0x036, 0x00085733,
46874                 0x036, 0x0008D733,
46875                 0x036, 0x00095733,
46876 @@ -1675,7 +1756,46 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46877                 0x036, 0x000CE4B4,
46878                 0x036, 0x000D64B4,
46879                 0x036, 0x000DE4B4,
46880 -       0xFF0F0404, 0xCDEF,
46881 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
46882 +               0x036, 0x000063B5,
46883 +               0x036, 0x0000E3B5,
46884 +               0x036, 0x000163B5,
46885 +               0x036, 0x0001E3B5,
46886 +               0x036, 0x000263B5,
46887 +               0x036, 0x0002E3B5,
46888 +               0x036, 0x000363B5,
46889 +               0x036, 0x0003E3B5,
46890 +               0x036, 0x000463B5,
46891 +               0x036, 0x0004E3B5,
46892 +               0x036, 0x000563B5,
46893 +               0x036, 0x0005E3B5,
46894 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
46895 +               0x036, 0x000056B3,
46896 +               0x036, 0x0000D6B3,
46897 +               0x036, 0x000156B3,
46898 +               0x036, 0x0001D6B3,
46899 +               0x036, 0x00026634,
46900 +               0x036, 0x0002E634,
46901 +               0x036, 0x00036634,
46902 +               0x036, 0x0003E634,
46903 +               0x036, 0x000467B4,
46904 +               0x036, 0x0004E7B4,
46905 +               0x036, 0x000567B4,
46906 +               0x036, 0x0005E7B4,
46907 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
46908 +               0x036, 0x000063B5,
46909 +               0x036, 0x0000E3B5,
46910 +               0x036, 0x000163B5,
46911 +               0x036, 0x0001E3B5,
46912 +               0x036, 0x000263B5,
46913 +               0x036, 0x0002E3B5,
46914 +               0x036, 0x000363B5,
46915 +               0x036, 0x0003E3B5,
46916 +               0x036, 0x000463B5,
46917 +               0x036, 0x0004E3B5,
46918 +               0x036, 0x000563B5,
46919 +               0x036, 0x0005E3B5,
46920 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
46921                 0x036, 0x00085733,
46922                 0x036, 0x0008D733,
46923                 0x036, 0x00095733,
46924 @@ -1688,7 +1808,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46925                 0x036, 0x000CE4B4,
46926                 0x036, 0x000D64B4,
46927                 0x036, 0x000DE4B4,
46928 -       0xCDCDCDCD, 0xCDCD,
46929 +       0xA0000000,     0x00000000,
46930                 0x036, 0x000056B3,
46931                 0x036, 0x0000D6B3,
46932                 0x036, 0x000156B3,
46933 @@ -1701,103 +1821,162 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46934                 0x036, 0x0004E7B4,
46935                 0x036, 0x000567B4,
46936                 0x036, 0x0005E7B4,
46937 -       0xFF0F0104, 0xDEAD,
46938 +       0xB0000000,     0x00000000,
46939                 0x0EF, 0x00000000,
46940                 0x0EF, 0x00000008,
46941 -       0xFF0F0104, 0xABCD,
46942 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
46943                 0x03C, 0x000001C8,
46944                 0x03C, 0x00000492,
46945 -       0xFF0F0204, 0xCDEF,
46946 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
46947                 0x03C, 0x000001C8,
46948                 0x03C, 0x00000492,
46949 -       0xFF0F0404, 0xCDEF,
46950 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
46951 +               0x03C, 0x000001B6,
46952 +               0x03C, 0x00000492,
46953 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
46954 +               0x03C, 0x0000022A,
46955 +               0x03C, 0x00000594,
46956 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
46957 +               0x03C, 0x000001B6,
46958 +               0x03C, 0x00000492,
46959 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
46960                 0x03C, 0x000001C8,
46961                 0x03C, 0x00000492,
46962 -       0xCDCDCDCD, 0xCDCD,
46963 +       0xA0000000,     0x00000000,
46964                 0x03C, 0x0000022A,
46965                 0x03C, 0x00000594,
46966 -       0xFF0F0104, 0xDEAD,
46967 -       0xFF0F0104, 0xABCD,
46968 +       0xB0000000,     0x00000000,
46969 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
46970                 0x03C, 0x00000800,
46971 -       0xFF0F0204, 0xCDEF,
46972 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
46973                 0x03C, 0x00000800,
46974 -       0xFF0F0404, 0xCDEF,
46975 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
46976                 0x03C, 0x00000800,
46977 -       0xFF0F02C0, 0xCDEF,
46978 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
46979                 0x03C, 0x00000820,
46980 -       0xCDCDCDCD, 0xCDCD,
46981 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
46982 +               0x03C, 0x00000820,
46983 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
46984 +               0x03C, 0x00000800,
46985 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
46986 +               0x03C, 0x00000800,
46987 +       0xA0000000,     0x00000000,
46988                 0x03C, 0x00000900,
46989 -       0xFF0F0104, 0xDEAD,
46990 +       0xB0000000,     0x00000000,
46991                 0x0EF, 0x00000000,
46992                 0x018, 0x0001712A,
46993                 0x0EF, 0x00000002,
46994 -       0xFF0F0104, 0xABCD,
46995 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
46996                 0x008, 0x0004E400,
46997 -       0xFF0F0204, 0xCDEF,
46998 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
46999                 0x008, 0x0004E400,
47000 -       0xFF0F0404, 0xCDEF,
47001 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
47002 +               0x008, 0x00002000,
47003 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
47004 +               0x008, 0x00002000,
47005 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
47006 +               0x008, 0x00002000,
47007 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
47008 +               0x008, 0x00002000,
47009 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
47010                 0x008, 0x0004E400,
47011 -       0xCDCDCDCD, 0xCDCD,
47012 +       0xA0000000,     0x00000000,
47013                 0x008, 0x00002000,
47014 -       0xFF0F0104, 0xDEAD,
47015 +       0xB0000000,     0x00000000,
47016                 0x0EF, 0x00000000,
47017                 0x0DF, 0x000000C0,
47018 -               0x01F, 0x00040064,
47019 -       0xFF0F0104, 0xABCD,
47020 +               0x01F, 0x00000064,
47021 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
47022                 0x058, 0x000A7284,
47023                 0x059, 0x000600EC,
47024 -       0xFF0F0204, 0xCDEF,
47025 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
47026                 0x058, 0x000A7284,
47027                 0x059, 0x000600EC,
47028 -       0xFF0F0404, 0xCDEF,
47029 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
47030 +               0x058, 0x00081184,
47031 +               0x059, 0x0006016C,
47032 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
47033 +               0x058, 0x00081184,
47034 +               0x059, 0x0006016C,
47035 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
47036 +               0x058, 0x00081184,
47037 +               0x059, 0x0006016C,
47038 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
47039                 0x058, 0x000A7284,
47040                 0x059, 0x000600EC,
47041 -       0xCDCDCDCD, 0xCDCD,
47042 +       0xA0000000,     0x00000000,
47043                 0x058, 0x00081184,
47044                 0x059, 0x0006016C,
47045 -       0xFF0F0104, 0xDEAD,
47046 -       0xFF0F0104, 0xABCD,
47047 +       0xB0000000,     0x00000000,
47048 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
47049                 0x061, 0x000E8D73,
47050                 0x062, 0x00093FC5,
47051 -       0xFF0F0204, 0xCDEF,
47052 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
47053                 0x061, 0x000E8D73,
47054                 0x062, 0x00093FC5,
47055 -       0xFF0F0404, 0xCDEF,
47056 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
47057 +               0x061, 0x000EFD83,
47058 +               0x062, 0x00093FCC,
47059 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
47060 +               0x061, 0x000EAD53,
47061 +               0x062, 0x00093BC4,
47062 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
47063 +               0x061, 0x000EFD83,
47064 +               0x062, 0x00093FCC,
47065 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
47066                 0x061, 0x000E8D73,
47067                 0x062, 0x00093FC5,
47068 -       0xCDCDCDCD, 0xCDCD,
47069 +       0xA0000000,     0x00000000,
47070                 0x061, 0x000EAD53,
47071                 0x062, 0x00093BC4,
47072 -       0xFF0F0104, 0xDEAD,
47073 -       0xFF0F0104, 0xABCD,
47074 +       0xB0000000,     0x00000000,
47075 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
47076                 0x063, 0x000110E9,
47077 -       0xFF0F0204, 0xCDEF,
47078 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
47079                 0x063, 0x000110E9,
47080 -       0xFF0F0404, 0xCDEF,
47081 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
47082 +               0x063, 0x000110EB,
47083 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
47084                 0x063, 0x000110E9,
47085 -       0xFF0F0200, 0xCDEF,
47086 -               0x063, 0x000710E9,
47087 -       0xFF0F02C0, 0xCDEF,
47088 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
47089                 0x063, 0x000110E9,
47090 -       0xCDCDCDCD, 0xCDCD,
47091 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
47092 +               0x063, 0x000110EB,
47093 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
47094 +               0x063, 0x000110E9,
47095 +       0xA0000000,     0x00000000,
47096                 0x063, 0x000714E9,
47097 -       0xFF0F0104, 0xDEAD,
47098 -       0xFF0F0104, 0xABCD,
47099 +       0xB0000000,     0x00000000,
47100 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
47101 +               0x064, 0x0001C27C,
47102 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
47103 +               0x064, 0x0001C27C,
47104 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
47105                 0x064, 0x0001C27C,
47106 -       0xFF0F0204, 0xCDEF,
47107 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
47108 +               0x064, 0x0001C67C,
47109 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
47110                 0x064, 0x0001C27C,
47111 -       0xFF0F0404, 0xCDEF,
47112 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
47113                 0x064, 0x0001C27C,
47114 -       0xCDCDCDCD, 0xCDCD,
47115 +       0xA0000000,     0x00000000,
47116                 0x064, 0x0001C67C,
47117 -       0xFF0F0104, 0xDEAD,
47118 -       0xFF0F0200, 0xABCD,
47119 +       0xB0000000,     0x00000000,
47120 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
47121 +               0x065, 0x00091016,
47122 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
47123 +               0x065, 0x00091016,
47124 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
47125                 0x065, 0x00093016,
47126 -       0xFF0F02C0, 0xCDEF,
47127 +               0x9000020c,     0x00000000,     0x40000000,     0x00000000,
47128                 0x065, 0x00093015,
47129 -       0xCDCDCDCD, 0xCDCD,
47130 +               0x9000040c,     0x00000000,     0x40000000,     0x00000000,
47131 +               0x065, 0x00093015,
47132 +               0x90000200,     0x00000000,     0x40000000,     0x00000000,
47133 +               0x065, 0x00093016,
47134 +               0xA0000000,     0x00000000,
47135                 0x065, 0x00091016,
47136 -       0xFF0F0200, 0xDEAD,
47137 +               0xB0000000,     0x00000000,
47138                 0x018, 0x00000006,
47139                 0x0EF, 0x00002000,
47140                 0x03B, 0x0003824B,
47141 @@ -1895,9 +2074,10 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
47142                 0x0B4, 0x0001214C,
47143                 0x0B7, 0x0003000C,
47144                 0x01C, 0x000539D2,
47145 +               0x0C4, 0x000AFE00,
47146                 0x018, 0x0001F12A,
47147 -               0x0FE, 0x00000000,
47148 -               0x0FE, 0x00000000,
47149 +               0xFFE, 0x00000000,
47150 +               0xFFE, 0x00000000,
47151                 0x018, 0x0001712A,
47153  };
47154 @@ -2017,6 +2197,7 @@ u32 RTL8812AE_MAC_REG_ARRAY[] = {
47155  u32 RTL8812AE_MAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8812AE_MAC_REG_ARRAY);
47157  u32 RTL8821AE_MAC_REG_ARRAY[] = {
47158 +               0x421, 0x0000000F,
47159                 0x428, 0x0000000A,
47160                 0x429, 0x00000010,
47161                 0x430, 0x00000000,
47162 @@ -2485,7 +2666,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
47163                 0x81C, 0xA6360001,
47164                 0x81C, 0xA5380001,
47165                 0x81C, 0xA43A0001,
47166 -               0x81C, 0xA33C0001,
47167 +               0x81C, 0x683C0001,
47168                 0x81C, 0x673E0001,
47169                 0x81C, 0x66400001,
47170                 0x81C, 0x65420001,
47171 @@ -2519,7 +2700,66 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
47172                 0x81C, 0x017A0001,
47173                 0x81C, 0x017C0001,
47174                 0x81C, 0x017E0001,
47175 -       0xFF0F02C0, 0xABCD,
47176 +       0x8000020c,     0x00000000,     0x40000000,     0x00000000,
47177 +               0x81C, 0xFB000101,
47178 +               0x81C, 0xFA020101,
47179 +               0x81C, 0xF9040101,
47180 +               0x81C, 0xF8060101,
47181 +               0x81C, 0xF7080101,
47182 +               0x81C, 0xF60A0101,
47183 +               0x81C, 0xF50C0101,
47184 +               0x81C, 0xF40E0101,
47185 +               0x81C, 0xF3100101,
47186 +               0x81C, 0xF2120101,
47187 +               0x81C, 0xF1140101,
47188 +               0x81C, 0xF0160101,
47189 +               0x81C, 0xEF180101,
47190 +               0x81C, 0xEE1A0101,
47191 +               0x81C, 0xED1C0101,
47192 +               0x81C, 0xEC1E0101,
47193 +               0x81C, 0xEB200101,
47194 +               0x81C, 0xEA220101,
47195 +               0x81C, 0xE9240101,
47196 +               0x81C, 0xE8260101,
47197 +               0x81C, 0xE7280101,
47198 +               0x81C, 0xE62A0101,
47199 +               0x81C, 0xE52C0101,
47200 +               0x81C, 0xE42E0101,
47201 +               0x81C, 0xE3300101,
47202 +               0x81C, 0xA5320101,
47203 +               0x81C, 0xA4340101,
47204 +               0x81C, 0xA3360101,
47205 +               0x81C, 0x87380101,
47206 +               0x81C, 0x863A0101,
47207 +               0x81C, 0x853C0101,
47208 +               0x81C, 0x843E0101,
47209 +               0x81C, 0x69400101,
47210 +               0x81C, 0x68420101,
47211 +               0x81C, 0x67440101,
47212 +               0x81C, 0x66460101,
47213 +               0x81C, 0x49480101,
47214 +               0x81C, 0x484A0101,
47215 +               0x81C, 0x474C0101,
47216 +               0x81C, 0x2A4E0101,
47217 +               0x81C, 0x29500101,
47218 +               0x81C, 0x28520101,
47219 +               0x81C, 0x27540101,
47220 +               0x81C, 0x26560101,
47221 +               0x81C, 0x25580101,
47222 +               0x81C, 0x245A0101,
47223 +               0x81C, 0x235C0101,
47224 +               0x81C, 0x055E0101,
47225 +               0x81C, 0x04600101,
47226 +               0x81C, 0x03620101,
47227 +               0x81C, 0x02640101,
47228 +               0x81C, 0x01660101,
47229 +               0x81C, 0x01680101,
47230 +               0x81C, 0x016A0101,
47231 +               0x81C, 0x016C0101,
47232 +               0x81C, 0x016E0101,
47233 +               0x81C, 0x01700101,
47234 +               0x81C, 0x01720101,
47235 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
47236                 0x81C, 0xFB000101,
47237                 0x81C, 0xFA020101,
47238                 0x81C, 0xF9040101,
47239 @@ -2578,7 +2818,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
47240                 0x81C, 0x016E0101,
47241                 0x81C, 0x01700101,
47242                 0x81C, 0x01720101,
47243 -       0xCDCDCDCD, 0xCDCD,
47244 +       0xA0000000,     0x00000000,
47245                 0x81C, 0xFF000101,
47246                 0x81C, 0xFF020101,
47247                 0x81C, 0xFE040101,
47248 @@ -2637,7 +2877,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
47249                 0x81C, 0x046E0101,
47250                 0x81C, 0x03700101,
47251                 0x81C, 0x02720101,
47252 -       0xFF0F02C0, 0xDEAD,
47253 +       0xB0000000,     0x00000000,
47254                 0x81C, 0x01740101,
47255                 0x81C, 0x01760101,
47256                 0x81C, 0x01780101,
47257 diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
47258 index 948cb79050ea..e7d51ac9b689 100644
47259 --- a/drivers/net/wireless/realtek/rtw88/debug.c
47260 +++ b/drivers/net/wireless/realtek/rtw88/debug.c
47261 @@ -270,7 +270,7 @@ static ssize_t rtw_debugfs_set_rsvd_page(struct file *filp,
47263         if (num != 2) {
47264                 rtw_warn(rtwdev, "invalid arguments\n");
47265 -               return num;
47266 +               return -EINVAL;
47267         }
47269         debugfs_priv->rsvd_page.page_offset = offset;
47270 diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
47271 index 35afea91fd29..92b9cf1f9525 100644
47272 --- a/drivers/net/wireless/realtek/rtw88/main.h
47273 +++ b/drivers/net/wireless/realtek/rtw88/main.h
47274 @@ -1166,6 +1166,7 @@ struct rtw_chip_info {
47275         bool en_dis_dpd;
47276         u16 dpd_ratemask;
47277         u8 iqk_threshold;
47278 +       u8 lck_threshold;
47279         const struct rtw_pwr_track_tbl *pwr_track_tbl;
47281         u8 bfer_su_max_num;
47282 @@ -1534,6 +1535,7 @@ struct rtw_dm_info {
47283         u32 rrsr_mask_min;
47284         u8 thermal_avg[RTW_RF_PATH_MAX];
47285         u8 thermal_meter_k;
47286 +       u8 thermal_meter_lck;
47287         s8 delta_power_index[RTW_RF_PATH_MAX];
47288         s8 delta_power_index_last[RTW_RF_PATH_MAX];
47289         u8 default_ofdm_index;
47290 diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
47291 index 786a48649946..6b5c885798a4 100644
47292 --- a/drivers/net/wireless/realtek/rtw88/pci.c
47293 +++ b/drivers/net/wireless/realtek/rtw88/pci.c
47294 @@ -581,23 +581,30 @@ static int rtw_pci_start(struct rtw_dev *rtwdev)
47296         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
47298 +       rtw_pci_napi_start(rtwdev);
47300         spin_lock_bh(&rtwpci->irq_lock);
47301 +       rtwpci->running = true;
47302         rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
47303         spin_unlock_bh(&rtwpci->irq_lock);
47305 -       rtw_pci_napi_start(rtwdev);
47307         return 0;
47310  static void rtw_pci_stop(struct rtw_dev *rtwdev)
47312         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
47313 +       struct pci_dev *pdev = rtwpci->pdev;
47315 +       spin_lock_bh(&rtwpci->irq_lock);
47316 +       rtwpci->running = false;
47317 +       rtw_pci_disable_interrupt(rtwdev, rtwpci);
47318 +       spin_unlock_bh(&rtwpci->irq_lock);
47320 +       synchronize_irq(pdev->irq);
47321         rtw_pci_napi_stop(rtwdev);
47323         spin_lock_bh(&rtwpci->irq_lock);
47324 -       rtw_pci_disable_interrupt(rtwdev, rtwpci);
47325         rtw_pci_dma_release(rtwdev, rtwpci);
47326         spin_unlock_bh(&rtwpci->irq_lock);
47328 @@ -1138,7 +1145,8 @@ static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
47329                 rtw_fw_c2h_cmd_isr(rtwdev);
47331         /* all of the jobs for this interrupt have been done */
47332 -       rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
47333 +       if (rtwpci->running)
47334 +               rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
47335         spin_unlock_bh(&rtwpci->irq_lock);
47337         return IRQ_HANDLED;
47338 @@ -1558,7 +1566,8 @@ static int rtw_pci_napi_poll(struct napi_struct *napi, int budget)
47339         if (work_done < budget) {
47340                 napi_complete_done(napi, work_done);
47341                 spin_lock_bh(&rtwpci->irq_lock);
47342 -               rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
47343 +               if (rtwpci->running)
47344 +                       rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
47345                 spin_unlock_bh(&rtwpci->irq_lock);
47346                 /* When ISR happens during polling and before napi_complete
47347                  * while no further data is received. Data on the dma_ring will
47348 diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
47349 index e76fc549a788..0ffae887527a 100644
47350 --- a/drivers/net/wireless/realtek/rtw88/pci.h
47351 +++ b/drivers/net/wireless/realtek/rtw88/pci.h
47352 @@ -211,6 +211,7 @@ struct rtw_pci {
47353         spinlock_t irq_lock;
47354         u32 irq_mask[4];
47355         bool irq_enabled;
47356 +       bool running;
47358         /* napi structure */
47359         struct net_device netdev;
47360 diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
47361 index e114ddecac09..21e77fcfa4d5 100644
47362 --- a/drivers/net/wireless/realtek/rtw88/phy.c
47363 +++ b/drivers/net/wireless/realtek/rtw88/phy.c
47364 @@ -1584,7 +1584,7 @@ void rtw_phy_load_tables(struct rtw_dev *rtwdev)
47366  EXPORT_SYMBOL(rtw_phy_load_tables);
47368 -static u8 rtw_get_channel_group(u8 channel)
47369 +static u8 rtw_get_channel_group(u8 channel, u8 rate)
47371         switch (channel) {
47372         default:
47373 @@ -1628,6 +1628,7 @@ static u8 rtw_get_channel_group(u8 channel)
47374         case 106:
47375                 return 4;
47376         case 14:
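+               /* CCK rates (DESC_RATE11M and below) use a dedicated power group on channel 14 */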
47377 +               return rate <= DESC_RATE11M ? 5 : 4;
47378         case 108:
47379         case 110:
47380         case 112:
47381 @@ -1879,7 +1880,7 @@ void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw,
47382         s8 *remnant = &pwr_param->pwr_remnant;
47384         pwr_idx = &rtwdev->efuse.txpwr_idx_table[path];
47385 -       group = rtw_get_channel_group(ch);
47386 +       group = rtw_get_channel_group(ch, rate);
47388         /* base power index for 2.4G/5G */
47389         if (IS_CH_2G_BAND(ch)) {
47390 @@ -2219,6 +2220,20 @@ s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev,
47392  EXPORT_SYMBOL(rtw_phy_pwrtrack_get_pwridx);
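+/*
+ * True when the thermal reading has drifted at least lck_threshold away from
+ * the value latched at the last LC calibration; the reference is updated so
+ * the check re-arms.
+ */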
47394 +bool rtw_phy_pwrtrack_need_lck(struct rtw_dev *rtwdev)
47396 +       struct rtw_dm_info *dm_info = &rtwdev->dm_info;
47397 +       u8 delta_lck;
47399 +       delta_lck = abs(dm_info->thermal_avg[0] - dm_info->thermal_meter_lck);
47400 +       if (delta_lck >= rtwdev->chip->lck_threshold) {
47401 +               dm_info->thermal_meter_lck = dm_info->thermal_avg[0];
47402 +               return true;
47403 +       }
47404 +       return false;
47406 +EXPORT_SYMBOL(rtw_phy_pwrtrack_need_lck);
47408  bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev)
47410         struct rtw_dm_info *dm_info = &rtwdev->dm_info;
47411 diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
47412 index a4fcfb878550..a0742a69446d 100644
47413 --- a/drivers/net/wireless/realtek/rtw88/phy.h
47414 +++ b/drivers/net/wireless/realtek/rtw88/phy.h
47415 @@ -55,6 +55,7 @@ u8 rtw_phy_pwrtrack_get_delta(struct rtw_dev *rtwdev, u8 path);
47416  s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev,
47417                                struct rtw_swing_table *swing_table,
47418                                u8 tbl_path, u8 therm_path, u8 delta);
47419 +bool rtw_phy_pwrtrack_need_lck(struct rtw_dev *rtwdev);
47420  bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev);
47421  void rtw_phy_config_swing_table(struct rtw_dev *rtwdev,
47422                                 struct rtw_swing_table *swing_table);
47423 diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
47424 index ea518aa78552..819af34dac34 100644
47425 --- a/drivers/net/wireless/realtek/rtw88/reg.h
47426 +++ b/drivers/net/wireless/realtek/rtw88/reg.h
47427 @@ -652,8 +652,13 @@
47428  #define RF_TXATANK     0x64
47429  #define RF_TRXIQ       0x66
47430  #define RF_RXIQGEN     0x8d
47431 +#define RF_SYN_PFD     0xb0
47432  #define RF_XTALX2      0xb8
47433 +#define RF_SYN_CTRL    0xbb
47434  #define RF_MALSEL      0xbe
47435 +#define RF_SYN_AAC     0xc9
47436 +#define RF_AAC_CTRL    0xca
47437 +#define RF_FAST_LCK    0xcc
47438  #define RF_RCKD                0xde
47439  #define RF_TXADBG      0xde
47440  #define RF_LUTDBG      0xdf
47441 diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
47442 index dd560c28abb2..448922cb2e63 100644
47443 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
47444 +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
47445 @@ -1126,6 +1126,7 @@ static void rtw8822c_pwrtrack_init(struct rtw_dev *rtwdev)
47447         dm_info->pwr_trk_triggered = false;
47448         dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k;
47449 +       dm_info->thermal_meter_lck = rtwdev->efuse.thermal_meter_k;
47452  static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev)
47453 @@ -2108,6 +2109,26 @@ static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev)
47454         rtw_write32_set(rtwdev, REG_RX_BREAK, BIT_COM_RX_GCK_EN);
47457 +static void rtw8822c_do_lck(struct rtw_dev *rtwdev)
47459 +       u32 val;
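+       /*
+        * Vendor LC calibration sequence: enable the synthesizer, run AAC
+        * calibration on path A and poll for completion, then pulse the
+        * fast-LCK trigger to latch the result. Steps are inferred from the
+        * RF_* register names; the exact bit meanings are undocumented.
+        */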
47461 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_CTRL, RFREG_MASK, 0x80010);
47462 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_PFD, RFREG_MASK, 0x1F0FA);
47463 +       fsleep(1);
47464 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_AAC_CTRL, RFREG_MASK, 0x80000);
47465 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_AAC, RFREG_MASK, 0x80001);
47466 +       read_poll_timeout(rtw_read_rf, val, val != 0x1, 1000, 100000,
47467 +                         true, rtwdev, RF_PATH_A, RF_AAC_CTRL, 0x1000);
47468 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_PFD, RFREG_MASK, 0x1F0F8);
47469 +       rtw_write_rf(rtwdev, RF_PATH_B, RF_SYN_CTRL, RFREG_MASK, 0x80010);
47471 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x0f000);
47472 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x4f000);
47473 +       fsleep(1);
47474 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x0f000);
47477  static void rtw8822c_do_iqk(struct rtw_dev *rtwdev)
47479         struct rtw_iqk_para para = {0};
47480 @@ -3538,11 +3559,12 @@ static void __rtw8822c_pwr_track(struct rtw_dev *rtwdev)
47482         rtw_phy_config_swing_table(rtwdev, &swing_table);
47484 +       if (rtw_phy_pwrtrack_need_lck(rtwdev))
47485 +               rtw8822c_do_lck(rtwdev);
47487         for (i = 0; i < rtwdev->hal.rf_path_num; i++)
47488                 rtw8822c_pwr_track_path(rtwdev, &swing_table, i);
47490 -       if (rtw_phy_pwrtrack_need_iqk(rtwdev))
47491 -               rtw8822c_do_iqk(rtwdev);
47494  static void rtw8822c_pwr_track(struct rtw_dev *rtwdev)
47495 @@ -4351,6 +4373,7 @@ struct rtw_chip_info rtw8822c_hw_spec = {
47496         .dpd_ratemask = DIS_DPD_RATEALL,
47497         .pwr_track_tbl = &rtw8822c_rtw_pwr_track_tbl,
47498         .iqk_threshold = 8,
47499 +       .lck_threshold = 8,
47500         .bfer_su_max_num = 2,
47501         .bfer_mu_max_num = 1,
47502         .rx_ldpc = true,
47503 diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
47504 index fe0287b22a25..e0c502bc4270 100644
47505 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
47506 +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
47507 @@ -1513,7 +1513,7 @@ static int rsi_restore(struct device *dev)
47509  static const struct dev_pm_ops rsi_pm_ops = {
47510         .suspend = rsi_suspend,
47511 -       .resume = rsi_resume,
47512 +       .resume_noirq = rsi_resume,
47513         .freeze = rsi_freeze,
47514         .thaw = rsi_thaw,
47515         .restore = rsi_restore,
47516 diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c
47517 index e14d88e558f0..85abd0a2d1c9 100644
47518 --- a/drivers/net/wireless/ti/wlcore/boot.c
47519 +++ b/drivers/net/wireless/ti/wlcore/boot.c
47520 @@ -72,6 +72,7 @@ static int wlcore_validate_fw_ver(struct wl1271 *wl)
47521         unsigned int *min_ver = (wl->fw_type == WL12XX_FW_TYPE_MULTI) ?
47522                 wl->min_mr_fw_ver : wl->min_sr_fw_ver;
47523         char min_fw_str[32] = "";
47524 +       int off = 0;
47525         int i;
47527         /* the chip must be exactly equal */
47528 @@ -105,13 +106,15 @@ static int wlcore_validate_fw_ver(struct wl1271 *wl)
47529         return 0;
47531  fail:
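+       /* Build the version string at an explicit offset: snprintf must not
+        * be passed its own destination buffer as a source argument.
+        */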
47532 -       for (i = 0; i < NUM_FW_VER; i++)
47533 +       for (i = 0; i < NUM_FW_VER && off < sizeof(min_fw_str); i++)
47534                 if (min_ver[i] == WLCORE_FW_VER_IGNORE)
47535 -                       snprintf(min_fw_str, sizeof(min_fw_str),
47536 -                                 "%s*.", min_fw_str);
47537 +                       off += snprintf(min_fw_str + off,
47538 +                                       sizeof(min_fw_str) - off,
47539 +                                       "*.");
47540                 else
47541 -                       snprintf(min_fw_str, sizeof(min_fw_str),
47542 -                                 "%s%u.", min_fw_str, min_ver[i]);
47543 +                       off += snprintf(min_fw_str + off,
47544 +                                       sizeof(min_fw_str) - off,
47545 +                                       "%u.", min_ver[i]);
47547         wl1271_error("Your WiFi FW version (%u.%u.%u.%u.%u) is invalid.\n"
47548                      "Please use at least FW %s\n"
47549 diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
47550 index b143293e694f..a9e13e6d65c5 100644
47551 --- a/drivers/net/wireless/ti/wlcore/debugfs.h
47552 +++ b/drivers/net/wireless/ti/wlcore/debugfs.h
47553 @@ -78,13 +78,14 @@ static ssize_t sub## _ ##name## _read(struct file *file,            \
47554         struct wl1271 *wl = file->private_data;                         \
47555         struct struct_type *stats = wl->stats.fw_stats;                 \
47556         char buf[DEBUGFS_FORMAT_BUFFER_SIZE] = "";                      \
47557 +       int pos = 0;                                                    \
47558         int i;                                                          \
47559                                                                         \
47560         wl1271_debugfs_update_stats(wl);                                \
47561                                                                         \
47562 -       for (i = 0; i < len; i++)                                       \
47563 -               snprintf(buf, sizeof(buf), "%s[%d] = %d\n",             \
47564 -                        buf, i, stats->sub.name[i]);                   \
47565 +       for (i = 0; i < len && pos < sizeof(buf); i++)                  \
47566 +               pos += snprintf(buf + pos, sizeof(buf) - pos,           \
47567 +                        "[%d] = %d\n", i, stats->sub.name[i]);         \
47568                                                                         \
47569         return wl1271_format_buffer(userbuf, count, ppos, "%s", buf);   \
47570  }                                                                      \
47571 diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
47572 index e98e04ee9a2c..59b7b93c5963 100644
47573 --- a/drivers/net/wireless/wl3501.h
47574 +++ b/drivers/net/wireless/wl3501.h
47575 @@ -379,16 +379,7 @@ struct wl3501_get_confirm {
47576         u8      mib_value[100];
47577  };
47579 -struct wl3501_join_req {
47580 -       u16                         next_blk;
47581 -       u8                          sig_id;
47582 -       u8                          reserved;
47583 -       struct iw_mgmt_data_rset    operational_rset;
47584 -       u16                         reserved2;
47585 -       u16                         timeout;
47586 -       u16                         probe_delay;
47587 -       u8                          timestamp[8];
47588 -       u8                          local_time[8];
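+/*
+ * Fields common to join requests and scan confirms, split out so copies can
+ * use sizeof(struct wl3501_req) instead of hard-coded byte counts.
+ */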
47589 +struct wl3501_req {
47590         u16                         beacon_period;
47591         u16                         dtim_period;
47592         u16                         cap_info;
47593 @@ -401,6 +392,19 @@ struct wl3501_join_req {
47594         struct iw_mgmt_data_rset    bss_basic_rset;
47595  };
47597 +struct wl3501_join_req {
47598 +       u16                         next_blk;
47599 +       u8                          sig_id;
47600 +       u8                          reserved;
47601 +       struct iw_mgmt_data_rset    operational_rset;
47602 +       u16                         reserved2;
47603 +       u16                         timeout;
47604 +       u16                         probe_delay;
47605 +       u8                          timestamp[8];
47606 +       u8                          local_time[8];
47607 +       struct wl3501_req           req;
47610  struct wl3501_join_confirm {
47611         u16     next_blk;
47612         u8      sig_id;
47613 @@ -443,16 +447,7 @@ struct wl3501_scan_confirm {
47614         u16                         status;
47615         char                        timestamp[8];
47616         char                        localtime[8];
47617 -       u16                         beacon_period;
47618 -       u16                         dtim_period;
47619 -       u16                         cap_info;
47620 -       u8                          bss_type;
47621 -       u8                          bssid[ETH_ALEN];
47622 -       struct iw_mgmt_essid_pset   ssid;
47623 -       struct iw_mgmt_ds_pset      ds_pset;
47624 -       struct iw_mgmt_cf_pset      cf_pset;
47625 -       struct iw_mgmt_ibss_pset    ibss_pset;
47626 -       struct iw_mgmt_data_rset    bss_basic_rset;
47627 +       struct wl3501_req           req;
47628         u8                          rssi;
47629  };
47631 @@ -471,8 +466,10 @@ struct wl3501_md_req {
47632         u16     size;
47633         u8      pri;
47634         u8      service_class;
47635 -       u8      daddr[ETH_ALEN];
47636 -       u8      saddr[ETH_ALEN];
47637 +       struct {
47638 +               u8      daddr[ETH_ALEN];
47639 +               u8      saddr[ETH_ALEN];
47640 +       } addr;
47641  };
47643  struct wl3501_md_ind {
47644 @@ -484,8 +481,10 @@ struct wl3501_md_ind {
47645         u8      reception;
47646         u8      pri;
47647         u8      service_class;
47648 -       u8      daddr[ETH_ALEN];
47649 -       u8      saddr[ETH_ALEN];
47650 +       struct {
47651 +               u8      daddr[ETH_ALEN];
47652 +               u8      saddr[ETH_ALEN];
47653 +       } addr;
47654  };
47656  struct wl3501_md_confirm {
47657 diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
47658 index 8ca5789c7b37..672f5d5f3f2c 100644
47659 --- a/drivers/net/wireless/wl3501_cs.c
47660 +++ b/drivers/net/wireless/wl3501_cs.c
47661 @@ -469,6 +469,7 @@ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len)
47662         struct wl3501_md_req sig = {
47663                 .sig_id = WL3501_SIG_MD_REQ,
47664         };
47665 +       size_t sig_addr_len = sizeof(sig.addr);
47666         u8 *pdata = (char *)data;
47667         int rc = -EIO;
47669 @@ -484,9 +485,9 @@ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len)
47670                         goto out;
47671                 }
47672                 rc = 0;
47673 -               memcpy(&sig.daddr[0], pdata, 12);
47674 -               pktlen = len - 12;
47675 -               pdata += 12;
47676 +               memcpy(&sig.addr, pdata, sig_addr_len);
47677 +               pktlen = len - sig_addr_len;
47678 +               pdata += sig_addr_len;
47679                 sig.data = bf;
47680                 if (((*pdata) * 256 + (*(pdata + 1))) > 1500) {
47681                         u8 addr4[ETH_ALEN] = {
47682 @@ -589,7 +590,7 @@ static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas)
47683         struct wl3501_join_req sig = {
47684                 .sig_id           = WL3501_SIG_JOIN_REQ,
47685                 .timeout          = 10,
47686 -               .ds_pset = {
47687 +               .req.ds_pset = {
47688                         .el = {
47689                                 .id  = IW_MGMT_INFO_ELEMENT_DS_PARAMETER_SET,
47690                                 .len = 1,
47691 @@ -598,7 +599,7 @@ static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas)
47692                 },
47693         };
47695 -       memcpy(&sig.beacon_period, &this->bss_set[stas].beacon_period, 72);
47696 +       memcpy(&sig.req, &this->bss_set[stas].req, sizeof(sig.req));
47697         return wl3501_esbq_exec(this, &sig, sizeof(sig));
47700 @@ -666,35 +667,37 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr)
47701         if (sig.status == WL3501_STATUS_SUCCESS) {
47702                 pr_debug("success");
47703                 if ((this->net_type == IW_MODE_INFRA &&
47704 -                    (sig.cap_info & WL3501_MGMT_CAPABILITY_ESS)) ||
47705 +                    (sig.req.cap_info & WL3501_MGMT_CAPABILITY_ESS)) ||
47706                     (this->net_type == IW_MODE_ADHOC &&
47707 -                    (sig.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) ||
47708 +                    (sig.req.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) ||
47709                     this->net_type == IW_MODE_AUTO) {
47710                         if (!this->essid.el.len)
47711                                 matchflag = 1;
47712                         else if (this->essid.el.len == 3 &&
47713                                  !memcmp(this->essid.essid, "ANY", 3))
47714                                 matchflag = 1;
47715 -                       else if (this->essid.el.len != sig.ssid.el.len)
47716 +                       else if (this->essid.el.len != sig.req.ssid.el.len)
47717                                 matchflag = 0;
47718 -                       else if (memcmp(this->essid.essid, sig.ssid.essid,
47719 +                       else if (memcmp(this->essid.essid, sig.req.ssid.essid,
47720                                         this->essid.el.len))
47721                                 matchflag = 0;
47722                         else
47723                                 matchflag = 1;
47724                         if (matchflag) {
47725                                 for (i = 0; i < this->bss_cnt; i++) {
47726 -                                       if (ether_addr_equal_unaligned(this->bss_set[i].bssid, sig.bssid)) {
47727 +                                       if (ether_addr_equal_unaligned(this->bss_set[i].req.bssid,
47728 +                                                                      sig.req.bssid)) {
47729                                                 matchflag = 0;
47730                                                 break;
47731                                         }
47732                                 }
47733                         }
47734                         if (matchflag && (i < 20)) {
47735 -                               memcpy(&this->bss_set[i].beacon_period,
47736 -                                      &sig.beacon_period, 73);
47737 +                               memcpy(&this->bss_set[i].req,
47738 +                                      &sig.req, sizeof(sig.req));
47739                                 this->bss_cnt++;
47740                                 this->rssi = sig.rssi;
47741 +                               this->bss_set[i].rssi = sig.rssi;
47742                         }
47743                 }
47744         } else if (sig.status == WL3501_STATUS_TIMEOUT) {
47745 @@ -886,19 +889,19 @@ static void wl3501_mgmt_join_confirm(struct net_device *dev, u16 addr)
47746                         if (this->join_sta_bss < this->bss_cnt) {
47747                                 const int i = this->join_sta_bss;
47748                                 memcpy(this->bssid,
47749 -                                      this->bss_set[i].bssid, ETH_ALEN);
47750 -                               this->chan = this->bss_set[i].ds_pset.chan;
47751 +                                      this->bss_set[i].req.bssid, ETH_ALEN);
47752 +                               this->chan = this->bss_set[i].req.ds_pset.chan;
47753                                 iw_copy_mgmt_info_element(&this->keep_essid.el,
47754 -                                                    &this->bss_set[i].ssid.el);
47755 +                                                    &this->bss_set[i].req.ssid.el);
47756                                 wl3501_mgmt_auth(this);
47757                         }
47758                 } else {
47759                         const int i = this->join_sta_bss;
47761 -                       memcpy(&this->bssid, &this->bss_set[i].bssid, ETH_ALEN);
47762 -                       this->chan = this->bss_set[i].ds_pset.chan;
47763 +                       memcpy(&this->bssid, &this->bss_set[i].req.bssid, ETH_ALEN);
47764 +                       this->chan = this->bss_set[i].req.ds_pset.chan;
47765                         iw_copy_mgmt_info_element(&this->keep_essid.el,
47766 -                                                 &this->bss_set[i].ssid.el);
47767 +                                                 &this->bss_set[i].req.ssid.el);
47768                         wl3501_online(dev);
47769                 }
47770         } else {
47771 @@ -980,7 +983,8 @@ static inline void wl3501_md_ind_interrupt(struct net_device *dev,
47772         } else {
47773                 skb->dev = dev;
47774                 skb_reserve(skb, 2); /* IP headers on 16-byte boundaries */
47775 -               skb_copy_to_linear_data(skb, (unsigned char *)&sig.daddr, 12);
47776 +               skb_copy_to_linear_data(skb, (unsigned char *)&sig.addr,
47777 +                                       sizeof(sig.addr));
47778                 wl3501_receive(this, skb->data, pkt_len);
47779                 skb_put(skb, pkt_len);
47780                 skb->protocol   = eth_type_trans(skb, dev);
47781 @@ -1571,30 +1575,30 @@ static int wl3501_get_scan(struct net_device *dev, struct iw_request_info *info,
47782         for (i = 0; i < this->bss_cnt; ++i) {
47783                 iwe.cmd                 = SIOCGIWAP;
47784                 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
47785 -               memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].bssid, ETH_ALEN);
47786 +               memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].req.bssid, ETH_ALEN);
47787                 current_ev = iwe_stream_add_event(info, current_ev,
47788                                                   extra + IW_SCAN_MAX_DATA,
47789                                                   &iwe, IW_EV_ADDR_LEN);
47790                 iwe.cmd           = SIOCGIWESSID;
47791                 iwe.u.data.flags  = 1;
47792 -               iwe.u.data.length = this->bss_set[i].ssid.el.len;
47793 +               iwe.u.data.length = this->bss_set[i].req.ssid.el.len;
47794                 current_ev = iwe_stream_add_point(info, current_ev,
47795                                                   extra + IW_SCAN_MAX_DATA,
47796                                                   &iwe,
47797 -                                                 this->bss_set[i].ssid.essid);
47798 +                                                 this->bss_set[i].req.ssid.essid);
47799                 iwe.cmd    = SIOCGIWMODE;
47800 -               iwe.u.mode = this->bss_set[i].bss_type;
47801 +               iwe.u.mode = this->bss_set[i].req.bss_type;
47802                 current_ev = iwe_stream_add_event(info, current_ev,
47803                                                   extra + IW_SCAN_MAX_DATA,
47804                                                   &iwe, IW_EV_UINT_LEN);
47805                 iwe.cmd = SIOCGIWFREQ;
47806 -               iwe.u.freq.m = this->bss_set[i].ds_pset.chan;
47807 +               iwe.u.freq.m = this->bss_set[i].req.ds_pset.chan;
47808                 iwe.u.freq.e = 0;
47809                 current_ev = iwe_stream_add_event(info, current_ev,
47810                                                   extra + IW_SCAN_MAX_DATA,
47811                                                   &iwe, IW_EV_FREQ_LEN);
47812                 iwe.cmd = SIOCGIWENCODE;
47813 -               if (this->bss_set[i].cap_info & WL3501_MGMT_CAPABILITY_PRIVACY)
47814 +               if (this->bss_set[i].req.cap_info & WL3501_MGMT_CAPABILITY_PRIVACY)
47815                         iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
47816                 else
47817                         iwe.u.data.flags = IW_ENCODE_DISABLED;
47818 diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
47819 index f1469ac8ff42..3fe5b81eda2d 100644
47820 --- a/drivers/nfc/pn533/pn533.c
47821 +++ b/drivers/nfc/pn533/pn533.c
47822 @@ -706,6 +706,9 @@ static bool pn533_target_type_a_is_valid(struct pn533_target_type_a *type_a,
47823         if (PN533_TYPE_A_SEL_CASCADE(type_a->sel_res) != 0)
47824                 return false;
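+       /* nfcid_len comes from the device; bound it so later copies into a
+        * NFC_NFCID1_MAXSIZE buffer cannot overflow.
+        */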
47826 +       if (type_a->nfcid_len > NFC_NFCID1_MAXSIZE)
47827 +               return false;
47829         return true;
47832 diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
47833 index 0896e21642be..091b2e77d39b 100644
47834 --- a/drivers/nvme/host/core.c
47835 +++ b/drivers/nvme/host/core.c
47836 @@ -2681,7 +2681,8 @@ static void nvme_set_latency_tolerance(struct device *dev, s32 val)
47838         if (ctrl->ps_max_latency_us != latency) {
47839                 ctrl->ps_max_latency_us = latency;
47840 -               nvme_configure_apst(ctrl);
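+               /* ps_max_latency_us is kept; APST gets programmed once the controller goes live */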
47841 +               if (ctrl->state == NVME_CTRL_LIVE)
47842 +                       nvme_configure_apst(ctrl);
47843         }
47846 @@ -3189,7 +3190,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
47847                 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
47848         }
47850 -       ret = nvme_mpath_init(ctrl, id);
47851 +       ret = nvme_mpath_init_identify(ctrl, id);
47852         kfree(id);
47854         if (ret < 0)
47855 @@ -4579,6 +4580,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
47856                 min(default_ps_max_latency_us, (unsigned long)S32_MAX));
47858         nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
47859 +       nvme_mpath_init_ctrl(ctrl);
47861         return 0;
47862  out_free_name:
47863 diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
47864 index 6ffa8de2a0d7..5eee603bc249 100644
47865 --- a/drivers/nvme/host/fc.c
47866 +++ b/drivers/nvme/host/fc.c
47867 @@ -2460,6 +2460,18 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
47868  static void
47869  __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
47871 +       int q;
47873 +       /*
47874 +        * If aborting io, the queues are no longer good; mark them
47875 +        * all as not live.
47876 +        */
47877 +       if (ctrl->ctrl.queue_count > 1) {
47878 +               for (q = 1; q < ctrl->ctrl.queue_count; q++)
47879 +                       clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags);
47880 +       }
47881 +       clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
47883         /*
47884          * If io queues are present, stop them and terminate all outstanding
47885          * ios on them. As FC allocates FC exchange for each io, the
47886 diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
47887 index a1d476e1ac02..56852e6edd81 100644
47888 --- a/drivers/nvme/host/multipath.c
47889 +++ b/drivers/nvme/host/multipath.c
47890 @@ -668,6 +668,10 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
47891                 if (desc.state) {
47892                         /* found the group desc: update */
47893                         nvme_update_ns_ana_state(&desc, ns);
47894 +               } else {
47895 +                       /* group desc not found: trigger a re-read */
47896 +                       set_bit(NVME_NS_ANA_PENDING, &ns->flags);
47897 +                       queue_work(nvme_wq, &ns->ctrl->ana_work);
47898                 }
47899         } else {
47900                 ns->ana_state = NVME_ANA_OPTIMIZED; 
47901 @@ -705,9 +709,18 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
47902         put_disk(head->disk);
47905 -int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
47906 +void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
47908 -       int error;
47909 +       mutex_init(&ctrl->ana_lock);
47910 +       timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
47911 +       INIT_WORK(&ctrl->ana_work, nvme_ana_work);
47914 +int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
47916 +       size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT;
47917 +       size_t ana_log_size;
47918 +       int error = 0;
47920         /* check if multipath is enabled and we have the capability */
47921         if (!multipath || !ctrl->subsys ||
47922 @@ -719,37 +732,31 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
47923         ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
47924         ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);
47926 -       mutex_init(&ctrl->ana_lock);
47927 -       timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
47928 -       ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
47929 -               ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
47930 -       ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
47932 -       if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
47933 +       ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
47934 +               ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) +
47935 +               ctrl->max_namespaces * sizeof(__le32);
47936 +       if (ana_log_size > max_transfer_size) {
47937                 dev_err(ctrl->device,
47938 -                       "ANA log page size (%zd) larger than MDTS (%d).\n",
47939 -                       ctrl->ana_log_size,
47940 -                       ctrl->max_hw_sectors << SECTOR_SHIFT);
47941 +                       "ANA log page size (%zd) larger than MDTS (%zd).\n",
47942 +                       ana_log_size, max_transfer_size);
47943                 dev_err(ctrl->device, "disabling ANA support.\n");
47944 -               return 0;
47945 +               goto out_uninit;
47946         }
47948 -       INIT_WORK(&ctrl->ana_work, nvme_ana_work);
47949 -       kfree(ctrl->ana_log_buf);
47950 -       ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
47951 -       if (!ctrl->ana_log_buf) {
47952 -               error = -ENOMEM;
47953 -               goto out;
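+       /*
+        * Reallocate only when the required size grows, quiescing ana_work and
+        * the anatt timer first so nothing touches the buffer being freed.
+        */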
47954 +       if (ana_log_size > ctrl->ana_log_size) {
47955 +               nvme_mpath_stop(ctrl);
47956 +               kfree(ctrl->ana_log_buf);
47957 +               ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
47958 +               if (!ctrl->ana_log_buf)
47959 +                       return -ENOMEM;
47960         }
47962 +       ctrl->ana_log_size = ana_log_size;
47963         error = nvme_read_ana_log(ctrl);
47964         if (error)
47965 -               goto out_free_ana_log_buf;
47966 +               goto out_uninit;
47967         return 0;
47968 -out_free_ana_log_buf:
47969 -       kfree(ctrl->ana_log_buf);
47970 -       ctrl->ana_log_buf = NULL;
47971 -out:
47973 +out_uninit:
47974 +       nvme_mpath_uninit(ctrl);
47975         return error;
47978 diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
47979 index 07b34175c6ce..447b0720aef5 100644
47980 --- a/drivers/nvme/host/nvme.h
47981 +++ b/drivers/nvme/host/nvme.h
47982 @@ -668,7 +668,8 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
47983  int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
47984  void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
47985  void nvme_mpath_remove_disk(struct nvme_ns_head *head);
47986 -int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
47987 +int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
47988 +void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
47989  void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
47990  void nvme_mpath_stop(struct nvme_ctrl *ctrl);
47991  bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
47992 @@ -742,7 +743,10 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
47993  static inline void nvme_trace_bio_complete(struct request *req)
47996 -static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
47997 +static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
48000 +static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
48001                 struct nvme_id_ctrl *id)
48003         if (ctrl->subsys->cmic & (1 << 3))
48004 diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
48005 index 7249ae74f71f..c92a15c3fbc5 100644
48006 --- a/drivers/nvme/host/pci.c
48007 +++ b/drivers/nvme/host/pci.c
48008 @@ -852,7 +852,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
48009                                 return nvme_setup_prp_simple(dev, req,
48010                                                              &cmnd->rw, &bv);
48012 -                       if (iod->nvmeq->qid &&
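+                       /* sgl_threshold == 0 disables SGLs entirely, even for single-segment I/O */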
48013 +                       if (iod->nvmeq->qid && sgl_threshold &&
48014                             dev->ctrl.sgls & ((1 << 0) | (1 << 1)))
48015                                 return nvme_setup_sgl_simple(dev, req,
48016                                                              &cmnd->rw, &bv);
48017 diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
48018 index a0f00cb8f9f3..8c2ae6284c3b 100644
48019 --- a/drivers/nvme/host/tcp.c
48020 +++ b/drivers/nvme/host/tcp.c
48021 @@ -874,7 +874,7 @@ static void nvme_tcp_state_change(struct sock *sk)
48023         struct nvme_tcp_queue *queue;
48025 -       read_lock(&sk->sk_callback_lock);
48026 +       read_lock_bh(&sk->sk_callback_lock);
48027         queue = sk->sk_user_data;
48028         if (!queue)
48029                 goto done;
48030 @@ -895,7 +895,7 @@ static void nvme_tcp_state_change(struct sock *sk)
48032         queue->state_change(sk);
48033  done:
48034 -       read_unlock(&sk->sk_callback_lock);
48035 +       read_unlock_bh(&sk->sk_callback_lock);
48038  static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
48039 @@ -940,7 +940,6 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
48040                 if (ret <= 0)
48041                         return ret;
48043 -               nvme_tcp_advance_req(req, ret);
48044                 if (queue->data_digest)
48045                         nvme_tcp_ddgst_update(queue->snd_hash, page,
48046                                         offset, ret);
48047 @@ -957,6 +956,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
48048                         }
48049                         return 1;
48050                 }
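+               /* only reached when more data remains, so the request cannot have completed yet */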
48051 +               nvme_tcp_advance_req(req, ret);
48052         }
48053         return -EAGAIN;
48055 @@ -1137,7 +1137,8 @@ static void nvme_tcp_io_work(struct work_struct *w)
48056                                 pending = true;
48057                         else if (unlikely(result < 0))
48058                                 break;
48059 -               }
48060 +               } else
48061 +                       pending = !llist_empty(&queue->req_list);
48063                 result = nvme_tcp_try_recv(queue);
48064                 if (result > 0)
48065 diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
48066 index fe6b8aa90b53..5a1ab49908c3 100644
48067 --- a/drivers/nvme/target/admin-cmd.c
48068 +++ b/drivers/nvme/target/admin-cmd.c
48069 @@ -307,7 +307,7 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
48070         case NVME_LOG_ANA:
48071                 return nvmet_execute_get_log_page_ana(req);
48072         }
48073 -       pr_err("unhandled lid %d on qid %d\n",
48074 +       pr_debug("unhandled lid %d on qid %d\n",
48075                req->cmd->get_log_page.lid, req->sq->qid);
48076         req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
48077         nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
48078 @@ -659,7 +659,7 @@ static void nvmet_execute_identify(struct nvmet_req *req)
48079                 return nvmet_execute_identify_desclist(req);
48080         }
48082 -       pr_err("unhandled identify cns %d on qid %d\n",
48083 +       pr_debug("unhandled identify cns %d on qid %d\n",
48084                req->cmd->identify.cns, req->sq->qid);
48085         req->error_loc = offsetof(struct nvme_identify, cns);
48086         nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
48087 @@ -919,15 +919,21 @@ void nvmet_execute_async_event(struct nvmet_req *req)
48088  void nvmet_execute_keep_alive(struct nvmet_req *req)
48090         struct nvmet_ctrl *ctrl = req->sq->ctrl;
48091 +       u16 status = 0;
48093         if (!nvmet_check_transfer_len(req, 0))
48094                 return;
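+       /* keep-alive is invalid on a connection that was set up with kato=0 */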
48096 +       if (!ctrl->kato) {
48097 +               status = NVME_SC_KA_TIMEOUT_INVALID;
48098 +               goto out;
48099 +       }
48101         pr_debug("ctrl %d update keep-alive timer for %d secs\n",
48102                 ctrl->cntlid, ctrl->kato);
48104         mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
48105 -       nvmet_req_complete(req, 0);
48106 +out:
48107 +       nvmet_req_complete(req, status);
48110  u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
48111 @@ -971,7 +977,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
48112                 return 0;
48113         }
48115 -       pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
48116 +       pr_debug("unhandled cmd %d on qid %d\n", cmd->common.opcode,
48117                req->sq->qid);
48118         req->error_loc = offsetof(struct nvme_common_command, opcode);
48119         return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
48120 diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
48121 index a027433b8be8..348057fdc568 100644
48122 --- a/drivers/nvme/target/core.c
48123 +++ b/drivers/nvme/target/core.c
48124 @@ -1371,7 +1371,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
48125                 goto out_free_changed_ns_list;
48127         if (subsys->cntlid_min > subsys->cntlid_max)
48128 -               goto out_free_changed_ns_list;
48129 +               goto out_free_sqs;
48131         ret = ida_simple_get(&cntlid_ida,
48132                              subsys->cntlid_min, subsys->cntlid_max,
48133 diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
48134 index 682854e0e079..4845d12e374a 100644
48135 --- a/drivers/nvme/target/discovery.c
48136 +++ b/drivers/nvme/target/discovery.c
48137 @@ -178,12 +178,14 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
48138         if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
48139                 req->error_loc =
48140                         offsetof(struct nvme_get_log_page_command, lid);
48141 -               status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
48142 +               status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
48143                 goto out;
48144         }
48146         /* Spec requires dword aligned offsets */
48147         if (offset & 0x3) {
48148 +               req->error_loc =
48149 +                       offsetof(struct nvme_get_log_page_command, lpo);
48150                 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
48151                 goto out;
48152         }
48153 @@ -250,7 +252,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
48155         if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
48156                 req->error_loc = offsetof(struct nvme_identify, cns);
48157 -               status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
48158 +               status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
48159                 goto out;
48160         }
48162 diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
48163 index 9a8b3726a37c..429263ca9b97 100644
48164 --- a/drivers/nvme/target/io-cmd-bdev.c
48165 +++ b/drivers/nvme/target/io-cmd-bdev.c
48166 @@ -258,7 +258,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
48168         sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
48170 -       if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
48171 +       if (nvmet_use_inline_bvec(req)) {
48172                 bio = &req->b.inline_bio;
48173                 bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
48174         } else {
48175 diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
48176 index 715d4376c997..7fdbdc496597 100644
48177 --- a/drivers/nvme/target/io-cmd-file.c
48178 +++ b/drivers/nvme/target/io-cmd-file.c
48179 @@ -49,9 +49,11 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
48181         ns->file = filp_open(ns->device_path, flags, 0);
48182         if (IS_ERR(ns->file)) {
48183 -               pr_err("failed to open file %s: (%ld)\n",
48184 -                               ns->device_path, PTR_ERR(ns->file));
48185 -               return PTR_ERR(ns->file);
48186 +               ret = PTR_ERR(ns->file);
48187 +               pr_err("failed to open file %s: (%d)\n",
48188 +                       ns->device_path, ret);
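+               /* clear the ERR_PTR so a later ns disable will not fput() it */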
48189 +               ns->file = NULL;
48190 +               return ret;
48191         }
48193         ret = nvmet_file_ns_revalidate(ns);
48194 diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
48195 index 3e189e753bcf..14913a4588ec 100644
48196 --- a/drivers/nvme/target/loop.c
48197 +++ b/drivers/nvme/target/loop.c
48198 @@ -588,8 +588,10 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
48200         ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
48201                                 0 /* no quirks, we're perfect! */);
48202 -       if (ret)
48203 +       if (ret) {
48204 +               kfree(ctrl);
48205                 goto out;
48206 +       }
48208         if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
48209                 WARN_ON_ONCE(1);
48210 diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
48211 index 4b84edb49f22..5aad34b106dc 100644
48212 --- a/drivers/nvme/target/nvmet.h
48213 +++ b/drivers/nvme/target/nvmet.h
48214 @@ -614,4 +614,10 @@ static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
48215         return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
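+/* inline bvecs are usable only when both byte count and segment count fit the fixed array */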
48218 +static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
48220 +       return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
48221 +              req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
48224  #endif /* _NVMET_H */
48225 diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
48226 index 2798944899b7..39b1473f7204 100644
48227 --- a/drivers/nvme/target/passthru.c
48228 +++ b/drivers/nvme/target/passthru.c
48229 @@ -194,7 +194,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
48230         if (req->sg_cnt > BIO_MAX_VECS)
48231                 return -EINVAL;
48233 -       if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
48234 +       if (nvmet_use_inline_bvec(req)) {
48235                 bio = &req->p.inline_bio;
48236                 bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
48237         } else {
48238 diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
48239 index 6c1f3ab7649c..7d607f435e36 100644
48240 --- a/drivers/nvme/target/rdma.c
48241 +++ b/drivers/nvme/target/rdma.c
48242 @@ -700,7 +700,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
48244         struct nvmet_rdma_rsp *rsp =
48245                 container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
48246 -       struct nvmet_rdma_queue *queue = cq->cq_context;
48247 +       struct nvmet_rdma_queue *queue = wc->qp->qp_context;
48249         nvmet_rdma_release_rsp(rsp);
48251 @@ -786,7 +786,7 @@ static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc)
48253         struct nvmet_rdma_rsp *rsp =
48254                 container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe);
48255 -       struct nvmet_rdma_queue *queue = cq->cq_context;
48256 +       struct nvmet_rdma_queue *queue = wc->qp->qp_context;
48257         struct rdma_cm_id *cm_id = rsp->queue->cm_id;
48258         u16 status;
48260 diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
48261 index d658c6e8263a..d958b5da9b88 100644
48262 --- a/drivers/nvme/target/tcp.c
48263 +++ b/drivers/nvme/target/tcp.c
48264 @@ -525,11 +525,36 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
48265         struct nvmet_tcp_cmd *cmd =
48266                 container_of(req, struct nvmet_tcp_cmd, req);
48267         struct nvmet_tcp_queue  *queue = cmd->queue;
48268 +       struct nvme_sgl_desc *sgl;
48269 +       u32 len;
48271 +       if (unlikely(cmd == queue->cmd)) {
48272 +               sgl = &cmd->req.cmd->common.dptr.sgl;
48273 +               len = le32_to_cpu(sgl->length);
48275 +               /*
48276 +                * Wait for inline data before processing the response.
48277 +                * Avoid using helpers, since this might happen before
48278 +                * nvmet_req_init has completed.
48279 +                */
48280 +               if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
48281 +                   len && len < cmd->req.port->inline_data_size &&
48282 +                   nvme_is_write(cmd->req.cmd))
48283 +                       return;
48284 +       }
48286         llist_add(&cmd->lentry, &queue->resp_list);
48287         queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
48290 +static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
48292 +       if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
48293 +               nvmet_tcp_queue_response(&cmd->req);
48294 +       else
48295 +               cmd->req.execute(&cmd->req);
48298  static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
48300         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
48301 @@ -961,7 +986,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
48302                         le32_to_cpu(req->cmd->common.dptr.sgl.length));
48304                 nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
48305 -               return -EAGAIN;
48306 +               return 0;
48307         }
48309         ret = nvmet_tcp_map_data(queue->cmd);
48310 @@ -1104,10 +1129,8 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
48311                 return 0;
48312         }
48314 -       if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
48315 -           cmd->rbytes_done == cmd->req.transfer_len) {
48316 -               cmd->req.execute(&cmd->req);
48317 -       }
48318 +       if (cmd->rbytes_done == cmd->req.transfer_len)
48319 +               nvmet_tcp_execute_request(cmd);
48321         nvmet_prepare_receive_pdu(queue);
48322         return 0;
48323 @@ -1144,9 +1167,9 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
48324                 goto out;
48325         }
48327 -       if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
48328 -           cmd->rbytes_done == cmd->req.transfer_len)
48329 -               cmd->req.execute(&cmd->req);
48330 +       if (cmd->rbytes_done == cmd->req.transfer_len)
48331 +               nvmet_tcp_execute_request(cmd);
48333         ret = 0;
48334  out:
48335         nvmet_prepare_receive_pdu(queue);
48336 @@ -1434,7 +1457,7 @@ static void nvmet_tcp_state_change(struct sock *sk)
48338         struct nvmet_tcp_queue *queue;
48340 -       write_lock_bh(&sk->sk_callback_lock);
48341 +       read_lock_bh(&sk->sk_callback_lock);
48342         queue = sk->sk_user_data;
48343         if (!queue)
48344                 goto done;
48345 @@ -1452,7 +1475,7 @@ static void nvmet_tcp_state_change(struct sock *sk)
48346                         queue->idx, sk->sk_state);
48347         }
48348  done:
48349 -       write_unlock_bh(&sk->sk_callback_lock);
48350 +       read_unlock_bh(&sk->sk_callback_lock);
48353  static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
48354 diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
48355 index 75d2594c16e1..267a0d9e99ba 100644
48356 --- a/drivers/nvmem/Kconfig
48357 +++ b/drivers/nvmem/Kconfig
48358 @@ -272,6 +272,7 @@ config SPRD_EFUSE
48360  config NVMEM_RMEM
48361         tristate "Reserved Memory Based Driver Support"
48362 +       depends on HAS_IOMEM
48363         help
48364           This driver maps reserved memory into an nvmem device. It might be
48365           useful to expose information left by firmware in memory.
48366 diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
48367 index 6cace24dfbf7..100d69d8f2e1 100644
48368 --- a/drivers/nvmem/qfprom.c
48369 +++ b/drivers/nvmem/qfprom.c
48370 @@ -127,6 +127,16 @@ static void qfprom_disable_fuse_blowing(const struct qfprom_priv *priv,
48372         int ret;
48374 +       /*
48375 +        * This may be a shared rail and may be able to run at a lower rate
48376 +        * when we're not blowing fuses.  At the moment, the regulator framework
48377 +        * applies voltage constraints even on disabled rails, so remove our
48378 +        * constraints and allow the rail to be adjusted by other users.
48379 +        */
48380 +       ret = regulator_set_voltage(priv->vcc, 0, INT_MAX);
48381 +       if (ret)
48382 +               dev_warn(priv->dev, "Failed to set 0 voltage (ignoring)\n");
48384         ret = regulator_disable(priv->vcc);
48385         if (ret)
48386                 dev_warn(priv->dev, "Failed to disable regulator (ignoring)\n");
48387 @@ -172,6 +182,17 @@ static int qfprom_enable_fuse_blowing(const struct qfprom_priv *priv,
48388                 goto err_clk_prepared;
48389         }
48391 +       /*
48392 +        * Hardware requires a 1.8V minimum for fuse blowing; this may be
48393 +        * a shared rail, so don't specify a max; regulator constraints
48394 +        * will handle it.
48395 +        */
48396 +       ret = regulator_set_voltage(priv->vcc, 1800000, INT_MAX);
48397 +       if (ret) {
48398 +               dev_err(priv->dev, "Failed to set 1.8V minimum\n");
48399 +               goto err_clk_rate_set;
48400 +       }
48402         ret = regulator_enable(priv->vcc);
48403         if (ret) {
48404                 dev_err(priv->dev, "Failed to enable regulator\n");
48405 diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
48406 index 23effe5e50ec..2d132949572d 100644
48407 --- a/drivers/of/overlay.c
48408 +++ b/drivers/of/overlay.c
48409 @@ -796,6 +796,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
48410                 if (!fragment->target) {
48411                         of_node_put(fragment->overlay);
48412                         ret = -EINVAL;
48413 +                       of_node_put(node);
48414                         goto err_free_fragments;
48415                 }
48417 diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
48418 index 4547ac44c8d4..8fa1a7fdf12c 100644
48419 --- a/drivers/parport/ieee1284.c
48420 +++ b/drivers/parport/ieee1284.c
48421 @@ -202,7 +202,7 @@ int parport_wait_peripheral(struct parport *port,
48422                         /* parport_wait_event didn't time out, but the
48423                          * peripheral wasn't actually ready either.
48424                          * Wait for another 10ms. */
48425 -                       schedule_timeout_interruptible(msecs_to_jiffies(10));
48426 +                       schedule_msec_hrtimeout_interruptible(10);
48427                 }
48428         }
48430 diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c
48431 index 2c11bd3fe1fd..8cb6b61c0880 100644
48432 --- a/drivers/parport/ieee1284_ops.c
48433 +++ b/drivers/parport/ieee1284_ops.c
48434 @@ -520,7 +520,7 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
48435                         /* Yield the port for a while. */
48436                         if (count && dev->port->irq != PARPORT_IRQ_NONE) {
48437                                 parport_release (dev);
48438 -                               schedule_timeout_interruptible(msecs_to_jiffies(40));
48439 +                               schedule_msec_hrtimeout_interruptible(40);
48440                                 parport_claim_or_block (dev);
48441                         }
48442                         else
48443 diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
48444 index 53aa35cb3a49..a59ecbec601f 100644
48445 --- a/drivers/pci/controller/dwc/pci-keystone.c
48446 +++ b/drivers/pci/controller/dwc/pci-keystone.c
48447 @@ -798,7 +798,8 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
48448         int ret;
48450         pp->bridge->ops = &ks_pcie_ops;
48451 -       pp->bridge->child_ops = &ks_child_pcie_ops;
48452 +       if (!ks_pcie->is_am6)
48453 +               pp->bridge->child_ops = &ks_child_pcie_ops;
48455         ret = ks_pcie_config_legacy_irq(ks_pcie);
48456         if (ret)
48457 diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
48458 index 1c25d8337151..8d028a88b375 100644
48459 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c
48460 +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
48461 @@ -705,6 +705,8 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
48462                 }
48463         }
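+       /* detect the iATU window geometry before the endpoint setup below depends on it */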
48465 +       dw_pcie_iatu_detect(pci);
48467         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
48468         if (!res)
48469                 return -EINVAL;
48470 diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
48471 index 7e55b2b66182..24192b40e3a2 100644
48472 --- a/drivers/pci/controller/dwc/pcie-designware-host.c
48473 +++ b/drivers/pci/controller/dwc/pcie-designware-host.c
48474 @@ -398,6 +398,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
48475                 if (ret)
48476                         goto err_free_msi;
48477         }
48478 +       dw_pcie_iatu_detect(pci);
48480         dw_pcie_setup_rc(pp);
48481         dw_pcie_msi_init(pp);
48482 diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
48483 index 004cb860e266..a945f0c0e73d 100644
48484 --- a/drivers/pci/controller/dwc/pcie-designware.c
48485 +++ b/drivers/pci/controller/dwc/pcie-designware.c
48486 @@ -660,11 +660,9 @@ static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
48487         pci->num_ob_windows = ob;
48490 -void dw_pcie_setup(struct dw_pcie *pci)
48491 +void dw_pcie_iatu_detect(struct dw_pcie *pci)
48493 -       u32 val;
48494         struct device *dev = pci->dev;
48495 -       struct device_node *np = dev->of_node;
48496         struct platform_device *pdev = to_platform_device(dev);
48498         if (pci->version >= 0x480A || (!pci->version &&
48499 @@ -693,6 +691,13 @@ void dw_pcie_setup(struct dw_pcie *pci)
48501         dev_info(pci->dev, "Detected iATU regions: %u outbound, %u inbound",
48502                  pci->num_ob_windows, pci->num_ib_windows);
48505 +void dw_pcie_setup(struct dw_pcie *pci)
48507 +       u32 val;
48508 +       struct device *dev = pci->dev;
48509 +       struct device_node *np = dev->of_node;
48511         if (pci->link_gen > 0)
48512                 dw_pcie_link_set_max_speed(pci, pci->link_gen);
48513 diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
48514 index 7247c8b01f04..7d6e9b7576be 100644
48515 --- a/drivers/pci/controller/dwc/pcie-designware.h
48516 +++ b/drivers/pci/controller/dwc/pcie-designware.h
48517 @@ -306,6 +306,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
48518  void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
48519                          enum dw_pcie_region_type type);
48520  void dw_pcie_setup(struct dw_pcie *pci);
48521 +void dw_pcie_iatu_detect(struct dw_pcie *pci);
48523  static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
48525 diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
48526 index 6fa216e52d14..0e94190ca4e8 100644
48527 --- a/drivers/pci/controller/dwc/pcie-tegra194.c
48528 +++ b/drivers/pci/controller/dwc/pcie-tegra194.c
48529 @@ -1645,7 +1645,7 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
48530         if (pcie->ep_state == EP_STATE_ENABLED)
48531                 return;
48533 -       ret = pm_runtime_get_sync(dev);
48534 +       ret = pm_runtime_resume_and_get(dev);
48535         if (ret < 0) {
48536                 dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
48537                         ret);
48538 diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c
48539 index f964fd26f7e0..ffd84656544f 100644
48540 --- a/drivers/pci/controller/pci-thunder-ecam.c
48541 +++ b/drivers/pci/controller/pci-thunder-ecam.c
48542 @@ -116,7 +116,7 @@ static int thunder_ecam_p2_config_read(struct pci_bus *bus, unsigned int devfn,
48543          * the config space access window.  Since we are working with
48544          * the high-order 32 bits, shift everything down by 32 bits.
48545          */
48546 -       node_bits = (cfg->res.start >> 32) & (1 << 12);
48547 +       node_bits = upper_32_bits(cfg->res.start) & (1 << 12);
48549         v |= node_bits;
48550         set_val(v, where, size, val);
48551 diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c
48552 index 1a3f70ac61fc..0660b9da204f 100644
48553 --- a/drivers/pci/controller/pci-thunder-pem.c
48554 +++ b/drivers/pci/controller/pci-thunder-pem.c
48555 @@ -12,6 +12,7 @@
48556  #include <linux/pci-acpi.h>
48557  #include <linux/pci-ecam.h>
48558  #include <linux/platform_device.h>
48559 +#include <linux/io-64-nonatomic-lo-hi.h>
48560  #include "../pci.h"
48562  #if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
48563 @@ -324,9 +325,9 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
48564          * structure here for the BAR.
48565          */
48566         bar4_start = res_pem->start + 0xf00000;
48567 -       pem_pci->ea_entry[0] = (u32)bar4_start | 2;
48568 -       pem_pci->ea_entry[1] = (u32)(res_pem->end - bar4_start) & ~3u;
48569 -       pem_pci->ea_entry[2] = (u32)(bar4_start >> 32);
48570 +       pem_pci->ea_entry[0] = lower_32_bits(bar4_start) | 2;
48571 +       pem_pci->ea_entry[1] = lower_32_bits(res_pem->end - bar4_start) & ~3u;
48572 +       pem_pci->ea_entry[2] = upper_32_bits(bar4_start);
48574         cfg->priv = pem_pci;
48575         return 0;
48576 @@ -334,9 +335,9 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
48578  #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
48580 -#define PEM_RES_BASE           0x87e0c0000000UL
48581 -#define PEM_NODE_MASK          GENMASK(45, 44)
48582 -#define PEM_INDX_MASK          GENMASK(26, 24)
48583 +#define PEM_RES_BASE           0x87e0c0000000ULL
48584 +#define PEM_NODE_MASK          GENMASK_ULL(45, 44)
48585 +#define PEM_INDX_MASK          GENMASK_ULL(26, 24)
48586  #define PEM_MIN_DOM_IN_NODE    4
48587  #define PEM_MAX_DOM_IN_NODE    10
48589 diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
48590 index 2afdc865253e..7f503dd4ff81 100644
48591 --- a/drivers/pci/controller/pci-xgene.c
48592 +++ b/drivers/pci/controller/pci-xgene.c
48593 @@ -354,7 +354,8 @@ static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
48594         if (IS_ERR(port->csr_base))
48595                 return PTR_ERR(port->csr_base);
48597 -       port->cfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
48598 +       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
48599 +       port->cfg_base = devm_ioremap_resource(dev, res);
48600         if (IS_ERR(port->cfg_base))
48601                 return PTR_ERR(port->cfg_base);
48602         port->cfg_addr = res->start;
48603 diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
48604 index e330e6811f0b..08bc788d9422 100644
48605 --- a/drivers/pci/controller/pcie-brcmstb.c
48606 +++ b/drivers/pci/controller/pcie-brcmstb.c
48607 @@ -1148,6 +1148,7 @@ static int brcm_pcie_suspend(struct device *dev)
48609         brcm_pcie_turn_off(pcie);
48610         ret = brcm_phy_stop(pcie);
48611 +       reset_control_rearm(pcie->rescal);
48612         clk_disable_unprepare(pcie->clk);
48614         return ret;
48615 @@ -1163,9 +1164,13 @@ static int brcm_pcie_resume(struct device *dev)
48616         base = pcie->base;
48617         clk_prepare_enable(pcie->clk);
48619 +       ret = reset_control_reset(pcie->rescal);
48620 +       if (ret)
48621 +               goto err_disable_clk;
48623         ret = brcm_phy_start(pcie);
48624         if (ret)
48625 -               goto err;
48626 +               goto err_reset;
48628         /* Take bridge out of reset so we can access the SERDES reg */
48629         pcie->bridge_sw_init_set(pcie, 0);
48630 @@ -1180,14 +1185,16 @@ static int brcm_pcie_resume(struct device *dev)
48632         ret = brcm_pcie_setup(pcie);
48633         if (ret)
48634 -               goto err;
48635 +               goto err_reset;
48637         if (pcie->msi)
48638                 brcm_msi_set_regs(pcie->msi);
48640         return 0;
48642 -err:
48643 +err_reset:
48644 +       reset_control_rearm(pcie->rescal);
48645 +err_disable_clk:
48646         clk_disable_unprepare(pcie->clk);
48647         return ret;
48649 @@ -1197,7 +1204,7 @@ static void __brcm_pcie_remove(struct brcm_pcie *pcie)
48650         brcm_msi_remove(pcie);
48651         brcm_pcie_turn_off(pcie);
48652         brcm_phy_stop(pcie);
48653 -       reset_control_assert(pcie->rescal);
48654 +       reset_control_rearm(pcie->rescal);
48655         clk_disable_unprepare(pcie->clk);
48658 @@ -1278,13 +1285,13 @@ static int brcm_pcie_probe(struct platform_device *pdev)
48659                 return PTR_ERR(pcie->perst_reset);
48660         }
48662 -       ret = reset_control_deassert(pcie->rescal);
48663 +       ret = reset_control_reset(pcie->rescal);
48664         if (ret)
48665                 dev_err(&pdev->dev, "failed to deassert 'rescal'\n");
48667         ret = brcm_phy_start(pcie);
48668         if (ret) {
48669 -               reset_control_assert(pcie->rescal);
48670 +               reset_control_rearm(pcie->rescal);
48671                 clk_disable_unprepare(pcie->clk);
48672                 return ret;
48673         }
48674 @@ -1296,6 +1303,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
48675         pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
48676         if (pcie->type == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
48677                 dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n");
48678 +               ret = -ENODEV;
48679                 goto fail;
48680         }
48682 diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
48683 index 908475d27e0e..eede4e8f3f75 100644
48684 --- a/drivers/pci/controller/pcie-iproc-msi.c
48685 +++ b/drivers/pci/controller/pcie-iproc-msi.c
48686 @@ -271,7 +271,7 @@ static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
48687                                     NULL, NULL);
48688         }
48690 -       return hwirq;
48691 +       return 0;
48694  static void iproc_msi_irq_domain_free(struct irq_domain *domain,
48695 diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
48696 index c0ac4e9cbe72..f9760e73d568 100644
48697 --- a/drivers/pci/endpoint/functions/pci-epf-test.c
48698 +++ b/drivers/pci/endpoint/functions/pci-epf-test.c
48699 @@ -833,15 +833,18 @@ static int pci_epf_test_bind(struct pci_epf *epf)
48700                 return -EINVAL;
48702         epc_features = pci_epc_get_features(epc, epf->func_no);
48703 -       if (epc_features) {
48704 -               linkup_notifier = epc_features->linkup_notifier;
48705 -               core_init_notifier = epc_features->core_init_notifier;
48706 -               test_reg_bar = pci_epc_get_first_free_bar(epc_features);
48707 -               if (test_reg_bar < 0)
48708 -                       return -EINVAL;
48709 -               pci_epf_configure_bar(epf, epc_features);
48710 +       if (!epc_features) {
48711 +               dev_err(&epf->dev, "epc_features not implemented\n");
48712 +               return -EOPNOTSUPP;
48713         }
48715 +       linkup_notifier = epc_features->linkup_notifier;
48716 +       core_init_notifier = epc_features->core_init_notifier;
48717 +       test_reg_bar = pci_epc_get_first_free_bar(epc_features);
48718 +       if (test_reg_bar < 0)
48719 +               return -EINVAL;
48720 +       pci_epf_configure_bar(epf, epc_features);
48722         epf_test->test_reg_bar = test_reg_bar;
48723         epf_test->epc_features = epc_features;
48725 @@ -922,6 +925,7 @@ static int __init pci_epf_test_init(void)
48727         ret = pci_epf_register_driver(&test_driver);
48728         if (ret) {
48729 +               destroy_workqueue(kpcitest_workqueue);
48730                 pr_err("Failed to register pci epf test driver --> %d\n", ret);
48731                 return ret;
48732         }
48733 @@ -932,6 +936,8 @@ module_init(pci_epf_test_init);
48735  static void __exit pci_epf_test_exit(void)
48737 +       if (kpcitest_workqueue)
48738 +               destroy_workqueue(kpcitest_workqueue);
48739         pci_epf_unregister_driver(&test_driver);
48741  module_exit(pci_epf_test_exit);
48742 diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
48743 index 3365c93abf0e..f031302ad401 100644
48744 --- a/drivers/pci/hotplug/acpiphp_glue.c
48745 +++ b/drivers/pci/hotplug/acpiphp_glue.c
48746 @@ -533,6 +533,7 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
48747                         slot->flags &= ~SLOT_ENABLED;
48748                         continue;
48749                 }
48750 +               pci_dev_put(dev);
48751         }
48754 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
48755 index 16a17215f633..e4d4e399004b 100644
48756 --- a/drivers/pci/pci.c
48757 +++ b/drivers/pci/pci.c
48758 @@ -1870,20 +1870,10 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
48759         int err;
48760         int i, bars = 0;
48762 -       /*
48763 -        * Power state could be unknown at this point, either due to a fresh
48764 -        * boot or a device removal call.  So get the current power state
48765 -        * so that things like MSI message writing will behave as expected
48766 -        * (e.g. if the device really is in D0 at enable time).
48767 -        */
48768 -       if (dev->pm_cap) {
48769 -               u16 pmcsr;
48770 -               pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
48771 -               dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
48772 -       }
48774 -       if (atomic_inc_return(&dev->enable_cnt) > 1)
48775 +       if (atomic_inc_return(&dev->enable_cnt) > 1) {
48776 +               pci_update_current_state(dev, dev->current_state);
48777                 return 0;               /* already enabled */
48778 +       }
48780         bridge = pci_upstream_bridge(dev);
48781         if (bridge)
48782 diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
48783 index ef7c4661314f..9684b468267f 100644
48784 --- a/drivers/pci/pci.h
48785 +++ b/drivers/pci/pci.h
48786 @@ -624,6 +624,12 @@ static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
48787  #if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
48788  int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
48789                           struct resource *res);
48790 +#else
48791 +static inline int acpi_get_rc_resources(struct device *dev, const char *hid,
48792 +                                       u16 segment, struct resource *res)
48794 +       return -ENODEV;
48796  #endif
48798  int pci_rebar_get_current_size(struct pci_dev *pdev, int bar);
48799 diff --git a/drivers/pci/pcie/rcec.c b/drivers/pci/pcie/rcec.c
48800 index 2c5c552994e4..d0bcd141ac9c 100644
48801 --- a/drivers/pci/pcie/rcec.c
48802 +++ b/drivers/pci/pcie/rcec.c
48803 @@ -32,7 +32,7 @@ static bool rcec_assoc_rciep(struct pci_dev *rcec, struct pci_dev *rciep)
48805         /* Same bus, so check bitmap */
48806         for_each_set_bit(devn, &bitmap, 32)
48807 -               if (devn == rciep->devfn)
48808 +               if (devn == PCI_SLOT(rciep->devfn))
48809                         return true;
48811         return false;
48812 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
48813 index 953f15abc850..be51670572fa 100644
48814 --- a/drivers/pci/probe.c
48815 +++ b/drivers/pci/probe.c
48816 @@ -2353,6 +2353,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
48817         pci_set_of_node(dev);
48819         if (pci_setup_device(dev)) {
48820 +               pci_release_of_node(dev);
48821                 pci_bus_put(dev->bus);
48822                 kfree(dev);
48823                 return NULL;
48824 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
48825 index 653660e3ba9e..c87fd7a275e4 100644
48826 --- a/drivers/pci/quirks.c
48827 +++ b/drivers/pci/quirks.c
48828 @@ -3558,6 +3558,106 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
48829         dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
48832 +static bool acs_on_downstream;
48833 +static bool acs_on_multifunction;
48835 +#define NUM_ACS_IDS 16
48836 +struct acs_on_id {
48837 +       unsigned short vendor;
48838 +       unsigned short device;
48840 +static struct acs_on_id acs_on_ids[NUM_ACS_IDS];
48841 +static u8 max_acs_id;
48843 +static __init int pcie_acs_override_setup(char *p)
48845 +       if (!p)
48846 +               return -EINVAL;
48848 +       while (*p) {
48849 +               if (!strncmp(p, "downstream", 10))
48850 +                       acs_on_downstream = true;
48851 +               if (!strncmp(p, "multifunction", 13))
48852 +                       acs_on_multifunction = true;
48853 +               if (!strncmp(p, "id:", 3)) {
48854 +                       char opt[5];
48855 +                       int ret;
48856 +                       long val;
48858 +                       if (max_acs_id >= NUM_ACS_IDS - 1) {
48859 +                               pr_warn("Out of PCIe ACS override slots (%d)\n",
48860 +                                               NUM_ACS_IDS);
48861 +                               goto next;
48862 +                       }
48864 +                       p += 3;
48865 +                       snprintf(opt, 5, "%s", p);
48866 +                       ret = kstrtol(opt, 16, &val);
48867 +                       if (ret) {
48868 +                               pr_warn("PCIe ACS ID parse error %d\n", ret);
48869 +                               goto next;
48870 +                       }
48871 +                       acs_on_ids[max_acs_id].vendor = val;
48873 +                       p += strcspn(p, ":");
48874 +                       if (*p != ':') {
48875 +                               pr_warn("PCIe ACS invalid ID\n");
48876 +                               goto next;
48877 +                       }
48879 +                       p++;
48880 +                       snprintf(opt, 5, "%s", p);
48881 +                       ret = kstrtol(opt, 16, &val);
48882 +                       if (ret) {
48883 +                               pr_warn("PCIe ACS ID parse error %d\n", ret);
48884 +                               goto next;
48885 +                       }
48886 +                       acs_on_ids[max_acs_id].device = val;
48887 +                       max_acs_id++;
48888 +               }
48889 +next:
48890 +               p += strcspn(p, ",");
48891 +               if (*p == ',')
48892 +                       p++;
48893 +       }
48895 +       if (acs_on_downstream || acs_on_multifunction || max_acs_id)
48896 +               pr_warn("Warning: PCIe ACS overrides enabled; this may allow non-IOMMU-protected peer-to-peer DMA\n");
48898 +       return 0;
48900 +early_param("pcie_acs_override", pcie_acs_override_setup);
48902 +static int pcie_acs_overrides(struct pci_dev *dev, u16 acs_flags)
48904 +       int i;
48906 +       /* Never override ACS for legacy devices or devices with ACS caps */
48907 +       if (!pci_is_pcie(dev) ||
48908 +               pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS))
48909 +                       return -ENOTTY;
48911 +       for (i = 0; i < max_acs_id; i++)
48912 +               if (acs_on_ids[i].vendor == dev->vendor &&
48913 +                       acs_on_ids[i].device == dev->device)
48914 +                               return 1;
48916 +       switch (pci_pcie_type(dev)) {
48917 +       case PCI_EXP_TYPE_DOWNSTREAM:
48918 +       case PCI_EXP_TYPE_ROOT_PORT:
48919 +               if (acs_on_downstream)
48920 +                       return 1;
48921 +               break;
48922 +       case PCI_EXP_TYPE_ENDPOINT:
48923 +       case PCI_EXP_TYPE_UPSTREAM:
48924 +       case PCI_EXP_TYPE_LEG_END:
48925 +       case PCI_EXP_TYPE_RC_END:
48926 +               if (acs_on_multifunction && dev->multifunction)
48927 +                       return 1;
48928 +       }
48930 +       return -ENOTTY;
48932  /*
48933   * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
48934   * The device will throw a Link Down error on AER-capable systems and
48935 @@ -4773,6 +4873,7 @@ static const struct pci_dev_acs_enabled {
48936         { PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs },
48937         /* Zhaoxin Root/Downstream Ports */
48938         { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
48939 +       { PCI_ANY_ID, PCI_ANY_ID, pcie_acs_overrides },
48940         { 0 }
48941  };
48943 diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
48944 index 7915d10f9aa1..bd549070c011 100644
48945 --- a/drivers/pci/vpd.c
48946 +++ b/drivers/pci/vpd.c
48947 @@ -570,7 +570,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
48948  DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
48949  DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
48950                 quirk_blacklist_vpd);
48951 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
48952  /*
48953   * The Amazon Annapurna Labs 0x0031 device id is reused for other non Root Port
48954   * device types, so the quirk is registered for the PCI_CLASS_BRIDGE_PCI class.
48955 diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
48956 index 933bd8410fc2..ef9676418c9f 100644
48957 --- a/drivers/perf/arm_pmu_platform.c
48958 +++ b/drivers/perf/arm_pmu_platform.c
48959 @@ -6,6 +6,7 @@
48960   * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
48961   */
48962  #define pr_fmt(fmt) "hw perfevents: " fmt
48963 +#define dev_fmt pr_fmt
48965  #include <linux/bug.h>
48966  #include <linux/cpumask.h>
48967 @@ -100,10 +101,8 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
48968         struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
48970         num_irqs = platform_irq_count(pdev);
48971 -       if (num_irqs < 0) {
48972 -               pr_err("unable to count PMU IRQs\n");
48973 -               return num_irqs;
48974 -       }
48975 +       if (num_irqs < 0)
48976 +               return dev_err_probe(&pdev->dev, num_irqs, "unable to count PMU IRQs\n");
48978         /*
48979          * In this case we have no idea which CPUs are covered by the PMU.
48980 @@ -236,7 +235,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
48982         ret = armpmu_register(pmu);
48983         if (ret)
48984 -               goto out_free;
48985 +               goto out_free_irqs;
48987         return 0;
48989 diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
48990 index 26a0badabe38..19f32ae877b9 100644
48991 --- a/drivers/phy/cadence/phy-cadence-sierra.c
48992 +++ b/drivers/phy/cadence/phy-cadence-sierra.c
48993 @@ -319,6 +319,12 @@ static int cdns_sierra_phy_on(struct phy *gphy)
48994         u32 val;
48995         int ret;
48997 +       ret = reset_control_deassert(sp->phy_rst);
48998 +       if (ret) {
48999 +               dev_err(dev, "Failed to take the PHY out of reset\n");
49000 +               return ret;
49001 +       }
49003         /* Take the PHY lane group out of reset */
49004         ret = reset_control_deassert(ins->lnk_rst);
49005         if (ret) {
49006 @@ -616,7 +622,6 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
49008         pm_runtime_enable(dev);
49009         phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
49010 -       reset_control_deassert(sp->phy_rst);
49011         return PTR_ERR_OR_ZERO(phy_provider);
49013  put_child:
49014 diff --git a/drivers/phy/ingenic/phy-ingenic-usb.c b/drivers/phy/ingenic/phy-ingenic-usb.c
49015 index ea127b177f46..28c28d816484 100644
49016 --- a/drivers/phy/ingenic/phy-ingenic-usb.c
49017 +++ b/drivers/phy/ingenic/phy-ingenic-usb.c
49018 @@ -352,8 +352,8 @@ static int ingenic_usb_phy_probe(struct platform_device *pdev)
49019         }
49021         priv->phy = devm_phy_create(dev, NULL, &ingenic_usb_phy_ops);
49022 -       if (IS_ERR(priv))
49023 -               return PTR_ERR(priv);
49024 +       if (IS_ERR(priv->phy))
49025 +               return PTR_ERR(priv->phy);
49027         phy_set_drvdata(priv->phy, priv);
49029 diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig
49030 index 6c96f2bf5266..c8ee23fc3a83 100644
49031 --- a/drivers/phy/marvell/Kconfig
49032 +++ b/drivers/phy/marvell/Kconfig
49033 @@ -3,8 +3,8 @@
49034  # Phy drivers for Marvell platforms
49036  config ARMADA375_USBCLUSTER_PHY
49037 -       def_bool y
49038 -       depends on MACH_ARMADA_375 || COMPILE_TEST
49039 +       bool "Armada 375 USB cluster PHY support" if COMPILE_TEST
49040 +       default y if MACH_ARMADA_375
49041         depends on OF && HAS_IOMEM
49042         select GENERIC_PHY
49044 diff --git a/drivers/phy/ralink/phy-mt7621-pci.c b/drivers/phy/ralink/phy-mt7621-pci.c
49045 index 9a610b414b1f..753cb5bab930 100644
49046 --- a/drivers/phy/ralink/phy-mt7621-pci.c
49047 +++ b/drivers/phy/ralink/phy-mt7621-pci.c
49048 @@ -62,7 +62,7 @@
49050  #define RG_PE1_FRC_MSTCKDIV                    BIT(5)
49052 -#define XTAL_MASK                              GENMASK(7, 6)
49053 +#define XTAL_MASK                              GENMASK(8, 6)
49055  #define MAX_PHYS       2
49057 @@ -319,9 +319,9 @@ static int mt7621_pci_phy_probe(struct platform_device *pdev)
49058                 return PTR_ERR(phy->regmap);
49060         phy->phy = devm_phy_create(dev, dev->of_node, &mt7621_pci_phy_ops);
49061 -       if (IS_ERR(phy)) {
49062 +       if (IS_ERR(phy->phy)) {
49063                 dev_err(dev, "failed to create phy\n");
49064 -               return PTR_ERR(phy);
49065 +               return PTR_ERR(phy->phy);
49066         }
49068         phy_set_drvdata(phy->phy, phy);
49069 diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
49070 index c9cfafe89cbf..e28e25f98708 100644
49071 --- a/drivers/phy/ti/phy-j721e-wiz.c
49072 +++ b/drivers/phy/ti/phy-j721e-wiz.c
49073 @@ -615,6 +615,12 @@ static void wiz_clock_cleanup(struct wiz *wiz, struct device_node *node)
49074                 of_clk_del_provider(clk_node);
49075                 of_node_put(clk_node);
49076         }
49078 +       for (i = 0; i < wiz->clk_div_sel_num; i++) {
49079 +               clk_node = of_get_child_by_name(node, clk_div_sel[i].node_name);
49080 +               of_clk_del_provider(clk_node);
49081 +               of_node_put(clk_node);
49082 +       }
49085  static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
49086 @@ -947,27 +953,24 @@ static int wiz_probe(struct platform_device *pdev)
49087                 goto err_get_sync;
49088         }
49090 +       ret = wiz_init(wiz);
49091 +       if (ret) {
49092 +               dev_err(dev, "WIZ initialization failed\n");
49093 +               goto err_wiz_init;
49094 +       }
49096         serdes_pdev = of_platform_device_create(child_node, NULL, dev);
49097         if (!serdes_pdev) {
49098                 dev_WARN(dev, "Unable to create SERDES platform device\n");
49099                 ret = -ENOMEM;
49100 -               goto err_pdev_create;
49101 -       }
49102 -       wiz->serdes_pdev = serdes_pdev;
49104 -       ret = wiz_init(wiz);
49105 -       if (ret) {
49106 -               dev_err(dev, "WIZ initialization failed\n");
49107                 goto err_wiz_init;
49108         }
49109 +       wiz->serdes_pdev = serdes_pdev;
49111         of_node_put(child_node);
49112         return 0;
49114  err_wiz_init:
49115 -       of_platform_device_destroy(&serdes_pdev->dev, NULL);
49117 -err_pdev_create:
49118         wiz_clock_cleanup(wiz, node);
49120  err_get_sync:
49121 diff --git a/drivers/phy/ti/phy-twl4030-usb.c b/drivers/phy/ti/phy-twl4030-usb.c
49122 index 9887f908f540..812e5409d359 100644
49123 --- a/drivers/phy/ti/phy-twl4030-usb.c
49124 +++ b/drivers/phy/ti/phy-twl4030-usb.c
49125 @@ -779,7 +779,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
49127         usb_remove_phy(&twl->phy);
49128         pm_runtime_get_sync(twl->dev);
49129 -       cancel_delayed_work(&twl->id_workaround_work);
49130 +       cancel_delayed_work_sync(&twl->id_workaround_work);
49131         device_remove_file(twl->dev, &dev_attr_vbus);
49133         /* set transceiver mode to power on defaults */
49134 diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
49135 index e71ebccc479c..03c32b2c5d30 100644
49136 --- a/drivers/pinctrl/pinctrl-at91-pio4.c
49137 +++ b/drivers/pinctrl/pinctrl-at91-pio4.c
49138 @@ -801,6 +801,10 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
49140         conf = atmel_pin_config_read(pctldev, pin_id);
49142 +       /* Keep slew rate enabled by default. */
49143 +       if (atmel_pioctrl->slew_rate_support)
49144 +               conf |= ATMEL_PIO_SR_MASK;
49146         for (i = 0; i < num_configs; i++) {
49147                 unsigned int param = pinconf_to_config_param(configs[i]);
49148                 unsigned int arg = pinconf_to_config_argument(configs[i]);
49149 @@ -808,10 +812,6 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
49150                 dev_dbg(pctldev->dev, "%s: pin=%u, config=0x%lx\n",
49151                         __func__, pin_id, configs[i]);
49153 -               /* Keep slew rate enabled by default. */
49154 -               if (atmel_pioctrl->slew_rate_support)
49155 -                       conf |= ATMEL_PIO_SR_MASK;
49157                 switch (param) {
49158                 case PIN_CONFIG_BIAS_DISABLE:
49159                         conf &= (~ATMEL_PIO_PUEN_MASK);
49160 diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
49161 index f2746125b077..3de0f767b7d1 100644
49162 --- a/drivers/pinctrl/pinctrl-ingenic.c
49163 +++ b/drivers/pinctrl/pinctrl-ingenic.c
49164 @@ -667,7 +667,9 @@ static int jz4770_pwm_pwm7_pins[] = { 0x6b, };
49165  static int jz4770_mac_rmii_pins[] = {
49166         0xa9, 0xab, 0xaa, 0xac, 0xa5, 0xa4, 0xad, 0xae, 0xa6, 0xa8,
49167  };
49168 -static int jz4770_mac_mii_pins[] = { 0xa7, 0xaf, };
49169 +static int jz4770_mac_mii_pins[] = {
49170 +       0x7b, 0x7a, 0x7d, 0x7c, 0xa7, 0x24, 0xaf,
49173  static const struct group_desc jz4770_groups[] = {
49174         INGENIC_PIN_GROUP("uart0-data", jz4770_uart0_data, 0),
49175 @@ -2107,26 +2109,48 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
49176         enum pin_config_param param = pinconf_to_config_param(*config);
49177         unsigned int idx = pin % PINS_PER_GPIO_CHIP;
49178         unsigned int offt = pin / PINS_PER_GPIO_CHIP;
49179 -       bool pull;
49180 +       unsigned int bias;
49181 +       bool pull, pullup, pulldown;
49183 -       if (jzpc->info->version >= ID_JZ4770)
49184 -               pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
49185 -       else
49186 -               pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
49187 +       if (jzpc->info->version >= ID_X1830) {
49188 +               unsigned int half = PINS_PER_GPIO_CHIP / 2;
49189 +               unsigned int idxh = (pin % half) * 2;
49191 +               if (idx < half)
49192 +                       regmap_read(jzpc->map, offt * jzpc->info->reg_offset +
49193 +                                       X1830_GPIO_PEL, &bias);
49194 +               else
49195 +                       regmap_read(jzpc->map, offt * jzpc->info->reg_offset +
49196 +                                       X1830_GPIO_PEH, &bias);
49198 +               bias = (bias >> idxh) & (GPIO_PULL_UP | GPIO_PULL_DOWN);
49200 +               pullup = (bias == GPIO_PULL_UP) && (jzpc->info->pull_ups[offt] & BIT(idx));
49201 +               pulldown = (bias == GPIO_PULL_DOWN) && (jzpc->info->pull_downs[offt] & BIT(idx));
49203 +       } else {
49204 +               if (jzpc->info->version >= ID_JZ4770)
49205 +                       pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
49206 +               else
49207 +                       pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
49209 +               pullup = pull && (jzpc->info->pull_ups[offt] & BIT(idx));
49210 +               pulldown = pull && (jzpc->info->pull_downs[offt] & BIT(idx));
49211 +       }
49213         switch (param) {
49214         case PIN_CONFIG_BIAS_DISABLE:
49215 -               if (pull)
49216 +               if (pullup || pulldown)
49217                         return -EINVAL;
49218                 break;
49220         case PIN_CONFIG_BIAS_PULL_UP:
49221 -               if (!pull || !(jzpc->info->pull_ups[offt] & BIT(idx)))
49222 +               if (!pullup)
49223                         return -EINVAL;
49224                 break;
49226         case PIN_CONFIG_BIAS_PULL_DOWN:
49227 -               if (!pull || !(jzpc->info->pull_downs[offt] & BIT(idx)))
49228 +               if (!pulldown)
49229                         return -EINVAL;
49230                 break;
49232 @@ -2144,7 +2168,7 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
49233         if (jzpc->info->version >= ID_X1830) {
49234                 unsigned int idx = pin % PINS_PER_GPIO_CHIP;
49235                 unsigned int half = PINS_PER_GPIO_CHIP / 2;
49236 -               unsigned int idxh = pin % half * 2;
49237 +               unsigned int idxh = (pin % half) * 2;
49238                 unsigned int offt = pin / PINS_PER_GPIO_CHIP;
49240                 if (idx < half) {
49241 diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
49242 index 7771316dfffa..10890fde9a75 100644
49243 --- a/drivers/pinctrl/pinctrl-single.c
49244 +++ b/drivers/pinctrl/pinctrl-single.c
49245 @@ -270,20 +270,44 @@ static void __maybe_unused pcs_writel(unsigned val, void __iomem *reg)
49246         writel(val, reg);
49249 +static unsigned int pcs_pin_reg_offset_get(struct pcs_device *pcs,
49250 +                                          unsigned int pin)
49252 +       unsigned int mux_bytes = pcs->width / BITS_PER_BYTE;
49254 +       if (pcs->bits_per_mux) {
49255 +               unsigned int pin_offset_bytes;
49257 +               pin_offset_bytes = (pcs->bits_per_pin * pin) / BITS_PER_BYTE;
49258 +               return (pin_offset_bytes / mux_bytes) * mux_bytes;
49259 +       }
49261 +       return pin * mux_bytes;
49264 +static unsigned int pcs_pin_shift_reg_get(struct pcs_device *pcs,
49265 +                                         unsigned int pin)
49267 +       return (pin % (pcs->width / pcs->bits_per_pin)) * pcs->bits_per_pin;
49270  static void pcs_pin_dbg_show(struct pinctrl_dev *pctldev,
49271                                         struct seq_file *s,
49272                                         unsigned pin)
49274         struct pcs_device *pcs;
49275 -       unsigned val, mux_bytes;
49276 +       unsigned int val;
49277         unsigned long offset;
49278         size_t pa;
49280         pcs = pinctrl_dev_get_drvdata(pctldev);
49282 -       mux_bytes = pcs->width / BITS_PER_BYTE;
49283 -       offset = pin * mux_bytes;
49284 +       offset = pcs_pin_reg_offset_get(pcs, pin);
49285         val = pcs->read(pcs->base + offset);
49287 +       if (pcs->bits_per_mux)
49288 +               val &= pcs->fmask << pcs_pin_shift_reg_get(pcs, pin);
49290         pa = pcs->res->start + offset;
49292         seq_printf(s, "%zx %08x %s ", pa, val, DRIVER_NAME);
49293 @@ -384,7 +408,6 @@ static int pcs_request_gpio(struct pinctrl_dev *pctldev,
49294         struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev);
49295         struct pcs_gpiofunc_range *frange = NULL;
49296         struct list_head *pos, *tmp;
49297 -       int mux_bytes = 0;
49298         unsigned data;
49300         /* If function mask is null, return directly. */
49301 @@ -392,29 +415,27 @@ static int pcs_request_gpio(struct pinctrl_dev *pctldev,
49302                 return -ENOTSUPP;
49304         list_for_each_safe(pos, tmp, &pcs->gpiofuncs) {
49305 +               u32 offset;
49307                 frange = list_entry(pos, struct pcs_gpiofunc_range, node);
49308                 if (pin >= frange->offset + frange->npins
49309                         || pin < frange->offset)
49310                         continue;
49311 -               mux_bytes = pcs->width / BITS_PER_BYTE;
49313 -               if (pcs->bits_per_mux) {
49314 -                       int byte_num, offset, pin_shift;
49315 +               offset = pcs_pin_reg_offset_get(pcs, pin);
49317 -                       byte_num = (pcs->bits_per_pin * pin) / BITS_PER_BYTE;
49318 -                       offset = (byte_num / mux_bytes) * mux_bytes;
49319 -                       pin_shift = pin % (pcs->width / pcs->bits_per_pin) *
49320 -                                   pcs->bits_per_pin;
49321 +               if (pcs->bits_per_mux) {
49322 +                       int pin_shift = pcs_pin_shift_reg_get(pcs, pin);
49324                         data = pcs->read(pcs->base + offset);
49325                         data &= ~(pcs->fmask << pin_shift);
49326                         data |= frange->gpiofunc << pin_shift;
49327                         pcs->write(data, pcs->base + offset);
49328                 } else {
49329 -                       data = pcs->read(pcs->base + pin * mux_bytes);
49330 +                       data = pcs->read(pcs->base + offset);
49331                         data &= ~pcs->fmask;
49332                         data |= frange->gpiofunc;
49333 -                       pcs->write(data, pcs->base + pin * mux_bytes);
49334 +                       pcs->write(data, pcs->base + offset);
49335                 }
49336                 break;
49337         }
49338 @@ -656,10 +677,8 @@ static const struct pinconf_ops pcs_pinconf_ops = {
49339   * pcs_add_pin() - add a pin to the static per controller pin array
49340   * @pcs: pcs driver instance
49341   * @offset: register offset from base
49342 - * @pin_pos: unused
49343   */
49344 -static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
49345 -               unsigned pin_pos)
49346 +static int pcs_add_pin(struct pcs_device *pcs, unsigned int offset)
49348         struct pcs_soc_data *pcs_soc = &pcs->socdata;
49349         struct pinctrl_pin_desc *pin;
49350 @@ -728,17 +747,9 @@ static int pcs_allocate_pin_table(struct pcs_device *pcs)
49351         for (i = 0; i < pcs->desc.npins; i++) {
49352                 unsigned offset;
49353                 int res;
49354 -               int byte_num;
49355 -               int pin_pos = 0;
49357 -               if (pcs->bits_per_mux) {
49358 -                       byte_num = (pcs->bits_per_pin * i) / BITS_PER_BYTE;
49359 -                       offset = (byte_num / mux_bytes) * mux_bytes;
49360 -                       pin_pos = i % num_pins_in_register;
49361 -               } else {
49362 -                       offset = i * mux_bytes;
49363 -               }
49364 -               res = pcs_add_pin(pcs, offset, pin_pos);
49365 +               offset = pcs_pin_reg_offset_get(pcs, i);
49366 +               res = pcs_add_pin(pcs, offset);
49367                 if (res < 0) {
49368                         dev_err(pcs->dev, "error adding pins: %i\n", res);
49369                         return res;
49370 diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
49371 index 0cd7f33cdf25..2b99f4130e1e 100644
49372 --- a/drivers/pinctrl/samsung/pinctrl-exynos.c
49373 +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
49374 @@ -55,7 +55,7 @@ static void exynos_irq_mask(struct irq_data *irqd)
49375         struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
49376         struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
49377         unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
49378 -       unsigned long mask;
49379 +       unsigned int mask;
49380         unsigned long flags;
49382         raw_spin_lock_irqsave(&bank->slock, flags);
49383 @@ -83,7 +83,7 @@ static void exynos_irq_unmask(struct irq_data *irqd)
49384         struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
49385         struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
49386         unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
49387 -       unsigned long mask;
49388 +       unsigned int mask;
49389         unsigned long flags;
49391         /*
49392 @@ -483,7 +483,7 @@ static void exynos_irq_eint0_15(struct irq_desc *desc)
49393         chained_irq_exit(chip, desc);
49396 -static inline void exynos_irq_demux_eint(unsigned long pend,
49397 +static inline void exynos_irq_demux_eint(unsigned int pend,
49398                                                 struct irq_domain *domain)
49400         unsigned int irq;
49401 @@ -500,8 +500,8 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
49403         struct irq_chip *chip = irq_desc_get_chip(desc);
49404         struct exynos_muxed_weint_data *eintd = irq_desc_get_handler_data(desc);
49405 -       unsigned long pend;
49406 -       unsigned long mask;
49407 +       unsigned int pend;
49408 +       unsigned int mask;
49409         int i;
49411         chained_irq_enter(chip, desc);
49412 diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
49413 index 0811562deecc..24be8f550ae0 100644
49414 --- a/drivers/platform/chrome/cros_ec_typec.c
49415 +++ b/drivers/platform/chrome/cros_ec_typec.c
49416 @@ -483,6 +483,11 @@ static int cros_typec_enable_dp(struct cros_typec_data *typec,
49417                 return -ENOTSUPP;
49418         }
49420 +       if (!pd_ctrl->dp_mode) {
49421 +               dev_err(typec->dev, "No valid DP mode provided.\n");
49422 +               return -EINVAL;
49423 +       }
49425         /* Status VDO. */
49426         dp_data.status = DP_STATUS_ENABLED;
49427         if (port->mux_flags & USB_PD_MUX_HPD_IRQ)
49428 diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
49429 index bbc4e71a16ff..38800e86ed8a 100644
49430 --- a/drivers/platform/mellanox/mlxbf-tmfifo.c
49431 +++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
49432 @@ -294,6 +294,9 @@ mlxbf_tmfifo_get_next_desc(struct mlxbf_tmfifo_vring *vring)
49433         if (vring->next_avail == virtio16_to_cpu(vdev, vr->avail->idx))
49434                 return NULL;
49436 +       /* Make sure the read of 'avail->idx' is ordered before reading the ring. */
49437 +       virtio_rmb(false);
49439         idx = vring->next_avail % vr->num;
49440         head = virtio16_to_cpu(vdev, vr->avail->ring[idx]);
49441         if (WARN_ON(head >= vr->num))
49442 @@ -322,7 +325,7 @@ static void mlxbf_tmfifo_release_desc(struct mlxbf_tmfifo_vring *vring,
49443          * done or not. Add a memory barrier here to make sure the update above
49444          * completes before updating the idx.
49445          */
49446 -       mb();
49447 +       virtio_mb(false);
49448         vr->used->idx = cpu_to_virtio16(vdev, vr_idx + 1);
49451 @@ -733,6 +736,12 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
49452                 desc = NULL;
49453                 fifo->vring[is_rx] = NULL;
49455 +               /*
49456 +                * Make sure the loads/stores are ordered before
49457 +                * returning to virtio.
49458 +                */
49459 +               virtio_mb(false);
49461                 /* Notify upper layer that packet is done. */
49462                 spin_lock_irqsave(&fifo->spin_lock[is_rx], flags);
49463                 vring_interrupt(0, vring->vq);
49464 diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
49465 index 5bcb59ed579d..89761d3e1a47 100644
49466 --- a/drivers/platform/surface/aggregator/controller.c
49467 +++ b/drivers/platform/surface/aggregator/controller.c
49468 @@ -1040,7 +1040,7 @@ static int ssam_dsm_load_u32(acpi_handle handle, u64 funcs, u64 func, u32 *ret)
49469         union acpi_object *obj;
49470         u64 val;
49472 -       if (!(funcs & BIT(func)))
49473 +       if (!(funcs & BIT_ULL(func)))
49474                 return 0; /* Not supported, leave *ret at its default value */
49476         obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_GUID,
49477 diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
49478 index 461ec61530eb..205a096e9cee 100644
49479 --- a/drivers/platform/x86/Kconfig
49480 +++ b/drivers/platform/x86/Kconfig
49481 @@ -688,7 +688,7 @@ config INTEL_HID_EVENT
49483  config INTEL_INT0002_VGPIO
49484         tristate "Intel ACPI INT0002 Virtual GPIO driver"
49485 -       depends on GPIOLIB && ACPI
49486 +       depends on GPIOLIB && ACPI && PM_SLEEP
49487         select GPIOLIB_IRQCHIP
49488         help
49489           Some peripherals on Bay Trail and Cherry Trail platforms signal a
49490 diff --git a/drivers/platform/x86/dell/dell-smbios-wmi.c b/drivers/platform/x86/dell/dell-smbios-wmi.c
49491 index 27a298b7c541..c97bd4a45242 100644
49492 --- a/drivers/platform/x86/dell/dell-smbios-wmi.c
49493 +++ b/drivers/platform/x86/dell/dell-smbios-wmi.c
49494 @@ -271,7 +271,8 @@ int init_dell_smbios_wmi(void)
49496  void exit_dell_smbios_wmi(void)
49498 -       wmi_driver_unregister(&dell_smbios_wmi_driver);
49499 +       if (wmi_supported)
49500 +               wmi_driver_unregister(&dell_smbios_wmi_driver);
49503  MODULE_DEVICE_TABLE(wmi, dell_smbios_wmi_id_table);
49504 diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
49505 index 7410ccae650c..a90ae6ba4a73 100644
49506 --- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
49507 +++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
49508 @@ -399,6 +399,7 @@ static int init_bios_attributes(int attr_type, const char *guid)
49509         union acpi_object *obj = NULL;
49510         union acpi_object *elements;
49511         struct kset *tmp_set;
49512 +       int min_elements;
49514         /* instance_id needs to be reset for each type GUID
49515          * also, instance IDs are unique within GUID but not across
49516 @@ -409,14 +410,38 @@ static int init_bios_attributes(int attr_type, const char *guid)
49517         retval = alloc_attributes_data(attr_type);
49518         if (retval)
49519                 return retval;
49521 +       switch (attr_type) {
49522 +       case ENUM:      min_elements = 8;       break;
49523 +       case INT:       min_elements = 9;       break;
49524 +       case STR:       min_elements = 8;       break;
49525 +       case PO:        min_elements = 4;       break;
49526 +       default:
49527 +               pr_err("Error: Unknown attr_type: %d\n", attr_type);
49528 +               return -EINVAL;
49529 +       }
49531         /* need to use specific instance_id and guid combination to get right data */
49532         obj = get_wmiobj_pointer(instance_id, guid);
49533 -       if (!obj || obj->type != ACPI_TYPE_PACKAGE)
49534 +       if (!obj)
49535                 return -ENODEV;
49536 -       elements = obj->package.elements;
49538         mutex_lock(&wmi_priv.mutex);
49539 -       while (elements) {
49540 +       while (obj) {
49541 +               if (obj->type != ACPI_TYPE_PACKAGE) {
49542 +                       pr_err("Error: Expected ACPI-package type, got: %d\n", obj->type);
49543 +                       retval = -EIO;
49544 +                       goto err_attr_init;
49545 +               }
49547 +               if (obj->package.count < min_elements) {
49548 +                       pr_err("Error: ACPI-package does not have enough elements: %d < %d\n",
49549 +                              obj->package.count, min_elements);
49550 +                       goto nextobj;
49551 +               }
49553 +               elements = obj->package.elements;
49555                 /* sanity checking */
49556                 if (elements[ATTR_NAME].type != ACPI_TYPE_STRING) {
49557                         pr_debug("incorrect element type\n");
49558 @@ -481,7 +506,6 @@ static int init_bios_attributes(int attr_type, const char *guid)
49559                 kfree(obj);
49560                 instance_id++;
49561                 obj = get_wmiobj_pointer(instance_id, guid);
49562 -               elements = obj ? obj->package.elements : NULL;
49563         }
49565         mutex_unlock(&wmi_priv.mutex);
49566 diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
49567 index 6cb5ad4be231..387817290921 100644
49568 --- a/drivers/platform/x86/ideapad-laptop.c
49569 +++ b/drivers/platform/x86/ideapad-laptop.c
49570 @@ -57,8 +57,8 @@ enum {
49571  };
49573  enum {
49574 -       SMBC_CONSERVATION_ON  = 3,
49575 -       SMBC_CONSERVATION_OFF = 5,
49576 +       SBMC_CONSERVATION_ON  = 3,
49577 +       SBMC_CONSERVATION_OFF = 5,
49578  };
49580  enum {
49581 @@ -182,9 +182,9 @@ static int eval_gbmd(acpi_handle handle, unsigned long *res)
49582         return eval_int(handle, "GBMD", res);
49585 -static int exec_smbc(acpi_handle handle, unsigned long arg)
49586 +static int exec_sbmc(acpi_handle handle, unsigned long arg)
49588 -       return exec_simple_method(handle, "SMBC", arg);
49589 +       return exec_simple_method(handle, "SBMC", arg);
49592  static int eval_hals(acpi_handle handle, unsigned long *res)
49593 @@ -477,7 +477,7 @@ static ssize_t conservation_mode_store(struct device *dev,
49594         if (err)
49595                 return err;
49597 -       err = exec_smbc(priv->adev->handle, state ? SMBC_CONSERVATION_ON : SMBC_CONSERVATION_OFF);
49598 +       err = exec_sbmc(priv->adev->handle, state ? SBMC_CONSERVATION_ON : SBMC_CONSERVATION_OFF);
49599         if (err)
49600                 return err;
49602 @@ -809,6 +809,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
49604         struct ideapad_dytc_priv *dytc = container_of(pprof, struct ideapad_dytc_priv, pprof);
49605         struct ideapad_private *priv = dytc->priv;
49606 +       unsigned long output;
49607         int err;
49609         err = mutex_lock_interruptible(&dytc->mutex);
49610 @@ -829,7 +830,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
49612                 /* Determine if we are in CQL mode. This alters the commands we do */
49613                 err = dytc_cql_command(priv, DYTC_SET_COMMAND(DYTC_FUNCTION_MMC, perfmode, 1),
49614 -                                      NULL);
49615 +                                      &output);
49616                 if (err)
49617                         goto unlock;
49618         }
49619 diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c
49620 index 289c6655d425..569342aa8926 100644
49621 --- a/drivers/platform/x86/intel_int0002_vgpio.c
49622 +++ b/drivers/platform/x86/intel_int0002_vgpio.c
49623 @@ -51,6 +51,12 @@
49624  #define GPE0A_STS_PORT                 0x420
49625  #define GPE0A_EN_PORT                  0x428
49627 +struct int0002_data {
49628 +       struct gpio_chip chip;
49629 +       int parent_irq;
49630 +       int wake_enable_count;
49633  /*
49634   * As this is not a real GPIO at all, but just a hack to model an event in
49635   * ACPI the get / set functions are dummy functions.
49636 @@ -98,14 +104,16 @@ static void int0002_irq_mask(struct irq_data *data)
49637  static int int0002_irq_set_wake(struct irq_data *data, unsigned int on)
49639         struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
49640 -       struct platform_device *pdev = to_platform_device(chip->parent);
49641 -       int irq = platform_get_irq(pdev, 0);
49642 +       struct int0002_data *int0002 = container_of(chip, struct int0002_data, chip);
49644 -       /* Propagate to parent irq */
49645 +       /*
49646 +        * Applying the wakeup flag to our parent IRQ is delayed until system
49647 +        * suspend, because we only want to do this when using s2idle.
49648 +        */
49649         if (on)
49650 -               enable_irq_wake(irq);
49651 +               int0002->wake_enable_count++;
49652         else
49653 -               disable_irq_wake(irq);
49654 +               int0002->wake_enable_count--;
49656         return 0;
49658 @@ -135,7 +143,7 @@ static bool int0002_check_wake(void *data)
49659         return (gpe_sts_reg & GPE0A_PME_B0_STS_BIT);
49662 -static struct irq_chip int0002_byt_irqchip = {
49663 +static struct irq_chip int0002_irqchip = {
49664         .name                   = DRV_NAME,
49665         .irq_ack                = int0002_irq_ack,
49666         .irq_mask               = int0002_irq_mask,
49667 @@ -143,21 +151,9 @@ static struct irq_chip int0002_byt_irqchip = {
49668         .irq_set_wake           = int0002_irq_set_wake,
49669  };
49671 -static struct irq_chip int0002_cht_irqchip = {
49672 -       .name                   = DRV_NAME,
49673 -       .irq_ack                = int0002_irq_ack,
49674 -       .irq_mask               = int0002_irq_mask,
49675 -       .irq_unmask             = int0002_irq_unmask,
49676 -       /*
49677 -        * No set_wake, on CHT the IRQ is typically shared with the ACPI SCI
49678 -        * and we don't want to mess with the ACPI SCI irq settings.
49679 -        */
49680 -       .flags                  = IRQCHIP_SKIP_SET_WAKE,
49683  static const struct x86_cpu_id int0002_cpu_ids[] = {
49684 -       X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT,     &int0002_byt_irqchip),
49685 -       X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT,        &int0002_cht_irqchip),
49686 +       X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL),
49687 +       X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
49688         {}
49689  };
49691 @@ -172,8 +168,9 @@ static int int0002_probe(struct platform_device *pdev)
49693         struct device *dev = &pdev->dev;
49694         const struct x86_cpu_id *cpu_id;
49695 -       struct gpio_chip *chip;
49696 +       struct int0002_data *int0002;
49697         struct gpio_irq_chip *girq;
49698 +       struct gpio_chip *chip;
49699         int irq, ret;
49701         /* Menlow has a different INT0002 device? <sigh> */
49702 @@ -185,10 +182,13 @@ static int int0002_probe(struct platform_device *pdev)
49703         if (irq < 0)
49704                 return irq;
49706 -       chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
49707 -       if (!chip)
49708 +       int0002 = devm_kzalloc(dev, sizeof(*int0002), GFP_KERNEL);
49709 +       if (!int0002)
49710                 return -ENOMEM;
49712 +       int0002->parent_irq = irq;
49714 +       chip = &int0002->chip;
49715         chip->label = DRV_NAME;
49716         chip->parent = dev;
49717         chip->owner = THIS_MODULE;
49718 @@ -214,7 +214,7 @@ static int int0002_probe(struct platform_device *pdev)
49719         }
49721         girq = &chip->irq;
49722 -       girq->chip = (struct irq_chip *)cpu_id->driver_data;
49723 +       girq->chip = &int0002_irqchip;
49724         /* This let us handle the parent IRQ in the driver */
49725         girq->parent_handler = NULL;
49726         girq->num_parents = 0;
49727 @@ -230,6 +230,7 @@ static int int0002_probe(struct platform_device *pdev)
49729         acpi_register_wakeup_handler(irq, int0002_check_wake, NULL);
49730         device_init_wakeup(dev, true);
49731 +       dev_set_drvdata(dev, int0002);
49732         return 0;
49735 @@ -240,6 +241,36 @@ static int int0002_remove(struct platform_device *pdev)
49736         return 0;
49739 +static int int0002_suspend(struct device *dev)
49741 +       struct int0002_data *int0002 = dev_get_drvdata(dev);
49743 +       /*
49744 +        * The INT0002 parent IRQ is often shared with the ACPI GPE IRQ; don't
49745 +        * muck with it when firmware-based suspend is used, otherwise we may
49746 +        * cause spurious wakeups from firmware-managed suspend.
49747 +        */
49748 +       if (!pm_suspend_via_firmware() && int0002->wake_enable_count)
49749 +               enable_irq_wake(int0002->parent_irq);
49751 +       return 0;
49754 +static int int0002_resume(struct device *dev)
49756 +       struct int0002_data *int0002 = dev_get_drvdata(dev);
49758 +       if (!pm_suspend_via_firmware() && int0002->wake_enable_count)
49759 +               disable_irq_wake(int0002->parent_irq);
49761 +       return 0;
49764 +static const struct dev_pm_ops int0002_pm_ops = {
49765 +       .suspend = int0002_suspend,
49766 +       .resume = int0002_resume,
49769  static const struct acpi_device_id int0002_acpi_ids[] = {
49770         { "INT0002", 0 },
49771         { },
49772 @@ -250,6 +281,7 @@ static struct platform_driver int0002_driver = {
49773         .driver = {
49774                 .name                   = DRV_NAME,
49775                 .acpi_match_table       = int0002_acpi_ids,
49776 +               .pm                     = &int0002_pm_ops,
49777         },
49778         .probe  = int0002_probe,
49779         .remove = int0002_remove,
49780 diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
49781 index bffe548187ee..c2918ee3e100 100644
49782 --- a/drivers/platform/x86/intel_ips.c
49783 +++ b/drivers/platform/x86/intel_ips.c
49784 @@ -798,7 +798,7 @@ static int ips_adjust(void *data)
49785                         ips_gpu_lower(ips);
49787  sleep:
49788 -               schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD));
49789 +               schedule_msec_hrtimeout_interruptible((IPS_ADJUST_PERIOD));
49790         } while (!kthread_should_stop());
49792         dev_dbg(ips->dev, "ips-adjust thread stopped\n");
49793 @@ -974,7 +974,7 @@ static int ips_monitor(void *data)
49794         seqno_timestamp = get_jiffies_64();
49796         old_cpu_power = thm_readl(THM_CEC);
49797 -       schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
49798 +       schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
49800         /* Collect an initial average */
49801         for (i = 0; i < IPS_SAMPLE_COUNT; i++) {
49802 @@ -1001,7 +1001,7 @@ static int ips_monitor(void *data)
49803                         mchp_samples[i] = mchp;
49804                 }
49806 -               schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
49807 +               schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
49808                 if (kthread_should_stop())
49809                         break;
49810         }
49811 @@ -1028,7 +1028,7 @@ static int ips_monitor(void *data)
49812          * us to reduce the sample frequency if the CPU and GPU are idle.
49813          */
49814         old_cpu_power = thm_readl(THM_CEC);
49815 -       schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
49816 +       schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
49817         last_sample_period = IPS_SAMPLE_PERIOD;
49819         timer_setup(&ips->timer, monitor_timeout, TIMER_DEFERRABLE);
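Note that schedule_msec_hrtimeout_interruptible() is not a mainline API: it comes from the hrtimeout patchset this XanMod tree carries, and these hunks assume it has already been applied. Judging from its use here, it takes the timeout in milliseconds directly and sleeps on a high-resolution timer, so short waits are not rounded up to whole jiffies; the doubled parentheses appear to be harmless leftovers of a scripted conversion. The same substitution is applied to rtc-wm8350 further down. A hedged before/after sketch:

	/* mainline: granularity limited to jiffies (e.g. 4 ms at HZ=250) */
	schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD));

	/* patched (hrtimeout patchset, assumed signature): millisecond
	 * argument backed by an hrtimer, honoured much more precisely */
	schedule_msec_hrtimeout_interruptible(IPS_ADJUST_PERIOD);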
49820 diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
49821 index b5888aeb4bcf..260d49dca1ad 100644
49822 --- a/drivers/platform/x86/intel_pmc_core.c
49823 +++ b/drivers/platform/x86/intel_pmc_core.c
49824 @@ -1186,9 +1186,15 @@ static const struct pci_device_id pmc_pci_ids[] = {
49825   * the platform BIOS enforces the 24MHz crystal to shut down
49826   * before PMC can assert SLP_S0#.
49827   */
49828 +static bool xtal_ignore;
49829  static int quirk_xtal_ignore(const struct dmi_system_id *id)
49831 -       struct pmc_dev *pmcdev = &pmc;
49832 +       xtal_ignore = true;
49833 +       return 0;
49836 +static void pmc_core_xtal_ignore(struct pmc_dev *pmcdev)
49838         u32 value;
49840         value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset);
49841 @@ -1197,7 +1203,6 @@ static int quirk_xtal_ignore(const struct dmi_system_id *id)
49842         /* Low Voltage Mode Enable */
49843         value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
49844         pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value);
49845 -       return 0;
49848  static const struct dmi_system_id pmc_core_dmi_table[]  = {
49849 @@ -1212,6 +1217,14 @@ static const struct dmi_system_id pmc_core_dmi_table[]  = {
49850         {}
49851  };
49853 +static void pmc_core_do_dmi_quirks(struct pmc_dev *pmcdev)
49855 +       dmi_check_system(pmc_core_dmi_table);
49857 +       if (xtal_ignore)
49858 +               pmc_core_xtal_ignore(pmcdev);
49861  static int pmc_core_probe(struct platform_device *pdev)
49863         static bool device_initialized;
49864 @@ -1253,7 +1266,7 @@ static int pmc_core_probe(struct platform_device *pdev)
49865         mutex_init(&pmcdev->lock);
49866         platform_set_drvdata(pdev, pmcdev);
49867         pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
49868 -       dmi_check_system(pmc_core_dmi_table);
49869 +       pmc_core_do_dmi_quirks(pmcdev);
49871         /*
49872          * On TGL, due to a hardware limitation, the GBE LTR blocks PC10 when
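The refactor above exists because dmi_check_system() runs its callbacks immediately, while the VRIC1 register tweak needs a fully initialised pmcdev; the callback therefore only latches a flag, and probe applies the quirk once drvdata is in place, removing the old reliance on a global pmc instance. A generic sketch of this latch-then-apply pattern (my_* names are hypothetical; dmi_check_system() and struct dmi_system_id are the real interfaces):

#include <linux/dmi.h>

struct my_dev;					/* hypothetical driver state */
void my_apply_xtal_workaround(struct my_dev *mydev);

static bool my_quirk_needed;

static int my_quirk_cb(const struct dmi_system_id *id)
{
	my_quirk_needed = true;			/* just record the match */
	return 0;
}

static const struct dmi_system_id my_dmi_table[] = {
	{
		.callback = my_quirk_cb,
		.ident = "Some machine",
		.matches = { DMI_MATCH(DMI_SYS_VENDOR, "Vendor") },
	},
	{}
};

static void my_do_dmi_quirks(struct my_dev *mydev)
{
	dmi_check_system(my_dmi_table);		/* sets flags only */
	if (my_quirk_needed)
		my_apply_xtal_workaround(mydev);	/* device now valid */
}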
49873 diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
49874 index a2a2d923e60c..df1fc6c719f3 100644
49875 --- a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
49876 +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
49877 @@ -21,12 +21,16 @@
49878  #define PUNIT_MAILBOX_BUSY_BIT         31
49880  /*
49881 - * The average time to complete some commands is about 40us. The current
49882 - * count is enough to satisfy 40us. But when the firmware is very busy, this
49883 - * causes timeout occasionally.  So increase to deal with some worst case
49884 - * scenarios. Most of the command still complete in few us.
49885 + * The average time to complete mailbox commands is less than 40us. Most of
49886 + * the commands complete in a few microseconds. But the same firmware handles
49887 + * requests from all power management features.
49888 + * We can create a scenario where we flood the firmware with requests and
49889 + * the mailbox response is then delayed by hundreds of microseconds. So define
49890 + * two timeouts: one for the average case and one for the long case.
49891 + * If the firmware is taking longer than average, just call cond_resched().
49892   */
49893 -#define OS_MAILBOX_RETRY_COUNT         100
49894 +#define OS_MAILBOX_TIMEOUT_AVG_US      40
49895 +#define OS_MAILBOX_TIMEOUT_MAX_US      1000
49897  struct isst_if_device {
49898         struct mutex mutex;
49899 @@ -35,11 +39,13 @@ struct isst_if_device {
49900  static int isst_if_mbox_cmd(struct pci_dev *pdev,
49901                             struct isst_if_mbox_cmd *mbox_cmd)
49903 -       u32 retries, data;
49904 +       s64 tm_delta = 0;
49905 +       ktime_t tm;
49906 +       u32 data;
49907         int ret;
49909         /* Poll for rb bit == 0 */
49910 -       retries = OS_MAILBOX_RETRY_COUNT;
49911 +       tm = ktime_get();
49912         do {
49913                 ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
49914                                             &data);
49915 @@ -48,11 +54,14 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
49917                 if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
49918                         ret = -EBUSY;
49919 +                       tm_delta = ktime_us_delta(ktime_get(), tm);
49920 +                       if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
49921 +                               cond_resched();
49922                         continue;
49923                 }
49924                 ret = 0;
49925                 break;
49926 -       } while (--retries);
49927 +       } while (tm_delta < OS_MAILBOX_TIMEOUT_MAX_US);
49929         if (ret)
49930                 return ret;
49931 @@ -74,7 +83,8 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
49932                 return ret;
49934         /* Poll for rb bit == 0 */
49935 -       retries = OS_MAILBOX_RETRY_COUNT;
49936 +       tm_delta = 0;
49937 +       tm = ktime_get();
49938         do {
49939                 ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
49940                                             &data);
49941 @@ -83,6 +93,9 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
49943                 if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
49944                         ret = -EBUSY;
49945 +                       tm_delta = ktime_us_delta(ktime_get(), tm);
49946 +                       if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
49947 +                               cond_resched();
49948                         continue;
49949                 }
49951 @@ -96,7 +109,7 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
49952                 mbox_cmd->resp_data = data;
49953                 ret = 0;
49954                 break;
49955 -       } while (--retries);
49956 +       } while (tm_delta < OS_MAILBOX_TIMEOUT_MAX_US);
49958         return ret;
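The conversion above replaces a bare iteration count, whose wall-clock length depends on PCI config-space latency, with an explicit time budget: poll for up to OS_MAILBOX_TIMEOUT_MAX_US, and once past the 40 us average yield the CPU with cond_resched() so a busy firmware does not cause a tight spin. A self-contained sketch of that polling shape (poll_hw_busy() is a hypothetical stand-in for the config-space read):

#include <linux/ktime.h>
#include <linux/sched.h>

#define MY_TIMEOUT_AVG_US	40
#define MY_TIMEOUT_MAX_US	1000

bool poll_hw_busy(void);	/* hypothetical: true while busy bit set */

static int my_wait_for_mailbox(void)
{
	ktime_t start = ktime_get();
	s64 elapsed = 0;

	do {
		if (!poll_hw_busy())
			return 0;			/* ready */
		elapsed = ktime_us_delta(ktime_get(), start);
		if (elapsed > MY_TIMEOUT_AVG_US)
			cond_resched();	/* slower than average: be polite */
	} while (elapsed < MY_TIMEOUT_MAX_US);

	return -EBUSY;
}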
49960 diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
49961 index ca684ed760d1..a9d2a4b98e57 100644
49962 --- a/drivers/platform/x86/pmc_atom.c
49963 +++ b/drivers/platform/x86/pmc_atom.c
49964 @@ -393,34 +393,10 @@ static const struct dmi_system_id critclk_systems[] = {
49965         },
49966         {
49967                 /* pmc_plt_clk* - are used for ethernet controllers */
49968 -               .ident = "Beckhoff CB3163",
49969 +               .ident = "Beckhoff Baytrail",
49970                 .matches = {
49971                         DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
49972 -                       DMI_MATCH(DMI_BOARD_NAME, "CB3163"),
49973 -               },
49974 -       },
49975 -       {
49976 -               /* pmc_plt_clk* - are used for ethernet controllers */
49977 -               .ident = "Beckhoff CB4063",
49978 -               .matches = {
49979 -                       DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
49980 -                       DMI_MATCH(DMI_BOARD_NAME, "CB4063"),
49981 -               },
49982 -       },
49983 -       {
49984 -               /* pmc_plt_clk* - are used for ethernet controllers */
49985 -               .ident = "Beckhoff CB6263",
49986 -               .matches = {
49987 -                       DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
49988 -                       DMI_MATCH(DMI_BOARD_NAME, "CB6263"),
49989 -               },
49990 -       },
49991 -       {
49992 -               /* pmc_plt_clk* - are used for ethernet controllers */
49993 -               .ident = "Beckhoff CB6363",
49994 -               .matches = {
49995 -                       DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
49996 -                       DMI_MATCH(DMI_BOARD_NAME, "CB6363"),
49997 +                       DMI_MATCH(DMI_PRODUCT_FAMILY, "CBxx63"),
49998                 },
49999         },
50000         {
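The four removed per-board entries collapse into one because these Beckhoff Baytrail boards report a common DMI product family, so a single DMI_PRODUCT_FAMILY match covers current and future "CBxx63" members. Shape of the consolidated entry, mirroring the hunk:

	{
		/* pmc_plt_clk* - are used for ethernet controllers */
		.ident = "Beckhoff Baytrail",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
			DMI_MATCH(DMI_PRODUCT_FAMILY, "CBxx63"),
		},
	},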
50001 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
50002 index 0d9e2ddbf904..61f1c91c62de 100644
50003 --- a/drivers/platform/x86/thinkpad_acpi.c
50004 +++ b/drivers/platform/x86/thinkpad_acpi.c
50005 @@ -6260,6 +6260,7 @@ enum thermal_access_mode {
50006  enum { /* TPACPI_THERMAL_TPEC_* */
50007         TP_EC_THERMAL_TMP0 = 0x78,      /* ACPI EC regs TMP 0..7 */
50008         TP_EC_THERMAL_TMP8 = 0xC0,      /* ACPI EC regs TMP 8..15 */
50009 +       TP_EC_FUNCREV      = 0xEF,      /* ACPI EC Functional revision */
50010         TP_EC_THERMAL_TMP_NA = -128,    /* ACPI EC sensor not available */
50012         TPACPI_THERMAL_SENSOR_NA = -128000, /* Sensor not available */
50013 @@ -6458,7 +6459,7 @@ static const struct attribute_group thermal_temp_input8_group = {
50015  static int __init thermal_init(struct ibm_init_struct *iibm)
50017 -       u8 t, ta1, ta2;
50018 +       u8 t, ta1, ta2, ver = 0;
50019         int i;
50020         int acpi_tmp7;
50021         int res;
50022 @@ -6473,7 +6474,14 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
50023                  * 0x78-0x7F, 0xC0-0xC7.  Registers return 0x00 for
50024                  * non-implemented, thermal sensors return 0x80 when
50025                  * not available
50026 +                * The above rule is unfortunately flawed. This has been seen with
50027 +                * 0xC2 (power supply ID) causing thermal control problems.
50028 +                * The EC version can be determined from offset 0xEF and, at least
50029 +                * for version 3, the Lenovo firmware team has confirmed that
50030 +                * registers 0xC0-0xC7 are not thermal registers.
50031                  */
50032 +               if (!acpi_ec_read(TP_EC_FUNCREV, &ver))
50033 +                       pr_warn("Thinkpad ACPI EC unable to access EC version\n");
50035                 ta1 = ta2 = 0;
50036                 for (i = 0; i < 8; i++) {
50037 @@ -6483,11 +6491,13 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
50038                                 ta1 = 0;
50039                                 break;
50040                         }
50041 -                       if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
50042 -                               ta2 |= t;
50043 -                       } else {
50044 -                               ta1 = 0;
50045 -                               break;
50046 +                       if (ver < 3) {
50047 +                               if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
50048 +                                       ta2 |= t;
50049 +                               } else {
50050 +                                       ta1 = 0;
50051 +                                       break;
50052 +                               }
50053                         }
50054                 }
50055                 if (ta1 == 0) {
50056 @@ -6500,9 +6510,12 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
50057                                 thermal_read_mode = TPACPI_THERMAL_NONE;
50058                         }
50059                 } else {
50060 -                       thermal_read_mode =
50061 -                           (ta2 != 0) ?
50062 -                           TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
50063 +                       if (ver >= 3)
50064 +                               thermal_read_mode = TPACPI_THERMAL_TPEC_8;
50065 +                       else
50066 +                               thermal_read_mode =
50067 +                                       (ta2 != 0) ?
50068 +                                       TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
50069                 }
50070         } else if (acpi_tmp7) {
50071                 if (tpacpi_is_ibm() &&
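The thermal_init() changes read the EC functional-revision byte at offset 0xEF; on version 3 and later, where 0xC0-0xC7 are known not to be thermal sensors, the probe skips those registers and pins the driver to 8-sensor mode. A condensed sketch of the decision (acpi_ec_read() is the driver's own helper, nonzero on success; ta2 is the accumulated TMP8..15 probe result, only meaningful on pre-v3 ECs):

	u8 ver = 0;

	if (!acpi_ec_read(TP_EC_FUNCREV, &ver))		/* offset 0xEF */
		pr_warn("unable to read EC functional revision\n");

	if (ver >= 3)		/* 0xC0-0xC7 are not thermal registers */
		thermal_read_mode = TPACPI_THERMAL_TPEC_8;
	else
		thermal_read_mode = ta2 ? TPACPI_THERMAL_TPEC_16
					: TPACPI_THERMAL_TPEC_8;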
50072 diff --git a/drivers/power/supply/bq25980_charger.c b/drivers/power/supply/bq25980_charger.c
50073 index 530ff4025b31..0008c229fd9c 100644
50074 --- a/drivers/power/supply/bq25980_charger.c
50075 +++ b/drivers/power/supply/bq25980_charger.c
50076 @@ -606,33 +606,6 @@ static int bq25980_get_state(struct bq25980_device *bq,
50077         return 0;
50080 -static int bq25980_set_battery_property(struct power_supply *psy,
50081 -                               enum power_supply_property psp,
50082 -                               const union power_supply_propval *val)
50084 -       struct bq25980_device *bq = power_supply_get_drvdata(psy);
50085 -       int ret = 0;
50087 -       switch (psp) {
50088 -       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
50089 -               ret = bq25980_set_const_charge_curr(bq, val->intval);
50090 -               if (ret)
50091 -                       return ret;
50092 -               break;
50094 -       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
50095 -               ret = bq25980_set_const_charge_volt(bq, val->intval);
50096 -               if (ret)
50097 -                       return ret;
50098 -               break;
50100 -       default:
50101 -               return -EINVAL;
50102 -       }
50104 -       return ret;
50107  static int bq25980_get_battery_property(struct power_supply *psy,
50108                                 enum power_supply_property psp,
50109                                 union power_supply_propval *val)
50110 @@ -701,6 +674,18 @@ static int bq25980_set_charger_property(struct power_supply *psy,
50111                         return ret;
50112                 break;
50114 +       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
50115 +               ret = bq25980_set_const_charge_curr(bq, val->intval);
50116 +               if (ret)
50117 +                       return ret;
50118 +               break;
50120 +       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
50121 +               ret = bq25980_set_const_charge_volt(bq, val->intval);
50122 +               if (ret)
50123 +                       return ret;
50124 +               break;
50126         default:
50127                 return -EINVAL;
50128         }
50129 @@ -922,7 +907,6 @@ static struct power_supply_desc bq25980_battery_desc = {
50130         .name                   = "bq25980-battery",
50131         .type                   = POWER_SUPPLY_TYPE_BATTERY,
50132         .get_property           = bq25980_get_battery_property,
50133 -       .set_property           = bq25980_set_battery_property,
50134         .properties             = bq25980_battery_props,
50135         .num_properties         = ARRAY_SIZE(bq25980_battery_props),
50136         .property_is_writeable  = bq25980_property_is_writeable,
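Net effect of the three bq25980 hunks: the writable CONSTANT_CHARGE_CURRENT/VOLTAGE properties move from the battery power_supply to the charger power_supply, and the battery descriptor drops its .set_property, making the battery object effectively read-only. In the power-supply framework a property is only user-writable when a set handler exists and property_is_writeable agrees; a sketch with a hypothetical handler name:

#include <linux/power_supply.h>

static int my_prop_is_writeable(struct power_supply *psy,
				enum power_supply_property psp)
{
	switch (psp) {
	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
		return 1;	/* read-write, now on the charger psy */
	default:
		return 0;
	}
}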
50137 diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
50138 index 4c4a7b1c64c5..20e1dc8a87cf 100644
50139 --- a/drivers/power/supply/bq27xxx_battery.c
50140 +++ b/drivers/power/supply/bq27xxx_battery.c
50141 @@ -1661,27 +1661,6 @@ static int bq27xxx_battery_read_time(struct bq27xxx_device_info *di, u8 reg)
50142         return tval * 60;
50146 - * Read an average power register.
50147 - * Return < 0 if something fails.
50148 - */
50149 -static int bq27xxx_battery_read_pwr_avg(struct bq27xxx_device_info *di)
50151 -       int tval;
50153 -       tval = bq27xxx_read(di, BQ27XXX_REG_AP, false);
50154 -       if (tval < 0) {
50155 -               dev_err(di->dev, "error reading average power register  %02x: %d\n",
50156 -                       BQ27XXX_REG_AP, tval);
50157 -               return tval;
50158 -       }
50160 -       if (di->opts & BQ27XXX_O_ZERO)
50161 -               return (tval * BQ27XXX_POWER_CONSTANT) / BQ27XXX_RS;
50162 -       else
50163 -               return tval;
50166  /*
50167   * Returns true if a battery over temperature condition is detected
50168   */
50169 @@ -1769,8 +1748,6 @@ void bq27xxx_battery_update(struct bq27xxx_device_info *di)
50170                 }
50171                 if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR)
50172                         cache.cycle_count = bq27xxx_battery_read_cyct(di);
50173 -               if (di->regs[BQ27XXX_REG_AP] != INVALID_REG_ADDR)
50174 -                       cache.power_avg = bq27xxx_battery_read_pwr_avg(di);
50176                 /* We only have to read charge design full once */
50177                 if (di->charge_design_full <= 0)
50178 @@ -1827,9 +1804,35 @@ static int bq27xxx_battery_current(struct bq27xxx_device_info *di,
50179                 val->intval = curr * BQ27XXX_CURRENT_CONSTANT / BQ27XXX_RS;
50180         } else {
50181                 /* Other gauges return signed value */
50182 -               val->intval = -(int)((s16)curr) * 1000;
50183 +               val->intval = (int)((s16)curr) * 1000;
50184 +       }
50186 +       return 0;
50190 + * Get the average power in µW
50191 + * Return < 0 if something fails.
50192 + */
50193 +static int bq27xxx_battery_pwr_avg(struct bq27xxx_device_info *di,
50194 +                                  union power_supply_propval *val)
50196 +       int power;
50198 +       power = bq27xxx_read(di, BQ27XXX_REG_AP, false);
50199 +       if (power < 0) {
50200 +               dev_err(di->dev,
50201 +                       "error reading average power register %02x: %d\n",
50202 +                       BQ27XXX_REG_AP, power);
50203 +               return power;
50204         }
50206 +       if (di->opts & BQ27XXX_O_ZERO)
50207 +               val->intval = (power * BQ27XXX_POWER_CONSTANT) / BQ27XXX_RS;
50208 +       else
50209 +               /* Other gauges return a signed value in units of 10mW */
50210 +               val->intval = (int)((s16)power) * 10000;
50212         return 0;
50215 @@ -2020,7 +2023,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
50216                 ret = bq27xxx_simple_value(di->cache.energy, val);
50217                 break;
50218         case POWER_SUPPLY_PROP_POWER_AVG:
50219 -               ret = bq27xxx_simple_value(di->cache.power_avg, val);
50220 +               ret = bq27xxx_battery_pwr_avg(di, val);
50221                 break;
50222         case POWER_SUPPLY_PROP_HEALTH:
50223                 ret = bq27xxx_simple_value(di->cache.health, val);
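Two behavioural fixes above: average power is now read from the gauge on demand instead of being cached once per update cycle, and the unit handling is corrected; gauges outside the BQ27XXX_O_ZERO family report a signed value in 10 mW steps, so the driver scales by 10000 to reach microwatts (the dropped negation in the current path is the same class of sign fix). A sketch of the conversion, with the BQ27XXX_* constants taken from the driver:

/* raw 16-bit register value from BQ27XXX_REG_AP -> microwatts */
static int my_ap_to_uw(int raw, bool zero_family)
{
	if (zero_family)	/* BQ27XXX_O_ZERO: scaled by chip constants */
		return (raw * BQ27XXX_POWER_CONSTANT) / BQ27XXX_RS;

	return (int)(s16)raw * 10000;	/* signed, 10 mW units */
}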
50224 diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
50225 index 6d5bcdb9f45d..a3fc0084cda0 100644
50226 --- a/drivers/power/supply/cpcap-battery.c
50227 +++ b/drivers/power/supply/cpcap-battery.c
50228 @@ -786,7 +786,7 @@ static irqreturn_t cpcap_battery_irq_thread(int irq, void *data)
50229                         break;
50230         }
50232 -       if (!d)
50233 +       if (list_entry_is_head(d, &ddata->irq_list, node))
50234                 return IRQ_NONE;
50236         latest = cpcap_battery_latest(ddata);
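The one-line fix above addresses a classic pitfall: after list_for_each_entry() runs to completion, the cursor is not NULL; it points at a synthetic entry computed from the list head, so "if (!d)" can never fire. list_entry_is_head() (available since 5.11) is the correct completed-loop test. A self-contained sketch with hypothetical types:

#include <linux/list.h>

struct item { int irq; struct list_head node; };

static struct item *find_irq(struct list_head *head, int irq)
{
	struct item *it;

	list_for_each_entry(it, head, node)
		if (it->irq == irq)
			break;

	/* WRONG: 'it' is never NULL here, even when nothing matched */
	if (list_entry_is_head(it, head, node))
		return NULL;	/* loop completed without a match */
	return it;
}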
50237 diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
50238 index 641dcad1133f..2a8915c3e73e 100644
50239 --- a/drivers/power/supply/cpcap-charger.c
50240 +++ b/drivers/power/supply/cpcap-charger.c
50241 @@ -318,7 +318,7 @@ static int cpcap_charger_current_to_regval(int microamp)
50242                 return CPCAP_REG_CRM_ICHRG(0x0);
50243         if (miliamp < 177)
50244                 return CPCAP_REG_CRM_ICHRG(0x1);
50245 -       if (miliamp > 1596)
50246 +       if (miliamp >= 1596)
50247                 return CPCAP_REG_CRM_ICHRG(0xe);
50249         res = microamp / 88666;
50250 @@ -668,6 +668,9 @@ static void cpcap_usb_detect(struct work_struct *work)
50251                 return;
50252         }
50254 +       /* Delay for 80ms to avoid VBUS bouncing when the USB cable is plugged in */
50255 +       usleep_range(80000, 120000);
50257         /* Throttle chrgcurr2 interrupt for charger done and retry */
50258         switch (ddata->status) {
50259         case POWER_SUPPLY_STATUS_CHARGING:
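Two small cpcap-charger fixes above: the top clamp now also catches exactly 1596 mA (">=" instead of ">", so the boundary value cannot fall through to the divide path), and detection waits 80-120 ms for VBUS to settle after plug-in. usleep_range() is the appropriate primitive for a millisecond-scale sleep in process context, since its min/max window lets the scheduler coalesce wakeups:

#include <linux/delay.h>

	/* give VBUS time to stop bouncing before sampling charger state */
	usleep_range(80000, 120000);	/* 80-120 ms */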
50260 diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c
50261 index 0032069fbc2b..66039c665dd1 100644
50262 --- a/drivers/power/supply/generic-adc-battery.c
50263 +++ b/drivers/power/supply/generic-adc-battery.c
50264 @@ -373,7 +373,7 @@ static int gab_remove(struct platform_device *pdev)
50265         }
50267         kfree(adc_bat->psy_desc.properties);
50268 -       cancel_delayed_work(&adc_bat->bat_work);
50269 +       cancel_delayed_work_sync(&adc_bat->bat_work);
50270         return 0;
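cancel_delayed_work() only removes a not-yet-running work item; a handler that is already executing keeps running and may even re-arm itself after the device is gone. Remove paths therefore need the _sync variant, which waits for a running handler and blocks self-requeueing; the same fix is applied to s3c_adc_battery below. A minimal sketch with hypothetical names:

#include <linux/platform_device.h>
#include <linux/workqueue.h>

struct my_bat { struct delayed_work work; /* ... */ };

static int my_remove(struct platform_device *pdev)
{
	struct my_bat *bat = platform_get_drvdata(pdev);

	cancel_delayed_work_sync(&bat->work);	/* waits for the handler */
	return 0;				/* now safe to tear down */
}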
50273 diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c
50274 index e7931ffb7151..397e5a03b7d9 100644
50275 --- a/drivers/power/supply/lp8788-charger.c
50276 +++ b/drivers/power/supply/lp8788-charger.c
50277 @@ -501,7 +501,7 @@ static int lp8788_set_irqs(struct platform_device *pdev,
50279                 ret = request_threaded_irq(virq, NULL,
50280                                         lp8788_charger_irq_thread,
50281 -                                       0, name, pchg);
50282 +                                       IRQF_ONESHOT, name, pchg);
50283                 if (ret)
50284                         break;
50285         }
50286 diff --git a/drivers/power/supply/pm2301_charger.c b/drivers/power/supply/pm2301_charger.c
50287 index ac06ecf7fc9c..a3bfb9612b17 100644
50288 --- a/drivers/power/supply/pm2301_charger.c
50289 +++ b/drivers/power/supply/pm2301_charger.c
50290 @@ -1089,7 +1089,7 @@ static int pm2xxx_wall_charger_probe(struct i2c_client *i2c_client,
50291         ret = request_threaded_irq(gpio_to_irq(pm2->pdata->gpio_irq_number),
50292                                 NULL,
50293                                 pm2xxx_charger_irq[0].isr,
50294 -                               pm2->pdata->irq_type,
50295 +                               pm2->pdata->irq_type | IRQF_ONESHOT,
50296                                 pm2xxx_charger_irq[0].name, pm2);
50298         if (ret != 0) {
50299 diff --git a/drivers/power/supply/s3c_adc_battery.c b/drivers/power/supply/s3c_adc_battery.c
50300 index a2addc24ee8b..3e3a598f114d 100644
50301 --- a/drivers/power/supply/s3c_adc_battery.c
50302 +++ b/drivers/power/supply/s3c_adc_battery.c
50303 @@ -395,7 +395,7 @@ static int s3c_adc_bat_remove(struct platform_device *pdev)
50304         if (main_bat.charge_finished)
50305                 free_irq(gpiod_to_irq(main_bat.charge_finished), NULL);
50307 -       cancel_delayed_work(&bat_work);
50308 +       cancel_delayed_work_sync(&bat_work);
50310         if (pdata->exit)
50311                 pdata->exit();
50312 diff --git a/drivers/power/supply/tps65090-charger.c b/drivers/power/supply/tps65090-charger.c
50313 index 6b0098e5a88b..0990b2fa6cd8 100644
50314 --- a/drivers/power/supply/tps65090-charger.c
50315 +++ b/drivers/power/supply/tps65090-charger.c
50316 @@ -301,7 +301,7 @@ static int tps65090_charger_probe(struct platform_device *pdev)
50318         if (irq != -ENXIO) {
50319                 ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
50320 -                       tps65090_charger_isr, 0, "tps65090-charger", cdata);
50321 +                       tps65090_charger_isr, IRQF_ONESHOT, "tps65090-charger", cdata);
50322                 if (ret) {
50323                         dev_err(cdata->dev,
50324                                 "Unable to register irq %d err %d\n", irq,
50325 diff --git a/drivers/power/supply/tps65217_charger.c b/drivers/power/supply/tps65217_charger.c
50326 index 814c2b81fdfe..ba33d1617e0b 100644
50327 --- a/drivers/power/supply/tps65217_charger.c
50328 +++ b/drivers/power/supply/tps65217_charger.c
50329 @@ -238,7 +238,7 @@ static int tps65217_charger_probe(struct platform_device *pdev)
50330         for (i = 0; i < NUM_CHARGER_IRQS; i++) {
50331                 ret = devm_request_threaded_irq(&pdev->dev, irq[i], NULL,
50332                                                 tps65217_charger_irq,
50333 -                                               0, "tps65217-charger",
50334 +                                               IRQF_ONESHOT, "tps65217-charger",
50335                                                 charger);
50336                 if (ret) {
50337                         dev_err(charger->dev,
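All four IRQ hunks in this patch (lp8788, pm2301, tps65090, tps65217) add IRQF_ONESHOT for the same reason: when request_threaded_irq() is given a NULL primary handler, the genirq core demands IRQF_ONESHOT so the line stays masked until the thread finishes; without it a level-triggered interrupt would re-fire endlessly, and the core rejects the request with -EINVAL (unless the irqchip is marked one-shot safe). Canonical form, with a hypothetical thread function:

	/* threaded-only handler: primary is NULL, so ONESHOT is mandatory */
	ret = devm_request_threaded_irq(dev, irq,
					NULL,		/* no hard handler */
					my_irq_thread,	/* hypothetical */
					IRQF_ONESHOT,
					"my-charger", data);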
50338 diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
50339 index fdda2a737186..58ecdad26cca 100644
50340 --- a/drivers/powercap/intel_rapl_common.c
50341 +++ b/drivers/powercap/intel_rapl_common.c
50342 @@ -1454,7 +1454,7 @@ static int __init rapl_init(void)
50344         id = x86_match_cpu(rapl_ids);
50345         if (!id) {
50346 -               pr_err("driver does not support CPU family %d model %d\n",
50347 +               pr_info("driver does not support CPU family %d model %d\n",
50348                        boot_cpu_data.x86, boot_cpu_data.x86_model);
50350                 return -ENODEV;
50351 diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
50352 index 5813339b597b..3292158157b6 100644
50353 --- a/drivers/pwm/pwm-atmel.c
50354 +++ b/drivers/pwm/pwm-atmel.c
50355 @@ -319,7 +319,7 @@ static void atmel_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
50357                 cdty = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm,
50358                                           atmel_pwm->data->regs.duty);
50359 -               tmp = (u64)cdty * NSEC_PER_SEC;
50360 +               tmp = (u64)(cprd - cdty) * NSEC_PER_SEC;
50361                 tmp <<= pres;
50362                 state->duty_cycle = DIV64_U64_ROUND_UP(tmp, rate);
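The pwm-atmel fix corrects .get_state() readback: on this controller the CDTY register holds the count of inactive cycles, so the active duty is CPRD - CDTY, not CDTY. With the selected prescaler "pres" and input clock "rate", the reported duty in nanoseconds follows the hunk:

	u64 duty_ticks = cprd - cdty;	/* CDTY counts the inactive ticks */
	u64 tmp = (duty_ticks * NSEC_PER_SEC) << pres;

	state->duty_cycle = DIV64_U64_ROUND_UP(tmp, rate);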
50364 diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
50365 index 50ec53d67a4c..db4c265287ae 100644
50366 --- a/drivers/rapidio/rio_cm.c
50367 +++ b/drivers/rapidio/rio_cm.c
50368 @@ -2127,6 +2127,14 @@ static int riocm_add_mport(struct device *dev,
50369                 return -ENODEV;
50370         }
50372 +       cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
50373 +       if (!cm->rx_wq) {
50374 +               rio_release_inb_mbox(mport, cmbox);
50375 +               rio_release_outb_mbox(mport, cmbox);
50376 +               kfree(cm);
50377 +               return -ENOMEM;
50378 +       }
50380         /*
50381          * Allocate and register inbound messaging buffers to be ready
50382          * to receive channel and system management requests
50383 @@ -2137,15 +2145,6 @@ static int riocm_add_mport(struct device *dev,
50384         cm->rx_slots = RIOCM_RX_RING_SIZE;
50385         mutex_init(&cm->rx_lock);
50386         riocm_rx_fill(cm, RIOCM_RX_RING_SIZE);
50387 -       cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
50388 -       if (!cm->rx_wq) {
50389 -               riocm_error("failed to allocate IBMBOX_%d on %s",
50390 -                           cmbox, mport->name);
50391 -               rio_release_outb_mbox(mport, cmbox);
50392 -               kfree(cm);
50393 -               return -ENOMEM;
50394 -       }
50396         INIT_WORK(&cm->rx_work, rio_ibmsg_handler);
50398         cm->tx_slot = 0;
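The riocm_add_mport() reordering fixes error-path symmetry: the receive workqueue is now created before inbound buffers are registered, so a failure only has to release both mailboxes and free cm; previously a late create_workqueue() failure released only the outbound mailbox and leaked the inbound one. The general rule, sketched with hypothetical names: do pure allocations first and side-effecting registrations last, so each error branch undoes exactly what has happened so far:

#include <linux/slab.h>
#include <linux/workqueue.h>

static int my_setup(void)
{
	void *buf = kzalloc(64, GFP_KERNEL);	/* failable, no side effects */
	struct workqueue_struct *wq;

	if (!buf)
		return -ENOMEM;

	wq = create_workqueue("my/rxq");	/* failable, easy to undo */
	if (!wq) {
		kfree(buf);
		return -ENOMEM;
	}

	/* registrations with the outside world go last;
	 * buf and wq would be stored in driver state in real code */
	return 0;
}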
50399 diff --git a/drivers/regulator/bd9576-regulator.c b/drivers/regulator/bd9576-regulator.c
50400 index a8b5832a5a1b..204a2da054f5 100644
50401 --- a/drivers/regulator/bd9576-regulator.c
50402 +++ b/drivers/regulator/bd9576-regulator.c
50403 @@ -206,7 +206,7 @@ static int bd957x_probe(struct platform_device *pdev)
50405         struct regmap *regmap;
50406         struct regulator_config config = { 0 };
50407 -       int i, err;
50408 +       int i;
50409         bool vout_mode, ddr_sel;
50410         const struct bd957x_regulator_data *reg_data = &bd9576_regulators[0];
50411         unsigned int num_reg_data = ARRAY_SIZE(bd9576_regulators);
50412 @@ -279,8 +279,7 @@ static int bd957x_probe(struct platform_device *pdev)
50413                 break;
50414         default:
50415                 dev_err(&pdev->dev, "Unsupported chip type\n");
50416 -               err = -EINVAL;
50417 -               goto err;
50418 +               return -EINVAL;
50419         }
50421         config.dev = pdev->dev.parent;
50422 @@ -300,8 +299,7 @@ static int bd957x_probe(struct platform_device *pdev)
50423                         dev_err(&pdev->dev,
50424                                 "failed to register %s regulator\n",
50425                                 desc->name);
50426 -                       err = PTR_ERR(rdev);
50427 -                       goto err;
50428 +                       return PTR_ERR(rdev);
50429                 }
50430                 /*
50431                  * Clear the VOUT1 GPIO setting - rest of the regulators do not
50432 @@ -310,8 +308,7 @@ static int bd957x_probe(struct platform_device *pdev)
50433                 config.ena_gpiod = NULL;
50434         }
50436 -err:
50437 -       return err;
50438 +       return 0;
50441  static const struct platform_device_id bd957x_pmic_id[] = {
50442 diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c
50443 index a2ede7d7897e..08cbf688e14d 100644
50444 --- a/drivers/regulator/da9121-regulator.c
50445 +++ b/drivers/regulator/da9121-regulator.c
50446 @@ -40,6 +40,7 @@ struct da9121 {
50447         unsigned int passive_delay;
50448         int chip_irq;
50449         int variant_id;
50450 +       int subvariant_id;
50451  };
50453  /* Define ranges for different variants, enabling translation to/from
50454 @@ -812,7 +813,6 @@ static struct regmap_config da9121_2ch_regmap_config = {
50455  static int da9121_check_device_type(struct i2c_client *i2c, struct da9121 *chip)
50457         u32 device_id;
50458 -       u8 chip_id = chip->variant_id;
50459         u32 variant_id;
50460         u8 variant_mrc, variant_vrc;
50461         char *type;
50462 @@ -839,22 +839,34 @@ static int da9121_check_device_type(struct i2c_client *i2c, struct da9121 *chip)
50464         variant_vrc = variant_id & DA9121_MASK_OTP_VARIANT_ID_VRC;
50466 -       switch (variant_vrc) {
50467 -       case DA9121_VARIANT_VRC:
50468 -               type = "DA9121/DA9130";
50469 -               config_match = (chip_id == DA9121_TYPE_DA9121_DA9130);
50470 +       switch (chip->subvariant_id) {
50471 +       case DA9121_SUBTYPE_DA9121:
50472 +               type = "DA9121";
50473 +               config_match = (variant_vrc == DA9121_VARIANT_VRC);
50474                 break;
50475 -       case DA9220_VARIANT_VRC:
50476 -               type = "DA9220/DA9132";
50477 -               config_match = (chip_id == DA9121_TYPE_DA9220_DA9132);
50478 +       case DA9121_SUBTYPE_DA9130:
50479 +               type = "DA9130";
50480 +               config_match = (variant_vrc == DA9130_VARIANT_VRC);
50481                 break;
50482 -       case DA9122_VARIANT_VRC:
50483 -               type = "DA9122/DA9131";
50484 -               config_match = (chip_id == DA9121_TYPE_DA9122_DA9131);
50485 +       case DA9121_SUBTYPE_DA9220:
50486 +               type = "DA9220";
50487 +               config_match = (variant_vrc == DA9220_VARIANT_VRC);
50488                 break;
50489 -       case DA9217_VARIANT_VRC:
50490 +       case DA9121_SUBTYPE_DA9132:
50491 +               type = "DA9132";
50492 +               config_match = (variant_vrc == DA9132_VARIANT_VRC);
50493 +               break;
50494 +       case DA9121_SUBTYPE_DA9122:
50495 +               type = "DA9122";
50496 +               config_match = (variant_vrc == DA9122_VARIANT_VRC);
50497 +               break;
50498 +       case DA9121_SUBTYPE_DA9131:
50499 +               type = "DA9131";
50500 +               config_match = (variant_vrc == DA9131_VARIANT_VRC);
50501 +               break;
50502 +       case DA9121_SUBTYPE_DA9217:
50503                 type = "DA9217";
50504 -               config_match = (chip_id == DA9121_TYPE_DA9217);
50505 +               config_match = (variant_vrc == DA9217_VARIANT_VRC);
50506                 break;
50507         default:
50508                 type = "Unknown";
50509 @@ -892,15 +904,27 @@ static int da9121_assign_chip_model(struct i2c_client *i2c,
50511         chip->dev = &i2c->dev;
50513 -       switch (chip->variant_id) {
50514 -       case DA9121_TYPE_DA9121_DA9130:
50515 -               fallthrough;
50516 -       case DA9121_TYPE_DA9217:
50517 +       /* Use configured subtype to select the regulator descriptor index and
50518 +        * register map, common to both consumer- and automotive-grade variants
50519 +        */
50520 +       switch (chip->subvariant_id) {
50521 +       case DA9121_SUBTYPE_DA9121:
50522 +       case DA9121_SUBTYPE_DA9130:
50523 +               chip->variant_id = DA9121_TYPE_DA9121_DA9130;
50524                 regmap = &da9121_1ch_regmap_config;
50525                 break;
50526 -       case DA9121_TYPE_DA9122_DA9131:
50527 -               fallthrough;
50528 -       case DA9121_TYPE_DA9220_DA9132:
50529 +       case DA9121_SUBTYPE_DA9217:
50530 +               chip->variant_id = DA9121_TYPE_DA9217;
50531 +               regmap = &da9121_1ch_regmap_config;
50532 +               break;
50533 +       case DA9121_SUBTYPE_DA9122:
50534 +       case DA9121_SUBTYPE_DA9131:
50535 +               chip->variant_id = DA9121_TYPE_DA9122_DA9131;
50536 +               regmap = &da9121_2ch_regmap_config;
50537 +               break;
50538 +       case DA9121_SUBTYPE_DA9220:
50539 +       case DA9121_SUBTYPE_DA9132:
50540 +               chip->variant_id = DA9121_TYPE_DA9220_DA9132;
50541                 regmap = &da9121_2ch_regmap_config;
50542                 break;
50543         }
50544 @@ -975,13 +999,13 @@ static int da9121_config_irq(struct i2c_client *i2c,
50547  static const struct of_device_id da9121_dt_ids[] = {
50548 -       { .compatible = "dlg,da9121", .data = (void *) DA9121_TYPE_DA9121_DA9130 },
50549 -       { .compatible = "dlg,da9130", .data = (void *) DA9121_TYPE_DA9121_DA9130 },
50550 -       { .compatible = "dlg,da9217", .data = (void *) DA9121_TYPE_DA9217 },
50551 -       { .compatible = "dlg,da9122", .data = (void *) DA9121_TYPE_DA9122_DA9131 },
50552 -       { .compatible = "dlg,da9131", .data = (void *) DA9121_TYPE_DA9122_DA9131 },
50553 -       { .compatible = "dlg,da9220", .data = (void *) DA9121_TYPE_DA9220_DA9132 },
50554 -       { .compatible = "dlg,da9132", .data = (void *) DA9121_TYPE_DA9220_DA9132 },
50555 +       { .compatible = "dlg,da9121", .data = (void *) DA9121_SUBTYPE_DA9121 },
50556 +       { .compatible = "dlg,da9130", .data = (void *) DA9121_SUBTYPE_DA9130 },
50557 +       { .compatible = "dlg,da9217", .data = (void *) DA9121_SUBTYPE_DA9217 },
50558 +       { .compatible = "dlg,da9122", .data = (void *) DA9121_SUBTYPE_DA9122 },
50559 +       { .compatible = "dlg,da9131", .data = (void *) DA9121_SUBTYPE_DA9131 },
50560 +       { .compatible = "dlg,da9220", .data = (void *) DA9121_SUBTYPE_DA9220 },
50561 +       { .compatible = "dlg,da9132", .data = (void *) DA9121_SUBTYPE_DA9132 },
50562         { }
50563  };
50564  MODULE_DEVICE_TABLE(of, da9121_dt_ids);
50565 @@ -1011,7 +1035,7 @@ static int da9121_i2c_probe(struct i2c_client *i2c,
50566         }
50568         chip->pdata = i2c->dev.platform_data;
50569 -       chip->variant_id = da9121_of_get_id(&i2c->dev);
50570 +       chip->subvariant_id = da9121_of_get_id(&i2c->dev);
50572         ret = da9121_assign_chip_model(i2c, chip);
50573         if (ret < 0)
50574 diff --git a/drivers/regulator/da9121-regulator.h b/drivers/regulator/da9121-regulator.h
50575 index 3c34cb889ca8..357f416e17c1 100644
50576 --- a/drivers/regulator/da9121-regulator.h
50577 +++ b/drivers/regulator/da9121-regulator.h
50578 @@ -29,6 +29,16 @@ enum da9121_variant {
50579         DA9121_TYPE_DA9217
50580  };
50582 +enum da9121_subvariant {
50583 +       DA9121_SUBTYPE_DA9121,
50584 +       DA9121_SUBTYPE_DA9130,
50585 +       DA9121_SUBTYPE_DA9220,
50586 +       DA9121_SUBTYPE_DA9132,
50587 +       DA9121_SUBTYPE_DA9122,
50588 +       DA9121_SUBTYPE_DA9131,
50589 +       DA9121_SUBTYPE_DA9217
50592  /* Minimum, maximum and default polling millisecond periods are provided
50593   * here as an example. It is expected that any final implementation will
50594   * include a modification of these settings to match the required
50595 @@ -279,6 +289,9 @@ enum da9121_variant {
50596  #define DA9220_VARIANT_VRC     0x0
50597  #define DA9122_VARIANT_VRC     0x2
50598  #define DA9217_VARIANT_VRC     0x7
50599 +#define DA9130_VARIANT_VRC     0x0
50600 +#define DA9131_VARIANT_VRC     0x1
50601 +#define DA9132_VARIANT_VRC     0x2
50603  /* DA9121_REG_OTP_CUSTOMER_ID */
50605 diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
50606 index dcb380e868df..549ed3fed625 100644
50607 --- a/drivers/remoteproc/pru_rproc.c
50608 +++ b/drivers/remoteproc/pru_rproc.c
50609 @@ -266,12 +266,17 @@ static void pru_rproc_create_debug_entries(struct rproc *rproc)
50611  static void pru_dispose_irq_mapping(struct pru_rproc *pru)
50613 -       while (pru->evt_count--) {
50614 +       if (!pru->mapped_irq)
50615 +               return;
50617 +       while (pru->evt_count) {
50618 +               pru->evt_count--;
50619                 if (pru->mapped_irq[pru->evt_count] > 0)
50620                         irq_dispose_mapping(pru->mapped_irq[pru->evt_count]);
50621         }
50623         kfree(pru->mapped_irq);
50624 +       pru->mapped_irq = NULL;
50627  /*
50628 @@ -284,7 +289,7 @@ static int pru_handle_intrmap(struct rproc *rproc)
50629         struct pru_rproc *pru = rproc->priv;
50630         struct pru_irq_rsc *rsc = pru->pru_interrupt_map;
50631         struct irq_fwspec fwspec;
50632 -       struct device_node *irq_parent;
50633 +       struct device_node *parent, *irq_parent;
50634         int i, ret = 0;
50636         /* not having pru_interrupt_map is not an error */
50637 @@ -307,16 +312,31 @@ static int pru_handle_intrmap(struct rproc *rproc)
50638         pru->evt_count = rsc->num_evts;
50639         pru->mapped_irq = kcalloc(pru->evt_count, sizeof(unsigned int),
50640                                   GFP_KERNEL);
50641 -       if (!pru->mapped_irq)
50642 +       if (!pru->mapped_irq) {
50643 +               pru->evt_count = 0;
50644                 return -ENOMEM;
50645 +       }
50647         /*
50648          * parse and fill in system event to interrupt channel and
50649 -        * channel-to-host mapping
50650 +        * channel-to-host mapping. The interrupt controller to be used
50651 +        * for these mappings for a given PRU remoteproc is always its
50652 +        * corresponding sibling PRUSS INTC node.
50653          */
50654 -       irq_parent = of_irq_find_parent(pru->dev->of_node);
50655 +       parent = of_get_parent(dev_of_node(pru->dev));
50656 +       if (!parent) {
50657 +               kfree(pru->mapped_irq);
50658 +               pru->mapped_irq = NULL;
50659 +               pru->evt_count = 0;
50660 +               return -ENODEV;
50661 +       }
50663 +       irq_parent = of_get_child_by_name(parent, "interrupt-controller");
50664 +       of_node_put(parent);
50665         if (!irq_parent) {
50666                 kfree(pru->mapped_irq);
50667 +               pru->mapped_irq = NULL;
50668 +               pru->evt_count = 0;
50669                 return -ENODEV;
50670         }
50672 @@ -332,16 +352,20 @@ static int pru_handle_intrmap(struct rproc *rproc)
50674                 pru->mapped_irq[i] = irq_create_fwspec_mapping(&fwspec);
50675                 if (!pru->mapped_irq[i]) {
50676 -                       dev_err(dev, "failed to get virq\n");
50677 -                       ret = pru->mapped_irq[i];
50678 +                       dev_err(dev, "failed to get virq for fw mapping %d: event %d chnl %d host %d\n",
50679 +                               i, fwspec.param[0], fwspec.param[1],
50680 +                               fwspec.param[2]);
50681 +                       ret = -EINVAL;
50682                         goto map_fail;
50683                 }
50684         }
50685 +       of_node_put(irq_parent);
50687         return ret;
50689  map_fail:
50690         pru_dispose_irq_mapping(pru);
50691 +       of_node_put(irq_parent);
50693         return ret;
50695 @@ -387,8 +411,7 @@ static int pru_rproc_stop(struct rproc *rproc)
50696         pru_control_write_reg(pru, PRU_CTRL_CTRL, val);
50698         /* dispose irq mapping - new firmware can provide new mapping */
50699 -       if (pru->mapped_irq)
50700 -               pru_dispose_irq_mapping(pru);
50701 +       pru_dispose_irq_mapping(pru);
50703         return 0;
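The pru_rproc changes make pru_dispose_irq_mapping() idempotent: it returns early when no mapping exists, counts down without post-decrement underflow (the old "while (pru->evt_count--)" left evt_count one step past zero after a full pass), and NULLs mapped_irq after freeing, so pru_rproc_stop() can call it unconditionally. Generic shape of an idempotent dispose, with hypothetical my_* names:

#include <linux/irqdomain.h>
#include <linux/slab.h>

struct my_ctx {
	unsigned int evt_count;
	int *mapped_irq;		/* virq numbers; 0 = unmapped */
};

static void my_dispose(struct my_ctx *ctx)
{
	if (!ctx->mapped_irq)		/* safe to call repeatedly */
		return;

	while (ctx->evt_count) {
		ctx->evt_count--;
		if (ctx->mapped_irq[ctx->evt_count] > 0)
			irq_dispose_mapping(ctx->mapped_irq[ctx->evt_count]);
	}

	kfree(ctx->mapped_irq);
	ctx->mapped_irq = NULL;		/* mark as disposed */
}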
50705 diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
50706 index 66106ba25ba3..14e0ce5f18f5 100644
50707 --- a/drivers/remoteproc/qcom_q6v5_mss.c
50708 +++ b/drivers/remoteproc/qcom_q6v5_mss.c
50709 @@ -1210,6 +1210,14 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
50710                         goto release_firmware;
50711                 }
50713 +               if (phdr->p_filesz > phdr->p_memsz) {
50714 +                       dev_err(qproc->dev,
50715 +                               "refusing to load segment %d with p_filesz > p_memsz\n",
50716 +                               i);
50717 +                       ret = -EINVAL;
50718 +                       goto release_firmware;
50719 +               }
50721                 ptr = memremap(qproc->mpss_phys + offset, phdr->p_memsz, MEMREMAP_WC);
50722                 if (!ptr) {
50723                         dev_err(qproc->dev,
50724 @@ -1241,6 +1249,16 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
50725                                 goto release_firmware;
50726                         }
50728 +                       if (seg_fw->size != phdr->p_filesz) {
50729 +                               dev_err(qproc->dev,
50730 +                                       "failed to load segment %d from truncated file %s\n",
50731 +                                       i, fw_name);
50732 +                               ret = -EINVAL;
50733 +                               release_firmware(seg_fw);
50734 +                               memunmap(ptr);
50735 +                               goto release_firmware;
50736 +                       }
50738                         release_firmware(seg_fw);
50739                 }
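Both added checks harden the MPSS ELF loader against a malicious or corrupt firmware image: a program header with p_filesz > p_memsz would make the subsequent copy run past the memremap()ed window, which is sized from p_memsz, and a split-out segment file shorter than its advertised p_filesz indicates truncation. Either condition now aborts the load. Condensed from the hunks:

	/* destination window is p_memsz bytes: refuse oversized copies */
	if (phdr->p_filesz > phdr->p_memsz)
		return -EINVAL;

	/* the on-disk segment must really contain p_filesz bytes */
	if (seg_fw->size != phdr->p_filesz)
		return -EINVAL;	/* truncated firmware file */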
50741 diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
50742 index 27a05167c18c..4840886532ff 100644
50743 --- a/drivers/rpmsg/qcom_glink_native.c
50744 +++ b/drivers/rpmsg/qcom_glink_native.c
50745 @@ -857,6 +857,7 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
50746                         dev_err(glink->dev,
50747                                 "no intent found for channel %s intent %d",
50748                                 channel->name, liid);
50749 +                       ret = -ENOENT;
50750                         goto advance_rx;
50751                 }
50752         }
50753 diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
50754 index cd8e438bc9c4..8752620d8e34 100644
50755 --- a/drivers/rtc/rtc-ds1307.c
50756 +++ b/drivers/rtc/rtc-ds1307.c
50757 @@ -296,7 +296,11 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
50758         t->tm_min = bcd2bin(regs[DS1307_REG_MIN] & 0x7f);
50759         tmp = regs[DS1307_REG_HOUR] & 0x3f;
50760         t->tm_hour = bcd2bin(tmp);
50761 -       t->tm_wday = bcd2bin(regs[DS1307_REG_WDAY] & 0x07) - 1;
50762 +       /* rx8130 is bit position, not BCD */
50763 +       if (ds1307->type == rx_8130)
50764 +               t->tm_wday = fls(regs[DS1307_REG_WDAY] & 0x7f);
50765 +       else
50766 +               t->tm_wday = bcd2bin(regs[DS1307_REG_WDAY] & 0x07) - 1;
50767         t->tm_mday = bcd2bin(regs[DS1307_REG_MDAY] & 0x3f);
50768         tmp = regs[DS1307_REG_MONTH] & 0x1f;
50769         t->tm_mon = bcd2bin(tmp) - 1;
50770 @@ -343,7 +347,11 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
50771         regs[DS1307_REG_SECS] = bin2bcd(t->tm_sec);
50772         regs[DS1307_REG_MIN] = bin2bcd(t->tm_min);
50773         regs[DS1307_REG_HOUR] = bin2bcd(t->tm_hour);
50774 -       regs[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1);
50775 +       /* rx8130 is bit position, not BCD */
50776 +       if (ds1307->type == rx_8130)
50777 +               regs[DS1307_REG_WDAY] = 1 << t->tm_wday;
50778 +       else
50779 +               regs[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1);
50780         regs[DS1307_REG_MDAY] = bin2bcd(t->tm_mday);
50781         regs[DS1307_REG_MONTH] = bin2bcd(t->tm_mon + 1);
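On the RX8130 the weekday register is a one-hot bitmap rather than BCD, hence the special-casing above: writing encodes the day as a single set bit, and reading recovers an index with fls(), which returns the 1-based position of the highest set bit (fls(0x01) == 1, fls(0x40) == 7). Note that as written the two directions differ by one (fls(1 << n) == n + 1); the sketch below mirrors this patch as-is rather than asserting final mainline behaviour:

	/* write: weekday as a single set bit */
	regs[DS1307_REG_WDAY] = 1 << t->tm_wday;

	/* read: index of the set bit, 1-based */
	t->tm_wday = fls(regs[DS1307_REG_WDAY] & 0x7f);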
50783 diff --git a/drivers/rtc/rtc-fsl-ftm-alarm.c b/drivers/rtc/rtc-fsl-ftm-alarm.c
50784 index 57cc09d0a806..c0df49fb978c 100644
50785 --- a/drivers/rtc/rtc-fsl-ftm-alarm.c
50786 +++ b/drivers/rtc/rtc-fsl-ftm-alarm.c
50787 @@ -310,6 +310,7 @@ static const struct of_device_id ftm_rtc_match[] = {
50788         { .compatible = "fsl,lx2160a-ftm-alarm", },
50789         { },
50790  };
50791 +MODULE_DEVICE_TABLE(of, ftm_rtc_match);
50793  static const struct acpi_device_id ftm_imx_acpi_ids[] = {
50794         {"NXP0014",},
50795 diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
50796 index aef6c1ee8bb0..82becae14229 100644
50797 --- a/drivers/rtc/rtc-pcf85063.c
50798 +++ b/drivers/rtc/rtc-pcf85063.c
50799 @@ -478,6 +478,7 @@ static struct clk *pcf85063_clkout_register_clk(struct pcf85063 *pcf85063)
50801         struct clk *clk;
50802         struct clk_init_data init;
50803 +       struct device_node *node = pcf85063->rtc->dev.parent->of_node;
50805         init.name = "pcf85063-clkout";
50806         init.ops = &pcf85063_clkout_ops;
50807 @@ -487,15 +488,13 @@ static struct clk *pcf85063_clkout_register_clk(struct pcf85063 *pcf85063)
50808         pcf85063->clkout_hw.init = &init;
50810         /* optional override of the clockname */
50811 -       of_property_read_string(pcf85063->rtc->dev.of_node,
50812 -                               "clock-output-names", &init.name);
50813 +       of_property_read_string(node, "clock-output-names", &init.name);
50815         /* register the clock */
50816         clk = devm_clk_register(&pcf85063->rtc->dev, &pcf85063->clkout_hw);
50818         if (!IS_ERR(clk))
50819 -               of_clk_add_provider(pcf85063->rtc->dev.of_node,
50820 -                                   of_clk_src_simple_get, clk);
50821 +               of_clk_add_provider(node, of_clk_src_simple_get, clk);
50823         return clk;
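The pcf85063 clkout hunks switch both DT lookups to the parent's of_node: the rtc class device is a virtual child with no device-tree node of its own, while "clock-output-names" and the clock provider belong on the I2C client node, so the old lookups matched nothing. Condensed:

	/* the I2C client node carries the clock properties, not the
	 * virtual rtc child device */
	struct device_node *node = pcf85063->rtc->dev.parent->of_node;

	of_property_read_string(node, "clock-output-names", &init.name);
	of_clk_add_provider(node, of_clk_src_simple_get, clk);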
50825 diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
50826 index 288abb1abdb8..bc89c62ccb9b 100644
50827 --- a/drivers/rtc/rtc-tps65910.c
50828 +++ b/drivers/rtc/rtc-tps65910.c
50829 @@ -18,6 +18,7 @@
50830  #include <linux/rtc.h>
50831  #include <linux/bcd.h>
50832  #include <linux/math64.h>
50833 +#include <linux/property.h>
50834  #include <linux/platform_device.h>
50835  #include <linux/interrupt.h>
50836  #include <linux/mfd/tps65910.h>
50837 diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
50838 index 2018614f258f..fc19b312c345 100644
50839 --- a/drivers/rtc/rtc-wm8350.c
50840 +++ b/drivers/rtc/rtc-wm8350.c
50841 @@ -114,7 +114,7 @@ static int wm8350_rtc_settime(struct device *dev, struct rtc_time *tm)
50842         /* Wait until confirmation of stopping */
50843         do {
50844                 rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
50845 -               schedule_timeout_uninterruptible(msecs_to_jiffies(1));
50846 +               schedule_msec_hrtimeout_uninterruptible((1));
50847         } while (--retries && !(rtc_ctrl & WM8350_RTC_STS));
50849         if (!retries) {
50850 @@ -197,7 +197,7 @@ static int wm8350_rtc_stop_alarm(struct wm8350 *wm8350)
50851         /* Wait until confirmation of stopping */
50852         do {
50853                 rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
50854 -               schedule_timeout_uninterruptible(msecs_to_jiffies(1));
50855 +               schedule_msec_hrtimeout_uninterruptible((1));
50856         } while (retries-- && !(rtc_ctrl & WM8350_RTC_ALMSTS));
50858         if (!(rtc_ctrl & WM8350_RTC_ALMSTS))
50859 @@ -220,7 +220,7 @@ static int wm8350_rtc_start_alarm(struct wm8350 *wm8350)
50860         /* Wait until confirmation */
50861         do {
50862                 rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
50863 -               schedule_timeout_uninterruptible(msecs_to_jiffies(1));
50864 +               schedule_msec_hrtimeout_uninterruptible((1));
50865         } while (retries-- && rtc_ctrl & WM8350_RTC_ALMSTS);
50867         if (rtc_ctrl & WM8350_RTC_ALMSTS)
50868 diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
50869 index 3f026021e95e..84f659cafe76 100644
50870 --- a/drivers/s390/cio/device.c
50871 +++ b/drivers/s390/cio/device.c
50872 @@ -1532,8 +1532,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
50873         switch (action) {
50874         case IO_SCH_ORPH_UNREG:
50875         case IO_SCH_UNREG:
50876 -               if (!cdev)
50877 -                       css_sch_device_unregister(sch);
50878 +               css_sch_device_unregister(sch);
50879                 break;
50880         case IO_SCH_ORPH_ATTACH:
50881         case IO_SCH_UNREG_ATTACH:
50882 diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
50883 index 34bf2f197c71..0e0044d70844 100644
50884 --- a/drivers/s390/cio/qdio.h
50885 +++ b/drivers/s390/cio/qdio.h
50886 @@ -181,12 +181,6 @@ struct qdio_input_q {
50887  struct qdio_output_q {
50888         /* PCIs are enabled for the queue */
50889         int pci_out_enabled;
50890 -       /* cq: use asynchronous output buffers */
50891 -       int use_cq;
50892 -       /* cq: aobs used for particual SBAL */
50893 -       struct qaob **aobs;
50894 -       /* cq: sbal state related to asynchronous operation */
50895 -       struct qdio_outbuf_state *sbal_state;
50896         /* timer to check for more outbound work */
50897         struct timer_list timer;
50898         /* tasklet to check for completions */
50899 @@ -379,12 +373,8 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data);
50900  void qdio_shutdown_irq(struct qdio_irq *irq);
50901  void qdio_print_subchannel_info(struct qdio_irq *irq_ptr);
50902  void qdio_free_queues(struct qdio_irq *irq_ptr);
50903 -void qdio_free_async_data(struct qdio_irq *irq_ptr);
50904  int qdio_setup_init(void);
50905  void qdio_setup_exit(void);
50906 -int qdio_enable_async_operation(struct qdio_output_q *q);
50907 -void qdio_disable_async_operation(struct qdio_output_q *q);
50908 -struct qaob *qdio_allocate_aob(void);
50910  int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
50911                         unsigned char *state);
50912 diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
50913 index 03a011619908..307ce7ff5ca4 100644
50914 --- a/drivers/s390/cio/qdio_main.c
50915 +++ b/drivers/s390/cio/qdio_main.c
50916 @@ -517,24 +517,6 @@ static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
50917         return 1;
50920 -static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
50921 -                                       int bufnr)
50923 -       unsigned long phys_aob = 0;
50925 -       if (!q->aobs[bufnr]) {
50926 -               struct qaob *aob = qdio_allocate_aob();
50927 -               q->aobs[bufnr] = aob;
50928 -       }
50929 -       if (q->aobs[bufnr]) {
50930 -               q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
50931 -               phys_aob = virt_to_phys(q->aobs[bufnr]);
50932 -               WARN_ON_ONCE(phys_aob & 0xFF);
50933 -       }
50935 -       return phys_aob;
50938  static inline int qdio_tasklet_schedule(struct qdio_q *q)
50940         if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
50941 @@ -548,7 +530,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
50942                                         unsigned int *error)
50944         unsigned char state = 0;
50945 -       unsigned int i;
50946         int count;
50948         q->timestamp = get_tod_clock_fast();
50949 @@ -570,10 +551,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
50951         switch (state) {
50952         case SLSB_P_OUTPUT_PENDING:
50953 -               /* detach the utilized QAOBs: */
50954 -               for (i = 0; i < count; i++)
50955 -                       q->u.out.aobs[QDIO_BUFNR(start + i)] = NULL;
50957                 *error = QDIO_ERROR_SLSB_PENDING;
50958                 fallthrough;
50959         case SLSB_P_OUTPUT_EMPTY:
50960 @@ -999,7 +976,6 @@ int qdio_free(struct ccw_device *cdev)
50961         cdev->private->qdio_data = NULL;
50962         mutex_unlock(&irq_ptr->setup_mutex);
50964 -       qdio_free_async_data(irq_ptr);
50965         qdio_free_queues(irq_ptr);
50966         free_page((unsigned long) irq_ptr->qdr);
50967         free_page(irq_ptr->chsc_page);
50968 @@ -1075,28 +1051,6 @@ int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
50970  EXPORT_SYMBOL_GPL(qdio_allocate);
50972 -static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
50974 -       struct qdio_q *q = irq_ptr->input_qs[0];
50975 -       int i, use_cq = 0;
50977 -       if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
50978 -               use_cq = 1;
50980 -       for_each_output_queue(irq_ptr, q, i) {
50981 -               if (use_cq) {
50982 -                       if (multicast_outbound(q))
50983 -                               continue;
50984 -                       if (qdio_enable_async_operation(&q->u.out) < 0) {
50985 -                               use_cq = 0;
50986 -                               continue;
50987 -                       }
50988 -               } else
50989 -                       qdio_disable_async_operation(&q->u.out);
50990 -       }
50991 -       DBF_EVENT("use_cq:%d", use_cq);
50994  static void qdio_trace_init_data(struct qdio_irq *irq,
50995                                  struct qdio_initialize *data)
50997 @@ -1191,8 +1145,6 @@ int qdio_establish(struct ccw_device *cdev,
50999         qdio_setup_ssqd_info(irq_ptr);
51001 -       qdio_detect_hsicq(irq_ptr);
51003         /* qebsm is now setup if available, initialize buffer states */
51004         qdio_init_buf_states(irq_ptr);
51006 @@ -1297,9 +1249,11 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
51007   * @callflags: flags
51008   * @bufnr: first buffer to process
51009   * @count: how many buffers are filled
51010 + * @aob: asynchronous operation block
51011   */
51012  static int handle_outbound(struct qdio_q *q, unsigned int callflags,
51013 -                          unsigned int bufnr, unsigned int count)
51014 +                          unsigned int bufnr, unsigned int count,
51015 +                          struct qaob *aob)
51017         const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
51018         unsigned char state = 0;
51019 @@ -1320,11 +1274,9 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
51020                 q->u.out.pci_out_enabled = 0;
51022         if (queue_type(q) == QDIO_IQDIO_QFMT) {
51023 -               unsigned long phys_aob = 0;
51025 -               if (q->u.out.use_cq && count == 1)
51026 -                       phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
51027 +               unsigned long phys_aob = aob ? virt_to_phys(aob) : 0;
51029 +               WARN_ON_ONCE(!IS_ALIGNED(phys_aob, 256));
51030                 rc = qdio_kick_outbound_q(q, count, phys_aob);
51031         } else if (need_siga_sync(q)) {
51032                 rc = qdio_siga_sync_q(q);
51033 @@ -1359,9 +1311,10 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
51034   * @q_nr: queue number
51035   * @bufnr: buffer number
51036   * @count: how many buffers to process
51037 + * @aob: asynchronous operation block (outbound only)
51038   */
51039  int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
51040 -           int q_nr, unsigned int bufnr, unsigned int count)
51041 +           int q_nr, unsigned int bufnr, unsigned int count, struct qaob *aob)
51043         struct qdio_irq *irq_ptr = cdev->private->qdio_data;
51045 @@ -1383,7 +1336,7 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
51046                                       callflags, bufnr, count);
51047         else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
51048                 return handle_outbound(irq_ptr->output_qs[q_nr],
51049 -                                      callflags, bufnr, count);
51050 +                                      callflags, bufnr, count, aob);
51051         return -EINVAL;
51053  EXPORT_SYMBOL_GPL(do_QDIO);
51054 diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
51055 index c8b9620bc688..da67e4979402 100644
51056 --- a/drivers/s390/cio/qdio_setup.c
51057 +++ b/drivers/s390/cio/qdio_setup.c
51058 @@ -30,6 +30,7 @@ struct qaob *qdio_allocate_aob(void)
51060         return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
51062 +EXPORT_SYMBOL_GPL(qdio_allocate_aob);
51064  void qdio_release_aob(struct qaob *aob)
51066 @@ -247,8 +248,6 @@ static void setup_queues(struct qdio_irq *irq_ptr,
51067                          struct qdio_initialize *qdio_init)
51069         struct qdio_q *q;
51070 -       struct qdio_outbuf_state *output_sbal_state_array =
51071 -                                 qdio_init->output_sbal_state_array;
51072         int i;
51074         for_each_input_queue(irq_ptr, q, i) {
51075 @@ -265,9 +264,6 @@ static void setup_queues(struct qdio_irq *irq_ptr,
51076                 DBF_EVENT("outq:%1d", i);
51077                 setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
51079 -               q->u.out.sbal_state = output_sbal_state_array;
51080 -               output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;
51082                 q->is_input_q = 0;
51083                 setup_storage_lists(q, irq_ptr,
51084                                     qdio_init->output_sbal_addr_array[i], i);
51085 @@ -372,30 +368,6 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
51086         DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
51089 -void qdio_free_async_data(struct qdio_irq *irq_ptr)
51091 -       struct qdio_q *q;
51092 -       int i;
51094 -       for (i = 0; i < irq_ptr->max_output_qs; i++) {
51095 -               q = irq_ptr->output_qs[i];
51096 -               if (q->u.out.use_cq) {
51097 -                       unsigned int n;
51099 -                       for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; n++) {
51100 -                               struct qaob *aob = q->u.out.aobs[n];
51102 -                               if (aob) {
51103 -                                       qdio_release_aob(aob);
51104 -                                       q->u.out.aobs[n] = NULL;
51105 -                               }
51106 -                       }
51108 -                       qdio_disable_async_operation(&q->u.out);
51109 -               }
51110 -       }
51113  static void qdio_fill_qdr_desc(struct qdesfmt0 *desc, struct qdio_q *queue)
51115         desc->sliba = virt_to_phys(queue->slib);
51116 @@ -545,25 +517,6 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr)
51117         printk(KERN_INFO "%s", s);
51120 -int qdio_enable_async_operation(struct qdio_output_q *outq)
51122 -       outq->aobs = kcalloc(QDIO_MAX_BUFFERS_PER_Q, sizeof(struct qaob *),
51123 -                            GFP_KERNEL);
51124 -       if (!outq->aobs) {
51125 -               outq->use_cq = 0;
51126 -               return -ENOMEM;
51127 -       }
51128 -       outq->use_cq = 1;
51129 -       return 0;
51132 -void qdio_disable_async_operation(struct qdio_output_q *q)
51134 -       kfree(q->aobs);
51135 -       q->aobs = NULL;
51136 -       q->use_cq = 0;
51139  int __init qdio_setup_init(void)
51141         int rc;
51142 diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
51143 index 1ffdd411201c..6946a7e26eff 100644
51144 --- a/drivers/s390/crypto/vfio_ap_ops.c
51145 +++ b/drivers/s390/crypto/vfio_ap_ops.c
51146 @@ -294,6 +294,19 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
51147         matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
51148                                    struct ap_matrix_mdev, pqap_hook);
51150 +       /*
51151 +        * If the KVM pointer is in the process of being set, wait until the
51152 +        * process has completed.
51153 +        */
51154 +       wait_event_cmd(matrix_mdev->wait_for_kvm,
51155 +                      !matrix_mdev->kvm_busy,
51156 +                      mutex_unlock(&matrix_dev->lock),
51157 +                      mutex_lock(&matrix_dev->lock));
51159 +       /* If there is no guest using the mdev, there is nothing to do */
51160 +       if (!matrix_mdev->kvm)
51161 +               goto out_unlock;
51163         q = vfio_ap_get_queue(matrix_mdev, apqn);
51164         if (!q)
51165                 goto out_unlock;
51166 @@ -337,6 +350,7 @@ static int vfio_ap_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
51168         matrix_mdev->mdev = mdev;
51169         vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
51170 +       init_waitqueue_head(&matrix_mdev->wait_for_kvm);
51171         mdev_set_drvdata(mdev, matrix_mdev);
51172         matrix_mdev->pqap_hook.hook = handle_pqap;
51173         matrix_mdev->pqap_hook.owner = THIS_MODULE;
51174 @@ -351,17 +365,23 @@ static int vfio_ap_mdev_remove(struct mdev_device *mdev)
51176         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
51178 -       if (matrix_mdev->kvm)
51179 +       mutex_lock(&matrix_dev->lock);
51181 +       /*
51182 +        * If the KVM pointer is in flux or the guest is running, disallow
51183 +        * removal of the mdev.
51184 +        */
51185 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
51186 +               mutex_unlock(&matrix_dev->lock);
51187                 return -EBUSY;
51188 +       }
51190 -       mutex_lock(&matrix_dev->lock);
51191         vfio_ap_mdev_reset_queues(mdev);
51192         list_del(&matrix_mdev->node);
51193 -       mutex_unlock(&matrix_dev->lock);
51195         kfree(matrix_mdev);
51196         mdev_set_drvdata(mdev, NULL);
51197         atomic_inc(&matrix_dev->available_instances);
51198 +       mutex_unlock(&matrix_dev->lock);
51200         return 0;
51202 @@ -606,24 +626,31 @@ static ssize_t assign_adapter_store(struct device *dev,
51203         struct mdev_device *mdev = mdev_from_dev(dev);
51204         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
51206 -       /* If the guest is running, disallow assignment of adapter */
51207 -       if (matrix_mdev->kvm)
51208 -               return -EBUSY;
51209 +       mutex_lock(&matrix_dev->lock);
51211 +       /*
51212 +        * If the KVM pointer is in flux or the guest is running, disallow
51213 +        * assignment of adapter
51214 +        */
51215 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
51216 +               ret = -EBUSY;
51217 +               goto done;
51218 +       }
51220         ret = kstrtoul(buf, 0, &apid);
51221         if (ret)
51222 -               return ret;
51223 +               goto done;
51225 -       if (apid > matrix_mdev->matrix.apm_max)
51226 -               return -ENODEV;
51227 +       if (apid > matrix_mdev->matrix.apm_max) {
51228 +               ret = -ENODEV;
51229 +               goto done;
51230 +       }
51232         /*
51233          * Set the bit in the AP mask (APM) corresponding to the AP adapter
51234          * number (APID). The bits in the mask, from most significant to least
51235          * significant bit, correspond to APIDs 0-255.
51236          */
51237 -       mutex_lock(&matrix_dev->lock);
51239         ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid);
51240         if (ret)
51241                 goto done;
51242 @@ -672,22 +699,31 @@ static ssize_t unassign_adapter_store(struct device *dev,
51243         struct mdev_device *mdev = mdev_from_dev(dev);
51244         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
51246 -       /* If the guest is running, disallow un-assignment of adapter */
51247 -       if (matrix_mdev->kvm)
51248 -               return -EBUSY;
51249 +       mutex_lock(&matrix_dev->lock);
51251 +       /*
51252 +        * If the KVM pointer is in flux or the guest is running, disallow
51253 +        * un-assignment of adapter
51254 +        */
51255 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
51256 +               ret = -EBUSY;
51257 +               goto done;
51258 +       }
51260         ret = kstrtoul(buf, 0, &apid);
51261         if (ret)
51262 -               return ret;
51263 +               goto done;
51265 -       if (apid > matrix_mdev->matrix.apm_max)
51266 -               return -ENODEV;
51267 +       if (apid > matrix_mdev->matrix.apm_max) {
51268 +               ret = -ENODEV;
51269 +               goto done;
51270 +       }
51272 -       mutex_lock(&matrix_dev->lock);
51273         clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
51274 +       ret = count;
51275 +done:
51276         mutex_unlock(&matrix_dev->lock);
51278 -       return count;
51279 +       return ret;
51281  static DEVICE_ATTR_WO(unassign_adapter);
51283 @@ -753,17 +789,24 @@ static ssize_t assign_domain_store(struct device *dev,
51284         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
51285         unsigned long max_apqi = matrix_mdev->matrix.aqm_max;
51287 -       /* If the guest is running, disallow assignment of domain */
51288 -       if (matrix_mdev->kvm)
51289 -               return -EBUSY;
51290 +       mutex_lock(&matrix_dev->lock);
51292 +       /*
51293 +        * If the KVM pointer is in flux or the guest is running, disallow
51294 +        * assignment of domain
51295 +        */
51296 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
51297 +               ret = -EBUSY;
51298 +               goto done;
51299 +       }
51301         ret = kstrtoul(buf, 0, &apqi);
51302         if (ret)
51303 -               return ret;
51304 -       if (apqi > max_apqi)
51305 -               return -ENODEV;
51307 -       mutex_lock(&matrix_dev->lock);
51308 +               goto done;
51309 +       if (apqi > max_apqi) {
51310 +               ret = -ENODEV;
51311 +               goto done;
51312 +       }
51314         ret = vfio_ap_mdev_verify_queues_reserved_for_apqi(matrix_mdev, apqi);
51315         if (ret)
51316 @@ -814,22 +857,32 @@ static ssize_t unassign_domain_store(struct device *dev,
51317         struct mdev_device *mdev = mdev_from_dev(dev);
51318         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
51320 -       /* If the guest is running, disallow un-assignment of domain */
51321 -       if (matrix_mdev->kvm)
51322 -               return -EBUSY;
51323 +       mutex_lock(&matrix_dev->lock);
51325 +       /*
51326 +        * If the KVM pointer is in flux or the guest is running, disallow
51327 +        * un-assignment of domain
51328 +        */
51329 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
51330 +               ret = -EBUSY;
51331 +               goto done;
51332 +       }
51334         ret = kstrtoul(buf, 0, &apqi);
51335         if (ret)
51336 -               return ret;
51337 +               goto done;
51339 -       if (apqi > matrix_mdev->matrix.aqm_max)
51340 -               return -ENODEV;
51341 +       if (apqi > matrix_mdev->matrix.aqm_max) {
51342 +               ret = -ENODEV;
51343 +               goto done;
51344 +       }
51346 -       mutex_lock(&matrix_dev->lock);
51347         clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
51348 -       mutex_unlock(&matrix_dev->lock);
51349 +       ret = count;
51351 -       return count;
51352 +done:
51353 +       mutex_unlock(&matrix_dev->lock);
51354 +       return ret;
51356  static DEVICE_ATTR_WO(unassign_domain);
51358 @@ -858,27 +911,36 @@ static ssize_t assign_control_domain_store(struct device *dev,
51359         struct mdev_device *mdev = mdev_from_dev(dev);
51360         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
51362 -       /* If the guest is running, disallow assignment of control domain */
51363 -       if (matrix_mdev->kvm)
51364 -               return -EBUSY;
51365 +       mutex_lock(&matrix_dev->lock);
51367 +       /*
51368 +        * If the KVM pointer is in flux or the guest is running, disallow
51369 +        * assignment of control domain.
51370 +        */
51371 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
51372 +               ret = -EBUSY;
51373 +               goto done;
51374 +       }
51376         ret = kstrtoul(buf, 0, &id);
51377         if (ret)
51378 -               return ret;
51379 +               goto done;
51381 -       if (id > matrix_mdev->matrix.adm_max)
51382 -               return -ENODEV;
51383 +       if (id > matrix_mdev->matrix.adm_max) {
51384 +               ret = -ENODEV;
51385 +               goto done;
51386 +       }
51388         /* Set the bit in the ADM (bitmask) corresponding to the AP control
51389          * domain number (id). The bits in the mask, from most significant to
51390          * least significant, correspond to IDs 0 up to the one less than the
51391          * number of control domains that can be assigned.
51392          */
51393 -       mutex_lock(&matrix_dev->lock);
51394         set_bit_inv(id, matrix_mdev->matrix.adm);
51395 +       ret = count;
51396 +done:
51397         mutex_unlock(&matrix_dev->lock);
51399 -       return count;
51400 +       return ret;
51402  static DEVICE_ATTR_WO(assign_control_domain);
51404 @@ -908,21 +970,30 @@ static ssize_t unassign_control_domain_store(struct device *dev,
51405         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
51406         unsigned long max_domid =  matrix_mdev->matrix.adm_max;
51408 -       /* If the guest is running, disallow un-assignment of control domain */
51409 -       if (matrix_mdev->kvm)
51410 -               return -EBUSY;
51411 +       mutex_lock(&matrix_dev->lock);
51413 +       /*
51414 +        * If the KVM pointer is in flux or the guest is running, disallow
51415 +        * un-assignment of control domain.
51416 +        */
51417 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
51418 +               ret = -EBUSY;
51419 +               goto done;
51420 +       }
51422         ret = kstrtoul(buf, 0, &domid);
51423         if (ret)
51424 -               return ret;
51425 -       if (domid > max_domid)
51426 -               return -ENODEV;
51427 +               goto done;
51428 +       if (domid > max_domid) {
51429 +               ret = -ENODEV;
51430 +               goto done;
51431 +       }
51433 -       mutex_lock(&matrix_dev->lock);
51434         clear_bit_inv(domid, matrix_mdev->matrix.adm);
51435 +       ret = count;
51436 +done:
51437         mutex_unlock(&matrix_dev->lock);
51439 -       return count;
51440 +       return ret;
51442  static DEVICE_ATTR_WO(unassign_control_domain);
51444 @@ -1027,8 +1098,15 @@ static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
51445   * @matrix_mdev: a mediated matrix device
51446   * @kvm: reference to KVM instance
51447   *
51448 - * Verifies no other mediated matrix device has @kvm and sets a reference to
51449 - * it in @matrix_mdev->kvm.
51450 + * Sets all data for @matrix_mdev that are needed to manage AP resources
51451 + * for the guest whose state is represented by @kvm.
51452 + *
51453 + * Note: The matrix_dev->lock must be taken prior to calling
51454 + * this function; however, the lock will be temporarily released while the
51455 + * guest's AP configuration is set to avoid a potential lockdep splat.
51456 + * The kvm->lock is taken to set the guest's AP configuration which, under
51457 + * certain circumstances, will result in a circular lock dependency if this is
51458 + * done under the matrix_dev->lock.
51459   *
51460   * Return 0 if no other mediated matrix device has a reference to @kvm;
51461   * otherwise, returns an -EPERM.
51462 @@ -1038,14 +1116,25 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
51464         struct ap_matrix_mdev *m;
51466 -       list_for_each_entry(m, &matrix_dev->mdev_list, node) {
51467 -               if ((m != matrix_mdev) && (m->kvm == kvm))
51468 -                       return -EPERM;
51469 -       }
51470 +       if (kvm->arch.crypto.crycbd) {
51471 +               list_for_each_entry(m, &matrix_dev->mdev_list, node) {
51472 +                       if (m != matrix_mdev && m->kvm == kvm)
51473 +                               return -EPERM;
51474 +               }
51476 -       matrix_mdev->kvm = kvm;
51477 -       kvm_get_kvm(kvm);
51478 -       kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
51479 +               kvm_get_kvm(kvm);
51480 +               matrix_mdev->kvm_busy = true;
51481 +               mutex_unlock(&matrix_dev->lock);
51482 +               kvm_arch_crypto_set_masks(kvm,
51483 +                                         matrix_mdev->matrix.apm,
51484 +                                         matrix_mdev->matrix.aqm,
51485 +                                         matrix_mdev->matrix.adm);
51486 +               mutex_lock(&matrix_dev->lock);
51487 +               kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
51488 +               matrix_mdev->kvm = kvm;
51489 +               matrix_mdev->kvm_busy = false;
51490 +               wake_up_all(&matrix_mdev->wait_for_kvm);
51491 +       }
51493         return 0;
51495 @@ -1079,51 +1168,65 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
51496         return NOTIFY_DONE;
51499 +/**
51500 + * vfio_ap_mdev_unset_kvm
51501 + *
51502 + * @matrix_mdev: a matrix mediated device
51503 + *
51504 + * Performs clean-up of resources no longer needed by @matrix_mdev.
51505 + *
51506 + * Note: The matrix_dev->lock must be taken prior to calling
51507 + * this function; however, the lock will be temporarily released while the
51508 + * guest's AP configuration is cleared to avoid a potential lockdep splat.
51509 + * The kvm->lock is taken to clear the guest's AP configuration which, under
51510 + * certain circumstances, will result in a circular lock dependency if this is
51511 + * done under the matrix_dev->lock.
51512 + *
51513 + */
51514  static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
51516 -       kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
51517 -       matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
51518 -       vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
51519 -       kvm_put_kvm(matrix_mdev->kvm);
51520 -       matrix_mdev->kvm = NULL;
51521 +       /*
51522 +        * If the KVM pointer is in the process of being set, wait until the
51523 +        * process has completed.
51524 +        */
51525 +       wait_event_cmd(matrix_mdev->wait_for_kvm,
51526 +                      !matrix_mdev->kvm_busy,
51527 +                      mutex_unlock(&matrix_dev->lock),
51528 +                      mutex_lock(&matrix_dev->lock));
51530 +       if (matrix_mdev->kvm) {
51531 +               matrix_mdev->kvm_busy = true;
51532 +               mutex_unlock(&matrix_dev->lock);
51533 +               kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
51534 +               mutex_lock(&matrix_dev->lock);
51535 +               vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
51536 +               matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
51537 +               kvm_put_kvm(matrix_mdev->kvm);
51538 +               matrix_mdev->kvm = NULL;
51539 +               matrix_mdev->kvm_busy = false;
51540 +               wake_up_all(&matrix_mdev->wait_for_kvm);
51541 +       }
51544  static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
51545                                        unsigned long action, void *data)
51547 -       int ret, notify_rc = NOTIFY_OK;
51548 +       int notify_rc = NOTIFY_OK;
51549         struct ap_matrix_mdev *matrix_mdev;
51551         if (action != VFIO_GROUP_NOTIFY_SET_KVM)
51552                 return NOTIFY_OK;
51554 -       matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
51555         mutex_lock(&matrix_dev->lock);
51556 +       matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
51558 -       if (!data) {
51559 -               if (matrix_mdev->kvm)
51560 -                       vfio_ap_mdev_unset_kvm(matrix_mdev);
51561 -               goto notify_done;
51562 -       }
51564 -       ret = vfio_ap_mdev_set_kvm(matrix_mdev, data);
51565 -       if (ret) {
51566 -               notify_rc = NOTIFY_DONE;
51567 -               goto notify_done;
51568 -       }
51570 -       /* If there is no CRYCB pointer, then we can't copy the masks */
51571 -       if (!matrix_mdev->kvm->arch.crypto.crycbd) {
51572 +       if (!data)
51573 +               vfio_ap_mdev_unset_kvm(matrix_mdev);
51574 +       else if (vfio_ap_mdev_set_kvm(matrix_mdev, data))
51575                 notify_rc = NOTIFY_DONE;
51576 -               goto notify_done;
51577 -       }
51579 -       kvm_arch_crypto_set_masks(matrix_mdev->kvm, matrix_mdev->matrix.apm,
51580 -                                 matrix_mdev->matrix.aqm,
51581 -                                 matrix_mdev->matrix.adm);
51583 -notify_done:
51584         mutex_unlock(&matrix_dev->lock);
51586         return notify_rc;
51589 @@ -1258,8 +1361,7 @@ static void vfio_ap_mdev_release(struct mdev_device *mdev)
51590         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
51592         mutex_lock(&matrix_dev->lock);
51593 -       if (matrix_mdev->kvm)
51594 -               vfio_ap_mdev_unset_kvm(matrix_mdev);
51595 +       vfio_ap_mdev_unset_kvm(matrix_mdev);
51596         mutex_unlock(&matrix_dev->lock);
51598         vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
51599 @@ -1293,6 +1395,7 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
51600                                     unsigned int cmd, unsigned long arg)
51602         int ret;
51603 +       struct ap_matrix_mdev *matrix_mdev;
51605         mutex_lock(&matrix_dev->lock);
51606         switch (cmd) {
51607 @@ -1300,6 +1403,21 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
51608                 ret = vfio_ap_mdev_get_device_info(arg);
51609                 break;
51610         case VFIO_DEVICE_RESET:
51611 +               matrix_mdev = mdev_get_drvdata(mdev);
51612 +               if (WARN(!matrix_mdev, "Driver data missing from mdev!")) {
51613 +                       ret = -EINVAL;
51614 +                       break;
51615 +               }
51617 +               /*
51618 +                * If the KVM pointer is in the process of being set, wait until
51619 +                * the process has completed.
51620 +                */
51621 +               wait_event_cmd(matrix_mdev->wait_for_kvm,
51622 +                              !matrix_mdev->kvm_busy,
51623 +                              mutex_unlock(&matrix_dev->lock),
51624 +                              mutex_lock(&matrix_dev->lock));
51626                 ret = vfio_ap_mdev_reset_queues(mdev);
51627                 break;
51628         default:
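
Every guard added in this file is an instance of one pattern: `kvm_busy` marks a KVM set/unset in flight, and waiters use wait_event_cmd() so matrix_dev->lock is dropped across the sleep and retaken before the condition is rechecked. Condensed to its skeleton (illustrative, not a literal excerpt):

	/* Waiter side: entered with matrix_dev->lock held. */
	wait_event_cmd(matrix_mdev->wait_for_kvm,
		       !matrix_mdev->kvm_busy,
		       mutex_unlock(&matrix_dev->lock),	/* runs before sleeping */
		       mutex_lock(&matrix_dev->lock));	/* runs after waking */
	/* matrix_mdev->kvm is stable here, still under matrix_dev->lock. */

	/* Updater side: publish "busy", drop the lock for the kvm->lock work,
	 * then retake it, clear "busy" and wake all waiters.
	 */
	matrix_mdev->kvm_busy = true;
	mutex_unlock(&matrix_dev->lock);
	kvm_arch_crypto_set_masks(kvm, apm, aqm, adm);	/* takes kvm->lock */
	mutex_lock(&matrix_dev->lock);
	matrix_mdev->kvm_busy = false;
	wake_up_all(&matrix_mdev->wait_for_kvm);
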
51629 diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
51630 index 28e9d9989768..f82a6396acae 100644
51631 --- a/drivers/s390/crypto/vfio_ap_private.h
51632 +++ b/drivers/s390/crypto/vfio_ap_private.h
51633 @@ -83,6 +83,8 @@ struct ap_matrix_mdev {
51634         struct ap_matrix matrix;
51635         struct notifier_block group_notifier;
51636         struct notifier_block iommu_notifier;
51637 +       bool kvm_busy;
51638 +       wait_queue_head_t wait_for_kvm;
51639         struct kvm *kvm;
51640         struct kvm_s390_module_hook pqap_hook;
51641         struct mdev_device *mdev;
51642 diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
51643 index 33b23884b133..09fe6bb8880b 100644
51644 --- a/drivers/s390/crypto/zcrypt_card.c
51645 +++ b/drivers/s390/crypto/zcrypt_card.c
51646 @@ -192,5 +192,6 @@ void zcrypt_card_unregister(struct zcrypt_card *zc)
51647         spin_unlock(&zcrypt_list_lock);
51648         sysfs_remove_group(&zc->card->ap_dev.device.kobj,
51649                            &zcrypt_card_attr_group);
51650 +       zcrypt_card_put(zc);
51652  EXPORT_SYMBOL(zcrypt_card_unregister);
51653 diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
51654 index 5062eae73d4a..c3ffbd26b73f 100644
51655 --- a/drivers/s390/crypto/zcrypt_queue.c
51656 +++ b/drivers/s390/crypto/zcrypt_queue.c
51657 @@ -223,5 +223,6 @@ void zcrypt_queue_unregister(struct zcrypt_queue *zq)
51658         sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
51659                            &zcrypt_queue_attr_group);
51660         zcrypt_card_put(zc);
51661 +       zcrypt_queue_put(zq);
51663  EXPORT_SYMBOL(zcrypt_queue_unregister);
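
Both zcrypt hunks fix the same leak: the reference the object acquired when it was registered was never dropped on unregister, so its kref could not fall to zero on hot-unplug. Assuming the usual zcrypt_card_get()/zcrypt_queue_get() at register time, the pairing now reads (sketch; list handling elided):

	/* register: */
	zcrypt_queue_get(zq);		/* reference held for the zcrypt list */

	/* unregister (this patch adds the put): */
	zcrypt_queue_put(zq);		/* balances the get from registration */
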
51664 diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
51665 index 91acff493612..fd9b869d278e 100644
51666 --- a/drivers/s390/net/qeth_core.h
51667 +++ b/drivers/s390/net/qeth_core.h
51668 @@ -437,6 +437,7 @@ struct qeth_qdio_out_buffer {
51670         struct qeth_qdio_out_q *q;
51671         struct list_head list_entry;
51672 +       struct qaob *aob;
51673  };
51675  struct qeth_card;
51676 @@ -499,7 +500,6 @@ struct qeth_out_q_stats {
51677  struct qeth_qdio_out_q {
51678         struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
51679         struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
51680 -       struct qdio_outbuf_state *bufstates; /* convenience pointer */
51681         struct list_head pending_bufs;
51682         struct qeth_out_q_stats stats;
51683         spinlock_t lock;
51684 @@ -563,7 +563,6 @@ struct qeth_qdio_info {
51685         /* output */
51686         unsigned int no_out_queues;
51687         struct qeth_qdio_out_q *out_qs[QETH_MAX_OUT_QUEUES];
51688 -       struct qdio_outbuf_state *out_bufstates;
51690         /* priority queueing */
51691         int do_prio_queueing;
51692 diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
51693 index a814698387bc..175b82b98f36 100644
51694 --- a/drivers/s390/net/qeth_core_main.c
51695 +++ b/drivers/s390/net/qeth_core_main.c
51696 @@ -369,8 +369,7 @@ static int qeth_cq_init(struct qeth_card *card)
51697                                    QDIO_MAX_BUFFERS_PER_Q);
51698                 card->qdio.c_q->next_buf_to_init = 127;
51699                 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
51700 -                            card->qdio.no_in_queues - 1, 0,
51701 -                            127);
51702 +                            card->qdio.no_in_queues - 1, 0, 127, NULL);
51703                 if (rc) {
51704                         QETH_CARD_TEXT_(card, 2, "1err%d", rc);
51705                         goto out;
51706 @@ -383,48 +382,22 @@ static int qeth_cq_init(struct qeth_card *card)
51708  static int qeth_alloc_cq(struct qeth_card *card)
51710 -       int rc;
51712         if (card->options.cq == QETH_CQ_ENABLED) {
51713 -               int i;
51714 -               struct qdio_outbuf_state *outbuf_states;
51716                 QETH_CARD_TEXT(card, 2, "cqon");
51717                 card->qdio.c_q = qeth_alloc_qdio_queue();
51718                 if (!card->qdio.c_q) {
51719 -                       rc = -1;
51720 -                       goto kmsg_out;
51721 +                       dev_err(&card->gdev->dev, "Failed to create completion queue\n");
51722 +                       return -ENOMEM;
51723                 }
51725                 card->qdio.no_in_queues = 2;
51726 -               card->qdio.out_bufstates =
51727 -                       kcalloc(card->qdio.no_out_queues *
51728 -                                       QDIO_MAX_BUFFERS_PER_Q,
51729 -                               sizeof(struct qdio_outbuf_state),
51730 -                               GFP_KERNEL);
51731 -               outbuf_states = card->qdio.out_bufstates;
51732 -               if (outbuf_states == NULL) {
51733 -                       rc = -1;
51734 -                       goto free_cq_out;
51735 -               }
51736 -               for (i = 0; i < card->qdio.no_out_queues; ++i) {
51737 -                       card->qdio.out_qs[i]->bufstates = outbuf_states;
51738 -                       outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
51739 -               }
51740         } else {
51741                 QETH_CARD_TEXT(card, 2, "nocq");
51742                 card->qdio.c_q = NULL;
51743                 card->qdio.no_in_queues = 1;
51744         }
51745         QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
51746 -       rc = 0;
51747 -out:
51748 -       return rc;
51749 -free_cq_out:
51750 -       qeth_free_qdio_queue(card->qdio.c_q);
51751 -       card->qdio.c_q = NULL;
51752 -kmsg_out:
51753 -       dev_err(&card->gdev->dev, "Failed to create completion queue\n");
51754 -       goto out;
51755 +       return 0;
51758  static void qeth_free_cq(struct qeth_card *card)
51759 @@ -434,8 +407,6 @@ static void qeth_free_cq(struct qeth_card *card)
51760                 qeth_free_qdio_queue(card->qdio.c_q);
51761                 card->qdio.c_q = NULL;
51762         }
51763 -       kfree(card->qdio.out_bufstates);
51764 -       card->qdio.out_bufstates = NULL;
51767  static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
51768 @@ -487,12 +458,12 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
51769         switch (atomic_xchg(&buffer->state, new_state)) {
51770         case QETH_QDIO_BUF_PRIMED:
51771                 /* Faster than TX completion code, let it handle the async
51772 -                * completion for us.
51773 +                * completion for us. It will also recycle the QAOB.
51774                  */
51775                 break;
51776         case QETH_QDIO_BUF_PENDING:
51777                 /* TX completion code is active and will handle the async
51778 -                * completion for us.
51779 +                * completion for us. It will also recycle the QAOB.
51780                  */
51781                 break;
51782         case QETH_QDIO_BUF_NEED_QAOB:
51783 @@ -501,7 +472,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
51784                 qeth_notify_skbs(buffer->q, buffer, notification);
51786                 /* Free dangling allocations. The attached skbs are handled by
51787 -                * qeth_tx_complete_pending_bufs().
51788 +                * qeth_tx_complete_pending_bufs(), and so is the QAOB.
51789                  */
51790                 for (i = 0;
51791                      i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
51792 @@ -520,8 +491,6 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
51793         default:
51794                 WARN_ON_ONCE(1);
51795         }
51797 -       qdio_release_aob(aob);
51800  static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
51801 @@ -1451,6 +1420,13 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
51802         atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
51805 +static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf)
51807 +       if (buf->aob)
51808 +               qdio_release_aob(buf->aob);
51809 +       kmem_cache_free(qeth_qdio_outbuf_cache, buf);
51812  static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
51813                                           struct qeth_qdio_out_q *queue,
51814                                           bool drain)
51815 @@ -1468,7 +1444,7 @@ static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
51816                         qeth_tx_complete_buf(buf, drain, 0);
51818                         list_del(&buf->list_entry);
51819 -                       kmem_cache_free(qeth_qdio_outbuf_cache, buf);
51820 +                       qeth_free_out_buf(buf);
51821                 }
51822         }
51824 @@ -1485,7 +1461,7 @@ static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
51826                 qeth_clear_output_buffer(q, q->bufs[j], true, 0);
51827                 if (free) {
51828 -                       kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
51829 +                       qeth_free_out_buf(q->bufs[j]);
51830                         q->bufs[j] = NULL;
51831                 }
51832         }
51833 @@ -2637,7 +2613,7 @@ static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
51835  err_out_bufs:
51836         while (i > 0)
51837 -               kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[--i]);
51838 +               qeth_free_out_buf(q->bufs[--i]);
51839         qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
51840  err_qdio_bufs:
51841         kfree(q);
51842 @@ -3024,7 +3000,8 @@ static int qeth_init_qdio_queues(struct qeth_card *card)
51843         }
51845         card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
51846 -       rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs);
51847 +       rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs,
51848 +                    NULL);
51849         if (rc) {
51850                 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
51851                 return rc;
51852 @@ -3516,7 +3493,7 @@ static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
51853                 }
51855                 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
51856 -                            queue->next_buf_to_init, count);
51857 +                            queue->next_buf_to_init, count, NULL);
51858                 if (rc) {
51859                         QETH_CARD_TEXT(card, 2, "qinberr");
51860                 }
51861 @@ -3625,6 +3602,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
51862         struct qeth_qdio_out_buffer *buf = queue->bufs[index];
51863         unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
51864         struct qeth_card *card = queue->card;
51865 +       struct qaob *aob = NULL;
51866         int rc;
51867         int i;
51869 @@ -3637,16 +3615,24 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
51870                                 SBAL_EFLAGS_LAST_ENTRY;
51871                 queue->coalesced_frames += buf->frames;
51873 -               if (queue->bufstates)
51874 -                       queue->bufstates[bidx].user = buf;
51876                 if (IS_IQD(card)) {
51877                         skb_queue_walk(&buf->skb_list, skb)
51878                                 skb_tx_timestamp(skb);
51879                 }
51880         }
51882 -       if (!IS_IQD(card)) {
51883 +       if (IS_IQD(card)) {
51884 +               if (card->options.cq == QETH_CQ_ENABLED &&
51885 +                   !qeth_iqd_is_mcast_queue(card, queue) &&
51886 +                   count == 1) {
51887 +                       if (!buf->aob)
51888 +                               buf->aob = qdio_allocate_aob();
51889 +                       if (buf->aob) {
51890 +                               aob = buf->aob;
51891 +                               aob->user1 = (u64) buf;
51892 +                       }
51893 +               }
51894 +       } else {
51895                 if (!queue->do_pack) {
51896                         if ((atomic_read(&queue->used_buffers) >=
51897                                 (QETH_HIGH_WATERMARK_PACK -
51898 @@ -3677,8 +3663,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
51899         }
51901         QETH_TXQ_STAT_INC(queue, doorbell);
51902 -       rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
51903 -                    queue->queue_no, index, count);
51904 +       rc = do_QDIO(CARD_DDEV(card), qdio_flags, queue->queue_no, index, count,
51905 +                    aob);
51907         switch (rc) {
51908         case 0:
51909 @@ -3814,8 +3800,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
51910                 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
51911         }
51912         rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
51913 -                   card->qdio.c_q->next_buf_to_init,
51914 -                   count);
51915 +                    cq->next_buf_to_init, count, NULL);
51916         if (rc) {
51917                 dev_warn(&card->gdev->dev,
51918                         "QDIO reported an error, rc=%i\n", rc);
51919 @@ -5270,7 +5255,6 @@ static int qeth_qdio_establish(struct qeth_card *card)
51920         init_data.int_parm               = (unsigned long) card;
51921         init_data.input_sbal_addr_array  = in_sbal_ptrs;
51922         init_data.output_sbal_addr_array = out_sbal_ptrs;
51923 -       init_data.output_sbal_state_array = card->qdio.out_bufstates;
51924         init_data.scan_threshold         = IS_IQD(card) ? 0 : 32;
51926         if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
51927 @@ -6069,7 +6053,15 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
51928         bool error = !!qdio_error;
51930         if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
51931 -               WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
51932 +               struct qaob *aob = buffer->aob;
51934 +               if (!aob) {
51935 +                       netdev_WARN_ONCE(card->dev,
51936 +                                        "Pending TX buffer %#x without QAOB on TX queue %u\n",
51937 +                                        bidx, queue->queue_no);
51938 +                       qeth_schedule_recovery(card);
51939 +                       return;
51940 +               }
51942                 QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
51944 @@ -6125,6 +6117,8 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
51945                 default:
51946                         WARN_ON_ONCE(1);
51947                 }
51949 +               memset(aob, 0, sizeof(*aob));
51950         } else if (card->options.cq == QETH_CQ_ENABLED) {
51951                 qeth_notify_skbs(queue, buffer,
51952                                  qeth_compute_cq_notification(sflags, 0));
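
Taken together, the qeth hunks give each outbound buffer one long-lived QAOB instead of relying on a qdio-managed pool: allocated lazily the first time the buffer is flushed on an IQD device with the completion queue enabled, handed to do_QDIO() for that flush, wiped with memset() once the pending completion is resolved, and released only when the buffer itself dies. The lifecycle in one place (condensed from the code added above):

	/* flush: attach (and lazily allocate) the buffer's QAOB */
	if (!buf->aob)
		buf->aob = qdio_allocate_aob();
	if (buf->aob) {
		aob = buf->aob;
		aob->user1 = (u64)buf;	/* lets the CQ handler find the buffer */
	}
	rc = do_QDIO(CARD_DDEV(card), qdio_flags, queue->queue_no, index, count, aob);

	/* completion of a formerly-pending buffer: make the QAOB reusable */
	memset(aob, 0, sizeof(*aob));

	/* teardown: the QAOB dies with its buffer */
	if (buf->aob)
		qdio_release_aob(buf->aob);
	kmem_cache_free(qeth_qdio_outbuf_cache, buf);
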
51953 diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
51954 index 23ab16d65f2a..049596cbfb5d 100644
51955 --- a/drivers/s390/scsi/zfcp_qdio.c
51956 +++ b/drivers/s390/scsi/zfcp_qdio.c
51957 @@ -128,7 +128,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
51958         /*
51959          * put SBALs back to response queue
51960          */
51961 -       if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
51962 +       if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count, NULL))
51963                 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
51966 @@ -298,7 +298,7 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
51967         atomic_sub(sbal_number, &qdio->req_q_free);
51969         retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
51970 -                        q_req->sbal_first, sbal_number);
51971 +                        q_req->sbal_first, sbal_number, NULL);
51973         if (unlikely(retval)) {
51974                 /* Failed to submit the IO, roll back our modifications. */
51975 @@ -463,7 +463,8 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
51976                 sbale->addr = 0;
51977         }
51979 -       if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
51980 +       if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q,
51981 +                   NULL))
51982                 goto failed_qdio;
51984         /* set index of first available SBALS / number of available SBALS */
51985 diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
51986 index ea436a14087f..5eff3368143d 100644
51987 --- a/drivers/scsi/device_handler/scsi_dh_alua.c
51988 +++ b/drivers/scsi/device_handler/scsi_dh_alua.c
51989 @@ -573,10 +573,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
51990                  * even though it shouldn't according to T10.
51991                  * The retry without rtpg_ext_hdr_req set
51992                  * handles this.
51993 +                * Note:  some arrays return a sense key of ILLEGAL_REQUEST
51994 +                * with ASC 00h if they don't support the extended header.
51995                  */
51996                 if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
51997 -                   sense_hdr.sense_key == ILLEGAL_REQUEST &&
51998 -                   sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) {
51999 +                   sense_hdr.sense_key == ILLEGAL_REQUEST) {
52000                         pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
52001                         goto retry;
52002                 }
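
Background for this hunk: ASC/ASCQ 24h/00h decodes as INVALID FIELD IN CDB, the answer a compliant target gives when it rejects the rtpg_ext_hdr_req bit. Because some arrays respond with ILLEGAL_REQUEST and ASC 00h instead, matching on the sense key alone covers both variants before retrying without the extended header. Roughly (illustrative; alua_rtpg() gets its scsi_sense_hdr from the command it issued):

	if (scsi_sense_valid(&sense_hdr) &&
	    !(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
	    sense_hdr.sense_key == ILLEGAL_REQUEST) {
		/* 24h/00h from compliant targets, ASC 00h from quirky ones */
		pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
		goto retry;
	}
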
52003 diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
52004 index 36744968378f..09e49e21deb6 100644
52005 --- a/drivers/scsi/fnic/fnic_scsi.c
52006 +++ b/drivers/scsi/fnic/fnic_scsi.c
52007 @@ -217,7 +217,7 @@ int fnic_fw_reset_handler(struct fnic *fnic)
52009         /* wait for io cmpl */
52010         while (atomic_read(&fnic->in_flight))
52011 -               schedule_timeout(msecs_to_jiffies(1));
52011 +               schedule_msec_hrtimeout(1);
52014         spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
52016 @@ -2277,7 +2277,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
52017                 }
52018         }
52020 -       schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
52021 +       schedule_msec_hrtimeout(2 * fnic->config.ed_tov);
52023         /* walk again to check, if IOs are still pending in fw */
52024         if (fnic_is_abts_pending(fnic, lr_sc))
52025 diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
52026 index 7451377c4cb6..3e359ac752fd 100644
52027 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
52028 +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
52029 @@ -1646,7 +1646,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
52030                 idx = i * HISI_SAS_PHY_INT_NR;
52031                 for (j = 0; j < HISI_SAS_PHY_INT_NR; j++, idx++) {
52032                         irq = platform_get_irq(pdev, idx);
52033 -                       if (!irq) {
52034 +                       if (irq < 0) {
52035                                 dev_err(dev, "irq init: fail map phy interrupt %d\n",
52036                                         idx);
52037                                 return -ENOENT;
52038 @@ -1665,7 +1665,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
52039         idx = hisi_hba->n_phy * HISI_SAS_PHY_INT_NR;
52040         for (i = 0; i < hisi_hba->queue_count; i++, idx++) {
52041                 irq = platform_get_irq(pdev, idx);
52042 -               if (!irq) {
52043 +               if (irq < 0) {
52044                         dev_err(dev, "irq init: could not map cq interrupt %d\n",
52045                                 idx);
52046                         return -ENOENT;
52047 @@ -1683,7 +1683,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
52048         idx = (hisi_hba->n_phy * HISI_SAS_PHY_INT_NR) + hisi_hba->queue_count;
52049         for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++, idx++) {
52050                 irq = platform_get_irq(pdev, idx);
52051 -               if (!irq) {
52052 +               if (irq < 0) {
52053                         dev_err(dev, "irq init: could not map fatal interrupt %d\n",
52054                                 idx);
52055                         return -ENOENT;
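
The three hisi_sas hunks (and the jazz_esp one further down) fix the same bug class: platform_get_irq() returns a negative errno on failure and never returns 0 as a valid IRQ, so testing `!irq` misses every real error, including -EPROBE_DEFER. The hunks above keep their original -ENOENT return; the general idiom is to propagate the errno (sketch; `my_handler` and `priv` are placeholders):

	int irq, ret;

	irq = platform_get_irq(pdev, idx);
	if (irq < 0)
		return irq;	/* propagates -EPROBE_DEFER, -ENXIO, ... */

	ret = request_irq(irq, my_handler, IRQF_SHARED, "my-dev", priv);
	if (ret < 0)
		return ret;
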
52056 diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
52057 index 61831f2fdb30..d6675a25719d 100644
52058 --- a/drivers/scsi/ibmvscsi/ibmvfc.c
52059 +++ b/drivers/scsi/ibmvscsi/ibmvfc.c
52060 @@ -603,8 +603,17 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
52061                 if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
52062                         vhost->action = action;
52063                 break;
52064 +       case IBMVFC_HOST_ACTION_REENABLE:
52065 +       case IBMVFC_HOST_ACTION_RESET:
52066 +               vhost->action = action;
52067 +               break;
52068         case IBMVFC_HOST_ACTION_INIT:
52069         case IBMVFC_HOST_ACTION_TGT_DEL:
52070 +       case IBMVFC_HOST_ACTION_LOGO:
52071 +       case IBMVFC_HOST_ACTION_QUERY_TGTS:
52072 +       case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
52073 +       case IBMVFC_HOST_ACTION_NONE:
52074 +       default:
52075                 switch (vhost->action) {
52076                 case IBMVFC_HOST_ACTION_RESET:
52077                 case IBMVFC_HOST_ACTION_REENABLE:
52078 @@ -614,15 +623,6 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
52079                         break;
52080                 }
52081                 break;
52082 -       case IBMVFC_HOST_ACTION_LOGO:
52083 -       case IBMVFC_HOST_ACTION_QUERY_TGTS:
52084 -       case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
52085 -       case IBMVFC_HOST_ACTION_NONE:
52086 -       case IBMVFC_HOST_ACTION_RESET:
52087 -       case IBMVFC_HOST_ACTION_REENABLE:
52088 -       default:
52089 -               vhost->action = action;
52090 -               break;
52091         }
52094 @@ -5373,30 +5373,49 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
52095         case IBMVFC_HOST_ACTION_INIT_WAIT:
52096                 break;
52097         case IBMVFC_HOST_ACTION_RESET:
52098 -               vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
52099                 list_splice_init(&vhost->purge, &purge);
52100                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
52101                 ibmvfc_complete_purge(&purge);
52102                 rc = ibmvfc_reset_crq(vhost);
52104                 spin_lock_irqsave(vhost->host->host_lock, flags);
52105 -               if (rc == H_CLOSED)
52106 +               if (!rc || rc == H_CLOSED)
52107                         vio_enable_interrupts(to_vio_dev(vhost->dev));
52108 -               if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
52109 -                   (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
52110 -                       ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
52111 -                       dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
52112 +               if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
52113 +                       /*
52114 +                        * The only action we could have changed to would have
52115 +                        * been reenable, in which case, we skip the rest of
52116 +                        * this path and wait until we've done the re-enable
52117 +                        * before sending the crq init.
52118 +                        */
52119 +                       vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
52121 +                       if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
52122 +                           (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
52123 +                               ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
52124 +                               dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
52125 +                       }
52126                 }
52127                 break;
52128         case IBMVFC_HOST_ACTION_REENABLE:
52129 -               vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
52130                 list_splice_init(&vhost->purge, &purge);
52131                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
52132                 ibmvfc_complete_purge(&purge);
52133                 rc = ibmvfc_reenable_crq_queue(vhost);
52135                 spin_lock_irqsave(vhost->host->host_lock, flags);
52136 -               if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
52137 -                       ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
52138 -                       dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
52139 +               if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) {
52140 +                       /*
52141 +                        * The only action we could have changed to would have
52142 +                        * been reset, in which case, we skip the rest of this
52143 +                        * path and wait until we've done the reset before
52144 +                        * sending the crq init.
52145 +                        */
52146 +                       vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
52147 +                       if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
52148 +                               ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
52149 +                               dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
52150 +                       }
52151                 }
52152                 break;
52153         case IBMVFC_HOST_ACTION_LOGO:
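
Both ibmvfc_do_work() hunks close the same window: the host lock is dropped around the CRQ reset or re-enable, during which ibmvfc_set_host_action() may switch vhost->action to the other recovery action (now permitted unconditionally by the first hunk in this file). After retaking the lock, the handler only advances to TGT_DEL and sends the CRQ init if the action is still the one it started with. Skeleton of the pattern; `do_crq_recovery` and `expected` are placeholders for the reset/re-enable step and its matching action:

	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	rc = do_crq_recovery(vhost);		/* lock dropped across the step */
	spin_lock_irqsave(vhost->host->host_lock, flags);

	if (vhost->action == expected) {	/* nobody queued the other action */
		vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
		if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
			dev_err(vhost->dev, "Error after recovery (rc=%d)\n", rc);
		}
	}
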
52154 diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
52155 index f0ed6863cc70..60a88a95a8e2 100644
52156 --- a/drivers/scsi/jazz_esp.c
52157 +++ b/drivers/scsi/jazz_esp.c
52158 @@ -143,7 +143,9 @@ static int esp_jazz_probe(struct platform_device *dev)
52159         if (!esp->command_block)
52160                 goto fail_unmap_regs;
52162 -       host->irq = platform_get_irq(dev, 0);
52163 +       host->irq = err = platform_get_irq(dev, 0);
52164 +       if (err < 0)
52165 +               goto fail_unmap_command_block;
52166         err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
52167         if (err < 0)
52168                 goto fail_unmap_command_block;
52169 diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
52170 index 22826544da7e..9989669beec3 100644
52171 --- a/drivers/scsi/libfc/fc_lport.c
52172 +++ b/drivers/scsi/libfc/fc_lport.c
52173 @@ -1731,7 +1731,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
52175         if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) {
52176                 FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
52177 -                            "lport->mfs:%hu\n", mfs, lport->mfs);
52178 +                            "lport->mfs:%u\n", mfs, lport->mfs);
52179                 fc_lport_error(lport, fp);
52180                 goto out;
52181         }
52182 diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
52183 index bdd9a29f4201..0496a60735ef 100644
52184 --- a/drivers/scsi/lpfc/lpfc_attr.c
52185 +++ b/drivers/scsi/lpfc/lpfc_attr.c
52186 @@ -1687,8 +1687,7 @@ lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
52187                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
52188                                 "0071 Set trunk mode failed with status: %d",
52189                                 rc);
52190 -       if (rc != MBX_TIMEOUT)
52191 -               mempool_free(mbox, phba->mbox_mem_pool);
52192 +       mempool_free(mbox, phba->mbox_mem_pool);
52194         return 0;
52196 @@ -6793,15 +6792,19 @@ lpfc_get_stats(struct Scsi_Host *shost)
52197         pmboxq->ctx_buf = NULL;
52198         pmboxq->vport = vport;
52200 -       if (vport->fc_flag & FC_OFFLINE_MODE)
52201 +       if (vport->fc_flag & FC_OFFLINE_MODE) {
52202                 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
52203 -       else
52204 -               rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
52206 -       if (rc != MBX_SUCCESS) {
52207 -               if (rc != MBX_TIMEOUT)
52208 +               if (rc != MBX_SUCCESS) {
52209                         mempool_free(pmboxq, phba->mbox_mem_pool);
52210 -               return NULL;
52211 +                       return NULL;
52212 +               }
52213 +       } else {
52214 +               rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
52215 +               if (rc != MBX_SUCCESS) {
52216 +                       if (rc != MBX_TIMEOUT)
52217 +                               mempool_free(pmboxq, phba->mbox_mem_pool);
52218 +                       return NULL;
52219 +               }
52220         }
52222         memset(hs, 0, sizeof (struct fc_host_statistics));
52223 @@ -6825,15 +6828,19 @@ lpfc_get_stats(struct Scsi_Host *shost)
52224         pmboxq->ctx_buf = NULL;
52225         pmboxq->vport = vport;
52227 -       if (vport->fc_flag & FC_OFFLINE_MODE)
52228 +       if (vport->fc_flag & FC_OFFLINE_MODE) {
52229                 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
52230 -       else
52231 -               rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
52233 -       if (rc != MBX_SUCCESS) {
52234 -               if (rc != MBX_TIMEOUT)
52235 +               if (rc != MBX_SUCCESS) {
52236                         mempool_free(pmboxq, phba->mbox_mem_pool);
52237 -               return NULL;
52238 +                       return NULL;
52239 +               }
52240 +       } else {
52241 +               rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
52242 +               if (rc != MBX_SUCCESS) {
52243 +                       if (rc != MBX_TIMEOUT)
52244 +                               mempool_free(pmboxq, phba->mbox_mem_pool);
52245 +                       return NULL;
52246 +               }
52247         }
52249         hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
52250 @@ -6906,15 +6913,19 @@ lpfc_reset_stats(struct Scsi_Host *shost)
52251         pmboxq->vport = vport;
52253         if ((vport->fc_flag & FC_OFFLINE_MODE) ||
52254 -               (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
52255 +               (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
52256                 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
52257 -       else
52258 -               rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
52260 -       if (rc != MBX_SUCCESS) {
52261 -               if (rc != MBX_TIMEOUT)
52262 +               if (rc != MBX_SUCCESS) {
52263                         mempool_free(pmboxq, phba->mbox_mem_pool);
52264 -               return;
52265 +                       return;
52266 +               }
52267 +       } else {
52268 +               rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
52269 +               if (rc != MBX_SUCCESS) {
52270 +                       if (rc != MBX_TIMEOUT)
52271 +                               mempool_free(pmboxq, phba->mbox_mem_pool);
52272 +                       return;
52273 +               }
52274         }
52276         memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
52277 @@ -6924,15 +6935,19 @@ lpfc_reset_stats(struct Scsi_Host *shost)
52278         pmboxq->vport = vport;
52280         if ((vport->fc_flag & FC_OFFLINE_MODE) ||
52281 -           (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
52282 +           (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
52283                 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
52284 -       else
52285 +               if (rc != MBX_SUCCESS) {
52286 +                       mempool_free(pmboxq, phba->mbox_mem_pool);
52287 +                       return;
52288 +               }
52289 +       } else {
52290                 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
52292 -       if (rc != MBX_SUCCESS) {
52293 -               if (rc != MBX_TIMEOUT)
52294 -                       mempool_free( pmboxq, phba->mbox_mem_pool);
52295 -               return;
52296 +               if (rc != MBX_SUCCESS) {
52297 +                       if (rc != MBX_TIMEOUT)
52298 +                               mempool_free(pmboxq, phba->mbox_mem_pool);
52299 +                       return;
52300 +               }
52301         }
52303         lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
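
The lpfc_attr.c rework encodes one ownership rule per path: a polled mailbox (MBX_POLL) completes synchronously, so the caller always owns it and may free it on any failure; a waited mailbox can time out with the completion still outstanding, so on MBX_TIMEOUT it must be left for the completion handler. The resulting shape, condensed from the hunks above (`offline` stands for the FC_OFFLINE_MODE / !LPFC_SLI_ACTIVE test):

	if (offline) {
		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			mempool_free(pmboxq, phba->mbox_mem_pool);	/* always safe */
			return NULL;
		}
	} else {
		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
		if (rc != MBX_SUCCESS) {
			if (rc != MBX_TIMEOUT)	/* on timeout the handler still owns it */
				mempool_free(pmboxq, phba->mbox_mem_pool);
			return NULL;
		}
	}
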
52304 diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
52305 index a0aad4896a45..763b1eeb0ca8 100644
52306 --- a/drivers/scsi/lpfc/lpfc_crtn.h
52307 +++ b/drivers/scsi/lpfc/lpfc_crtn.h
52308 @@ -55,9 +55,6 @@ void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
52309  void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
52310  void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
52311  void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
52312 -void lpfc_supported_pages(struct lpfcMboxq *);
52313 -void lpfc_pc_sli4_params(struct lpfcMboxq *);
52314 -int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
52315  int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
52316                            uint16_t, uint16_t, bool);
52317  int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
52318 @@ -351,8 +348,8 @@ int lpfc_sli_hbq_size(void);
52319  int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
52320                                struct lpfc_iocbq *, void *);
52321  int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd);
52322 -int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t,
52323 -                       uint64_t, lpfc_ctx_cmd);
52324 +int lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
52325 +                       lpfc_ctx_cmd abort_cmd);
52326  int
52327  lpfc_sli_abort_taskmgmt(struct lpfc_vport *, struct lpfc_sli_ring *,
52328                         uint16_t, uint64_t, lpfc_ctx_cmd);
52329 diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
52330 index f0a758138ae8..3dd22da3153f 100644
52331 --- a/drivers/scsi/lpfc/lpfc_els.c
52332 +++ b/drivers/scsi/lpfc/lpfc_els.c
52333 @@ -1,7 +1,7 @@
52334  /*******************************************************************
52335   * This file is part of the Emulex Linux Device Driver for         *
52336   * Fibre Channel Host Bus Adapters.                                *
52337 - * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
52338 + * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
52339   * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
52340   * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
52341   * EMULEX and SLI are trademarks of Emulex.                        *
52342 @@ -1600,7 +1600,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
52343         struct lpfc_nodelist *new_ndlp;
52344         struct serv_parm *sp;
52345         uint8_t  name[sizeof(struct lpfc_name)];
52346 -       uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
52347 +       uint32_t keepDID = 0, keep_nlp_flag = 0;
52348         uint32_t keep_new_nlp_flag = 0;
52349         uint16_t keep_nlp_state;
52350         u32 keep_nlp_fc4_type = 0;
52351 @@ -1622,7 +1622,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
52352         new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
52354         /* return immediately if the WWPN matches ndlp */
52355 -       if (new_ndlp == ndlp)
52356 +       if (!new_ndlp || (new_ndlp == ndlp))
52357                 return ndlp;
52359         if (phba->sli_rev == LPFC_SLI_REV4) {
52360 @@ -1641,30 +1641,11 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
52361                          (new_ndlp ? new_ndlp->nlp_flag : 0),
52362                          (new_ndlp ? new_ndlp->nlp_fc4_type : 0));
52364 -       if (!new_ndlp) {
52365 -               rc = memcmp(&ndlp->nlp_portname, name,
52366 -                           sizeof(struct lpfc_name));
52367 -               if (!rc) {
52368 -                       if (active_rrqs_xri_bitmap)
52369 -                               mempool_free(active_rrqs_xri_bitmap,
52370 -                                            phba->active_rrq_pool);
52371 -                       return ndlp;
52372 -               }
52373 -               new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID);
52374 -               if (!new_ndlp) {
52375 -                       if (active_rrqs_xri_bitmap)
52376 -                               mempool_free(active_rrqs_xri_bitmap,
52377 -                                            phba->active_rrq_pool);
52378 -                       return ndlp;
52379 -               }
52380 -       } else {
52381 -               keepDID = new_ndlp->nlp_DID;
52382 -               if (phba->sli_rev == LPFC_SLI_REV4 &&
52383 -                   active_rrqs_xri_bitmap)
52384 -                       memcpy(active_rrqs_xri_bitmap,
52385 -                              new_ndlp->active_rrqs_xri_bitmap,
52386 -                              phba->cfg_rrq_xri_bitmap_sz);
52387 -       }
52388 +       keepDID = new_ndlp->nlp_DID;
52390 +       if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap)
52391 +               memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap,
52392 +                      phba->cfg_rrq_xri_bitmap_sz);
52394         /* At this point in this routine, we know new_ndlp will be
52395          * returned. however, any previous GID_FTs that were done
52396 @@ -2063,13 +2044,12 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
52397   * This routine issues a Port Login (PLOGI) command to a remote N_Port
52398   * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
52399   * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
52400 - * This routine constructs the proper feilds of the PLOGI IOCB and invokes
52401 + * This routine constructs the proper fields of the PLOGI IOCB and invokes
52402   * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
52403   *
52404 - * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
52405 - * will be incremented by 1 for holding the ndlp and the reference to ndlp
52406 - * will be stored into the context1 field of the IOCB for the completion
52407 - * callback function to the PLOGI ELS command.
52408 + * Note that the ndlp reference count will be incremented by 1 for holding
52409 + * the ndlp and the reference to ndlp will be stored into the context1 field
52410 + * of the IOCB for the completion callback function to the PLOGI ELS command.
52411   *
52412   * Return code
52413   *   0 - Successfully issued a plogi for @vport
52414 @@ -2087,29 +2067,28 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
52415         int ret;
52417         ndlp = lpfc_findnode_did(vport, did);
52418 +       if (!ndlp)
52419 +               return 1;
52421 -       if (ndlp) {
52422 -               /* Defer the processing of the issue PLOGI until after the
52423 -                * outstanding UNREG_RPI mbox command completes, unless we
52424 -                * are going offline. This logic does not apply for Fabric DIDs
52425 -                */
52426 -               if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
52427 -                   ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
52428 -                   !(vport->fc_flag & FC_OFFLINE_MODE)) {
52429 -                       lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
52430 -                                        "4110 Issue PLOGI x%x deferred "
52431 -                                        "on NPort x%x rpi x%x Data: x%px\n",
52432 -                                        ndlp->nlp_defer_did, ndlp->nlp_DID,
52433 -                                        ndlp->nlp_rpi, ndlp);
52435 -                       /* We can only defer 1st PLOGI */
52436 -                       if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
52437 -                               ndlp->nlp_defer_did = did;
52438 -                       return 0;
52439 -               }
52440 +       /* Defer the processing of the issue PLOGI until after the
52441 +        * outstanding UNREG_RPI mbox command completes, unless we
52442 +        * are going offline. This logic does not apply for Fabric DIDs
52443 +        */
52444 +       if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
52445 +           ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
52446 +           !(vport->fc_flag & FC_OFFLINE_MODE)) {
52447 +               lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
52448 +                                "4110 Issue PLOGI x%x deferred "
52449 +                                "on NPort x%x rpi x%x Data: x%px\n",
52450 +                                ndlp->nlp_defer_did, ndlp->nlp_DID,
52451 +                                ndlp->nlp_rpi, ndlp);
52453 +               /* We can only defer 1st PLOGI */
52454 +               if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
52455 +                       ndlp->nlp_defer_did = did;
52456 +               return 0;
52457         }
52459 -       /* If ndlp is not NULL, we will bump the reference count on it */
52460         cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
52461         elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
52462                                      ELS_CMD_PLOGI);
52463 @@ -3829,7 +3808,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
52464                 did = irsp->un.elsreq64.remoteID;
52465                 ndlp = lpfc_findnode_did(vport, did);
52466                 if (!ndlp && (cmd != ELS_CMD_PLOGI))
52467 -                       return 1;
52468 +                       return 0;
52469         }
52471         lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
52472 @@ -4473,10 +4452,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
52473   * nlp_flag bitmap in the ndlp data structure, if the mbox command reference
52474   * field in the command IOCB is not NULL, the referred mailbox command will
52475   * be send out, and then invokes the lpfc_els_free_iocb() routine to release
52476 - * the IOCB. Under error conditions, such as when a LS_RJT is returned or a
52477 - * link down event occurred during the discovery, the lpfc_nlp_not_used()
52478 - * routine shall be invoked trying to release the ndlp if no other threads
52479 - * are currently referring it.
52480 + * the IOCB.
52481   **/
52482  static void
52483  lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
52484 @@ -4486,10 +4462,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
52485         struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
52486         struct Scsi_Host  *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
52487         IOCB_t  *irsp;
52488 -       uint8_t *pcmd;
52489         LPFC_MBOXQ_t *mbox = NULL;
52490         struct lpfc_dmabuf *mp = NULL;
52491 -       uint32_t ls_rjt = 0;
52493         irsp = &rspiocb->iocb;
52495 @@ -4501,18 +4475,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
52496         if (cmdiocb->context_un.mbox)
52497                 mbox = cmdiocb->context_un.mbox;
52499 -       /* First determine if this is a LS_RJT cmpl. Note, this callback
52500 -        * function can have cmdiocb->contest1 (ndlp) field set to NULL.
52501 -        */
52502 -       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
52503 -       if (ndlp && (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
52504 -               /* A LS_RJT associated with Default RPI cleanup has its own
52505 -                * separate code path.
52506 -                */
52507 -               if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
52508 -                       ls_rjt = 1;
52509 -       }
52511         /* Check to see if link went down during discovery */
52512         if (!ndlp || lpfc_els_chk_latt(vport)) {
52513                 if (mbox) {
52514 @@ -4523,15 +4485,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
52515                         }
52516                         mempool_free(mbox, phba->mbox_mem_pool);
52517                 }
52518 -               if (ndlp && (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
52519 -                       if (lpfc_nlp_not_used(ndlp)) {
52520 -                               ndlp = NULL;
52521 -                               /* Indicate the node has already released,
52522 -                                * should not reference to it from within
52523 -                                * the routine lpfc_els_free_iocb.
52524 -                                */
52525 -                               cmdiocb->context1 = NULL;
52526 -                       }
52527                 goto out;
52528         }
52530 @@ -4609,29 +4562,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
52531                                 "Data: x%x x%x x%x\n",
52532                                 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
52533                                 ndlp->nlp_rpi);
52535 -                       if (lpfc_nlp_not_used(ndlp)) {
52536 -                               ndlp = NULL;
52537 -                               /* Indicate node has already been released,
52538 -                                * should not reference to it from within
52539 -                                * the routine lpfc_els_free_iocb.
52540 -                                */
52541 -                               cmdiocb->context1 = NULL;
52542 -                       }
52543 -               } else {
52544 -                       /* Do not drop node for lpfc_els_abort'ed ELS cmds */
52545 -                       if (!lpfc_error_lost_link(irsp) &&
52546 -                           ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
52547 -                               if (lpfc_nlp_not_used(ndlp)) {
52548 -                                       ndlp = NULL;
52549 -                                       /* Indicate node has already been
52550 -                                        * released, should not reference
52551 -                                        * to it from within the routine
52552 -                                        * lpfc_els_free_iocb.
52553 -                                        */
52554 -                                       cmdiocb->context1 = NULL;
52555 -                               }
52556 -                       }
52557                 }
52558                 mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
52559                 if (mp) {
52560 @@ -4647,19 +4577,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
52561                         ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
52562                 ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI;
52563                 spin_unlock_irq(&ndlp->lock);
52565 -               /* If the node is not being used by another discovery thread,
52566 -                * and we are sending a reject, we are done with it.
52567 -                * Release driver reference count here and free associated
52568 -                * resources.
52569 -                */
52570 -               if (ls_rjt)
52571 -                       if (lpfc_nlp_not_used(ndlp))
52572 -                               /* Indicate node has already been released,
52573 -                                * should not reference to it from within
52574 -                                * the routine lpfc_els_free_iocb.
52575 -                                */
52576 -                               cmdiocb->context1 = NULL;
52577         }
52579         /* Release the originating I/O reference. */
52580 diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
52581 index 48ca4a612f80..c5176f406386 100644
52582 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c
52583 +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
52584 @@ -140,11 +140,8 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
52585                               "rport terminate: sid:x%x did:x%x flg:x%x",
52586                               ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
52588 -       if (ndlp->nlp_sid != NLP_NO_SID) {
52589 -               lpfc_sli_abort_iocb(vport,
52590 -                                   &vport->phba->sli.sli3_ring[LPFC_FCP_RING],
52591 -                                   ndlp->nlp_sid, 0, LPFC_CTX_TGT);
52592 -       }
52593 +       if (ndlp->nlp_sid != NLP_NO_SID)
52594 +               lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
52597  /*
52598 @@ -299,8 +296,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
52600         if (ndlp->nlp_sid != NLP_NO_SID) {
52601                 warn_on = 1;
52602 -               lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
52603 -                                   ndlp->nlp_sid, 0, LPFC_CTX_TGT);
52604 +               lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
52605         }
52607         if (warn_on) {
52608 diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
52609 index 541b9aef6bfe..f5bc2c32a817 100644
52610 --- a/drivers/scsi/lpfc/lpfc_hw4.h
52611 +++ b/drivers/scsi/lpfc/lpfc_hw4.h
52612 @@ -124,6 +124,7 @@ struct lpfc_sli_intf {
52613  /* Define SLI4 Alignment requirements. */
52614  #define LPFC_ALIGN_16_BYTE     16
52615  #define LPFC_ALIGN_64_BYTE     64
52616 +#define SLI4_PAGE_SIZE         4096
52618  /* Define SLI4 specific definitions. */
52619  #define LPFC_MQ_CQE_BYTE_OFFSET        256
52620 @@ -2976,62 +2977,6 @@ struct lpfc_mbx_request_features {
52621  #define lpfc_mbx_rq_ftr_rsp_mrqp_WORD          word3
52622  };
52624 -struct lpfc_mbx_supp_pages {
52625 -       uint32_t word1;
52626 -#define qs_SHIFT                               0
52627 -#define qs_MASK                                        0x00000001
52628 -#define qs_WORD                                        word1
52629 -#define wr_SHIFT                               1
52630 -#define wr_MASK                                0x00000001
52631 -#define wr_WORD                                        word1
52632 -#define pf_SHIFT                               8
52633 -#define pf_MASK                                        0x000000ff
52634 -#define pf_WORD                                        word1
52635 -#define cpn_SHIFT                              16
52636 -#define cpn_MASK                               0x000000ff
52637 -#define cpn_WORD                               word1
52638 -       uint32_t word2;
52639 -#define list_offset_SHIFT                      0
52640 -#define list_offset_MASK                       0x000000ff
52641 -#define list_offset_WORD                       word2
52642 -#define next_offset_SHIFT                      8
52643 -#define next_offset_MASK                       0x000000ff
52644 -#define next_offset_WORD                       word2
52645 -#define elem_cnt_SHIFT                         16
52646 -#define elem_cnt_MASK                          0x000000ff
52647 -#define elem_cnt_WORD                          word2
52648 -       uint32_t word3;
52649 -#define pn_0_SHIFT                             24
52650 -#define pn_0_MASK                              0x000000ff
52651 -#define pn_0_WORD                              word3
52652 -#define pn_1_SHIFT                             16
52653 -#define pn_1_MASK                              0x000000ff
52654 -#define pn_1_WORD                              word3
52655 -#define pn_2_SHIFT                             8
52656 -#define pn_2_MASK                              0x000000ff
52657 -#define pn_2_WORD                              word3
52658 -#define pn_3_SHIFT                             0
52659 -#define pn_3_MASK                              0x000000ff
52660 -#define pn_3_WORD                              word3
52661 -       uint32_t word4;
52662 -#define pn_4_SHIFT                             24
52663 -#define pn_4_MASK                              0x000000ff
52664 -#define pn_4_WORD                              word4
52665 -#define pn_5_SHIFT                             16
52666 -#define pn_5_MASK                              0x000000ff
52667 -#define pn_5_WORD                              word4
52668 -#define pn_6_SHIFT                             8
52669 -#define pn_6_MASK                              0x000000ff
52670 -#define pn_6_WORD                              word4
52671 -#define pn_7_SHIFT                             0
52672 -#define pn_7_MASK                              0x000000ff
52673 -#define pn_7_WORD                              word4
52674 -       uint32_t rsvd[27];
52675 -#define LPFC_SUPP_PAGES                        0
52676 -#define LPFC_BLOCK_GUARD_PROFILES      1
52677 -#define LPFC_SLI4_PARAMETERS           2
52680  struct lpfc_mbx_memory_dump_type3 {
52681         uint32_t word1;
52682  #define lpfc_mbx_memory_dump_type3_type_SHIFT    0
52683 @@ -3248,121 +3193,6 @@ struct user_eeprom {
52684         uint8_t reserved191[57];
52685  };
52687 -struct lpfc_mbx_pc_sli4_params {
52688 -       uint32_t word1;
52689 -#define qs_SHIFT                               0
52690 -#define qs_MASK                                        0x00000001
52691 -#define qs_WORD                                        word1
52692 -#define wr_SHIFT                               1
52693 -#define wr_MASK                                        0x00000001
52694 -#define wr_WORD                                        word1
52695 -#define pf_SHIFT                               8
52696 -#define pf_MASK                                        0x000000ff
52697 -#define pf_WORD                                        word1
52698 -#define cpn_SHIFT                              16
52699 -#define cpn_MASK                               0x000000ff
52700 -#define cpn_WORD                               word1
52701 -       uint32_t word2;
52702 -#define if_type_SHIFT                          0
52703 -#define if_type_MASK                           0x00000007
52704 -#define if_type_WORD                           word2
52705 -#define sli_rev_SHIFT                          4
52706 -#define sli_rev_MASK                           0x0000000f
52707 -#define sli_rev_WORD                           word2
52708 -#define sli_family_SHIFT                       8
52709 -#define sli_family_MASK                                0x000000ff
52710 -#define sli_family_WORD                                word2
52711 -#define featurelevel_1_SHIFT                   16
52712 -#define featurelevel_1_MASK                    0x000000ff
52713 -#define featurelevel_1_WORD                    word2
52714 -#define featurelevel_2_SHIFT                   24
52715 -#define featurelevel_2_MASK                    0x0000001f
52716 -#define featurelevel_2_WORD                    word2
52717 -       uint32_t word3;
52718 -#define fcoe_SHIFT                             0
52719 -#define fcoe_MASK                              0x00000001
52720 -#define fcoe_WORD                              word3
52721 -#define fc_SHIFT                               1
52722 -#define fc_MASK                                        0x00000001
52723 -#define fc_WORD                                        word3
52724 -#define nic_SHIFT                              2
52725 -#define nic_MASK                               0x00000001
52726 -#define nic_WORD                               word3
52727 -#define iscsi_SHIFT                            3
52728 -#define iscsi_MASK                             0x00000001
52729 -#define iscsi_WORD                             word3
52730 -#define rdma_SHIFT                             4
52731 -#define rdma_MASK                              0x00000001
52732 -#define rdma_WORD                              word3
52733 -       uint32_t sge_supp_len;
52734 -#define SLI4_PAGE_SIZE 4096
52735 -       uint32_t word5;
52736 -#define if_page_sz_SHIFT                       0
52737 -#define if_page_sz_MASK                                0x0000ffff
52738 -#define if_page_sz_WORD                                word5
52739 -#define loopbk_scope_SHIFT                     24
52740 -#define loopbk_scope_MASK                      0x0000000f
52741 -#define loopbk_scope_WORD                      word5
52742 -#define rq_db_window_SHIFT                     28
52743 -#define rq_db_window_MASK                      0x0000000f
52744 -#define rq_db_window_WORD                      word5
52745 -       uint32_t word6;
52746 -#define eq_pages_SHIFT                         0
52747 -#define eq_pages_MASK                          0x0000000f
52748 -#define eq_pages_WORD                          word6
52749 -#define eqe_size_SHIFT                         8
52750 -#define eqe_size_MASK                          0x000000ff
52751 -#define eqe_size_WORD                          word6
52752 -       uint32_t word7;
52753 -#define cq_pages_SHIFT                         0
52754 -#define cq_pages_MASK                          0x0000000f
52755 -#define cq_pages_WORD                          word7
52756 -#define cqe_size_SHIFT                         8
52757 -#define cqe_size_MASK                          0x000000ff
52758 -#define cqe_size_WORD                          word7
52759 -       uint32_t word8;
52760 -#define mq_pages_SHIFT                         0
52761 -#define mq_pages_MASK                          0x0000000f
52762 -#define mq_pages_WORD                          word8
52763 -#define mqe_size_SHIFT                         8
52764 -#define mqe_size_MASK                          0x000000ff
52765 -#define mqe_size_WORD                          word8
52766 -#define mq_elem_cnt_SHIFT                      16
52767 -#define mq_elem_cnt_MASK                       0x000000ff
52768 -#define mq_elem_cnt_WORD                       word8
52769 -       uint32_t word9;
52770 -#define wq_pages_SHIFT                         0
52771 -#define wq_pages_MASK                          0x0000ffff
52772 -#define wq_pages_WORD                          word9
52773 -#define wqe_size_SHIFT                         8
52774 -#define wqe_size_MASK                          0x000000ff
52775 -#define wqe_size_WORD                          word9
52776 -       uint32_t word10;
52777 -#define rq_pages_SHIFT                         0
52778 -#define rq_pages_MASK                          0x0000ffff
52779 -#define rq_pages_WORD                          word10
52780 -#define rqe_size_SHIFT                         8
52781 -#define rqe_size_MASK                          0x000000ff
52782 -#define rqe_size_WORD                          word10
52783 -       uint32_t word11;
52784 -#define hdr_pages_SHIFT                                0
52785 -#define hdr_pages_MASK                         0x0000000f
52786 -#define hdr_pages_WORD                         word11
52787 -#define hdr_size_SHIFT                         8
52788 -#define hdr_size_MASK                          0x0000000f
52789 -#define hdr_size_WORD                          word11
52790 -#define hdr_pp_align_SHIFT                     16
52791 -#define hdr_pp_align_MASK                      0x0000ffff
52792 -#define hdr_pp_align_WORD                      word11
52793 -       uint32_t word12;
52794 -#define sgl_pages_SHIFT                                0
52795 -#define sgl_pages_MASK                         0x0000000f
52796 -#define sgl_pages_WORD                         word12
52797 -#define sgl_pp_align_SHIFT                     16
52798 -#define sgl_pp_align_MASK                      0x0000ffff
52799 -#define sgl_pp_align_WORD                      word12
52800 -       uint32_t rsvd_13_63[51];
52802  #define SLI4_PAGE_ALIGN(addr) (((addr)+((SLI4_PAGE_SIZE)-1)) \
52803                                &(~((SLI4_PAGE_SIZE)-1)))
52805 @@ -3994,8 +3824,6 @@ struct lpfc_mqe {
52806                 struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
52807                 struct lpfc_mbx_query_fw_config query_fw_cfg;
52808                 struct lpfc_mbx_set_beacon_config beacon_config;
52809 -               struct lpfc_mbx_supp_pages supp_pages;
52810 -               struct lpfc_mbx_pc_sli4_params sli4_params;
52811                 struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
52812                 struct lpfc_mbx_set_link_diag_state link_diag_state;
52813                 struct lpfc_mbx_set_link_diag_loopback link_diag_loopback;
52814 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
52815 index 71f340dd4fbd..a67051ba3f12 100644
52816 --- a/drivers/scsi/lpfc/lpfc_init.c
52817 +++ b/drivers/scsi/lpfc/lpfc_init.c
52818 @@ -6573,8 +6573,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
52819         LPFC_MBOXQ_t *mboxq;
52820         MAILBOX_t *mb;
52821         int rc, i, max_buf_size;
52822 -       uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
52823 -       struct lpfc_mqe *mqe;
52824         int longs;
52825         int extra;
52826         uint64_t wwn;
52827 @@ -6808,32 +6806,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
52829         lpfc_nvme_mod_param_dep(phba);
52831 -       /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
52832 -       lpfc_supported_pages(mboxq);
52833 -       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
52834 -       if (!rc) {
52835 -               mqe = &mboxq->u.mqe;
52836 -               memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
52837 -                      LPFC_MAX_SUPPORTED_PAGES);
52838 -               for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
52839 -                       switch (pn_page[i]) {
52840 -                       case LPFC_SLI4_PARAMETERS:
52841 -                               phba->sli4_hba.pc_sli4_params.supported = 1;
52842 -                               break;
52843 -                       default:
52844 -                               break;
52845 -                       }
52846 -               }
52847 -               /* Read the port's SLI4 Parameters capabilities if supported. */
52848 -               if (phba->sli4_hba.pc_sli4_params.supported)
52849 -                       rc = lpfc_pc_sli4_params_get(phba, mboxq);
52850 -               if (rc) {
52851 -                       mempool_free(mboxq, phba->mbox_mem_pool);
52852 -                       rc = -EIO;
52853 -                       goto out_free_bsmbx;
52854 -               }
52855 -       }
52857         /*
52858          * Get sli4 parameters that override parameters from Port capabilities.
52859          * If this call fails, it isn't critical unless the SLI4 parameters come
52860 @@ -9660,8 +9632,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
52861                                 "3250 QUERY_FW_CFG mailbox failed with status "
52862                                 "x%x add_status x%x, mbx status x%x\n",
52863                                 shdr_status, shdr_add_status, rc);
52864 -               if (rc != MBX_TIMEOUT)
52865 -                       mempool_free(mboxq, phba->mbox_mem_pool);
52866 +               mempool_free(mboxq, phba->mbox_mem_pool);
52867                 rc = -ENXIO;
52868                 goto out_error;
52869         }
52870 @@ -9677,8 +9648,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
52871                         "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
52872                         phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
52874 -       if (rc != MBX_TIMEOUT)
52875 -               mempool_free(mboxq, phba->mbox_mem_pool);
52876 +       mempool_free(mboxq, phba->mbox_mem_pool);
52878         /*
52879          * Set up HBA Event Queues (EQs)
52880 @@ -10276,8 +10246,7 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
52881                 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
52882                 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
52883                                          &shdr->response);
52884 -               if (rc != MBX_TIMEOUT)
52885 -                       mempool_free(mboxq, phba->mbox_mem_pool);
52886 +               mempool_free(mboxq, phba->mbox_mem_pool);
52887                 if (shdr_status || shdr_add_status || rc) {
52888                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
52889                                         "0495 SLI_FUNCTION_RESET mailbox "
52890 @@ -12075,78 +12044,6 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
52891                 phba->pport->work_port_events = 0;
52894 - /**
52895 - * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
52896 - * @phba: Pointer to HBA context object.
52897 - * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
52898 - *
52899 - * This function is called in the SLI4 code path to read the port's
52900 - * sli4 capabilities.
52901 - *
52902 - * This function may be be called from any context that can block-wait
52903 - * for the completion.  The expectation is that this routine is called
52904 - * typically from probe_one or from the online routine.
52905 - **/
52906 -int
52907 -lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
52909 -       int rc;
52910 -       struct lpfc_mqe *mqe;
52911 -       struct lpfc_pc_sli4_params *sli4_params;
52912 -       uint32_t mbox_tmo;
52914 -       rc = 0;
52915 -       mqe = &mboxq->u.mqe;
52917 -       /* Read the port's SLI4 Parameters port capabilities */
52918 -       lpfc_pc_sli4_params(mboxq);
52919 -       if (!phba->sli4_hba.intr_enable)
52920 -               rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
52921 -       else {
52922 -               mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
52923 -               rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
52924 -       }
52926 -       if (unlikely(rc))
52927 -               return 1;
52929 -       sli4_params = &phba->sli4_hba.pc_sli4_params;
52930 -       sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
52931 -       sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
52932 -       sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
52933 -       sli4_params->featurelevel_1 = bf_get(featurelevel_1,
52934 -                                            &mqe->un.sli4_params);
52935 -       sli4_params->featurelevel_2 = bf_get(featurelevel_2,
52936 -                                            &mqe->un.sli4_params);
52937 -       sli4_params->proto_types = mqe->un.sli4_params.word3;
52938 -       sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
52939 -       sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
52940 -       sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
52941 -       sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
52942 -       sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
52943 -       sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
52944 -       sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
52945 -       sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
52946 -       sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
52947 -       sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
52948 -       sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
52949 -       sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
52950 -       sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
52951 -       sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
52952 -       sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
52953 -       sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
52954 -       sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
52955 -       sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
52956 -       sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
52957 -       sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
52959 -       /* Make sure that sge_supp_len can be handled by the driver */
52960 -       if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
52961 -               sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
52963 -       return rc;
52966  /**
52967   * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
52968   * @phba: Pointer to HBA context object.
52969 @@ -12205,7 +12102,8 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
52970         else
52971                 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
52972         sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
52973 -       sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
52974 +       sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
52975 +                                          mbx_sli4_parameters);
52976         sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
52977         sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
52978         sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
52979 diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
52980 index c03a7f12dd65..72dd22ad5dcc 100644
52981 --- a/drivers/scsi/lpfc/lpfc_mbox.c
52982 +++ b/drivers/scsi/lpfc/lpfc_mbox.c
52983 @@ -2624,39 +2624,3 @@ lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
52984         resume_rpi->event_tag = ndlp->phba->fc_eventTag;
52987 -/**
52988 - * lpfc_supported_pages - Initialize the PORT_CAPABILITIES supported pages
52989 - *                        mailbox command.
52990 - * @mbox: pointer to lpfc mbox command to initialize.
52991 - *
52992 - * The PORT_CAPABILITIES supported pages mailbox command is issued to
52993 - * retrieve the particular feature pages supported by the port.
52994 - **/
52995 -void
52996 -lpfc_supported_pages(struct lpfcMboxq *mbox)
52998 -       struct lpfc_mbx_supp_pages *supp_pages;
53000 -       memset(mbox, 0, sizeof(*mbox));
53001 -       supp_pages = &mbox->u.mqe.un.supp_pages;
53002 -       bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
53003 -       bf_set(cpn, supp_pages, LPFC_SUPP_PAGES);
53006 -/**
53007 - * lpfc_pc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params mbox cmd.
53008 - * @mbox: pointer to lpfc mbox command to initialize.
53009 - *
53010 - * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to
53011 - * retrieve the particular SLI4 features supported by the port.
53012 - **/
53013 -void
53014 -lpfc_pc_sli4_params(struct lpfcMboxq *mbox)
53016 -       struct lpfc_mbx_pc_sli4_params *sli4_params;
53018 -       memset(mbox, 0, sizeof(*mbox));
53019 -       sli4_params = &mbox->u.mqe.un.sli4_params;
53020 -       bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
53021 -       bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS);
53023 diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
53024 index 135d8e8a42ba..9f05f5e329c6 100644
53025 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c
53026 +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
53027 @@ -279,106 +279,43 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
53028         lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
53031 -/* lpfc_defer_pt2pt_acc - Complete SLI3 pt2pt processing on link up
53032 +/* lpfc_defer_plogi_acc - Issue PLOGI ACC after reg_login completes
53033   * @phba: pointer to lpfc hba data structure.
53034 - * @link_mbox: pointer to CONFIG_LINK mailbox object
53035 + * @login_mbox: pointer to REG_RPI mailbox object
53036   *
53037 - * This routine is only called if we are SLI3, direct connect pt2pt
53038 - * mode and the remote NPort issues the PLOGI after link up.
53039 + * The ACC for a rcv'ed PLOGI is deferred until AFTER the REG_RPI completes
53040   */
53041  static void
53042 -lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
53043 +lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
53045 -       LPFC_MBOXQ_t *login_mbox;
53046 -       MAILBOX_t *mb = &link_mbox->u.mb;
53047         struct lpfc_iocbq *save_iocb;
53048         struct lpfc_nodelist *ndlp;
53049 +       MAILBOX_t *mb = &login_mbox->u.mb;
53051         int rc;
53053 -       ndlp = link_mbox->ctx_ndlp;
53054 -       login_mbox = link_mbox->context3;
53055 +       ndlp = login_mbox->ctx_ndlp;
53056         save_iocb = login_mbox->context3;
53057 -       link_mbox->context3 = NULL;
53058 -       login_mbox->context3 = NULL;
53060 -       /* Check for CONFIG_LINK error */
53061 -       if (mb->mbxStatus) {
53062 -               lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
53063 -                               "4575 CONFIG_LINK fails pt2pt discovery: %x\n",
53064 -                               mb->mbxStatus);
53065 -               mempool_free(login_mbox, phba->mbox_mem_pool);
53066 -               mempool_free(link_mbox, phba->mbox_mem_pool);
53067 -               kfree(save_iocb);
53068 -               return;
53069 -       }
53071 -       /* Now that CONFIG_LINK completed, and our SID is configured,
53072 -        * we can now proceed with sending the PLOGI ACC.
53073 -        */
53074 -       rc = lpfc_els_rsp_acc(link_mbox->vport, ELS_CMD_PLOGI,
53075 -                             save_iocb, ndlp, login_mbox);
53076 -       if (rc) {
53077 -               lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
53078 -                               "4576 PLOGI ACC fails pt2pt discovery: %x\n",
53079 -                               rc);
53080 -               mempool_free(login_mbox, phba->mbox_mem_pool);
53081 +       if (mb->mbxStatus == MBX_SUCCESS) {
53082 +               /* Now that REG_RPI completed successfully,
53083 +                * we can now proceed with sending the PLOGI ACC.
53084 +                */
53085 +               rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI,
53086 +                                     save_iocb, ndlp, NULL);
53087 +               if (rc) {
53088 +                       lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
53089 +                                       "4576 PLOGI ACC fails pt2pt discovery: "
53090 +                                       "DID %x Data: %x\n", ndlp->nlp_DID, rc);
53091 +               }
53092         }
53094 -       mempool_free(link_mbox, phba->mbox_mem_pool);
53095 +       /* Now process the REG_RPI cmpl */
53096 +       lpfc_mbx_cmpl_reg_login(phba, login_mbox);
53097 +       ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
53098         kfree(save_iocb);
53101 -/**
53102 - * lpfc_defer_tgt_acc - Progress SLI4 target rcv PLOGI handler
53103 - * @phba: Pointer to HBA context object.
53104 - * @pmb: Pointer to mailbox object.
53105 - *
53106 - * This function provides the unreg rpi mailbox completion handler for a tgt.
53107 - * The routine frees the memory resources associated with the completed
53108 - * mailbox command and transmits the ELS ACC.
53109 - *
53110 - * This routine is only called if we are SLI4, acting in target
53111 - * mode and the remote NPort issues the PLOGI after link up.
53112 - **/
53113 -static void
53114 -lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
53116 -       struct lpfc_vport *vport = pmb->vport;
53117 -       struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
53118 -       LPFC_MBOXQ_t *mbox = pmb->context3;
53119 -       struct lpfc_iocbq *piocb = NULL;
53120 -       int rc;
53122 -       if (mbox) {
53123 -               pmb->context3 = NULL;
53124 -               piocb = mbox->context3;
53125 -               mbox->context3 = NULL;
53126 -       }
53128 -       /*
53129 -        * Complete the unreg rpi mbx request, and update flags.
53130 -        * This will also restart any deferred events.
53131 -        */
53132 -       lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb);
53134 -       if (!piocb) {
53135 -               lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
53136 -                                "4578 PLOGI ACC fail\n");
53137 -               if (mbox)
53138 -                       mempool_free(mbox, phba->mbox_mem_pool);
53139 -               return;
53140 -       }
53142 -       rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox);
53143 -       if (rc) {
53144 -               lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
53145 -                                "4579 PLOGI ACC fail %x\n", rc);
53146 -               if (mbox)
53147 -                       mempool_free(mbox, phba->mbox_mem_pool);
53148 -       }
53149 -       kfree(piocb);
53152  static int
53153  lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
53154                struct lpfc_iocbq *cmdiocb)
53155 @@ -395,8 +332,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
53156         struct lpfc_iocbq *save_iocb;
53157         struct ls_rjt stat;
53158         uint32_t vid, flag;
53159 -       u16 rpi;
53160 -       int rc, defer_acc;
53161 +       int rc;
53163         memset(&stat, 0, sizeof (struct ls_rjt));
53164         pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
53165 @@ -445,7 +381,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
53166         else
53167                 ndlp->nlp_fcp_info |= CLASS3;
53169 -       defer_acc = 0;
53170         ndlp->nlp_class_sup = 0;
53171         if (sp->cls1.classValid)
53172                 ndlp->nlp_class_sup |= FC_COS_CLASS1;
53173 @@ -539,27 +474,26 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
53175                 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
53177 -               /* Issue config_link / reg_vfi to account for updated TOV's */
53179 +               /* Issue CONFIG_LINK for SLI3 or REG_VFI for SLI4,
53180 +                * to account for updated TOV's / parameters
53181 +                */
53182                 if (phba->sli_rev == LPFC_SLI_REV4)
53183                         lpfc_issue_reg_vfi(vport);
53184                 else {
53185 -                       defer_acc = 1;
53186                         link_mbox = mempool_alloc(phba->mbox_mem_pool,
53187                                                   GFP_KERNEL);
53188                         if (!link_mbox)
53189                                 goto out;
53190                         lpfc_config_link(phba, link_mbox);
53191 -                       link_mbox->mbox_cmpl = lpfc_defer_pt2pt_acc;
53192 +                       link_mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
53193                         link_mbox->vport = vport;
53194                         link_mbox->ctx_ndlp = ndlp;
53196 -                       save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
53197 -                       if (!save_iocb)
53198 +                       rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
53199 +                       if (rc == MBX_NOT_FINISHED) {
53200 +                               mempool_free(link_mbox, phba->mbox_mem_pool);
53201                                 goto out;
53202 -                       /* Save info from cmd IOCB used in rsp */
53203 -                       memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
53204 -                              sizeof(struct lpfc_iocbq));
53205 +                       }
53206                 }
53208                 lpfc_can_disctmo(vport);
53209 @@ -578,59 +512,28 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
53210         if (!login_mbox)
53211                 goto out;
53213 -       /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
53214 -       if (phba->nvmet_support && !defer_acc) {
53215 -               link_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
53216 -               if (!link_mbox)
53217 -                       goto out;
53219 -               /* As unique identifiers such as iotag would be overwritten
53220 -                * with those from the cmdiocb, allocate separate temporary
53221 -                * storage for the copy.
53222 -                */
53223 -               save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
53224 -               if (!save_iocb)
53225 -                       goto out;
53227 -               /* Unreg RPI is required for SLI4. */
53228 -               rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
53229 -               lpfc_unreg_login(phba, vport->vpi, rpi, link_mbox);
53230 -               link_mbox->vport = vport;
53231 -               link_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
53232 -               if (!link_mbox->ctx_ndlp)
53233 -                       goto out;
53235 -               link_mbox->mbox_cmpl = lpfc_defer_acc_rsp;
53237 -               if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
53238 -                   (!(vport->fc_flag & FC_OFFLINE_MODE)))
53239 -                       ndlp->nlp_flag |= NLP_UNREG_INP;
53240 +       save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
53241 +       if (!save_iocb)
53242 +               goto out;
53244 -               /* Save info from cmd IOCB used in rsp */
53245 -               memcpy(save_iocb, cmdiocb, sizeof(*save_iocb));
53246 +       /* Save info from cmd IOCB to be used in rsp after all mbox cmds complete */
53247 +       memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
53248 +              sizeof(struct lpfc_iocbq));
53250 -               /* Delay sending ACC till unreg RPI completes. */
53251 -               defer_acc = 1;
53252 -       } else if (phba->sli_rev == LPFC_SLI_REV4)
53253 +       /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
53254 +       if (phba->sli_rev == LPFC_SLI_REV4)
53255                 lpfc_unreg_rpi(vport, ndlp);
53257 +       /* Issue REG_LOGIN first, before ACCing the PLOGI, thus we will
53258 +        * always be deferring the ACC.
53259 +        */
53260         rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
53261                             (uint8_t *)sp, login_mbox, ndlp->nlp_rpi);
53262         if (rc)
53263                 goto out;
53265 -       /* ACC PLOGI rsp command needs to execute first,
53266 -        * queue this login_mbox command to be processed later.
53267 -        */
53268         login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
53269 -       /*
53270 -        * login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
53271 -        * command issued in lpfc_cmpl_els_acc().
53272 -        */
53273         login_mbox->vport = vport;
53274 -       spin_lock_irq(&ndlp->lock);
53275 -       ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
53276 -       spin_unlock_irq(&ndlp->lock);
53278         /*
53279          * If there is an outstanding PLOGI issued, abort it before
53280 @@ -660,7 +563,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
53281                  * to register, then unregister the RPI.
53282                  */
53283                 spin_lock_irq(&ndlp->lock);
53284 -               ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
53285 +               ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN |
53286 +                                  NLP_RCV_PLOGI);
53287                 spin_unlock_irq(&ndlp->lock);
53288                 stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
53289                 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
53290 @@ -670,42 +574,39 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
53291                         mempool_free(login_mbox, phba->mbox_mem_pool);
53292                 return 1;
53293         }
53294 -       if (defer_acc) {
53295 -               /* So the order here should be:
53296 -                * SLI3 pt2pt
53297 -                *   Issue CONFIG_LINK mbox
53298 -                *   CONFIG_LINK cmpl
53299 -                * SLI4 tgt
53300 -                *   Issue UNREG RPI mbx
53301 -                *   UNREG RPI cmpl
53302 -                * Issue PLOGI ACC
53303 -                * PLOGI ACC cmpl
53304 -                * Issue REG_LOGIN mbox
53305 -                */
53307 -               /* Save the REG_LOGIN mbox for and rcv IOCB copy later */
53308 -               link_mbox->context3 = login_mbox;
53309 -               login_mbox->context3 = save_iocb;
53310 +       /* So the order here should be:
53311 +        * SLI3 pt2pt
53312 +        *   Issue CONFIG_LINK mbox
53313 +        *   CONFIG_LINK cmpl
53314 +        * SLI4 pt2pt
53315 +        *   Issue REG_VFI mbox
53316 +        *   REG_VFI cmpl
53317 +        * SLI4
53318 +        *   Issue UNREG RPI mbx
53319 +        *   UNREG RPI cmpl
53320 +        * Issue REG_RPI mbox
53321 +        * REG RPI cmpl
53322 +        * Issue PLOGI ACC
53323 +        * PLOGI ACC cmpl
53324 +        */
53325 +       login_mbox->mbox_cmpl = lpfc_defer_plogi_acc;
53326 +       login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
53327 +       login_mbox->context3 = save_iocb; /* For PLOGI ACC */
53329 -               /* Start the ball rolling by issuing CONFIG_LINK here */
53330 -               rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
53331 -               if (rc == MBX_NOT_FINISHED)
53332 -                       goto out;
53333 -               return 1;
53334 -       }
53335 +       spin_lock_irq(&ndlp->lock);
53336 +       ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
53337 +       spin_unlock_irq(&ndlp->lock);
53339 +       /* Start the ball rolling by issuing REG_LOGIN here */
53340 +       rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT);
53341 +       if (rc == MBX_NOT_FINISHED)
53342 +               goto out;
53343 +       lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
53345 -       rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox);
53346 -       if (rc)
53347 -               mempool_free(login_mbox, phba->mbox_mem_pool);
53348         return 1;
53349  out:
53350 -       if (defer_acc)
53351 -               lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
53352 -                               "4577 discovery failure: %p %p %p\n",
53353 -                               save_iocb, link_mbox, login_mbox);
53354         kfree(save_iocb);
53355 -       if (link_mbox)
53356 -               mempool_free(link_mbox, phba->mbox_mem_pool);
53357         if (login_mbox)
53358                 mempool_free(login_mbox, phba->mbox_mem_pool);
53360 @@ -913,9 +814,14 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
53361                 }
53362         } else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
53363                 ((ndlp->nlp_type & NLP_FCP_TARGET) ||
53364 -               !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
53365 +               (ndlp->nlp_type & NLP_NVME_TARGET) ||
53366 +               (vport->fc_flag & FC_PT2PT))) ||
53367                 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
53368 -               /* Only try to re-login if this is NOT a Fabric Node */
53369 +               /* Only try to re-login if this is NOT a Fabric Node
53370 +                * AND the remote NPORT is a FCP/NVME Target or we
53371 +                * are in pt2pt mode. NLP_STE_ADISC_ISSUE is a special
53372 +                * case for LOGO as a response to ADISC behavior.
53373 +                */
53374                 mod_timer(&ndlp->nlp_delayfunc,
53375                           jiffies + msecs_to_jiffies(1000 * 1));
53376                 spin_lock_irq(&ndlp->lock);
53377 @@ -1985,8 +1891,6 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
53378                 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
53380                 lpfc_issue_els_logo(vport, ndlp, 0);
53381 -               ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
53382 -               lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
53383                 return ndlp->nlp_state;
53384         }
53386 @@ -2633,12 +2537,10 @@ static uint32_t
53387  lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
53388                           void *arg, uint32_t evt)
53390 -       struct lpfc_hba  *phba = vport->phba;
53391         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
53393         /* flush the target */
53394 -       lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
53395 -                           ndlp->nlp_sid, 0, LPFC_CTX_TGT);
53396 +       lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
53398         /* Treat like rcv logo */
53399         lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
53400 diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
53401 index bb2a4a0d1295..a3fd959f7431 100644
53402 --- a/drivers/scsi/lpfc/lpfc_nvmet.c
53403 +++ b/drivers/scsi/lpfc/lpfc_nvmet.c
53404 @@ -3304,7 +3304,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
53405         bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
53407         /* Word 10 */
53408 -       bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
53409         bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
53410         bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
53411                LPFC_WQE_LENLOC_WORD12);
53412 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
53413 index a4d697373c71..fab9ea6fe965 100644
53414 --- a/drivers/scsi/lpfc/lpfc_scsi.c
53415 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
53416 @@ -5815,7 +5815,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
53417                                         tgt_id, lun_id, context);
53418         later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
53419         while (time_after(later, jiffies) && cnt) {
53420 -               schedule_timeout_uninterruptible(msecs_to_jiffies(20));
53421 +               schedule_msec_hrtimeout_uninterruptible((20));
53422                 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
53423         }
53424         if (cnt) {
53425 diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
53426 index fa1a714a78f0..920cf329268b 100644
53427 --- a/drivers/scsi/lpfc/lpfc_sli.c
53428 +++ b/drivers/scsi/lpfc/lpfc_sli.c
53429 @@ -5683,12 +5683,10 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
53430                         phba->sli4_hba.lnk_info.lnk_no,
53431                         phba->BIOSVersion);
53432  out_free_mboxq:
53433 -       if (rc != MBX_TIMEOUT) {
53434 -               if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
53435 -                       lpfc_sli4_mbox_cmd_free(phba, mboxq);
53436 -               else
53437 -                       mempool_free(mboxq, phba->mbox_mem_pool);
53438 -       }
53439 +       if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
53440 +               lpfc_sli4_mbox_cmd_free(phba, mboxq);
53441 +       else
53442 +               mempool_free(mboxq, phba->mbox_mem_pool);
53443         return rc;
53446 @@ -5789,12 +5787,10 @@ lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
53447         }
53449  out_free_mboxq:
53450 -       if (rc != MBX_TIMEOUT) {
53451 -               if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
53452 -                       lpfc_sli4_mbox_cmd_free(phba, mboxq);
53453 -               else
53454 -                       mempool_free(mboxq, phba->mbox_mem_pool);
53455 -       }
53456 +       if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
53457 +               lpfc_sli4_mbox_cmd_free(phba, mboxq);
53458 +       else
53459 +               mempool_free(mboxq, phba->mbox_mem_pool);
53460         return rc;
53461  }
53463 @@ -11647,7 +11643,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
53464         icmd = &cmdiocb->iocb;
53465         if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
53466             icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
53467 -           (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
53468 +           cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED)
53469                 return IOCB_ABORTING;
53471         if (!pring) {
53472 @@ -11811,13 +11807,20 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
53473                            lpfc_ctx_cmd ctx_cmd)
53474  {
53475         struct lpfc_io_buf *lpfc_cmd;
53476 +       IOCB_t *icmd = NULL;
53477         int rc = 1;
53479         if (!iocbq || iocbq->vport != vport)
53480                 return rc;
53482 -       if (!(iocbq->iocb_flag &  LPFC_IO_FCP) ||
53483 -           !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
53484 +       if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
53485 +           !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) ||
53486 +             iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
53487 +               return rc;
53489 +       icmd = &iocbq->iocb;
53490 +       if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
53491 +           icmd->ulpCommand == CMD_CLOSE_XRI_CN)
53492                 return rc;
53494         lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
53495 @@ -11945,7 +11948,6 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
53496  /**
53497   * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
53498   * @vport: Pointer to virtual port.
53499 - * @pring: Pointer to driver SLI ring object.
53500   * @tgt_id: SCSI ID of the target.
53501   * @lun_id: LUN ID of the scsi device.
53502   * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
53503 @@ -11960,18 +11962,22 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
53504   * FCP iocbs associated with SCSI target specified by tgt_id parameter.
53505   * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
53506   * FCP iocbs associated with virtual port.
53507 + * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING]; for SLI4,
53508 + * lpfc_sli4_calc_ring is used.
53509   * This function returns number of iocbs it failed to abort.
53510   * This function is called with no locks held.
53511   **/
53512  int
53513 -lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
53514 -                   uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
53515 +lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
53516 +                   lpfc_ctx_cmd abort_cmd)
53517  {
53518         struct lpfc_hba *phba = vport->phba;
53519 +       struct lpfc_sli_ring *pring = NULL;
53520         struct lpfc_iocbq *iocbq;
53521         int errcnt = 0, ret_val = 0;
53522         unsigned long iflags;
53523         int i;
53524 +       void *fcp_cmpl = NULL;
53526         /* all I/Os are in process of being flushed */
53527         if (phba->hba_flag & HBA_IOQ_FLUSH)
53528 @@ -11985,8 +11991,15 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
53529                         continue;
53531                 spin_lock_irqsave(&phba->hbalock, iflags);
53532 +               if (phba->sli_rev == LPFC_SLI_REV3) {
53533 +                       pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
53534 +                       fcp_cmpl = lpfc_sli_abort_fcp_cmpl;
53535 +               } else if (phba->sli_rev == LPFC_SLI_REV4) {
53536 +                       pring = lpfc_sli4_calc_ring(phba, iocbq);
53537 +                       fcp_cmpl = lpfc_sli4_abort_fcp_cmpl;
53538 +               }
53539                 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
53540 -                                                    lpfc_sli_abort_fcp_cmpl);
53541 +                                                    fcp_cmpl);
53542                 spin_unlock_irqrestore(&phba->hbalock, iflags);
53543                 if (ret_val != IOCB_SUCCESS)
53544                         errcnt++;
53545 @@ -17072,8 +17085,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
53546                                 "2509 RQ_DESTROY mailbox failed with "
53547                                 "status x%x add_status x%x, mbx status x%x\n",
53548                                 shdr_status, shdr_add_status, rc);
53549 -               if (rc != MBX_TIMEOUT)
53550 -                       mempool_free(mbox, hrq->phba->mbox_mem_pool);
53551 +               mempool_free(mbox, hrq->phba->mbox_mem_pool);
53552                 return -ENXIO;
53553         }
53554         bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
53555 @@ -17170,7 +17182,9 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
53556         shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
53557         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
53558         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
53559 -       if (rc != MBX_TIMEOUT)
53560 +       if (!phba->sli4_hba.intr_enable)
53561 +               mempool_free(mbox, phba->mbox_mem_pool);
53562 +       else if (rc != MBX_TIMEOUT)
53563                 mempool_free(mbox, phba->mbox_mem_pool);
53564         if (shdr_status || shdr_add_status || rc) {
53565                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
53566 @@ -17367,7 +17381,9 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
53567         shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
53568         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
53569         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
53570 -       if (rc != MBX_TIMEOUT)
53571 +       if (!phba->sli4_hba.intr_enable)
53572 +               lpfc_sli4_mbox_cmd_free(phba, mbox);
53573 +       else if (rc != MBX_TIMEOUT)
53574                 lpfc_sli4_mbox_cmd_free(phba, mbox);
53575         if (shdr_status || shdr_add_status || rc) {
53576                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
53577 @@ -17480,7 +17496,9 @@ lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
53578         shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
53579         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
53580         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
53581 -       if (rc != MBX_TIMEOUT)
53582 +       if (!phba->sli4_hba.intr_enable)
53583 +               lpfc_sli4_mbox_cmd_free(phba, mbox);
53584 +       else if (rc != MBX_TIMEOUT)
53585                 lpfc_sli4_mbox_cmd_free(phba, mbox);
53586         if (shdr_status || shdr_add_status || rc) {
53587                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
53588 @@ -18064,7 +18082,6 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
53589         if (cmd_iocbq) {
53590                 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
53591                 lpfc_nlp_put(ndlp);
53592 -               lpfc_nlp_not_used(ndlp);
53593                 lpfc_sli_release_iocbq(phba, cmd_iocbq);
53594         }
53596 @@ -18831,8 +18848,7 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
53597         shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
53598         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
53599         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
53600 -       if (rc != MBX_TIMEOUT)
53601 -               mempool_free(mboxq, phba->mbox_mem_pool);
53602 +       mempool_free(mboxq, phba->mbox_mem_pool);
53603         if (shdr_status || shdr_add_status || rc) {
53604                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
53605                                 "2514 POST_RPI_HDR mailbox failed with "
53606 @@ -20076,7 +20092,9 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
53607                         break;
53608                 }
53609         }
53610 -       if (rc != MBX_TIMEOUT)
53611 +       if (!phba->sli4_hba.intr_enable)
53612 +               mempool_free(mbox, phba->mbox_mem_pool);
53613 +       else if (rc != MBX_TIMEOUT)
53614                 mempool_free(mbox, phba->mbox_mem_pool);
53615         if (shdr_status || shdr_add_status || rc) {
53616                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
53617 diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
53618 index ac0eef975f17..b6beacfd0f62 100644
53619 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c
53620 +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
53621 @@ -7252,6 +7252,8 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
53623         ioc_info(ioc, "sending diag reset !!\n");
53625 +       pci_cfg_access_lock(ioc->pdev);
53627         drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
53629         count = 0;
53630 @@ -7342,10 +7344,12 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
53631                 goto out;
53632         }
53634 +       pci_cfg_access_unlock(ioc->pdev);
53635         ioc_info(ioc, "diag reset: SUCCESS\n");
53636         return 0;
53638   out:
53639 +       pci_cfg_access_unlock(ioc->pdev);
53640         ioc_err(ioc, "diag reset: FAILED\n");
53641         return -EFAULT;
53642  }
53643 diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
53644 index 44f9a05db94e..2ec11be62a82 100644
53645 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
53646 +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
53647 @@ -2507,7 +2507,7 @@ _ctl_addnl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
53648                     __func__, karg.unique_id);
53649                 return -EPERM;
53650         }
53651 -       memset(&karg.buffer_rel_condition, 0, sizeof(struct htb_rel_query));
53652 +       memset(&karg.rel_query, 0, sizeof(karg.rel_query));
53653         if ((ioc->diag_buffer_status[buffer_type] &
53654             MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
53655                 ioc_info(ioc, "%s: buffer_type(0x%02x) is not registered\n",
53656 @@ -2520,8 +2520,7 @@ _ctl_addnl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
53657                     __func__, buffer_type);
53658                 return -EPERM;
53659         }
53660 -       memcpy(&karg.buffer_rel_condition, &ioc->htb_rel,
53661 -           sizeof(struct  htb_rel_query));
53662 +       memcpy(&karg.rel_query, &ioc->htb_rel, sizeof(karg.rel_query));
53663  out:
53664         if (copy_to_user(arg, &karg, sizeof(struct mpt3_addnl_diag_query))) {
53665                 ioc_err(ioc, "%s: unable to write mpt3_addnl_diag_query data @ %p\n",
53666 diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
53667 index d2ccdafb8df2..8f6ffb40261c 100644
53668 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
53669 +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
53670 @@ -50,6 +50,8 @@
53671  #include <linux/miscdevice.h>
53672  #endif
53674 +#include "mpt3sas_base.h"
53676  #ifndef MPT2SAS_MINOR
53677  #define MPT2SAS_MINOR          (MPT_MINOR + 1)
53678  #endif
53679 @@ -436,19 +438,13 @@ struct mpt3_diag_read_buffer {
53680   * struct mpt3_addnl_diag_query - diagnostic buffer release reason
53681   * @hdr - generic header
53682   * @unique_id - unique id associated with this buffer.
53683 - * @buffer_rel_condition - Release condition ioctl/sysfs/reset
53684 - * @reserved1
53685 - * @trigger_type - Master/Event/scsi/MPI
53686 - * @trigger_info_dwords - Data Correspondig to trigger type
53687 + * @rel_query - release query.
53688   * @reserved2
53689   */
53690  struct mpt3_addnl_diag_query {
53691         struct mpt3_ioctl_header hdr;
53692         uint32_t unique_id;
53693 -       uint16_t buffer_rel_condition;
53694 -       uint16_t reserved1;
53695 -       uint32_t trigger_type;
53696 -       uint32_t trigger_info_dwords[2];
53697 +       struct htb_rel_query rel_query;
53698         uint32_t reserved2[2];
53699  };
53701 diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
53702 index 6aa6de729187..ae1973878cc7 100644
53703 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
53704 +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
53705 @@ -6483,6 +6483,9 @@ _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
53706                 if (!vphy)
53707                         return NULL;
53709 +               if (!port->vphys_mask)
53710 +                       INIT_LIST_HEAD(&port->vphys_list);
53712                 /*
53713                  * Enable bit corresponding to HBA phy number on its
53714                  * parent hba_port object's vphys_mask field.
53715 @@ -6490,7 +6493,6 @@ _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
53716                 port->vphys_mask |= (1 << phy_num);
53717                 vphy->phy_mask |= (1 << phy_num);
53719 -               INIT_LIST_HEAD(&port->vphys_list);
53720                 list_add_tail(&vphy->list, &port->vphys_list);
53722                 ioc_info(ioc,
53723 diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
53724 index 31e5455d280c..1b1a57f46989 100644
53725 --- a/drivers/scsi/pm8001/pm8001_hwi.c
53726 +++ b/drivers/scsi/pm8001/pm8001_hwi.c
53727 @@ -643,7 +643,7 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
53728   */
53729  static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
53730  {
53731 -       u8 i = 0;
53732 +       u32 i = 0;
53733         u16 deviceid;
53734         pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
53735         /* 8081 controllers need BAR shift to access MPI space
53736 diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
53737 index 84315560e8e1..c6b0834e3806 100644
53738 --- a/drivers/scsi/pm8001/pm80xx_hwi.c
53739 +++ b/drivers/scsi/pm8001/pm80xx_hwi.c
53740 @@ -1502,9 +1502,9 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
53742         /* wait until Inbound DoorBell Clear Register toggled */
53743         if (IS_SPCV_12G(pm8001_ha->pdev)) {
53744 -               max_wait_count = 4 * 1000 * 1000;/* 4 sec */
53745 +               max_wait_count = 30 * 1000 * 1000; /* 30 sec */
53746         } else {
53747 -               max_wait_count = 2 * 1000 * 1000;/* 2 sec */
53748 +               max_wait_count = 15 * 1000 * 1000; /* 15 sec */
53749         }
53750         do {
53751                 udelay(1);
53752 diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
53753 index cec27f2ef70d..e5076f09d5ed 100644
53754 --- a/drivers/scsi/qedf/qedf_main.c
53755 +++ b/drivers/scsi/qedf/qedf_main.c
53756 @@ -536,7 +536,9 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf,
53757         if (linkmode_intersects(link->supported_caps, sup_caps))
53758                 lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
53760 -       fc_host_supported_speeds(lport->host) = lport->link_supported_speeds;
53761 +       if (lport->host && lport->host->shost_data)
53762 +               fc_host_supported_speeds(lport->host) =
53763 +                       lport->link_supported_speeds;
53764  }
53766  static void qedf_bw_update(void *dev)
53767 diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
53768 index 63391c9be05d..3aa9869f6fae 100644
53769 --- a/drivers/scsi/qla2xxx/qla_attr.c
53770 +++ b/drivers/scsi/qla2xxx/qla_attr.c
53771 @@ -2864,6 +2864,8 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost)
53772         vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
53774         if (IS_FWI2_CAPABLE(ha)) {
53775 +               int rval;
53777                 stats = dma_alloc_coherent(&ha->pdev->dev,
53778                     sizeof(*stats), &stats_dma, GFP_KERNEL);
53779                 if (!stats) {
53780 @@ -2873,7 +2875,11 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost)
53781                 }
53783                 /* reset firmware statistics */
53784 -               qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
53785 +               rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
53786 +               if (rval != QLA_SUCCESS)
53787 +                       ql_log(ql_log_warn, vha, 0x70de,
53788 +                              "Resetting ISP statistics failed: rval = %d\n",
53789 +                              rval);
53791                 dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
53792                     stats, stats_dma);
53793 diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
53794 index bee8cf9f8123..aef2f7cc89d3 100644
53795 --- a/drivers/scsi/qla2xxx/qla_bsg.c
53796 +++ b/drivers/scsi/qla2xxx/qla_bsg.c
53797 @@ -25,10 +25,11 @@ void qla2x00_bsg_job_done(srb_t *sp, int res)
53798         struct bsg_job *bsg_job = sp->u.bsg_job;
53799         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
53801 +       sp->free(sp);
53803         bsg_reply->result = res;
53804         bsg_job_done(bsg_job, bsg_reply->result,
53805                        bsg_reply->reply_payload_rcv_len);
53806 -       sp->free(sp);
53807  }
53809  void qla2x00_bsg_sp_free(srb_t *sp)
53810 @@ -2583,6 +2584,10 @@ qla2x00_get_host_stats(struct bsg_job *bsg_job)
53811         }
53813         data = kzalloc(response_len, GFP_KERNEL);
53814 +       if (!data) {
53815 +               kfree(req_data);
53816 +               return -ENOMEM;
53817 +       }
53819         ret = qla2xxx_get_ini_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
53820                                     data, response_len);
53821 diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
53822 index f01f07116bd3..8cb0574cfa91 100644
53823 --- a/drivers/scsi/qla2xxx/qla_init.c
53824 +++ b/drivers/scsi/qla2xxx/qla_init.c
53825 @@ -1194,6 +1194,9 @@ static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
53826  {
53827         struct qla_work_evt *e;
53829 +       if (vha->host->active_mode == MODE_TARGET)
53830 +               return QLA_FUNCTION_FAILED;
53832         e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
53833         if (!e)
53834                 return QLA_FUNCTION_FAILED;
53835 diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
53836 index 5e188375c871..af4831c9edf9 100644
53837 --- a/drivers/scsi/qla2xxx/qla_isr.c
53838 +++ b/drivers/scsi/qla2xxx/qla_isr.c
53839 @@ -4005,11 +4005,11 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
53840         if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
53841                 /* user wants to control IRQ setting for target mode */
53842                 ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
53843 -                   min((u16)ha->msix_count, (u16)num_online_cpus()),
53844 +                   min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
53845                     PCI_IRQ_MSIX);
53846         } else
53847                 ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
53848 -                   min((u16)ha->msix_count, (u16)num_online_cpus()),
53849 +                   min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
53850                     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
53851                     &desc);
53853 diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
53854 index 0677295957bc..615e44af1ca6 100644
53855 --- a/drivers/scsi/qla2xxx/qla_nx.c
53856 +++ b/drivers/scsi/qla2xxx/qla_nx.c
53857 @@ -1063,7 +1063,8 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
53858                 return ret;
53859         }
53861 -       if (qla82xx_flash_set_write_enable(ha))
53862 +       ret = qla82xx_flash_set_write_enable(ha);
53863 +       if (ret < 0)
53864                 goto done_write;
53866         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
53867 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
53868 index 074392560f3d..0e07b98dfae8 100644
53869 --- a/drivers/scsi/qla2xxx/qla_os.c
53870 +++ b/drivers/scsi/qla2xxx/qla_os.c
53871 @@ -1013,8 +1013,6 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
53872         if (rval != QLA_SUCCESS) {
53873                 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
53874                     "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
53875 -               if (rval == QLA_INTERFACE_ERROR)
53876 -                       goto qc24_free_sp_fail_command;
53877                 goto qc24_host_busy_free_sp;
53878         }
53880 @@ -1026,11 +1024,6 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
53881  qc24_target_busy:
53882         return SCSI_MLQUEUE_TARGET_BUSY;
53884 -qc24_free_sp_fail_command:
53885 -       sp->free(sp);
53886 -       CMD_SP(cmd) = NULL;
53887 -       qla2xxx_rel_qpair_sp(sp->qpair, sp);
53889  qc24_fail_command:
53890         cmd->scsi_done(cmd);
53892 diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
53893 index a1dacb6e993e..c30f6047410f 100644
53894 --- a/drivers/scsi/smartpqi/smartpqi_init.c
53895 +++ b/drivers/scsi/smartpqi/smartpqi_init.c
53896 @@ -5488,6 +5488,8 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
53898                                 list_del(&io_request->request_list_entry);
53899                                 set_host_byte(scmd, DID_RESET);
53900 +                               pqi_free_io_request(io_request);
53901 +                               scsi_dma_unmap(scmd);
53902                                 pqi_scsi_done(scmd);
53903                         }
53905 @@ -5524,6 +5526,8 @@ static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
53907                                 list_del(&io_request->request_list_entry);
53908                                 set_host_byte(scmd, DID_RESET);
53909 +                               pqi_free_io_request(io_request);
53910 +                               scsi_dma_unmap(scmd);
53911                                 pqi_scsi_done(scmd);
53912                         }
53914 @@ -6598,6 +6602,7 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
53915         shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
53916         shost->unique_id = shost->irq;
53917         shost->nr_hw_queues = ctrl_info->num_queue_groups;
53918 +       shost->host_tagset = 1;
53919         shost->hostdata[0] = (unsigned long)ctrl_info;
53921         rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
53922 @@ -8216,6 +8221,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
53923                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
53924                                0x152d, 0x8a37)
53925         },
53926 +       {
53927 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
53928 +                              0x193d, 0x8460)
53929 +       },
53930         {
53931                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
53932                                0x193d, 0x1104)
53933 @@ -8288,6 +8297,22 @@ static const struct pci_device_id pqi_pci_id_table[] = {
53934                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
53935                                0x1bd4, 0x004f)
53936         },
53937 +       {
53938 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
53939 +                              0x1bd4, 0x0051)
53940 +       },
53941 +       {
53942 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
53943 +                              0x1bd4, 0x0052)
53944 +       },
53945 +       {
53946 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
53947 +                              0x1bd4, 0x0053)
53948 +       },
53949 +       {
53950 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
53951 +                              0x1bd4, 0x0054)
53952 +       },
53953         {
53954                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
53955                                0x19e5, 0xd227)
53956 @@ -8448,6 +8473,122 @@ static const struct pci_device_id pqi_pci_id_table[] = {
53957                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
53958                                PCI_VENDOR_ID_ADAPTEC2, 0x1380)
53959         },
53960 +       {
53961 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
53962 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1400)
53963 +       },
53964 +       {
53965 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
53966 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1402)
53967 +       },
53968 +       {
53969 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
53970 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1410)
53971 +       },
53972 +       {
53973 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
53974 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1411)
53975 +       },
53976 +       {
53977 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
53978 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1412)
53979 +       },
53980 +       {
53981 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
53982 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1420)
53983 +       },
53984 +       {
53985 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
53986 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1430)
53987 +       },
53988 +       {
53989 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
53990 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1440)
53991 +       },
53992 +       {
53993 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
53994 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1441)
53995 +       },
53996 +       {
53997 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
53998 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1450)
53999 +       },
54000 +       {
54001 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54002 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1452)
54003 +       },
54004 +       {
54005 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54006 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1460)
54007 +       },
54008 +       {
54009 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54010 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1461)
54011 +       },
54012 +       {
54013 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54014 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1462)
54015 +       },
54016 +       {
54017 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54018 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1470)
54019 +       },
54020 +       {
54021 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54022 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1471)
54023 +       },
54024 +       {
54025 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54026 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1472)
54027 +       },
54028 +       {
54029 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54030 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1480)
54031 +       },
54032 +       {
54033 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54034 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1490)
54035 +       },
54036 +       {
54037 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54038 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1491)
54039 +       },
54040 +       {
54041 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54042 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
54043 +       },
54044 +       {
54045 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54046 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
54047 +       },
54048 +       {
54049 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54050 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
54051 +       },
54052 +       {
54053 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54054 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
54055 +       },
54056 +       {
54057 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54058 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
54059 +       },
54060 +       {
54061 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54062 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
54063 +       },
54064 +       {
54065 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54066 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
54067 +       },
54068 +       {
54069 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54070 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
54071 +       },
54072 +       {
54073 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54074 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
54075 +       },
54076         {
54077                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54078                                PCI_VENDOR_ID_ADVANTECH, 0x8312)
54079 @@ -8512,6 +8653,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
54080                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54081                                PCI_VENDOR_ID_HP, 0x1001)
54082         },
54083 +       {
54084 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54085 +                              PCI_VENDOR_ID_HP, 0x1002)
54086 +       },
54087         {
54088                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54089                                PCI_VENDOR_ID_HP, 0x1100)
54090 @@ -8520,6 +8665,22 @@ static const struct pci_device_id pqi_pci_id_table[] = {
54091                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54092                                PCI_VENDOR_ID_HP, 0x1101)
54093         },
54094 +       {
54095 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54096 +                              0x1590, 0x0294)
54097 +       },
54098 +       {
54099 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54100 +                              0x1590, 0x02db)
54101 +       },
54102 +       {
54103 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54104 +                              0x1590, 0x02dc)
54105 +       },
54106 +       {
54107 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54108 +                              0x1590, 0x032e)
54109 +       },
54110         {
54111                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54112                                0x1d8d, 0x0800)
54113 diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
54114 index 9e2e196bc202..97c6f81b1d2a 100644
54115 --- a/drivers/scsi/sni_53c710.c
54116 +++ b/drivers/scsi/sni_53c710.c
54117 @@ -58,6 +58,7 @@ static int snirm710_probe(struct platform_device *dev)
54118         struct NCR_700_Host_Parameters *hostdata;
54119         struct Scsi_Host *host;
54120         struct  resource *res;
54121 +       int rc;
54123         res = platform_get_resource(dev, IORESOURCE_MEM, 0);
54124         if (!res)
54125 @@ -83,7 +84,9 @@ static int snirm710_probe(struct platform_device *dev)
54126                 goto out_kfree;
54127         host->this_id = 7;
54128         host->base = base;
54129 -       host->irq = platform_get_irq(dev, 0);
54130 +       host->irq = rc = platform_get_irq(dev, 0);
54131 +       if (rc < 0)
54132 +               goto out_put_host;
54133         if(request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "snirm710", host)) {
54134                 printk(KERN_ERR "snirm710: request_irq failed!\n");
54135                 goto out_put_host;
54136 diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
54137 index 6dd0ff188bb4..aedf0b78f622 100644
54138 --- a/drivers/scsi/snic/snic_scsi.c
54139 +++ b/drivers/scsi/snic/snic_scsi.c
54140 @@ -2349,7 +2349,7 @@ snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc)
54142         /* Wait for all the IOs that are entered in Qcmd */
54143         while (atomic_read(&snic->ios_inflight))
54144 -               schedule_timeout(msecs_to_jiffies(1));
54145 +               schedule_msec_hrtimeout((1));
54147         ret = snic_issue_hba_reset(snic, sc);
54148         if (ret) {
54149 diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
54150 index 7de82f2c9757..d3489ac7ab28 100644
54151 --- a/drivers/scsi/sun3x_esp.c
54152 +++ b/drivers/scsi/sun3x_esp.c
54153 @@ -206,7 +206,9 @@ static int esp_sun3x_probe(struct platform_device *dev)
54154         if (!esp->command_block)
54155                 goto fail_unmap_regs_dma;
54157 -       host->irq = platform_get_irq(dev, 0);
54158 +       host->irq = err = platform_get_irq(dev, 0);
54159 +       if (err < 0)
54160 +               goto fail_unmap_command_block;
54161         err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
54162                           "SUN3X ESP", esp);
54163         if (err < 0)
54164 diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
54165 index 0aa58131e791..d0626773eb38 100644
54166 --- a/drivers/scsi/ufs/ufs-hisi.c
54167 +++ b/drivers/scsi/ufs/ufs-hisi.c
54168 @@ -467,21 +467,24 @@ static int ufs_hisi_init_common(struct ufs_hba *hba)
54169         host->hba = hba;
54170         ufshcd_set_variant(hba, host);
54172 -       host->rst  = devm_reset_control_get(dev, "rst");
54173 +       host->rst = devm_reset_control_get(dev, "rst");
54174         if (IS_ERR(host->rst)) {
54175                 dev_err(dev, "%s: failed to get reset control\n", __func__);
54176 -               return PTR_ERR(host->rst);
54177 +               err = PTR_ERR(host->rst);
54178 +               goto error;
54179         }
54181         ufs_hisi_set_pm_lvl(hba);
54183         err = ufs_hisi_get_resource(host);
54184 -       if (err) {
54185 -               ufshcd_set_variant(hba, NULL);
54186 -               return err;
54187 -       }
54188 +       if (err)
54189 +               goto error;
54191         return 0;
54193 +error:
54194 +       ufshcd_set_variant(hba, NULL);
54195 +       return err;
54196  }
54198  static int ufs_hi3660_init(struct ufs_hba *hba)
54199 diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
54200 index 1a69949a4ea1..b56d9b4e5f03 100644
54201 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c
54202 +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
54203 @@ -377,7 +377,7 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
54205         irq = platform_get_irq(pdev, 0);
54206         if (irq < 0) {
54207 -               err = -ENODEV;
54208 +               err = irq;
54209                 goto out;
54210         }
54212 diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
54213 index d3d05e997c13..e1e510882ff4 100644
54214 --- a/drivers/scsi/ufs/ufshcd.c
54215 +++ b/drivers/scsi/ufs/ufshcd.c
54216 @@ -2849,7 +2849,7 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
54217   * ufshcd_exec_dev_cmd - API for sending device management requests
54218   * @hba: UFS hba
54219   * @cmd_type: specifies the type (NOP, Query...)
54220 - * @timeout: time in seconds
54221 + * @timeout: timeout in milliseconds
54222   *
54223   * NOTE: Since there is only one available tag for device management commands,
54224   * it is expected you hold the hba->dev_cmd.lock mutex.
54225 @@ -2879,6 +2879,9 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
54226         }
54227         tag = req->tag;
54228         WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
54229 +       /* Set the timeout such that the SCSI error handler is not activated. */
54230 +       req->timeout = msecs_to_jiffies(2 * timeout);
54231 +       blk_mq_start_request(req);
54233         init_completion(&wait);
54234         lrbp = &hba->lrb[tag];
54235 @@ -8599,7 +8602,7 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
54236         } else if (!ufshcd_is_ufs_dev_active(hba)) {
54237                 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
54238                 vcc_off = true;
54239 -               if (!ufshcd_is_link_active(hba)) {
54240 +               if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
54241                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
54242                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
54243                 }
54244 @@ -8621,7 +8624,7 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
54245             !hba->dev_info.is_lu_power_on_wp) {
54246                 ret = ufshcd_setup_vreg(hba, true);
54247         } else if (!ufshcd_is_ufs_dev_active(hba)) {
54248 -               if (!ret && !ufshcd_is_link_active(hba)) {
54249 +               if (!ufshcd_is_link_active(hba)) {
54250                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
54251                         if (ret)
54252                                 goto vcc_disable;
54253 @@ -8978,10 +8981,13 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
54254         if (!hba->is_powered)
54255                 return 0;
54257 +       cancel_delayed_work_sync(&hba->rpm_dev_flush_recheck_work);
54259         if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
54260              hba->curr_dev_pwr_mode) &&
54261             (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
54262              hba->uic_link_state) &&
54263 +            pm_runtime_suspended(hba->dev) &&
54264              !hba->dev_info.b_rpm_dev_flush_capable)
54265                 goto out;
54267 diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
54268 index 20acac6342ef..5828f94b8a7d 100644
54269 --- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
54270 +++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
54271 @@ -95,8 +95,10 @@ static ssize_t snoop_file_read(struct file *file, char __user *buffer,
54272                         return -EINTR;
54273         }
54274         ret = kfifo_to_user(&chan->fifo, buffer, count, &copied);
54275 +       if (ret)
54276 +               return ret;
54278 -       return ret ? ret : copied;
54279 +       return copied;
54280  }
54282  static __poll_t snoop_file_poll(struct file *file,
54283 diff --git a/drivers/soc/mediatek/mt8173-pm-domains.h b/drivers/soc/mediatek/mt8173-pm-domains.h
54284 index 3e8ee5dabb43..654c717e5467 100644
54285 --- a/drivers/soc/mediatek/mt8173-pm-domains.h
54286 +++ b/drivers/soc/mediatek/mt8173-pm-domains.h
54287 @@ -12,24 +12,28 @@
54289  static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
54290         [MT8173_POWER_DOMAIN_VDEC] = {
54291 +               .name = "vdec",
54292                 .sta_mask = PWR_STATUS_VDEC,
54293                 .ctl_offs = SPM_VDE_PWR_CON,
54294                 .sram_pdn_bits = GENMASK(11, 8),
54295                 .sram_pdn_ack_bits = GENMASK(12, 12),
54296         },
54297         [MT8173_POWER_DOMAIN_VENC] = {
54298 +               .name = "venc",
54299                 .sta_mask = PWR_STATUS_VENC,
54300                 .ctl_offs = SPM_VEN_PWR_CON,
54301                 .sram_pdn_bits = GENMASK(11, 8),
54302                 .sram_pdn_ack_bits = GENMASK(15, 12),
54303         },
54304         [MT8173_POWER_DOMAIN_ISP] = {
54305 +               .name = "isp",
54306                 .sta_mask = PWR_STATUS_ISP,
54307                 .ctl_offs = SPM_ISP_PWR_CON,
54308                 .sram_pdn_bits = GENMASK(11, 8),
54309                 .sram_pdn_ack_bits = GENMASK(13, 12),
54310         },
54311         [MT8173_POWER_DOMAIN_MM] = {
54312 +               .name = "mm",
54313                 .sta_mask = PWR_STATUS_DISP,
54314                 .ctl_offs = SPM_DIS_PWR_CON,
54315                 .sram_pdn_bits = GENMASK(11, 8),
54316 @@ -40,18 +44,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
54317                 },
54318         },
54319         [MT8173_POWER_DOMAIN_VENC_LT] = {
54320 +               .name = "venc_lt",
54321                 .sta_mask = PWR_STATUS_VENC_LT,
54322                 .ctl_offs = SPM_VEN2_PWR_CON,
54323                 .sram_pdn_bits = GENMASK(11, 8),
54324                 .sram_pdn_ack_bits = GENMASK(15, 12),
54325         },
54326         [MT8173_POWER_DOMAIN_AUDIO] = {
54327 +               .name = "audio",
54328                 .sta_mask = PWR_STATUS_AUDIO,
54329                 .ctl_offs = SPM_AUDIO_PWR_CON,
54330                 .sram_pdn_bits = GENMASK(11, 8),
54331                 .sram_pdn_ack_bits = GENMASK(15, 12),
54332         },
54333         [MT8173_POWER_DOMAIN_USB] = {
54334 +               .name = "usb",
54335                 .sta_mask = PWR_STATUS_USB,
54336                 .ctl_offs = SPM_USB_PWR_CON,
54337                 .sram_pdn_bits = GENMASK(11, 8),
54338 @@ -59,18 +66,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
54339                 .caps = MTK_SCPD_ACTIVE_WAKEUP,
54340         },
54341         [MT8173_POWER_DOMAIN_MFG_ASYNC] = {
54342 +               .name = "mfg_async",
54343                 .sta_mask = PWR_STATUS_MFG_ASYNC,
54344                 .ctl_offs = SPM_MFG_ASYNC_PWR_CON,
54345                 .sram_pdn_bits = GENMASK(11, 8),
54346                 .sram_pdn_ack_bits = 0,
54347         },
54348         [MT8173_POWER_DOMAIN_MFG_2D] = {
54349 +               .name = "mfg_2d",
54350                 .sta_mask = PWR_STATUS_MFG_2D,
54351                 .ctl_offs = SPM_MFG_2D_PWR_CON,
54352                 .sram_pdn_bits = GENMASK(11, 8),
54353                 .sram_pdn_ack_bits = GENMASK(13, 12),
54354         },
54355         [MT8173_POWER_DOMAIN_MFG] = {
54356 +               .name = "mfg",
54357                 .sta_mask = PWR_STATUS_MFG,
54358                 .ctl_offs = SPM_MFG_PWR_CON,
54359                 .sram_pdn_bits = GENMASK(13, 8),
54360 diff --git a/drivers/soc/mediatek/mt8183-pm-domains.h b/drivers/soc/mediatek/mt8183-pm-domains.h
54361 index aa5230e6c12f..98a9940d05fb 100644
54362 --- a/drivers/soc/mediatek/mt8183-pm-domains.h
54363 +++ b/drivers/soc/mediatek/mt8183-pm-domains.h
54364 @@ -12,12 +12,14 @@
54366  static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
54367         [MT8183_POWER_DOMAIN_AUDIO] = {
54368 +               .name = "audio",
54369                 .sta_mask = PWR_STATUS_AUDIO,
54370                 .ctl_offs = 0x0314,
54371                 .sram_pdn_bits = GENMASK(11, 8),
54372                 .sram_pdn_ack_bits = GENMASK(15, 12),
54373         },
54374         [MT8183_POWER_DOMAIN_CONN] = {
54375 +               .name = "conn",
54376                 .sta_mask = PWR_STATUS_CONN,
54377                 .ctl_offs = 0x032c,
54378                 .sram_pdn_bits = 0,
54379 @@ -28,12 +30,14 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
54380                 },
54381         },
54382         [MT8183_POWER_DOMAIN_MFG_ASYNC] = {
54383 +               .name = "mfg_async",
54384                 .sta_mask = PWR_STATUS_MFG_ASYNC,
54385                 .ctl_offs = 0x0334,
54386                 .sram_pdn_bits = 0,
54387                 .sram_pdn_ack_bits = 0,
54388         },
54389         [MT8183_POWER_DOMAIN_MFG] = {
54390 +               .name = "mfg",
54391                 .sta_mask = PWR_STATUS_MFG,
54392                 .ctl_offs = 0x0338,
54393                 .sram_pdn_bits = GENMASK(8, 8),
54394 @@ -41,18 +45,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
54395                 .caps = MTK_SCPD_DOMAIN_SUPPLY,
54396         },
54397         [MT8183_POWER_DOMAIN_MFG_CORE0] = {
54398 +               .name = "mfg_core0",
54399                 .sta_mask = BIT(7),
54400                 .ctl_offs = 0x034c,
54401                 .sram_pdn_bits = GENMASK(8, 8),
54402                 .sram_pdn_ack_bits = GENMASK(12, 12),
54403         },
54404         [MT8183_POWER_DOMAIN_MFG_CORE1] = {
54405 +               .name = "mfg_core1",
54406                 .sta_mask = BIT(20),
54407                 .ctl_offs = 0x0310,
54408                 .sram_pdn_bits = GENMASK(8, 8),
54409                 .sram_pdn_ack_bits = GENMASK(12, 12),
54410         },
54411         [MT8183_POWER_DOMAIN_MFG_2D] = {
54412 +               .name = "mfg_2d",
54413                 .sta_mask = PWR_STATUS_MFG_2D,
54414                 .ctl_offs = 0x0348,
54415                 .sram_pdn_bits = GENMASK(8, 8),
54416 @@ -65,6 +72,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
54417                 },
54418         },
54419         [MT8183_POWER_DOMAIN_DISP] = {
54420 +               .name = "disp",
54421                 .sta_mask = PWR_STATUS_DISP,
54422                 .ctl_offs = 0x030c,
54423                 .sram_pdn_bits = GENMASK(8, 8),
54424 @@ -83,6 +91,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
54425                 },
54426         },
54427         [MT8183_POWER_DOMAIN_CAM] = {
54428 +               .name = "cam",
54429                 .sta_mask = BIT(25),
54430                 .ctl_offs = 0x0344,
54431                 .sram_pdn_bits = GENMASK(9, 8),
54432 @@ -105,6 +114,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
54433                 },
54434         },
54435         [MT8183_POWER_DOMAIN_ISP] = {
54436 +               .name = "isp",
54437                 .sta_mask = PWR_STATUS_ISP,
54438                 .ctl_offs = 0x0308,
54439                 .sram_pdn_bits = GENMASK(9, 8),
54440 @@ -127,6 +137,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
54441                 },
54442         },
54443         [MT8183_POWER_DOMAIN_VDEC] = {
54444 +               .name = "vdec",
54445                 .sta_mask = BIT(31),
54446                 .ctl_offs = 0x0300,
54447                 .sram_pdn_bits = GENMASK(8, 8),
54448 @@ -139,6 +150,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
54449                 },
54450         },
54451         [MT8183_POWER_DOMAIN_VENC] = {
54452 +               .name = "venc",
54453                 .sta_mask = PWR_STATUS_VENC,
54454                 .ctl_offs = 0x0304,
54455                 .sram_pdn_bits = GENMASK(11, 8),
54456 @@ -151,6 +163,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
54457                 },
54458         },
54459         [MT8183_POWER_DOMAIN_VPU_TOP] = {
54460 +               .name = "vpu_top",
54461                 .sta_mask = BIT(26),
54462                 .ctl_offs = 0x0324,
54463                 .sram_pdn_bits = GENMASK(8, 8),
54464 @@ -177,6 +190,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
54465                 },
54466         },
54467         [MT8183_POWER_DOMAIN_VPU_CORE0] = {
54468 +               .name = "vpu_core0",
54469                 .sta_mask = BIT(27),
54470                 .ctl_offs = 0x33c,
54471                 .sram_pdn_bits = GENMASK(11, 8),
54472 @@ -194,6 +208,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
54473                 .caps = MTK_SCPD_SRAM_ISO,
54474         },
54475         [MT8183_POWER_DOMAIN_VPU_CORE1] = {
54476 +               .name = "vpu_core1",
54477                 .sta_mask = BIT(28),
54478                 .ctl_offs = 0x0340,
54479                 .sram_pdn_bits = GENMASK(11, 8),
54480 diff --git a/drivers/soc/mediatek/mt8192-pm-domains.h b/drivers/soc/mediatek/mt8192-pm-domains.h
54481 index 0fdf6dc6231f..543dda70de01 100644
54482 --- a/drivers/soc/mediatek/mt8192-pm-domains.h
54483 +++ b/drivers/soc/mediatek/mt8192-pm-domains.h
54484 @@ -12,6 +12,7 @@
54486  static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
54487         [MT8192_POWER_DOMAIN_AUDIO] = {
54488 +               .name = "audio",
54489                 .sta_mask = BIT(21),
54490                 .ctl_offs = 0x0354,
54491                 .sram_pdn_bits = GENMASK(8, 8),
54492 @@ -24,6 +25,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
54493                 },
54494         },
54495         [MT8192_POWER_DOMAIN_CONN] = {
54496 +               .name = "conn",
54497                 .sta_mask = PWR_STATUS_CONN,
54498                 .ctl_offs = 0x0304,
54499                 .sram_pdn_bits = 0,
54500 @@ -45,12 +47,14 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
54501                 .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
54502         },
54503         [MT8192_POWER_DOMAIN_MFG0] = {
54504 +               .name = "mfg0",
54505                 .sta_mask = BIT(2),
54506                 .ctl_offs = 0x0308,
54507                 .sram_pdn_bits = GENMASK(8, 8),
54508                 .sram_pdn_ack_bits = GENMASK(12, 12),
54509         },
54510         [MT8192_POWER_DOMAIN_MFG1] = {
54511 +               .name = "mfg1",
54512                 .sta_mask = BIT(3),
54513                 .ctl_offs = 0x030c,
54514                 .sram_pdn_bits = GENMASK(8, 8),
54515 @@ -75,36 +79,42 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
54516                 },
54517         },
54518         [MT8192_POWER_DOMAIN_MFG2] = {
54519 +               .name = "mfg2",
54520                 .sta_mask = BIT(4),
54521                 .ctl_offs = 0x0310,
54522                 .sram_pdn_bits = GENMASK(8, 8),
54523                 .sram_pdn_ack_bits = GENMASK(12, 12),
54524         },
54525         [MT8192_POWER_DOMAIN_MFG3] = {
54526 +               .name = "mfg3",
54527                 .sta_mask = BIT(5),
54528                 .ctl_offs = 0x0314,
54529                 .sram_pdn_bits = GENMASK(8, 8),
54530                 .sram_pdn_ack_bits = GENMASK(12, 12),
54531         },
54532         [MT8192_POWER_DOMAIN_MFG4] = {
54533 +               .name = "mfg4",
54534                 .sta_mask = BIT(6),
54535                 .ctl_offs = 0x0318,
54536                 .sram_pdn_bits = GENMASK(8, 8),
54537                 .sram_pdn_ack_bits = GENMASK(12, 12),
54538         },
54539         [MT8192_POWER_DOMAIN_MFG5] = {
54540 +               .name = "mfg5",
54541                 .sta_mask = BIT(7),
54542                 .ctl_offs = 0x031c,
54543                 .sram_pdn_bits = GENMASK(8, 8),
54544                 .sram_pdn_ack_bits = GENMASK(12, 12),
54545         },
54546         [MT8192_POWER_DOMAIN_MFG6] = {
54547 +               .name = "mfg6",
54548                 .sta_mask = BIT(8),
54549                 .ctl_offs = 0x0320,
54550                 .sram_pdn_bits = GENMASK(8, 8),
54551                 .sram_pdn_ack_bits = GENMASK(12, 12),
54552         },
54553         [MT8192_POWER_DOMAIN_DISP] = {
54554 +               .name = "disp",
54555                 .sta_mask = BIT(20),
54556                 .ctl_offs = 0x0350,
54557                 .sram_pdn_bits = GENMASK(8, 8),
54558 @@ -133,6 +143,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
54559                 },
54560         },
54561         [MT8192_POWER_DOMAIN_IPE] = {
54562 +               .name = "ipe",
54563                 .sta_mask = BIT(14),
54564                 .ctl_offs = 0x0338,
54565                 .sram_pdn_bits = GENMASK(8, 8),
54566 @@ -149,6 +160,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
54567                 },
54568         },
54569         [MT8192_POWER_DOMAIN_ISP] = {
54570 +               .name = "isp",
54571                 .sta_mask = BIT(12),
54572                 .ctl_offs = 0x0330,
54573                 .sram_pdn_bits = GENMASK(8, 8),
54574 @@ -165,6 +177,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
54575                 },
54576         },
54577         [MT8192_POWER_DOMAIN_ISP2] = {
54578 +               .name = "isp2",
54579                 .sta_mask = BIT(13),
54580                 .ctl_offs = 0x0334,
54581                 .sram_pdn_bits = GENMASK(8, 8),
54582 @@ -181,6 +194,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
54583                 },
54584         },
54585         [MT8192_POWER_DOMAIN_MDP] = {
54586 +               .name = "mdp",
54587                 .sta_mask = BIT(19),
54588                 .ctl_offs = 0x034c,
54589                 .sram_pdn_bits = GENMASK(8, 8),
54590 @@ -197,6 +211,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
54591                 },
54592         },
54593         [MT8192_POWER_DOMAIN_VENC] = {
54594 +               .name = "venc",
54595                 .sta_mask = BIT(17),
54596                 .ctl_offs = 0x0344,
54597                 .sram_pdn_bits = GENMASK(8, 8),
54598 @@ -213,6 +228,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
54599                 },
54600         },
54601         [MT8192_POWER_DOMAIN_VDEC] = {
54602 +               .name = "vdec",
54603                 .sta_mask = BIT(15),
54604                 .ctl_offs = 0x033c,
54605                 .sram_pdn_bits = GENMASK(8, 8),
54606 @@ -229,12 +245,14 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
54607                 },
54608         },
54609         [MT8192_POWER_DOMAIN_VDEC2] = {
54610 +               .name = "vdec2",
54611                 .sta_mask = BIT(16),
54612                 .ctl_offs = 0x0340,
54613                 .sram_pdn_bits = GENMASK(8, 8),
54614                 .sram_pdn_ack_bits = GENMASK(12, 12),
54615         },
54616         [MT8192_POWER_DOMAIN_CAM] = {
54617 +               .name = "cam",
54618                 .sta_mask = BIT(23),
54619                 .ctl_offs = 0x035c,
54620                 .sram_pdn_bits = GENMASK(8, 8),
54621 @@ -263,18 +281,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
54622                 },
54623         },
54624         [MT8192_POWER_DOMAIN_CAM_RAWA] = {
54625 +               .name = "cam_rawa",
54626                 .sta_mask = BIT(24),
54627                 .ctl_offs = 0x0360,
54628                 .sram_pdn_bits = GENMASK(8, 8),
54629                 .sram_pdn_ack_bits = GENMASK(12, 12),
54630         },
54631         [MT8192_POWER_DOMAIN_CAM_RAWB] = {
54632 +               .name = "cam_rawb",
54633                 .sta_mask = BIT(25),
54634                 .ctl_offs = 0x0364,
54635                 .sram_pdn_bits = GENMASK(8, 8),
54636                 .sram_pdn_ack_bits = GENMASK(12, 12),
54637         },
54638         [MT8192_POWER_DOMAIN_CAM_RAWC] = {
54639 +               .name = "cam_rawc",
54640                 .sta_mask = BIT(26),
54641                 .ctl_offs = 0x0368,
54642                 .sram_pdn_bits = GENMASK(8, 8),
54643 diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c
54644 index b7f697666bdd..0af00efa0ef8 100644
54645 --- a/drivers/soc/mediatek/mtk-pm-domains.c
54646 +++ b/drivers/soc/mediatek/mtk-pm-domains.c
54647 @@ -438,7 +438,11 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
54648                 goto err_unprepare_subsys_clocks;
54649         }
54651 -       pd->genpd.name = node->name;
54652 +       if (!pd->data->name)
54653 +               pd->genpd.name = node->name;
54654 +       else
54655 +               pd->genpd.name = pd->data->name;
54657         pd->genpd.power_off = scpsys_power_off;
54658         pd->genpd.power_on = scpsys_power_on;
54660 @@ -487,8 +491,9 @@ static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *paren
54662                 child_pd = scpsys_add_one_domain(scpsys, child);
54663                 if (IS_ERR(child_pd)) {
54664 -                       dev_err_probe(scpsys->dev, PTR_ERR(child_pd),
54665 -                                     "%pOF: failed to get child domain id\n", child);
54666 +                       ret = PTR_ERR(child_pd);
54667 +                       dev_err_probe(scpsys->dev, ret, "%pOF: failed to get child domain id\n",
54668 +                                     child);
54669                         goto err_put_node;
54670                 }
54672 diff --git a/drivers/soc/mediatek/mtk-pm-domains.h b/drivers/soc/mediatek/mtk-pm-domains.h
54673 index 141dc76054e6..21a4e113bbec 100644
54674 --- a/drivers/soc/mediatek/mtk-pm-domains.h
54675 +++ b/drivers/soc/mediatek/mtk-pm-domains.h
54676 @@ -76,6 +76,7 @@ struct scpsys_bus_prot_data {
54678  /**
54679   * struct scpsys_domain_data - scp domain data for power on/off flow
54680 + * @name: The name of the power domain.
54681   * @sta_mask: The mask for power on/off status bit.
54682   * @ctl_offs: The offset for main power control register.
54683   * @sram_pdn_bits: The mask for sram power control bits.
54684 @@ -85,6 +86,7 @@ struct scpsys_bus_prot_data {
54685   * @bp_smi: bus protection for smi subsystem
54686   */
54687  struct scpsys_domain_data {
54688 +       const char *name;
54689         u32 sta_mask;
54690         int ctl_offs;
54691         u32 sram_pdn_bits;
54692 diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
54693 index 24cd193dec55..eba7f76f9d61 100644
54694 --- a/drivers/soc/qcom/mdt_loader.c
54695 +++ b/drivers/soc/qcom/mdt_loader.c
54696 @@ -230,6 +230,14 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
54697                         break;
54698                 }
54700 +               if (phdr->p_filesz > phdr->p_memsz) {
54701 +                       dev_err(dev,
54702 +                               "refusing to load segment %d with p_filesz > p_memsz\n",
54703 +                               i);
54704 +                       ret = -EINVAL;
54705 +                       break;
54706 +               }
54708                 ptr = mem_region + offset;
54710                 if (phdr->p_filesz && phdr->p_offset < fw->size) {
54711 @@ -253,6 +261,15 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
54712                                 break;
54713                         }
54715 +                       if (seg_fw->size != phdr->p_filesz) {
54716 +                               dev_err(dev,
54717 +                                       "failed to load segment %d from truncated file %s\n",
54718 +                                       i, fw_name);
54719 +                               release_firmware(seg_fw);
54720 +                               ret = -EINVAL;
54721 +                               break;
54722 +                       }
54724                         release_firmware(seg_fw);
54725                 }
54727 diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
54728 index 209dcdca923f..915d5bc3d46e 100644
54729 --- a/drivers/soc/qcom/pdr_interface.c
54730 +++ b/drivers/soc/qcom/pdr_interface.c
54731 @@ -153,7 +153,7 @@ static int pdr_register_listener(struct pdr_handle *pdr,
54732         if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
54733                 pr_err("PDR: %s register listener failed: 0x%x\n",
54734                        pds->service_path, resp.resp.error);
54735 -               return ret;
54736 +               return -EREMOTEIO;
54737         }
54739         pds->state = resp.curr_state;
54740 diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
54741 index df9a5ca8c99c..0118bd986f90 100644
54742 --- a/drivers/soc/tegra/pmc.c
54743 +++ b/drivers/soc/tegra/pmc.c
54744 @@ -317,6 +317,8 @@ struct tegra_pmc_soc {
54745                                    bool invert);
54746         int (*irq_set_wake)(struct irq_data *data, unsigned int on);
54747         int (*irq_set_type)(struct irq_data *data, unsigned int type);
54748 +       int (*powergate_set)(struct tegra_pmc *pmc, unsigned int id,
54749 +                            bool new_state);
54751         const char * const *reset_sources;
54752         unsigned int num_reset_sources;
54753 @@ -517,6 +519,63 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
54754         return -ENODEV;
54757 +static int tegra20_powergate_set(struct tegra_pmc *pmc, unsigned int id,
54758 +                                bool new_state)
54760 +       unsigned int retries = 100;
54761 +       bool status;
54762 +       int ret;
54764 +       /*
54765 +        * As per the TRM documentation, the toggle command will be dropped by PMC
54766 +        * if there is contention with HW-initiated toggling (i.e. a CPU core
54767 +        * being power-gated); the command should be retried in that case.
54768 +        */
54769 +       do {
54770 +               tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
54772 +               /* wait for PMC to execute the command */
54773 +               ret = readx_poll_timeout(tegra_powergate_state, id, status,
54774 +                                        status == new_state, 1, 10);
54775 +       } while (ret == -ETIMEDOUT && retries--);
54777 +       return ret;
54780 +static inline bool tegra_powergate_toggle_ready(struct tegra_pmc *pmc)
54782 +       return !(tegra_pmc_readl(pmc, PWRGATE_TOGGLE) & PWRGATE_TOGGLE_START);
54785 +static int tegra114_powergate_set(struct tegra_pmc *pmc, unsigned int id,
54786 +                                 bool new_state)
54788 +       bool status;
54789 +       int err;
54791 +       /* wait while PMC power gating is contended */
54792 +       err = readx_poll_timeout(tegra_powergate_toggle_ready, pmc, status,
54793 +                                status == true, 1, 100);
54794 +       if (err)
54795 +               return err;
54797 +       tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
54799 +       /* wait for PMC to accept the command */
54800 +       err = readx_poll_timeout(tegra_powergate_toggle_ready, pmc, status,
54801 +                                status == true, 1, 100);
54802 +       if (err)
54803 +               return err;
54805 +       /* wait for PMC to execute the command */
54806 +       err = readx_poll_timeout(tegra_powergate_state, id, status,
54807 +                                status == new_state, 10, 100000);
54808 +       if (err)
54809 +               return err;
54811 +       return 0;
54814  /**
54815   * tegra_powergate_set() - set the state of a partition
54816   * @pmc: power management controller
54817 @@ -526,7 +585,6 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
54818  static int tegra_powergate_set(struct tegra_pmc *pmc, unsigned int id,
54819                                bool new_state)
54821 -       bool status;
54822         int err;
54824         if (id == TEGRA_POWERGATE_3D && pmc->soc->has_gpu_clamps)
54825 @@ -539,10 +597,7 @@ static int tegra_powergate_set(struct tegra_pmc *pmc, unsigned int id,
54826                 return 0;
54827         }
54829 -       tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
54831 -       err = readx_poll_timeout(tegra_powergate_state, id, status,
54832 -                                status == new_state, 10, 100000);
54833 +       err = pmc->soc->powergate_set(pmc, id, new_state);
54835         mutex_unlock(&pmc->powergates_lock);
54837 @@ -2699,6 +2754,7 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = {
54838         .regs = &tegra20_pmc_regs,
54839         .init = tegra20_pmc_init,
54840         .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
54841 +       .powergate_set = tegra20_powergate_set,
54842         .reset_sources = NULL,
54843         .num_reset_sources = 0,
54844         .reset_levels = NULL,
54845 @@ -2757,6 +2813,7 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = {
54846         .regs = &tegra20_pmc_regs,
54847         .init = tegra20_pmc_init,
54848         .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
54849 +       .powergate_set = tegra20_powergate_set,
54850         .reset_sources = tegra30_reset_sources,
54851         .num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
54852         .reset_levels = NULL,
54853 @@ -2811,6 +2868,7 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = {
54854         .regs = &tegra20_pmc_regs,
54855         .init = tegra20_pmc_init,
54856         .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
54857 +       .powergate_set = tegra114_powergate_set,
54858         .reset_sources = tegra30_reset_sources,
54859         .num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
54860         .reset_levels = NULL,
54861 @@ -2925,6 +2983,7 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = {
54862         .regs = &tegra20_pmc_regs,
54863         .init = tegra20_pmc_init,
54864         .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
54865 +       .powergate_set = tegra114_powergate_set,
54866         .reset_sources = tegra30_reset_sources,
54867         .num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
54868         .reset_levels = NULL,
54869 @@ -3048,6 +3107,7 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = {
54870         .regs = &tegra20_pmc_regs,
54871         .init = tegra20_pmc_init,
54872         .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
54873 +       .powergate_set = tegra114_powergate_set,
54874         .irq_set_wake = tegra210_pmc_irq_set_wake,
54875         .irq_set_type = tegra210_pmc_irq_set_type,
54876         .reset_sources = tegra210_reset_sources,
54877 diff --git a/drivers/soc/tegra/regulators-tegra30.c b/drivers/soc/tegra/regulators-tegra30.c
54878 index 7f21f31de09d..0e776b20f625 100644
54879 --- a/drivers/soc/tegra/regulators-tegra30.c
54880 +++ b/drivers/soc/tegra/regulators-tegra30.c
54881 @@ -178,7 +178,7 @@ static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra,
54882          * survive the voltage drop if it's running on a higher frequency.
54883          */
54884         if (!cpu_min_uV_consumers)
54885 -               cpu_min_uV = cpu_uV;
54886 +               cpu_min_uV = max(cpu_uV, cpu_min_uV);
54888         /*
54889          * Bootloader shall set up voltages correctly, but if it
54890 diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
54891 index 46885429928a..4ec29338ce9a 100644
54892 --- a/drivers/soundwire/bus.c
54893 +++ b/drivers/soundwire/bus.c
54894 @@ -705,7 +705,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
54895         struct sdw_slave *slave, *_s;
54896         struct sdw_slave_id id;
54897         struct sdw_msg msg;
54898 -       bool found = false;
54899 +       bool found;
54900         int count = 0, ret;
54901         u64 addr;
54903 @@ -737,6 +737,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
54905                 sdw_extract_slave_id(bus, addr, &id);
54907 +               found = false;
54908                 /* Now compare with entries */
54909                 list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
54910                         if (sdw_compare_devid(slave, id) == 0) {
54911 diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
54912 index d05442e646a3..57c59a33ce61 100644
54913 --- a/drivers/soundwire/cadence_master.c
54914 +++ b/drivers/soundwire/cadence_master.c
54915 @@ -1450,10 +1450,12 @@ int sdw_cdns_clock_stop(struct sdw_cdns *cdns, bool block_wake)
54916         }
54918         /* Prepare slaves for clock stop */
54919 -       ret = sdw_bus_prep_clk_stop(&cdns->bus);
54920 -       if (ret < 0) {
54921 -               dev_err(cdns->dev, "prepare clock stop failed %d", ret);
54922 -               return ret;
54923 +       if (slave_present) {
54924 +               ret = sdw_bus_prep_clk_stop(&cdns->bus);
54925 +               if (ret < 0 && ret != -ENODATA) {
54926 +                       dev_err(cdns->dev, "prepare clock stop failed %d\n", ret);
54927 +                       return ret;
54928 +               }
54929         }
54931         /*
54932 diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
54933 index 1099b5d1262b..a418c3c7001c 100644
54934 --- a/drivers/soundwire/stream.c
54935 +++ b/drivers/soundwire/stream.c
54936 @@ -1375,8 +1375,16 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
54937         }
54939         ret = sdw_config_stream(&slave->dev, stream, stream_config, true);
54940 -       if (ret)
54941 +       if (ret) {
54942 +               /*
54943 +                * sdw_release_master_stream will release s_rt in slave_rt_list in
54944 +                * the stream_error case, but s_rt is only added to slave_rt_list
54945 +                * when sdw_config_stream succeeds, so free s_rt explicitly
54946 +                * when sdw_config_stream fails.
54947 +                */
54948 +               kfree(s_rt);
54949                 goto stream_error;
54950 +       }
54952         list_add_tail(&s_rt->m_rt_node, &m_rt->slave_rt_list);
54954 diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
54955 index eb9a243e9526..98ace748cd98 100644
54956 --- a/drivers/spi/spi-ath79.c
54957 +++ b/drivers/spi/spi-ath79.c
54958 @@ -156,8 +156,7 @@ static int ath79_spi_probe(struct platform_device *pdev)
54960         master->use_gpio_descriptors = true;
54961         master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
54962 -       master->setup = spi_bitbang_setup;
54963 -       master->cleanup = spi_bitbang_cleanup;
54964 +       master->flags = SPI_MASTER_GPIO_SS;
54965         if (pdata) {
54966                 master->bus_num = pdata->bus_num;
54967                 master->num_chipselect = pdata->num_chipselect;
54968 diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c
54969 index 75b33d7d14b0..9a4d942fafcf 100644
54970 --- a/drivers/spi/spi-dln2.c
54971 +++ b/drivers/spi/spi-dln2.c
54972 @@ -780,7 +780,7 @@ static int dln2_spi_probe(struct platform_device *pdev)
54974  static int dln2_spi_remove(struct platform_device *pdev)
54976 -       struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
54977 +       struct spi_master *master = platform_get_drvdata(pdev);
54978         struct dln2_spi *dln2 = spi_master_get_devdata(master);
54980         pm_runtime_disable(&pdev->dev);
54981 diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
54982 index a2886ee44e4c..5d98611dd999 100644
54983 --- a/drivers/spi/spi-fsl-lpspi.c
54984 +++ b/drivers/spi/spi-fsl-lpspi.c
54985 @@ -200,7 +200,7 @@ static int lpspi_prepare_xfer_hardware(struct spi_controller *controller)
54986                                 spi_controller_get_devdata(controller);
54987         int ret;
54989 -       ret = pm_runtime_get_sync(fsl_lpspi->dev);
54990 +       ret = pm_runtime_resume_and_get(fsl_lpspi->dev);
54991         if (ret < 0) {
54992                 dev_err(fsl_lpspi->dev, "failed to enable clock\n");
54993                 return ret;
54994 diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
54995 index e4a8d203f940..d0e5aa18b7ba 100644
54996 --- a/drivers/spi/spi-fsl-spi.c
54997 +++ b/drivers/spi/spi-fsl-spi.c
54998 @@ -707,6 +707,11 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
54999         struct resource mem;
55000         int irq, type;
55001         int ret;
55002 +       bool spisel_boot = false;
55003 +#if IS_ENABLED(CONFIG_FSL_SOC)
55004 +       struct mpc8xxx_spi_probe_info *pinfo = NULL;
55005 +#endif
55008         ret = of_mpc8xxx_spi_probe(ofdev);
55009         if (ret)
55010 @@ -715,9 +720,8 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
55011         type = fsl_spi_get_type(&ofdev->dev);
55012         if (type == TYPE_FSL) {
55013                 struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
55014 -               bool spisel_boot = false;
55015  #if IS_ENABLED(CONFIG_FSL_SOC)
55016 -               struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
55017 +               pinfo = to_of_pinfo(pdata);
55019                 spisel_boot = of_property_read_bool(np, "fsl,spisel_boot");
55020                 if (spisel_boot) {
55021 @@ -746,15 +750,24 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
55023         ret = of_address_to_resource(np, 0, &mem);
55024         if (ret)
55025 -               return ret;
55026 +               goto unmap_out;
55028         irq = platform_get_irq(ofdev, 0);
55029 -       if (irq < 0)
55030 -               return irq;
55031 +       if (irq < 0) {
55032 +               ret = irq;
55033 +               goto unmap_out;
55034 +       }
55036         master = fsl_spi_probe(dev, &mem, irq);
55038         return PTR_ERR_OR_ZERO(master);
55040 +unmap_out:
55041 +#if IS_ENABLED(CONFIG_FSL_SOC)
55042 +       if (spisel_boot)
55043 +               iounmap(pinfo->immr_spi_cs);
55044 +#endif
55045 +       return ret;
55048  static int of_fsl_spi_remove(struct platform_device *ofdev)
55049 diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
55050 index 36a4922a134a..ccd817ee4917 100644
55051 --- a/drivers/spi/spi-omap-100k.c
55052 +++ b/drivers/spi/spi-omap-100k.c
55053 @@ -424,7 +424,7 @@ static int omap1_spi100k_probe(struct platform_device *pdev)
55055  static int omap1_spi100k_remove(struct platform_device *pdev)
55057 -       struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
55058 +       struct spi_master *master = platform_get_drvdata(pdev);
55059         struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
55061         pm_runtime_disable(&pdev->dev);
55062 @@ -438,7 +438,7 @@ static int omap1_spi100k_remove(struct platform_device *pdev)
55063  #ifdef CONFIG_PM
55064  static int omap1_spi100k_runtime_suspend(struct device *dev)
55066 -       struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
55067 +       struct spi_master *master = dev_get_drvdata(dev);
55068         struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
55070         clk_disable_unprepare(spi100k->ick);
55071 @@ -449,7 +449,7 @@ static int omap1_spi100k_runtime_suspend(struct device *dev)
55073  static int omap1_spi100k_runtime_resume(struct device *dev)
55075 -       struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
55076 +       struct spi_master *master = dev_get_drvdata(dev);
55077         struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
55078         int ret;
55080 diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
55081 index 8dcb2e70735c..d39dec6d1c91 100644
55082 --- a/drivers/spi/spi-qup.c
55083 +++ b/drivers/spi/spi-qup.c
55084 @@ -1263,7 +1263,7 @@ static int spi_qup_remove(struct platform_device *pdev)
55085         struct spi_qup *controller = spi_master_get_devdata(master);
55086         int ret;
55088 -       ret = pm_runtime_get_sync(&pdev->dev);
55089 +       ret = pm_runtime_resume_and_get(&pdev->dev);
55090         if (ret < 0)
55091                 return ret;
55093 diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
55094 index 936ef54e0903..0d75080da648 100644
55095 --- a/drivers/spi/spi-rockchip.c
55096 +++ b/drivers/spi/spi-rockchip.c
55097 @@ -476,7 +476,7 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
55098         return 1;
55101 -static void rockchip_spi_config(struct rockchip_spi *rs,
55102 +static int rockchip_spi_config(struct rockchip_spi *rs,
55103                 struct spi_device *spi, struct spi_transfer *xfer,
55104                 bool use_dma, bool slave_mode)
55106 @@ -521,7 +521,9 @@ static void rockchip_spi_config(struct rockchip_spi *rs,
55107                  * ctlr->bits_per_word_mask, so this shouldn't
55108                  * happen
55109                  */
55110 -               unreachable();
55111 +               dev_err(rs->dev, "unknown bits per word: %d\n",
55112 +                       xfer->bits_per_word);
55113 +               return -EINVAL;
55114         }
55116         if (use_dma) {
55117 @@ -554,6 +556,8 @@ static void rockchip_spi_config(struct rockchip_spi *rs,
55118          */
55119         writel_relaxed(2 * DIV_ROUND_UP(rs->freq, 2 * xfer->speed_hz),
55120                         rs->regs + ROCKCHIP_SPI_BAUDR);
55122 +       return 0;
55125  static size_t rockchip_spi_max_transfer_size(struct spi_device *spi)
55126 @@ -577,6 +581,7 @@ static int rockchip_spi_transfer_one(
55127                 struct spi_transfer *xfer)
55129         struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
55130 +       int ret;
55131         bool use_dma;
55133         WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
55134 @@ -596,7 +601,9 @@ static int rockchip_spi_transfer_one(
55136         use_dma = ctlr->can_dma ? ctlr->can_dma(ctlr, spi, xfer) : false;
55138 -       rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->slave);
55139 +       ret = rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->slave);
55140 +       if (ret)
55141 +               return ret;
55143         if (use_dma)
55144                 return rockchip_spi_prepare_dma(rs, ctlr, xfer);
55145 diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
55146 index 947e6b9dc9f4..2786470a5201 100644
55147 --- a/drivers/spi/spi-stm32-qspi.c
55148 +++ b/drivers/spi/spi-stm32-qspi.c
55149 @@ -727,21 +727,31 @@ static int __maybe_unused stm32_qspi_suspend(struct device *dev)
55151         pinctrl_pm_select_sleep_state(dev);
55153 -       return 0;
55154 +       return pm_runtime_force_suspend(dev);
55157  static int __maybe_unused stm32_qspi_resume(struct device *dev)
55159         struct stm32_qspi *qspi = dev_get_drvdata(dev);
55160 +       int ret;
55162 +       ret = pm_runtime_force_resume(dev);
55163 +       if (ret < 0)
55164 +               return ret;
55166         pinctrl_pm_select_default_state(dev);
55167 -       clk_prepare_enable(qspi->clk);
55169 +       ret = pm_runtime_get_sync(dev);
55170 +       if (ret < 0) {
55171 +               pm_runtime_put_noidle(dev);
55172 +               return ret;
55173 +       }
55175         writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
55176         writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);
55178 -       pm_runtime_mark_last_busy(qspi->dev);
55179 -       pm_runtime_put_autosuspend(qspi->dev);
55180 +       pm_runtime_mark_last_busy(dev);
55181 +       pm_runtime_put_autosuspend(dev);
55183         return 0;
55185 diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
55186 index 25c076461011..7f0244a246e9 100644
55187 --- a/drivers/spi/spi-stm32.c
55188 +++ b/drivers/spi/spi-stm32.c
55189 @@ -1803,7 +1803,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
55190         struct reset_control *rst;
55191         int ret;
55193 -       master = spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
55194 +       master = devm_spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
55195         if (!master) {
55196                 dev_err(&pdev->dev, "spi master allocation failed\n");
55197                 return -ENOMEM;
55198 @@ -1821,18 +1821,16 @@ static int stm32_spi_probe(struct platform_device *pdev)
55200         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
55201         spi->base = devm_ioremap_resource(&pdev->dev, res);
55202 -       if (IS_ERR(spi->base)) {
55203 -               ret = PTR_ERR(spi->base);
55204 -               goto err_master_put;
55205 -       }
55206 +       if (IS_ERR(spi->base))
55207 +               return PTR_ERR(spi->base);
55209         spi->phys_addr = (dma_addr_t)res->start;
55211         spi->irq = platform_get_irq(pdev, 0);
55212 -       if (spi->irq <= 0) {
55213 -               ret = dev_err_probe(&pdev->dev, spi->irq, "failed to get irq\n");
55214 -               goto err_master_put;
55215 -       }
55216 +       if (spi->irq <= 0)
55217 +               return dev_err_probe(&pdev->dev, spi->irq,
55218 +                                    "failed to get irq\n");
55220         ret = devm_request_threaded_irq(&pdev->dev, spi->irq,
55221                                         spi->cfg->irq_handler_event,
55222                                         spi->cfg->irq_handler_thread,
55223 @@ -1840,20 +1838,20 @@ static int stm32_spi_probe(struct platform_device *pdev)
55224         if (ret) {
55225                 dev_err(&pdev->dev, "irq%d request failed: %d\n", spi->irq,
55226                         ret);
55227 -               goto err_master_put;
55228 +               return ret;
55229         }
55231         spi->clk = devm_clk_get(&pdev->dev, NULL);
55232         if (IS_ERR(spi->clk)) {
55233                 ret = PTR_ERR(spi->clk);
55234                 dev_err(&pdev->dev, "clk get failed: %d\n", ret);
55235 -               goto err_master_put;
55236 +               return ret;
55237         }
55239         ret = clk_prepare_enable(spi->clk);
55240         if (ret) {
55241                 dev_err(&pdev->dev, "clk enable failed: %d\n", ret);
55242 -               goto err_master_put;
55243 +               return ret;
55244         }
55245         spi->clk_rate = clk_get_rate(spi->clk);
55246         if (!spi->clk_rate) {
55247 @@ -1929,7 +1927,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
55248         pm_runtime_set_active(&pdev->dev);
55249         pm_runtime_enable(&pdev->dev);
55251 -       ret = devm_spi_register_master(&pdev->dev, master);
55252 +       ret = spi_register_master(master);
55253         if (ret) {
55254                 dev_err(&pdev->dev, "spi master registration failed: %d\n",
55255                         ret);
55256 @@ -1949,8 +1947,6 @@ static int stm32_spi_probe(struct platform_device *pdev)
55257                 dma_release_channel(spi->dma_rx);
55258  err_clk_disable:
55259         clk_disable_unprepare(spi->clk);
55260 -err_master_put:
55261 -       spi_master_put(master);
55263         return ret;
55265 @@ -1960,6 +1956,7 @@ static int stm32_spi_remove(struct platform_device *pdev)
55266         struct spi_master *master = platform_get_drvdata(pdev);
55267         struct stm32_spi *spi = spi_master_get_devdata(master);
55269 +       spi_unregister_master(master);
55270         spi->cfg->disable(spi);
55272         if (master->dma_tx)
55273 diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
55274 index 9417385c0921..e06aafe169e0 100644
55275 --- a/drivers/spi/spi-ti-qspi.c
55276 +++ b/drivers/spi/spi-ti-qspi.c
55277 @@ -733,6 +733,17 @@ static int ti_qspi_runtime_resume(struct device *dev)
55278         return 0;
55281 +static void ti_qspi_dma_cleanup(struct ti_qspi *qspi)
55283 +       if (qspi->rx_bb_addr)
55284 +               dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE,
55285 +                                 qspi->rx_bb_addr,
55286 +                                 qspi->rx_bb_dma_addr);
55288 +       if (qspi->rx_chan)
55289 +               dma_release_channel(qspi->rx_chan);
55292  static const struct of_device_id ti_qspi_match[] = {
55293         {.compatible = "ti,dra7xxx-qspi" },
55294         {.compatible = "ti,am4372-qspi" },
55295 @@ -886,6 +897,8 @@ static int ti_qspi_probe(struct platform_device *pdev)
55296         if (!ret)
55297                 return 0;
55299 +       ti_qspi_dma_cleanup(qspi);
55301         pm_runtime_disable(&pdev->dev);
55302  free_master:
55303         spi_master_put(master);
55304 @@ -904,12 +917,7 @@ static int ti_qspi_remove(struct platform_device *pdev)
55305         pm_runtime_put_sync(&pdev->dev);
55306         pm_runtime_disable(&pdev->dev);
55308 -       if (qspi->rx_bb_addr)
55309 -               dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE,
55310 -                                 qspi->rx_bb_addr,
55311 -                                 qspi->rx_bb_dma_addr);
55312 -       if (qspi->rx_chan)
55313 -               dma_release_channel(qspi->rx_chan);
55314 +       ti_qspi_dma_cleanup(qspi);
55316         return 0;
55318 diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
55319 index c8fa6ee18ae7..7162387b9f96 100644
55320 --- a/drivers/spi/spi-zynqmp-gqspi.c
55321 +++ b/drivers/spi/spi-zynqmp-gqspi.c
55322 @@ -157,6 +157,7 @@ enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA};
55323   * @data_completion:   completion structure
55324   */
55325  struct zynqmp_qspi {
55326 +       struct spi_controller *ctlr;
55327         void __iomem *regs;
55328         struct clk *refclk;
55329         struct clk *pclk;
55330 @@ -173,6 +174,7 @@ struct zynqmp_qspi {
55331         u32 genfifoentry;
55332         enum mode_type mode;
55333         struct completion data_completion;
55334 +       struct mutex op_lock;
55335  };
55337  /**
55338 @@ -486,24 +488,10 @@ static int zynqmp_qspi_setup_op(struct spi_device *qspi)
55340         struct spi_controller *ctlr = qspi->master;
55341         struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
55342 -       struct device *dev = &ctlr->dev;
55343 -       int ret;
55345         if (ctlr->busy)
55346                 return -EBUSY;
55348 -       ret = clk_enable(xqspi->refclk);
55349 -       if (ret) {
55350 -               dev_err(dev, "Cannot enable device clock.\n");
55351 -               return ret;
55352 -       }
55354 -       ret = clk_enable(xqspi->pclk);
55355 -       if (ret) {
55356 -               dev_err(dev, "Cannot enable APB clock.\n");
55357 -               clk_disable(xqspi->refclk);
55358 -               return ret;
55359 -       }
55360         zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
55362         return 0;
55363 @@ -520,7 +508,7 @@ static void zynqmp_qspi_filltxfifo(struct zynqmp_qspi *xqspi, int size)
55365         u32 count = 0, intermediate;
55367 -       while ((xqspi->bytes_to_transfer > 0) && (count < size)) {
55368 +       while ((xqspi->bytes_to_transfer > 0) && (count < size) && (xqspi->txbuf)) {
55369                 memcpy(&intermediate, xqspi->txbuf, 4);
55370                 zynqmp_gqspi_write(xqspi, GQSPI_TXD_OFST, intermediate);
55372 @@ -579,7 +567,7 @@ static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits,
55373                 genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
55374                 genfifoentry |= GQSPI_GENFIFO_TX;
55375                 transfer_len = xqspi->bytes_to_transfer;
55376 -       } else {
55377 +       } else if (xqspi->rxbuf) {
55378                 genfifoentry &= ~GQSPI_GENFIFO_TX;
55379                 genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
55380                 genfifoentry |= GQSPI_GENFIFO_RX;
55381 @@ -587,6 +575,11 @@ static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits,
55382                         transfer_len = xqspi->dma_rx_bytes;
55383                 else
55384                         transfer_len = xqspi->bytes_to_receive;
55385 +       } else {
55386 +               /* Sending dummy circles here */
55387 +               genfifoentry &= ~(GQSPI_GENFIFO_TX | GQSPI_GENFIFO_RX);
55388 +               genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
55389 +               transfer_len = xqspi->bytes_to_transfer;
55390         }
55391         genfifoentry |= zynqmp_qspi_selectspimode(xqspi, nbits);
55392         xqspi->genfifoentry = genfifoentry;
55393 @@ -738,7 +731,7 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
55394   * zynqmp_qspi_setuprxdma - This function sets up the RX DMA operation
55395   * @xqspi:     xqspi is a pointer to the GQSPI instance.
55396   */
55397 -static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
55398 +static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
55400         u32 rx_bytes, rx_rem, config_reg;
55401         dma_addr_t addr;
55402 @@ -752,7 +745,7 @@ static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
55403                 zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
55404                 xqspi->mode = GQSPI_MODE_IO;
55405                 xqspi->dma_rx_bytes = 0;
55406 -               return;
55407 +               return 0;
55408         }
55410         rx_rem = xqspi->bytes_to_receive % 4;
55411 @@ -760,8 +753,10 @@ static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
55413         addr = dma_map_single(xqspi->dev, (void *)xqspi->rxbuf,
55414                               rx_bytes, DMA_FROM_DEVICE);
55415 -       if (dma_mapping_error(xqspi->dev, addr))
55416 +       if (dma_mapping_error(xqspi->dev, addr)) {
55417                 dev_err(xqspi->dev, "ERR:rxdma:memory not mapped\n");
55418 +               return -ENOMEM;
55419 +       }
55421         xqspi->dma_rx_bytes = rx_bytes;
55422         xqspi->dma_addr = addr;
55423 @@ -782,6 +777,8 @@ static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
55425         /* Write the number of bytes to transfer */
55426         zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_SIZE_OFST, rx_bytes);
55428 +       return 0;
55431  /**
55432 @@ -818,11 +815,17 @@ static void zynqmp_qspi_write_op(struct zynqmp_qspi *xqspi, u8 tx_nbits,
55433   * @genfifoentry:      genfifoentry is pointer to the variable in which
55434   *                     GENFIFO mask is returned to calling function
55435   */
55436 -static void zynqmp_qspi_read_op(struct zynqmp_qspi *xqspi, u8 rx_nbits,
55437 +static int zynqmp_qspi_read_op(struct zynqmp_qspi *xqspi, u8 rx_nbits,
55438                                 u32 genfifoentry)
55440 +       int ret;
55442 +       ret = zynqmp_qspi_setuprxdma(xqspi);
55443 +       if (ret)
55444 +               return ret;
55445         zynqmp_qspi_fillgenfifo(xqspi, rx_nbits, genfifoentry);
55446 -       zynqmp_qspi_setuprxdma(xqspi);
55448 +       return 0;
55451  /**
55452 @@ -835,10 +838,13 @@ static void zynqmp_qspi_read_op(struct zynqmp_qspi *xqspi, u8 rx_nbits,
55453   */
55454  static int __maybe_unused zynqmp_qspi_suspend(struct device *dev)
55456 -       struct spi_controller *ctlr = dev_get_drvdata(dev);
55457 -       struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
55458 +       struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
55459 +       struct spi_controller *ctlr = xqspi->ctlr;
55460 +       int ret;
55462 -       spi_controller_suspend(ctlr);
55463 +       ret = spi_controller_suspend(ctlr);
55464 +       if (ret)
55465 +               return ret;
55467         zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
55469 @@ -856,27 +862,13 @@ static int __maybe_unused zynqmp_qspi_suspend(struct device *dev)
55470   */
55471  static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
55473 -       struct spi_controller *ctlr = dev_get_drvdata(dev);
55474 -       struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
55475 -       int ret = 0;
55477 -       ret = clk_enable(xqspi->pclk);
55478 -       if (ret) {
55479 -               dev_err(dev, "Cannot enable APB clock.\n");
55480 -               return ret;
55481 -       }
55482 +       struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
55483 +       struct spi_controller *ctlr = xqspi->ctlr;
55485 -       ret = clk_enable(xqspi->refclk);
55486 -       if (ret) {
55487 -               dev_err(dev, "Cannot enable device clock.\n");
55488 -               clk_disable(xqspi->pclk);
55489 -               return ret;
55490 -       }
55491 +       zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
55493         spi_controller_resume(ctlr);
55495 -       clk_disable(xqspi->refclk);
55496 -       clk_disable(xqspi->pclk);
55497         return 0;
55500 @@ -890,10 +882,10 @@ static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
55501   */
55502  static int __maybe_unused zynqmp_runtime_suspend(struct device *dev)
55504 -       struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_get_drvdata(dev);
55505 +       struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
55507 -       clk_disable(xqspi->refclk);
55508 -       clk_disable(xqspi->pclk);
55509 +       clk_disable_unprepare(xqspi->refclk);
55510 +       clk_disable_unprepare(xqspi->pclk);
55512         return 0;
55514 @@ -908,19 +900,19 @@ static int __maybe_unused zynqmp_runtime_suspend(struct device *dev)
55515   */
55516  static int __maybe_unused zynqmp_runtime_resume(struct device *dev)
55518 -       struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_get_drvdata(dev);
55519 +       struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
55520         int ret;
55522 -       ret = clk_enable(xqspi->pclk);
55523 +       ret = clk_prepare_enable(xqspi->pclk);
55524         if (ret) {
55525                 dev_err(dev, "Cannot enable APB clock.\n");
55526                 return ret;
55527         }
55529 -       ret = clk_enable(xqspi->refclk);
55530 +       ret = clk_prepare_enable(xqspi->refclk);
55531         if (ret) {
55532                 dev_err(dev, "Cannot enable device clock.\n");
55533 -               clk_disable(xqspi->pclk);
55534 +               clk_disable_unprepare(xqspi->pclk);
55535                 return ret;
55536         }
55538 @@ -944,25 +936,23 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
55539         struct zynqmp_qspi *xqspi = spi_controller_get_devdata
55540                                     (mem->spi->master);
55541         int err = 0, i;
55542 -       u8 *tmpbuf;
55543         u32 genfifoentry = 0;
55544 +       u16 opcode = op->cmd.opcode;
55545 +       u64 opaddr;
55547         dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
55548                 op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
55549                 op->dummy.buswidth, op->data.buswidth);
55551 +       mutex_lock(&xqspi->op_lock);
55552         zynqmp_qspi_config_op(xqspi, mem->spi);
55553         zynqmp_qspi_chipselect(mem->spi, false);
55554         genfifoentry |= xqspi->genfifocs;
55555         genfifoentry |= xqspi->genfifobus;
55557         if (op->cmd.opcode) {
55558 -               tmpbuf = kzalloc(op->cmd.nbytes, GFP_KERNEL | GFP_DMA);
55559 -               if (!tmpbuf)
55560 -                       return -ENOMEM;
55561 -               tmpbuf[0] = op->cmd.opcode;
55562                 reinit_completion(&xqspi->data_completion);
55563 -               xqspi->txbuf = tmpbuf;
55564 +               xqspi->txbuf = &opcode;
55565                 xqspi->rxbuf = NULL;
55566                 xqspi->bytes_to_transfer = op->cmd.nbytes;
55567                 xqspi->bytes_to_receive = 0;
55568 @@ -973,16 +963,15 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
55569                 zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
55570                                    GQSPI_IER_GENFIFOEMPTY_MASK |
55571                                    GQSPI_IER_TXNOT_FULL_MASK);
55572 -               if (!wait_for_completion_interruptible_timeout
55573 +               if (!wait_for_completion_timeout
55574                     (&xqspi->data_completion, msecs_to_jiffies(1000))) {
55575                         err = -ETIMEDOUT;
55576 -                       kfree(tmpbuf);
55577                         goto return_err;
55578                 }
55579 -               kfree(tmpbuf);
55580         }
55582         if (op->addr.nbytes) {
55583 +               xqspi->txbuf = &opaddr;
55584                 for (i = 0; i < op->addr.nbytes; i++) {
55585                         *(((u8 *)xqspi->txbuf) + i) = op->addr.val >>
55586                                         (8 * (op->addr.nbytes - i - 1));
55587 @@ -1001,7 +990,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
55588                                    GQSPI_IER_TXEMPTY_MASK |
55589                                    GQSPI_IER_GENFIFOEMPTY_MASK |
55590                                    GQSPI_IER_TXNOT_FULL_MASK);
55591 -               if (!wait_for_completion_interruptible_timeout
55592 +               if (!wait_for_completion_timeout
55593                     (&xqspi->data_completion, msecs_to_jiffies(1000))) {
55594                         err = -ETIMEDOUT;
55595                         goto return_err;
55596 @@ -1009,32 +998,23 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
55597         }
55599         if (op->dummy.nbytes) {
55600 -               tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL | GFP_DMA);
55601 -               if (!tmpbuf)
55602 -                       return -ENOMEM;
55603 -               memset(tmpbuf, 0xff, op->dummy.nbytes);
55604 -               reinit_completion(&xqspi->data_completion);
55605 -               xqspi->txbuf = tmpbuf;
55606 +               xqspi->txbuf = NULL;
55607                 xqspi->rxbuf = NULL;
55608 -               xqspi->bytes_to_transfer = op->dummy.nbytes;
55609 +               /*
55610 +                * xqspi->bytes_to_transfer here represents the dummy cycles
55611 +                * that need to be sent.
55612 +                */
55613 +               xqspi->bytes_to_transfer = op->dummy.nbytes * 8 / op->dummy.buswidth;
55614                 xqspi->bytes_to_receive = 0;
55615 -               zynqmp_qspi_write_op(xqspi, op->dummy.buswidth,
55616 +               /*
55617 +                * Using op->data.buswidth instead of op->dummy.buswidth here because
55618 +                * we need it to configure the correct SPI mode.
55619 +                */
55620 +               zynqmp_qspi_write_op(xqspi, op->data.buswidth,
55621                                      genfifoentry);
55622                 zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
55623                                    zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
55624                                    GQSPI_CFG_START_GEN_FIFO_MASK);
55625 -               zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
55626 -                                  GQSPI_IER_TXEMPTY_MASK |
55627 -                                  GQSPI_IER_GENFIFOEMPTY_MASK |
55628 -                                  GQSPI_IER_TXNOT_FULL_MASK);
55629 -               if (!wait_for_completion_interruptible_timeout
55630 -                   (&xqspi->data_completion, msecs_to_jiffies(1000))) {
55631 -                       err = -ETIMEDOUT;
55632 -                       kfree(tmpbuf);
55633 -                       goto return_err;
55634 -               }
55636 -               kfree(tmpbuf);
55637         }
55639         if (op->data.nbytes) {
55640 @@ -1059,8 +1039,11 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
55641                         xqspi->rxbuf = (u8 *)op->data.buf.in;
55642                         xqspi->bytes_to_receive = op->data.nbytes;
55643                         xqspi->bytes_to_transfer = 0;
55644 -                       zynqmp_qspi_read_op(xqspi, op->data.buswidth,
55645 +                       err = zynqmp_qspi_read_op(xqspi, op->data.buswidth,
55646                                             genfifoentry);
55647 +                       if (err)
55648 +                               goto return_err;
55650                         zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
55651                                            zynqmp_gqspi_read
55652                                            (xqspi, GQSPI_CONFIG_OFST) |
55653 @@ -1076,7 +1059,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
55654                                                    GQSPI_IER_RXEMPTY_MASK);
55655                         }
55656                 }
55657 -               if (!wait_for_completion_interruptible_timeout
55658 +               if (!wait_for_completion_timeout
55659                     (&xqspi->data_completion, msecs_to_jiffies(1000)))
55660                         err = -ETIMEDOUT;
55661         }
55662 @@ -1084,6 +1067,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
55663  return_err:
55665         zynqmp_qspi_chipselect(mem->spi, true);
55666 +       mutex_unlock(&xqspi->op_lock);
55668         return err;
55670 @@ -1120,6 +1104,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
55672         xqspi = spi_controller_get_devdata(ctlr);
55673         xqspi->dev = dev;
55674 +       xqspi->ctlr = ctlr;
55675         platform_set_drvdata(pdev, xqspi);
55677         xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
55678 @@ -1135,13 +1120,11 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
55679                 goto remove_master;
55680         }
55682 -       init_completion(&xqspi->data_completion);
55684         xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk");
55685         if (IS_ERR(xqspi->refclk)) {
55686                 dev_err(dev, "ref_clk clock not found.\n");
55687                 ret = PTR_ERR(xqspi->refclk);
55688 -               goto clk_dis_pclk;
55689 +               goto remove_master;
55690         }
55692         ret = clk_prepare_enable(xqspi->pclk);
55693 @@ -1156,15 +1139,24 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
55694                 goto clk_dis_pclk;
55695         }
55697 +       init_completion(&xqspi->data_completion);
55699 +       mutex_init(&xqspi->op_lock);
55701         pm_runtime_use_autosuspend(&pdev->dev);
55702         pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
55703         pm_runtime_set_active(&pdev->dev);
55704         pm_runtime_enable(&pdev->dev);
55706 +       ret = pm_runtime_get_sync(&pdev->dev);
55707 +       if (ret < 0) {
55708 +               dev_err(&pdev->dev, "Failed to pm_runtime_get_sync: %d\n", ret);
55709 +               goto clk_dis_all;
55710 +       }
55712         /* QSPI controller initializations */
55713         zynqmp_qspi_init_hw(xqspi);
55715 -       pm_runtime_mark_last_busy(&pdev->dev);
55716 -       pm_runtime_put_autosuspend(&pdev->dev);
55717         xqspi->irq = platform_get_irq(pdev, 0);
55718         if (xqspi->irq <= 0) {
55719                 ret = -ENXIO;
55720 @@ -1178,6 +1170,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
55721                 goto clk_dis_all;
55722         }
55724 +       dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
55725         ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
55726         ctlr->num_chipselect = GQSPI_DEFAULT_NUM_CS;
55727         ctlr->mem_ops = &zynqmp_qspi_mem_ops;
55728 @@ -1187,6 +1180,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
55729         ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
55730                             SPI_TX_DUAL | SPI_TX_QUAD;
55731         ctlr->dev.of_node = np;
55732 +       ctlr->auto_runtime_pm = true;
55734         ret = devm_spi_register_controller(&pdev->dev, ctlr);
55735         if (ret) {
55736 @@ -1194,9 +1188,13 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
55737                 goto clk_dis_all;
55738         }
55740 +       pm_runtime_mark_last_busy(&pdev->dev);
55741 +       pm_runtime_put_autosuspend(&pdev->dev);
55743         return 0;
55745  clk_dis_all:
55746 +       pm_runtime_put_sync(&pdev->dev);
55747         pm_runtime_set_suspended(&pdev->dev);
55748         pm_runtime_disable(&pdev->dev);
55749         clk_disable_unprepare(xqspi->refclk);
55750 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
55751 index b08efe88ccd6..8da4fe475b84 100644
55752 --- a/drivers/spi/spi.c
55753 +++ b/drivers/spi/spi.c
55754 @@ -795,7 +795,7 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
55756  /*-------------------------------------------------------------------------*/
55758 -static void spi_set_cs(struct spi_device *spi, bool enable)
55759 +static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
55761         bool enable1 = enable;
55763 @@ -803,7 +803,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
55764          * Avoid calling into the driver (or doing delays) if the chip select
55765          * isn't actually changing from the last time this was called.
55766          */
55767 -       if ((spi->controller->last_cs_enable == enable) &&
55768 +       if (!force && (spi->controller->last_cs_enable == enable) &&
55769             (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
55770                 return;
55772 @@ -1253,7 +1253,7 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
55773         struct spi_statistics *statm = &ctlr->statistics;
55774         struct spi_statistics *stats = &msg->spi->statistics;
55776 -       spi_set_cs(msg->spi, true);
55777 +       spi_set_cs(msg->spi, true, false);
55779         SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
55780         SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
55781 @@ -1321,9 +1321,9 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
55782                                          &msg->transfers)) {
55783                                 keep_cs = true;
55784                         } else {
55785 -                               spi_set_cs(msg->spi, false);
55786 +                               spi_set_cs(msg->spi, false, false);
55787                                 _spi_transfer_cs_change_delay(msg, xfer);
55788 -                               spi_set_cs(msg->spi, true);
55789 +                               spi_set_cs(msg->spi, true, false);
55790                         }
55791                 }
55793 @@ -1332,7 +1332,7 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
55795  out:
55796         if (ret != 0 || !keep_cs)
55797 -               spi_set_cs(msg->spi, false);
55798 +               spi_set_cs(msg->spi, false, false);
55800         if (msg->status == -EINPROGRESS)
55801                 msg->status = ret;
55802 @@ -2496,6 +2496,7 @@ struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
55804         ctlr = __spi_alloc_controller(dev, size, slave);
55805         if (ctlr) {
55806 +               ctlr->devm_allocated = true;
55807                 *ptr = ctlr;
55808                 devres_add(dev, ptr);
55809         } else {
55810 @@ -2842,11 +2843,6 @@ int devm_spi_register_controller(struct device *dev,
55812  EXPORT_SYMBOL_GPL(devm_spi_register_controller);
55814 -static int devm_spi_match_controller(struct device *dev, void *res, void *ctlr)
55816 -       return *(struct spi_controller **)res == ctlr;
55819  static int __unregister(struct device *dev, void *null)
55821         spi_unregister_device(to_spi_device(dev));
55822 @@ -2893,8 +2889,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
55823         /* Release the last reference on the controller if its driver
55824          * has not yet been converted to devm_spi_alloc_master/slave().
55825          */
55826 -       if (!devres_find(ctlr->dev.parent, devm_spi_release_controller,
55827 -                        devm_spi_match_controller, ctlr))
55828 +       if (!ctlr->devm_allocated)
55829                 put_device(&ctlr->dev);
55831         /* free bus id */
55832 @@ -3423,11 +3418,11 @@ int spi_setup(struct spi_device *spi)
55833                  */
55834                 status = 0;
55836 -               spi_set_cs(spi, false);
55837 +               spi_set_cs(spi, false, true);
55838                 pm_runtime_mark_last_busy(spi->controller->dev.parent);
55839                 pm_runtime_put_autosuspend(spi->controller->dev.parent);
55840         } else {
55841 -               spi_set_cs(spi, false);
55842 +               spi_set_cs(spi, false, true);
55843         }
55845         mutex_unlock(&spi->controller->io_mutex);
55846 diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
55847 index 70498adb1575..5c35653ed36d 100644
55848 --- a/drivers/staging/android/Kconfig
55849 +++ b/drivers/staging/android/Kconfig
55850 @@ -4,7 +4,7 @@ menu "Android"
55851  if ANDROID
55853  config ASHMEM
55854 -       bool "Enable the Anonymous Shared Memory Subsystem"
55855 +       tristate "Enable the Anonymous Shared Memory Subsystem"
55856         depends on SHMEM
55857         help
55858           The ashmem subsystem is a new shared memory allocator, similar to
55859 diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
55860 index e9a55a5e6529..3d794218dd4b 100644
55861 --- a/drivers/staging/android/Makefile
55862 +++ b/drivers/staging/android/Makefile
55863 @@ -1,4 +1,5 @@
55864  # SPDX-License-Identifier: GPL-2.0
55865  ccflags-y += -I$(src)                  # needed for trace events
55867 -obj-$(CONFIG_ASHMEM)                   += ashmem.o
55868 +ashmem_linux-y                         += ashmem.o
55869 +obj-$(CONFIG_ASHMEM)                   += ashmem_linux.o
55870 diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
55871 index d66a64e42273..e28d9a2ce7f1 100644
55872 --- a/drivers/staging/android/ashmem.c
55873 +++ b/drivers/staging/android/ashmem.c
55874 @@ -19,6 +19,7 @@
55875  #include <linux/security.h>
55876  #include <linux/mm.h>
55877  #include <linux/mman.h>
55878 +#include <linux/module.h>
55879  #include <linux/uaccess.h>
55880  #include <linux/personality.h>
55881  #include <linux/bitops.h>
55882 @@ -964,4 +965,18 @@ static int __init ashmem_init(void)
55883  out:
55884         return ret;
55886 -device_initcall(ashmem_init);
55888 +static void __exit ashmem_exit(void)
55890 +       misc_deregister(&ashmem_misc);
55891 +       unregister_shrinker(&ashmem_shrinker);
55892 +       kmem_cache_destroy(ashmem_range_cachep);
55893 +       kmem_cache_destroy(ashmem_area_cachep);
55896 +module_init(ashmem_init);
55897 +module_exit(ashmem_exit);
55899 +MODULE_AUTHOR("Google, Inc.");
55900 +MODULE_DESCRIPTION("Driver for Android shared memory device");
55901 +MODULE_LICENSE("GPL v2");
55902 diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
55903 index 4f80a4991f95..c164c8524909 100644
55904 --- a/drivers/staging/comedi/drivers/ni_mio_common.c
55905 +++ b/drivers/staging/comedi/drivers/ni_mio_common.c
55906 @@ -4747,7 +4747,7 @@ static int cs5529_wait_for_idle(struct comedi_device *dev)
55907                 if ((status & NI67XX_CAL_STATUS_BUSY) == 0)
55908                         break;
55909                 set_current_state(TASK_INTERRUPTIBLE);
55910 -               if (schedule_timeout(1))
55911 +               if (schedule_min_hrtimeout())
55912                         return -EIO;
55913         }
55914         if (i == timeout) {
55915 diff --git a/drivers/staging/comedi/drivers/tests/ni_routes_test.c b/drivers/staging/comedi/drivers/tests/ni_routes_test.c
55916 index 4061b3b5f8e9..68defeb53de4 100644
55917 --- a/drivers/staging/comedi/drivers/tests/ni_routes_test.c
55918 +++ b/drivers/staging/comedi/drivers/tests/ni_routes_test.c
55919 @@ -217,7 +217,8 @@ void test_ni_assign_device_routes(void)
55920         const u8 *table, *oldtable;
55922         init_pci_6070e();
55923 -       ni_assign_device_routes(ni_eseries, pci_6070e, &private.routing_tables);
55924 +       ni_assign_device_routes(ni_eseries, pci_6070e, NULL,
55925 +                               &private.routing_tables);
55926         devroutes = private.routing_tables.valid_routes;
55927         table = private.routing_tables.route_values;
55929 @@ -253,7 +254,8 @@ void test_ni_assign_device_routes(void)
55930         olddevroutes = devroutes;
55931         oldtable = table;
55932         init_pci_6220();
55933 -       ni_assign_device_routes(ni_mseries, pci_6220, &private.routing_tables);
55934 +       ni_assign_device_routes(ni_mseries, pci_6220, NULL,
55935 +                               &private.routing_tables);
55936         devroutes = private.routing_tables.valid_routes;
55937         table = private.routing_tables.route_values;
55939 diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
55940 index c368082aae1a..0f4655d7d520 100644
55941 --- a/drivers/staging/fwserial/fwserial.c
55942 +++ b/drivers/staging/fwserial/fwserial.c
55943 @@ -1218,13 +1218,12 @@ static int get_serial_info(struct tty_struct *tty,
55944         struct fwtty_port *port = tty->driver_data;
55946         mutex_lock(&port->port.mutex);
55947 -       ss->type =  PORT_UNKNOWN;
55948 -       ss->line =  port->port.tty->index;
55949 -       ss->flags = port->port.flags;
55950 -       ss->xmit_fifo_size = FWTTY_PORT_TXFIFO_LEN;
55951 +       ss->line = port->index;
55952         ss->baud_base = 400000000;
55953 -       ss->close_delay = port->port.close_delay;
55954 +       ss->close_delay = jiffies_to_msecs(port->port.close_delay) / 10;
55955 +       ss->closing_wait = 3000;
55956         mutex_unlock(&port->port.mutex);
55958         return 0;
55961 @@ -1232,20 +1231,20 @@ static int set_serial_info(struct tty_struct *tty,
55962                            struct serial_struct *ss)
55964         struct fwtty_port *port = tty->driver_data;
55965 +       unsigned int cdelay;
55967 -       if (ss->irq != 0 || ss->port != 0 || ss->custom_divisor != 0 ||
55968 -           ss->baud_base != 400000000)
55969 -               return -EPERM;
55970 +       cdelay = msecs_to_jiffies(ss->close_delay * 10);
55972         mutex_lock(&port->port.mutex);
55973         if (!capable(CAP_SYS_ADMIN)) {
55974 -               if (((ss->flags & ~ASYNC_USR_MASK) !=
55975 +               if (cdelay != port->port.close_delay ||
55976 +                   ((ss->flags & ~ASYNC_USR_MASK) !=
55977                      (port->port.flags & ~ASYNC_USR_MASK))) {
55978                         mutex_unlock(&port->port.mutex);
55979                         return -EPERM;
55980                 }
55981         }
55982 -       port->port.close_delay = ss->close_delay * HZ / 100;
55983 +       port->port.close_delay = cdelay;
55984         mutex_unlock(&port->port.mutex);
55986         return 0;
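
The fwserial conversion above, like the greybus, moxa and mxser hunks later in
this patch, routes TIOCSSERIAL's close_delay, which user space expresses in
hundredths of a second, through jiffies_to_msecs()/msecs_to_jiffies() instead
of open-coded "* HZ / 100" arithmetic. A minimal sketch of the convention; the
wrapper names are illustrative, not part of the patch:

#include <linux/jiffies.h>

/* jiffies stored in tty_port -> centiseconds reported via TIOCGSERIAL */
static unsigned int close_delay_to_user(unsigned long delay_jiffies)
{
        return jiffies_to_msecs(delay_jiffies) / 10;
}

/* centiseconds from TIOCSSERIAL -> jiffies stored in tty_port */
static unsigned long close_delay_from_user(unsigned int delay_cs)
{
        return msecs_to_jiffies(delay_cs * 10);
}
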
55987 diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
55988 index 607378bfebb7..a520f7f213db 100644
55989 --- a/drivers/staging/greybus/uart.c
55990 +++ b/drivers/staging/greybus/uart.c
55991 @@ -614,10 +614,12 @@ static int get_serial_info(struct tty_struct *tty,
55992         ss->line = gb_tty->minor;
55993         ss->xmit_fifo_size = 16;
55994         ss->baud_base = 9600;
55995 -       ss->close_delay = gb_tty->port.close_delay / 10;
55996 +       ss->close_delay = jiffies_to_msecs(gb_tty->port.close_delay) / 10;
55997         ss->closing_wait =
55998                 gb_tty->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
55999 -               ASYNC_CLOSING_WAIT_NONE : gb_tty->port.closing_wait / 10;
56000 +               ASYNC_CLOSING_WAIT_NONE :
56001 +               jiffies_to_msecs(gb_tty->port.closing_wait) / 10;
56003         return 0;
56006 @@ -629,17 +631,16 @@ static int set_serial_info(struct tty_struct *tty,
56007         unsigned int close_delay;
56008         int retval = 0;
56010 -       close_delay = ss->close_delay * 10;
56011 +       close_delay = msecs_to_jiffies(ss->close_delay * 10);
56012         closing_wait = ss->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
56013 -                       ASYNC_CLOSING_WAIT_NONE : ss->closing_wait * 10;
56014 +                       ASYNC_CLOSING_WAIT_NONE :
56015 +                       msecs_to_jiffies(ss->closing_wait * 10);
56017         mutex_lock(&gb_tty->port.mutex);
56018         if (!capable(CAP_SYS_ADMIN)) {
56019                 if ((close_delay != gb_tty->port.close_delay) ||
56020                     (closing_wait != gb_tty->port.closing_wait))
56021                         retval = -EPERM;
56022 -               else
56023 -                       retval = -EOPNOTSUPP;
56024         } else {
56025                 gb_tty->port.close_delay = close_delay;
56026                 gb_tty->port.closing_wait = closing_wait;
56027 diff --git a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
56028 index 7ca7378b1859..0ab67b2aec67 100644
56029 --- a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
56030 +++ b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
56031 @@ -843,8 +843,10 @@ static int lm3554_probe(struct i2c_client *client)
56032                 return -ENOMEM;
56034         flash->pdata = lm3554_platform_data_func(client);
56035 -       if (IS_ERR(flash->pdata))
56036 -               return PTR_ERR(flash->pdata);
56037 +       if (IS_ERR(flash->pdata)) {
56038 +               err = PTR_ERR(flash->pdata);
56039 +               goto fail1;
56040 +       }
56042         v4l2_i2c_subdev_init(&flash->sd, client, &lm3554_ops);
56043         flash->sd.internal_ops = &lm3554_internal_ops;
56044 @@ -856,7 +858,7 @@ static int lm3554_probe(struct i2c_client *client)
56045                                    ARRAY_SIZE(lm3554_controls));
56046         if (ret) {
56047                 dev_err(&client->dev, "error initialize a ctrl_handler.\n");
56048 -               goto fail2;
56049 +               goto fail3;
56050         }
56052         for (i = 0; i < ARRAY_SIZE(lm3554_controls); i++)
56053 @@ -865,14 +867,14 @@ static int lm3554_probe(struct i2c_client *client)
56055         if (flash->ctrl_handler.error) {
56056                 dev_err(&client->dev, "ctrl_handler error.\n");
56057 -               goto fail2;
56058 +               goto fail3;
56059         }
56061         flash->sd.ctrl_handler = &flash->ctrl_handler;
56062         err = media_entity_pads_init(&flash->sd.entity, 0, NULL);
56063         if (err) {
56064                 dev_err(&client->dev, "error initialize a media entity.\n");
56065 -               goto fail1;
56066 +               goto fail2;
56067         }
56069         flash->sd.entity.function = MEDIA_ENT_F_FLASH;
56070 @@ -884,14 +886,15 @@ static int lm3554_probe(struct i2c_client *client)
56071         err = lm3554_gpio_init(client);
56072         if (err) {
56073                 dev_err(&client->dev, "gpio request/direction_output fail");
56074 -               goto fail2;
56075 +               goto fail3;
56076         }
56077         return atomisp_register_i2c_module(&flash->sd, NULL, LED_FLASH);
56078 -fail2:
56079 +fail3:
56080         media_entity_cleanup(&flash->sd.entity);
56081         v4l2_ctrl_handler_free(&flash->ctrl_handler);
56082 -fail1:
56083 +fail2:
56084         v4l2_device_unregister_subdev(&flash->sd);
56085 +fail1:
56086         kfree(flash);
56088         return err;
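
The relabelled error path above restores the usual probe unwind ordering: each
label undoes only what was initialized before the failing step, in reverse
order. A generic sketch of the shape, with placeholder stage functions rather
than the driver's:

static int stage_a_init(void) { return 0; }     /* placeholders only */
static int stage_b_init(void) { return 0; }
static int stage_c_init(void) { return 0; }
static void stage_b_undo(void) { }
static void stage_a_undo(void) { }

static int example_probe(void)
{
        int err;

        err = stage_a_init();
        if (err)
                return err;             /* nothing to unwind yet */

        err = stage_b_init();
        if (err)
                goto fail_a;

        err = stage_c_init();
        if (err)
                goto fail_b;

        return 0;

fail_b:
        stage_b_undo();
fail_a:
        stage_a_undo();
        return err;
}
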
56089 diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp_fops.c
56090 index 453bb6913550..f1e6b2597853 100644
56091 --- a/drivers/staging/media/atomisp/pci/atomisp_fops.c
56092 +++ b/drivers/staging/media/atomisp/pci/atomisp_fops.c
56093 @@ -221,6 +221,9 @@ int atomisp_q_video_buffers_to_css(struct atomisp_sub_device *asd,
56094         unsigned long irqflags;
56095         int err = 0;
56097 +       if (WARN_ON(css_pipe_id >= IA_CSS_PIPE_ID_NUM))
56098 +               return -EINVAL;
56100         while (pipe->buffers_in_css < ATOMISP_CSS_Q_DEPTH) {
56101                 struct videobuf_buffer *vb;
56103 diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
56104 index 2ae50decfc8b..9da82855552d 100644
56105 --- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
56106 +++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
56107 @@ -948,10 +948,8 @@ int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd,
56108                 dev_dbg(isp->dev, "allocating %d dis buffers\n", count);
56109                 while (count--) {
56110                         dis_buf = kzalloc(sizeof(struct atomisp_dis_buf), GFP_KERNEL);
56111 -                       if (!dis_buf) {
56112 -                               kfree(s3a_buf);
56113 +                       if (!dis_buf)
56114                                 goto error;
56115 -                       }
56116                         if (atomisp_css_allocate_stat_buffers(
56117                                 asd, stream_id, NULL, dis_buf, NULL)) {
56118                                 kfree(dis_buf);
56119 diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
56120 index f13af2329f48..0168f9839c90 100644
56121 --- a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
56122 +++ b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
56123 @@ -857,16 +857,17 @@ static void free_private_pages(struct hmm_buffer_object *bo,
56124         kfree(bo->page_obj);
56127 -static void free_user_pages(struct hmm_buffer_object *bo)
56128 +static void free_user_pages(struct hmm_buffer_object *bo,
56129 +                           unsigned int page_nr)
56131         int i;
56133         hmm_mem_stat.usr_size -= bo->pgnr;
56135         if (bo->mem_type == HMM_BO_MEM_TYPE_PFN) {
56136 -               unpin_user_pages(bo->pages, bo->pgnr);
56137 +               unpin_user_pages(bo->pages, page_nr);
56138         } else {
56139 -               for (i = 0; i < bo->pgnr; i++)
56140 +               for (i = 0; i < page_nr; i++)
56141                         put_page(bo->pages[i]);
56142         }
56143         kfree(bo->pages);
56144 @@ -942,6 +943,8 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
56145                 dev_err(atomisp_dev,
56146                         "get_user_pages err: bo->pgnr = %d, pgnr actually pinned = %d.\n",
56147                         bo->pgnr, page_nr);
56148 +               if (page_nr < 0)
56149 +                       page_nr = 0;
56150                 goto out_of_mem;
56151         }
56153 @@ -954,7 +957,7 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
56155  out_of_mem:
56157 -       free_user_pages(bo);
56158 +       free_user_pages(bo, page_nr);
56160         return -ENOMEM;
56162 @@ -1037,7 +1040,7 @@ void hmm_bo_free_pages(struct hmm_buffer_object *bo)
56163         if (bo->type == HMM_BO_PRIVATE)
56164                 free_private_pages(bo, &dynamic_pool, &reserved_pool);
56165         else if (bo->type == HMM_BO_USER)
56166 -               free_user_pages(bo);
56167 +               free_user_pages(bo, bo->pgnr);
56168         else
56169                 dev_err(atomisp_dev, "invalid buffer type.\n");
56170         mutex_unlock(&bo->mutex);
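
free_user_pages() now takes the number of pages that were actually pinned:
pin_user_pages() can pin fewer pages than requested or return a negative
errno, which the hunk above clamps to zero before the cleanup runs. A
simplified sketch of that rule, assuming writable mappings and no
driver-specific types:

#include <linux/errno.h>
#include <linux/mm.h>

static int pin_all_or_nothing(unsigned long start, unsigned long nr,
                              struct page **pages)
{
        long pinned = pin_user_pages(start, nr, FOLL_WRITE, pages, NULL);

        if (pinned == nr)
                return 0;

        /* Short pin or error: release only what was actually pinned. */
        if (pinned > 0)
                unpin_user_pages(pages, pinned);

        return pinned < 0 ? (int)pinned : -ENOMEM;
}
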
56171 diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
56172 index e10ce103a5b4..94a0467d673b 100644
56173 --- a/drivers/staging/media/imx/imx-media-capture.c
56174 +++ b/drivers/staging/media/imx/imx-media-capture.c
56175 @@ -557,7 +557,7 @@ static int capture_validate_fmt(struct capture_priv *priv)
56176                 priv->vdev.fmt.fmt.pix.height != f.fmt.pix.height ||
56177                 priv->vdev.cc->cs != cc->cs ||
56178                 priv->vdev.compose.width != compose.width ||
56179 -               priv->vdev.compose.height != compose.height) ? -EINVAL : 0;
56180 +               priv->vdev.compose.height != compose.height) ? -EPIPE : 0;
56183  static int capture_start_streaming(struct vb2_queue *vq, unsigned int count)
56184 diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
56185 index 60aa02eb7d2a..6d9c49b39531 100644
56186 --- a/drivers/staging/media/ipu3/ipu3-v4l2.c
56187 +++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
56188 @@ -686,6 +686,7 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
56190         dev_dbg(dev, "IPU3 pipe %u pipe_id = %u", pipe, css_pipe->pipe_id);
56192 +       css_q = imgu_node_to_queue(node);
56193         for (i = 0; i < IPU3_CSS_QUEUES; i++) {
56194                 unsigned int inode = imgu_map_node(imgu, i);
56196 @@ -693,6 +694,18 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
56197                 if (inode == IMGU_NODE_STAT_3A || inode == IMGU_NODE_PARAMS)
56198                         continue;
56200 +               /* CSS expects some format on OUT queue */
56201 +               if (i != IPU3_CSS_QUEUE_OUT &&
56202 +                   !imgu_pipe->nodes[inode].enabled) {
56203 +                       fmts[i] = NULL;
56204 +                       continue;
56205 +               }
56207 +               if (i == css_q) {
56208 +                       fmts[i] = &f->fmt.pix_mp;
56209 +                       continue;
56210 +               }
56212                 if (try) {
56213                         fmts[i] = kmemdup(&imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp,
56214                                           sizeof(struct v4l2_pix_format_mplane),
56215 @@ -705,10 +718,6 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
56216                         fmts[i] = &imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp;
56217                 }
56219 -               /* CSS expects some format on OUT queue */
56220 -               if (i != IPU3_CSS_QUEUE_OUT &&
56221 -                   !imgu_pipe->nodes[inode].enabled)
56222 -                       fmts[i] = NULL;
56223         }
56225         if (!try) {
56226 @@ -725,16 +734,10 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
56227                 rects[IPU3_CSS_RECT_GDC]->height = pad_fmt.height;
56228         }
56230 -       /*
56231 -        * imgu doesn't set the node to the value given by user
56232 -        * before we return success from this function, so set it here.
56233 -        */
56234 -       css_q = imgu_node_to_queue(node);
56235         if (!fmts[css_q]) {
56236                 ret = -EINVAL;
56237                 goto out;
56238         }
56239 -       *fmts[css_q] = f->fmt.pix_mp;
56241         if (try)
56242                 ret = imgu_css_fmt_try(&imgu->css, fmts, rects, pipe);
56243 @@ -745,15 +748,18 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
56244         if (ret < 0)
56245                 goto out;
56247 -       if (try)
56248 -               f->fmt.pix_mp = *fmts[css_q];
56249 -       else
56250 -               f->fmt = imgu_pipe->nodes[node].vdev_fmt.fmt;
56251 +       /*
56252 +        * imgu doesn't set the node to the value given by user
56253 +        * before we return success from this function, so set it here.
56254 +        */
56255 +       if (!try)
56256 +               imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp = f->fmt.pix_mp;
56258  out:
56259         if (try) {
56260                 for (i = 0; i < IPU3_CSS_QUEUES; i++)
56261 -                       kfree(fmts[i]);
56262 +                       if (i != css_q)
56263 +                               kfree(fmts[i]);
56264         }
56266         return ret;
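
The reordered imgu_fmt() above makes the ownership of the fmts[] array
explicit on the try path: every slot holds a kmemdup() copy except css_q,
which points at the caller's own f->fmt.pix_mp and must be neither freed nor
overwritten. A minimal sketch of the cleanup rule, with illustrative names:

#include <linux/slab.h>

static void free_try_fmts(void **fmts, unsigned int nr_queues,
                          unsigned int caller_q)
{
        unsigned int i;

        for (i = 0; i < nr_queues; i++)
                if (i != caller_q)
                        kfree(fmts[i]); /* kfree(NULL) is a no-op */
}
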
56267 diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
56268 index dae9073e7d3c..085397045b36 100644
56269 --- a/drivers/staging/media/omap4iss/iss.c
56270 +++ b/drivers/staging/media/omap4iss/iss.c
56271 @@ -1236,8 +1236,10 @@ static int iss_probe(struct platform_device *pdev)
56272         if (ret < 0)
56273                 goto error;
56275 -       if (!omap4iss_get(iss))
56276 +       if (!omap4iss_get(iss)) {
56277 +               ret = -EINVAL;
56278                 goto error;
56279 +       }
56281         ret = iss_reset(iss);
56282         if (ret < 0)
56283 diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
56284 index d3eb81ee8dc2..d821661d30f3 100644
56285 --- a/drivers/staging/media/rkvdec/rkvdec.c
56286 +++ b/drivers/staging/media/rkvdec/rkvdec.c
56287 @@ -55,16 +55,13 @@ static const struct v4l2_ctrl_ops rkvdec_ctrl_ops = {
56289  static const struct rkvdec_ctrl_desc rkvdec_h264_ctrl_descs[] = {
56290         {
56291 -               .mandatory = true,
56292                 .cfg.id = V4L2_CID_STATELESS_H264_DECODE_PARAMS,
56293         },
56294         {
56295 -               .mandatory = true,
56296                 .cfg.id = V4L2_CID_STATELESS_H264_SPS,
56297                 .cfg.ops = &rkvdec_ctrl_ops,
56298         },
56299         {
56300 -               .mandatory = true,
56301                 .cfg.id = V4L2_CID_STATELESS_H264_PPS,
56302         },
56303         {
56304 @@ -585,25 +582,7 @@ static const struct vb2_ops rkvdec_queue_ops = {
56306  static int rkvdec_request_validate(struct media_request *req)
56308 -       struct media_request_object *obj;
56309 -       const struct rkvdec_ctrls *ctrls;
56310 -       struct v4l2_ctrl_handler *hdl;
56311 -       struct rkvdec_ctx *ctx = NULL;
56312 -       unsigned int count, i;
56313 -       int ret;
56315 -       list_for_each_entry(obj, &req->objects, list) {
56316 -               if (vb2_request_object_is_buffer(obj)) {
56317 -                       struct vb2_buffer *vb;
56319 -                       vb = container_of(obj, struct vb2_buffer, req_obj);
56320 -                       ctx = vb2_get_drv_priv(vb->vb2_queue);
56321 -                       break;
56322 -               }
56323 -       }
56325 -       if (!ctx)
56326 -               return -EINVAL;
56327 +       unsigned int count;
56329         count = vb2_request_buffer_cnt(req);
56330         if (!count)
56331 @@ -611,31 +590,6 @@ static int rkvdec_request_validate(struct media_request *req)
56332         else if (count > 1)
56333                 return -EINVAL;
56335 -       hdl = v4l2_ctrl_request_hdl_find(req, &ctx->ctrl_hdl);
56336 -       if (!hdl)
56337 -               return -ENOENT;
56339 -       ret = 0;
56340 -       ctrls = ctx->coded_fmt_desc->ctrls;
56341 -       for (i = 0; ctrls && i < ctrls->num_ctrls; i++) {
56342 -               u32 id = ctrls->ctrls[i].cfg.id;
56343 -               struct v4l2_ctrl *ctrl;
56345 -               if (!ctrls->ctrls[i].mandatory)
56346 -                       continue;
56348 -               ctrl = v4l2_ctrl_request_hdl_ctrl_find(hdl, id);
56349 -               if (!ctrl) {
56350 -                       ret = -ENOENT;
56351 -                       break;
56352 -               }
56353 -       }
56355 -       v4l2_ctrl_request_hdl_put(hdl);
56357 -       if (ret)
56358 -               return ret;
56360         return vb2_request_validate(req);
56363 @@ -1118,7 +1072,7 @@ static struct platform_driver rkvdec_driver = {
56364         .remove = rkvdec_remove,
56365         .driver = {
56366                    .name = "rkvdec",
56367 -                  .of_match_table = of_match_ptr(of_rkvdec_match),
56368 +                  .of_match_table = of_rkvdec_match,
56369                    .pm = &rkvdec_pm_ops,
56370         },
56371  };
56372 diff --git a/drivers/staging/media/rkvdec/rkvdec.h b/drivers/staging/media/rkvdec/rkvdec.h
56373 index 77a137cca88e..52ac3874c5e5 100644
56374 --- a/drivers/staging/media/rkvdec/rkvdec.h
56375 +++ b/drivers/staging/media/rkvdec/rkvdec.h
56376 @@ -25,7 +25,6 @@
56377  struct rkvdec_ctx;
56379  struct rkvdec_ctrl_desc {
56380 -       u32 mandatory : 1;
56381         struct v4l2_ctrl_config cfg;
56382  };
56384 diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
56385 index 7718c561823f..92ace87c1c7d 100644
56386 --- a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
56387 +++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
56388 @@ -443,16 +443,17 @@
56389  #define VE_DEC_H265_STATUS_STCD_BUSY           BIT(21)
56390  #define VE_DEC_H265_STATUS_WB_BUSY             BIT(20)
56391  #define VE_DEC_H265_STATUS_BS_DMA_BUSY         BIT(19)
56392 -#define VE_DEC_H265_STATUS_IQIT_BUSY           BIT(18)
56393 +#define VE_DEC_H265_STATUS_IT_BUSY             BIT(18)
56394  #define VE_DEC_H265_STATUS_INTER_BUSY          BIT(17)
56395  #define VE_DEC_H265_STATUS_MORE_DATA           BIT(16)
56396 -#define VE_DEC_H265_STATUS_VLD_BUSY            BIT(14)
56397 -#define VE_DEC_H265_STATUS_DEBLOCKING_BUSY     BIT(13)
56398 -#define VE_DEC_H265_STATUS_DEBLOCKING_DRAM_BUSY        BIT(12)
56399 -#define VE_DEC_H265_STATUS_INTRA_BUSY          BIT(11)
56400 -#define VE_DEC_H265_STATUS_SAO_BUSY            BIT(10)
56401 -#define VE_DEC_H265_STATUS_MVP_BUSY            BIT(9)
56402 -#define VE_DEC_H265_STATUS_SWDEC_BUSY          BIT(8)
56403 +#define VE_DEC_H265_STATUS_DBLK_BUSY           BIT(15)
56404 +#define VE_DEC_H265_STATUS_IREC_BUSY           BIT(14)
56405 +#define VE_DEC_H265_STATUS_INTRA_BUSY          BIT(13)
56406 +#define VE_DEC_H265_STATUS_MCRI_BUSY           BIT(12)
56407 +#define VE_DEC_H265_STATUS_IQIT_BUSY           BIT(11)
56408 +#define VE_DEC_H265_STATUS_MVP_BUSY            BIT(10)
56409 +#define VE_DEC_H265_STATUS_IS_BUSY             BIT(9)
56410 +#define VE_DEC_H265_STATUS_VLD_BUSY            BIT(8)
56411  #define VE_DEC_H265_STATUS_OVER_TIME           BIT(3)
56412  #define VE_DEC_H265_STATUS_VLD_DATA_REQ                BIT(2)
56413  #define VE_DEC_H265_STATUS_ERROR               BIT(1)
56414 diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
56415 index 5516be3af898..c1d52190e1bd 100644
56416 --- a/drivers/staging/qlge/qlge_main.c
56417 +++ b/drivers/staging/qlge/qlge_main.c
56418 @@ -4550,7 +4550,7 @@ static int qlge_probe(struct pci_dev *pdev,
56419         struct net_device *ndev = NULL;
56420         struct devlink *devlink;
56421         static int cards_found;
56422 -       int err = 0;
56423 +       int err;
56425         devlink = devlink_alloc(&qlge_devlink_ops, sizeof(struct qlge_adapter));
56426         if (!devlink)
56427 @@ -4561,8 +4561,10 @@ static int qlge_probe(struct pci_dev *pdev,
56428         ndev = alloc_etherdev_mq(sizeof(struct qlge_netdev_priv),
56429                                  min(MAX_CPUS,
56430                                      netif_get_num_default_rss_queues()));
56431 -       if (!ndev)
56432 +       if (!ndev) {
56433 +               err = -ENOMEM;
56434                 goto devlink_free;
56435 +       }
56437         ndev_priv = netdev_priv(ndev);
56438         ndev_priv->qdev = qdev;
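
Dropping the "err = 0" initializer above lets the compiler warn about any path
that reaches the unwind labels without picking an error code, and the new
-ENOMEM assignment closes the one path that previously returned 0 on a failed
allocation. The shape, with a placeholder allocator:

#include <linux/errno.h>
#include <linux/stddef.h>

static void *example_alloc(void) { return NULL; }       /* placeholder */

static int example_probe(void)
{
        void *obj;
        int err;                        /* deliberately not pre-initialized */

        obj = example_alloc();
        if (!obj) {
                err = -ENOMEM;          /* every failure path must set err */
                goto out;
        }

        err = 0;
out:
        return err;
}
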
56439 diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
56440 index 9fc4adc83d77..b5a313649f44 100644
56441 --- a/drivers/staging/rtl8192u/r8192U_core.c
56442 +++ b/drivers/staging/rtl8192u/r8192U_core.c
56443 @@ -3210,7 +3210,7 @@ static void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
56444                              u32 *TotalRxDataNum)
56446         u16                     SlotIndex;
56447 -       u8                      i;
56448 +       u16                     i;
56450         *TotalRxBcnNum = 0;
56451         *TotalRxDataNum = 0;
56452 diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
56453 index 898add4d1fc8..0aa9dd467349 100644
56454 --- a/drivers/staging/rts5208/rtsx.c
56455 +++ b/drivers/staging/rts5208/rtsx.c
56456 @@ -477,7 +477,7 @@ static int rtsx_polling_thread(void *__dev)
56458         for (;;) {
56459                 set_current_state(TASK_INTERRUPTIBLE);
56460 -               schedule_timeout(msecs_to_jiffies(POLLING_INTERVAL));
56461 +               schedule_msec_hrtimeout((POLLING_INTERVAL));
56463                 /* lock the device pointers */
56464                 mutex_lock(&dev->dev_mutex);
56465 diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
56466 index 0433536930a9..d8726f28843f 100644
56467 --- a/drivers/staging/unisys/visornic/visornic_main.c
56468 +++ b/drivers/staging/unisys/visornic/visornic_main.c
56469 @@ -549,7 +549,7 @@ static int visornic_disable_with_timeout(struct net_device *netdev,
56470                 }
56471                 set_current_state(TASK_INTERRUPTIBLE);
56472                 spin_unlock_irqrestore(&devdata->priv_lock, flags);
56473 -               wait += schedule_timeout(msecs_to_jiffies(10));
56474 +               wait += schedule_msec_hrtimeout((10));
56475                 spin_lock_irqsave(&devdata->priv_lock, flags);
56476         }
56478 @@ -560,7 +560,7 @@ static int visornic_disable_with_timeout(struct net_device *netdev,
56479                 while (1) {
56480                         set_current_state(TASK_INTERRUPTIBLE);
56481                         spin_unlock_irqrestore(&devdata->priv_lock, flags);
56482 -                       schedule_timeout(msecs_to_jiffies(10));
56483 +                       schedule_msec_hrtimeout((10));
56484                         spin_lock_irqsave(&devdata->priv_lock, flags);
56485                         if (atomic_read(&devdata->usage))
56486                                 break;
56487 @@ -714,7 +714,7 @@ static int visornic_enable_with_timeout(struct net_device *netdev,
56488                 }
56489                 set_current_state(TASK_INTERRUPTIBLE);
56490                 spin_unlock_irqrestore(&devdata->priv_lock, flags);
56491 -               wait += schedule_timeout(msecs_to_jiffies(10));
56492 +               wait += schedule_msec_hrtimeout((10));
56493                 spin_lock_irqsave(&devdata->priv_lock, flags);
56494         }
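
schedule_msec_hrtimeout() and schedule_min_hrtimeout(), used in the staging
hunks above and below, are assumed here to come from the hrtimeout
infrastructure this patch introduces elsewhere; they keep the
schedule_timeout() calling convention (set the task state first) but sleep on
a high-resolution timer, so a 10 ms wait stays close to 10 ms even at HZ=100.
An illustrative wrapper under that assumption:

#include <linux/sched.h>

/* Sleep interruptibly for about @ms milliseconds; nonzero means woken early. */
static long example_hr_sleep(long ms)
{
        set_current_state(TASK_INTERRUPTIBLE);
        return schedule_msec_hrtimeout(ms);
}
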
56496 diff --git a/drivers/staging/wimax/i2400m/op-rfkill.c b/drivers/staging/wimax/i2400m/op-rfkill.c
56497 index fbddf2e18c14..44698a1aae87 100644
56498 --- a/drivers/staging/wimax/i2400m/op-rfkill.c
56499 +++ b/drivers/staging/wimax/i2400m/op-rfkill.c
56500 @@ -86,7 +86,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
56501         if (cmd == NULL)
56502                 goto error_alloc;
56503         cmd->hdr.type = cpu_to_le16(I2400M_MT_CMD_RF_CONTROL);
56504 -       cmd->hdr.length = sizeof(cmd->sw_rf);
56505 +       cmd->hdr.length = cpu_to_le16(sizeof(cmd->sw_rf));
56506         cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION);
56507         cmd->sw_rf.hdr.type = cpu_to_le16(I2400M_TLV_RF_OPERATION);
56508         cmd->sw_rf.hdr.length = cpu_to_le16(sizeof(cmd->sw_rf.status));
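
The one-liner above fixes an endianness bug: hdr.length is a little-endian
wire field, so a bare sizeof() assignment only happens to produce the right
bytes on little-endian hosts. Sketch of the convention, with a hypothetical
header struct:

#include <linux/types.h>
#include <asm/byteorder.h>

struct wire_hdr {                       /* hypothetical on-the-wire header */
        __le16 type;
        __le16 length;
};

static void wire_hdr_fill(struct wire_hdr *hdr, u16 type, u16 payload_len)
{
        hdr->type = cpu_to_le16(type);
        hdr->length = cpu_to_le16(payload_len); /* never a bare assignment */
}
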
56509 diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
56510 index 9ee797b8cb7e..508b49b0eaf5 100644
56511 --- a/drivers/target/target_core_pscsi.c
56512 +++ b/drivers/target/target_core_pscsi.c
56513 @@ -620,8 +620,9 @@ static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
56514                         unsigned char *buf;
56516                         buf = transport_kmap_data_sg(cmd);
56517 -                       if (!buf)
56518 +                       if (!buf) {
56519                                 ; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
56520 +                       }
56522                         if (cdb[0] == MODE_SENSE_10) {
56523                                 if (!(buf[3] & 0x80))
56524 diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
56525 index bf73cd5f4b04..6809c970be03 100644
56526 --- a/drivers/target/target_core_user.c
56527 +++ b/drivers/target/target_core_user.c
56528 @@ -1377,7 +1377,7 @@ static int tcmu_run_tmr_queue(struct tcmu_dev *udev)
56529         return 1;
56532 -static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
56533 +static bool tcmu_handle_completions(struct tcmu_dev *udev)
56535         struct tcmu_mailbox *mb;
56536         struct tcmu_cmd *cmd;
56537 @@ -1420,7 +1420,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
56538                         pr_err("cmd_id %u not found, ring is broken\n",
56539                                entry->hdr.cmd_id);
56540                         set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
56541 -                       break;
56542 +                       return false;
56543                 }
56545                 tcmu_handle_completion(cmd, entry);
56546 diff --git a/drivers/tee/amdtee/amdtee_private.h b/drivers/tee/amdtee/amdtee_private.h
56547 index 337c8d82f74e..6d0f7062bb87 100644
56548 --- a/drivers/tee/amdtee/amdtee_private.h
56549 +++ b/drivers/tee/amdtee/amdtee_private.h
56550 @@ -21,6 +21,7 @@
56551  #define TEEC_SUCCESS                   0x00000000
56552  #define TEEC_ERROR_GENERIC             0xFFFF0000
56553  #define TEEC_ERROR_BAD_PARAMETERS      0xFFFF0006
56554 +#define TEEC_ERROR_OUT_OF_MEMORY       0xFFFF000C
56555  #define TEEC_ERROR_COMMUNICATION       0xFFFF000E
56557  #define TEEC_ORIGIN_COMMS              0x00000002
56558 @@ -93,6 +94,18 @@ struct amdtee_shm_data {
56559         u32     buf_id;
56560  };
56562 +/**
56563 + * struct amdtee_ta_data - Keeps track of all TAs loaded in AMD Secure
56564 + *                        Processor
56565 + * @ta_handle: Handle to TA loaded in TEE
56566 + * @refcount:  Reference count for the loaded TA
56567 + */
56568 +struct amdtee_ta_data {
56569 +       struct list_head list_node;
56570 +       u32 ta_handle;
56571 +       u32 refcount;
56572 +};
56573 +
56574  #define LOWER_TWO_BYTE_MASK    0x0000FFFF
56576  /**
56577 diff --git a/drivers/tee/amdtee/call.c b/drivers/tee/amdtee/call.c
56578 index 096dd4d92d39..07f36ac834c8 100644
56579 --- a/drivers/tee/amdtee/call.c
56580 +++ b/drivers/tee/amdtee/call.c
56581 @@ -121,15 +121,69 @@ static int amd_params_to_tee_params(struct tee_param *tee, u32 count,
56582         return ret;
56583  }
56584 
56585 +static DEFINE_MUTEX(ta_refcount_mutex);
56586 +static struct list_head ta_list = LIST_HEAD_INIT(ta_list);
56588 +static u32 get_ta_refcount(u32 ta_handle)
56589 +{
56590 +       struct amdtee_ta_data *ta_data;
56591 +       u32 count = 0;
56593 +       /* Caller must hold a mutex */
56594 +       list_for_each_entry(ta_data, &ta_list, list_node)
56595 +               if (ta_data->ta_handle == ta_handle)
56596 +                       return ++ta_data->refcount;
56598 +       ta_data = kzalloc(sizeof(*ta_data), GFP_KERNEL);
56599 +       if (ta_data) {
56600 +               ta_data->ta_handle = ta_handle;
56601 +               ta_data->refcount = 1;
56602 +               count = ta_data->refcount;
56603 +               list_add(&ta_data->list_node, &ta_list);
56604 +       }
56606 +       return count;
56607 +}
56608 +
56609 +static u32 put_ta_refcount(u32 ta_handle)
56610 +{
56611 +       struct amdtee_ta_data *ta_data;
56612 +       u32 count = 0;
56614 +       /* Caller must hold a mutex */
56615 +       list_for_each_entry(ta_data, &ta_list, list_node)
56616 +               if (ta_data->ta_handle == ta_handle) {
56617 +                       count = --ta_data->refcount;
56618 +                       if (count == 0) {
56619 +                               list_del(&ta_data->list_node);
56620 +                               kfree(ta_data);
56621 +                               break;
56622 +                       }
56623 +               }
56625 +       return count;
56626 +}
56627 +
56628  int handle_unload_ta(u32 ta_handle)
56629  {
56630         struct tee_cmd_unload_ta cmd = {0};
56631 -       u32 status;
56632 +       u32 status, count;
56633         int ret;
56635         if (!ta_handle)
56636                 return -EINVAL;
56638 +       mutex_lock(&ta_refcount_mutex);
56640 +       count = put_ta_refcount(ta_handle);
56642 +       if (count) {
56643 +               pr_debug("unload ta: not unloading %u count %u\n",
56644 +                        ta_handle, count);
56645 +               ret = -EBUSY;
56646 +               goto unlock;
56647 +       }
56649         cmd.ta_handle = ta_handle;
56651         ret = psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA, (void *)&cmd,
56652 @@ -137,8 +191,12 @@ int handle_unload_ta(u32 ta_handle)
56653         if (!ret && status != 0) {
56654                 pr_err("unload ta: status = 0x%x\n", status);
56655                 ret = -EBUSY;
56656 +       } else {
56657 +               pr_debug("unloaded ta handle %u\n", ta_handle);
56658         }
56660 +unlock:
56661 +       mutex_unlock(&ta_refcount_mutex);
56662         return ret;
56665 @@ -340,7 +398,8 @@ int handle_open_session(struct tee_ioctl_open_session_arg *arg, u32 *info,
56667  int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
56669 -       struct tee_cmd_load_ta cmd = {0};
56670 +       struct tee_cmd_unload_ta unload_cmd = {};
56671 +       struct tee_cmd_load_ta load_cmd = {};
56672         phys_addr_t blob;
56673         int ret;
56675 @@ -353,21 +412,36 @@ int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
56676                 return -EINVAL;
56677         }
56679 -       cmd.hi_addr = upper_32_bits(blob);
56680 -       cmd.low_addr = lower_32_bits(blob);
56681 -       cmd.size = size;
56682 +       load_cmd.hi_addr = upper_32_bits(blob);
56683 +       load_cmd.low_addr = lower_32_bits(blob);
56684 +       load_cmd.size = size;
56686 -       ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, (void *)&cmd,
56687 -                                 sizeof(cmd), &arg->ret);
56688 +       mutex_lock(&ta_refcount_mutex);
56690 +       ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, (void *)&load_cmd,
56691 +                                 sizeof(load_cmd), &arg->ret);
56692         if (ret) {
56693                 arg->ret_origin = TEEC_ORIGIN_COMMS;
56694                 arg->ret = TEEC_ERROR_COMMUNICATION;
56695 -       } else {
56696 -               set_session_id(cmd.ta_handle, 0, &arg->session);
56697 +       } else if (arg->ret == TEEC_SUCCESS) {
56698 +               ret = get_ta_refcount(load_cmd.ta_handle);
56699 +               if (!ret) {
56700 +                       arg->ret_origin = TEEC_ORIGIN_COMMS;
56701 +                       arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
56703 +                       /* Unload the TA on error */
56704 +                       unload_cmd.ta_handle = load_cmd.ta_handle;
56705 +                       psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA,
56706 +                                           (void *)&unload_cmd,
56707 +                                           sizeof(unload_cmd), &ret);
56708 +               } else {
56709 +                       set_session_id(load_cmd.ta_handle, 0, &arg->session);
56710 +               }
56711         }
56712 +       mutex_unlock(&ta_refcount_mutex);
56714         pr_debug("load TA: TA handle = 0x%x, RO = 0x%x, ret = 0x%x\n",
56715 -                cmd.ta_handle, arg->ret_origin, arg->ret);
56716 +                load_cmd.ta_handle, arg->ret_origin, arg->ret);
56718         return 0;
56720 diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
56721 index 8a6a8f30bb42..da6b88e80dc0 100644
56722 --- a/drivers/tee/amdtee/core.c
56723 +++ b/drivers/tee/amdtee/core.c
56724 @@ -59,10 +59,9 @@ static void release_session(struct amdtee_session *sess)
56725                         continue;
56727                 handle_close_session(sess->ta_handle, sess->session_info[i]);
56728 +               handle_unload_ta(sess->ta_handle);
56729         }
56731 -       /* Unload Trusted Application once all sessions are closed */
56732 -       handle_unload_ta(sess->ta_handle);
56733         kfree(sess);
56736 @@ -224,8 +223,6 @@ static void destroy_session(struct kref *ref)
56737         struct amdtee_session *sess = container_of(ref, struct amdtee_session,
56738                                                    refcount);
56740 -       /* Unload the TA from TEE */
56741 -       handle_unload_ta(sess->ta_handle);
56742         mutex_lock(&session_list_mutex);
56743         list_del(&sess->list_node);
56744         mutex_unlock(&session_list_mutex);
56745 @@ -238,7 +235,7 @@ int amdtee_open_session(struct tee_context *ctx,
56747         struct amdtee_context_data *ctxdata = ctx->data;
56748         struct amdtee_session *sess = NULL;
56749 -       u32 session_info;
56750 +       u32 session_info, ta_handle;
56751         size_t ta_size;
56752         int rc, i;
56753         void *ta;
56754 @@ -259,11 +256,14 @@ int amdtee_open_session(struct tee_context *ctx,
56755         if (arg->ret != TEEC_SUCCESS)
56756                 goto out;
56758 +       ta_handle = get_ta_handle(arg->session);
56760         mutex_lock(&session_list_mutex);
56761         sess = alloc_session(ctxdata, arg->session);
56762         mutex_unlock(&session_list_mutex);
56764         if (!sess) {
56765 +               handle_unload_ta(ta_handle);
56766                 rc = -ENOMEM;
56767                 goto out;
56768         }
56769 @@ -277,6 +277,7 @@ int amdtee_open_session(struct tee_context *ctx,
56771         if (i >= TEE_NUM_SESSIONS) {
56772                 pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
56773 +               handle_unload_ta(ta_handle);
56774                 kref_put(&sess->refcount, destroy_session);
56775                 rc = -ENOMEM;
56776                 goto out;
56777 @@ -289,12 +290,13 @@ int amdtee_open_session(struct tee_context *ctx,
56778                 spin_lock(&sess->lock);
56779                 clear_bit(i, sess->sess_mask);
56780                 spin_unlock(&sess->lock);
56781 +               handle_unload_ta(ta_handle);
56782                 kref_put(&sess->refcount, destroy_session);
56783                 goto out;
56784         }
56786         sess->session_info[i] = session_info;
56787 -       set_session_id(sess->ta_handle, i, &arg->session);
56788 +       set_session_id(ta_handle, i, &arg->session);
56789  out:
56790         free_pages((u64)ta, get_order(ta_size));
56791         return rc;
56792 @@ -329,6 +331,7 @@ int amdtee_close_session(struct tee_context *ctx, u32 session)
56794         /* Close the session */
56795         handle_close_session(ta_handle, session_info);
56796 +       handle_unload_ta(ta_handle);
56798         kref_put(&sess->refcount, destroy_session);
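
The handle_unload_ta() calls added above all enforce one pairing rule:
handle_load_ta() now takes a TA reference per successful load, so every
failure path after it, and every session close, must drop exactly one
reference; the firmware-level unload happens only when the count reaches
zero. A schematic of the balance, with placeholder helpers rather than the
driver's:

#include <linux/types.h>

static int ta_get(u32 handle) { return 0; }     /* placeholder: refcount++ */
static void ta_put(u32 handle) { }      /* placeholder: refcount--, unload at 0 */
static int session_setup(void) { return 0; }    /* placeholder */

static int example_open_session(u32 handle)
{
        int ret = ta_get(handle);

        if (ret)
                return ret;

        ret = session_setup();
        if (ret) {
                ta_put(handle);         /* undo the reference on any failure */
                return ret;
        }

        return 0;
}
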
56800 diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
56801 index 319a1e701163..ddb8f9ecf307 100644
56802 --- a/drivers/tee/optee/core.c
56803 +++ b/drivers/tee/optee/core.c
56804 @@ -79,16 +79,6 @@ int optee_from_msg_param(struct tee_param *params, size_t num_params,
56805                                 return rc;
56806                         p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
56807                         p->u.memref.shm = shm;
56809 -                       /* Check that the memref is covered by the shm object */
56810 -                       if (p->u.memref.size) {
56811 -                               size_t o = p->u.memref.shm_offs +
56812 -                                          p->u.memref.size - 1;
56814 -                               rc = tee_shm_get_pa(shm, o, NULL);
56815 -                               if (rc)
56816 -                                       return rc;
56817 -                       }
56818                         break;
56819                 case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
56820                 case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
56821 diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
56822 index 10af3341e5ea..6956581ed7a4 100644
56823 --- a/drivers/thermal/cpufreq_cooling.c
56824 +++ b/drivers/thermal/cpufreq_cooling.c
56825 @@ -125,7 +125,7 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
56827         int i;
56829 -       for (i = cpufreq_cdev->max_level; i >= 0; i--) {
56830 +       for (i = cpufreq_cdev->max_level; i > 0; i--) {
56831                 if (power >= cpufreq_cdev->em->table[i].power)
56832                         break;
56833         }
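
The loop bound change above is the classic descending-search fix: stopping at
index 1 leaves entry 0 as the implicit floor, so a power request below every
table entry maps to the lowest level instead of walking off the front of the
array. Standalone sketch:

static int find_level(const unsigned int *power_table, int max_level,
                      unsigned int power)
{
        int i;

        for (i = max_level; i > 0; i--) /* "i >= 0" could leave i == -1 */
                if (power >= power_table[i])
                        break;

        return i;                       /* index 0 is the fallback */
}
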
56834 diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c
56835 index aaa07180ab48..645432ce6365 100644
56836 --- a/drivers/thermal/gov_fair_share.c
56837 +++ b/drivers/thermal/gov_fair_share.c
56838 @@ -82,6 +82,8 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
56839         int total_instance = 0;
56840         int cur_trip_level = get_trip_level(tz);
56842 +       mutex_lock(&tz->lock);
56844         list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
56845                 if (instance->trip != trip)
56846                         continue;
56847 @@ -110,6 +112,8 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
56848                 mutex_unlock(&instance->cdev->lock);
56849                 thermal_cdev_update(cdev);
56850         }
56852 +       mutex_unlock(&tz->lock);
56853         return 0;
56856 diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
56857 index d8ce3a687b80..3c4c0516e58a 100644
56858 --- a/drivers/thermal/qcom/tsens.c
56859 +++ b/drivers/thermal/qcom/tsens.c
56860 @@ -755,8 +755,10 @@ int __init init_common(struct tsens_priv *priv)
56861                 for (i = VER_MAJOR; i <= VER_STEP; i++) {
56862                         priv->rf[i] = devm_regmap_field_alloc(dev, priv->srot_map,
56863                                                               priv->fields[i]);
56864 -                       if (IS_ERR(priv->rf[i]))
56865 -                               return PTR_ERR(priv->rf[i]);
56866 +                       if (IS_ERR(priv->rf[i])) {
56867 +                               ret = PTR_ERR(priv->rf[i]);
56868 +                               goto err_put_device;
56869 +                       }
56870                 }
56871                 ret = regmap_field_read(priv->rf[VER_MINOR], &ver_minor);
56872                 if (ret)
56873 diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
56874 index 69ef12f852b7..5b76f9a1280d 100644
56875 --- a/drivers/thermal/thermal_of.c
56876 +++ b/drivers/thermal/thermal_of.c
56877 @@ -704,14 +704,17 @@ static int thermal_of_populate_bind_params(struct device_node *np,
56879         count = of_count_phandle_with_args(np, "cooling-device",
56880                                            "#cooling-cells");
56881 -       if (!count) {
56882 +       if (count <= 0) {
56883                 pr_err("Add a cooling_device property with at least one device\n");
56884 +               ret = -ENOENT;
56885                 goto end;
56886         }
56888         __tcbp = kcalloc(count, sizeof(*__tcbp), GFP_KERNEL);
56889 -       if (!__tcbp)
56890 +       if (!__tcbp) {
56891 +               ret = -ENOMEM;
56892                 goto end;
56893 +       }
56895         for (i = 0; i < count; i++) {
56896                 ret = of_parse_phandle_with_args(np, "cooling-device",
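
of_count_phandle_with_args() returns a negative errno as well as a count, so
the "count <= 0" test above covers both cases, and ret is now set before
every jump to the cleanup label. A short sketch of that return convention;
the property names match the hunk, the wrapper is illustrative:

#include <linux/errno.h>
#include <linux/of.h>

static int cooling_device_count(struct device_node *np)
{
        int count = of_count_phandle_with_args(np, "cooling-device",
                                               "#cooling-cells");

        if (count < 0)                  /* missing/malformed: negative errno */
                return count;
        if (count == 0)                 /* present but empty */
                return -ENOENT;

        return count;
}
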
56897 diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
56898 index 18b78ea110ef..ecda5e18d23f 100644
56899 --- a/drivers/tty/amiserial.c
56900 +++ b/drivers/tty/amiserial.c
56901 @@ -970,6 +970,7 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
56902         if (!serial_isroot()) {
56903                 if ((ss->baud_base != state->baud_base) ||
56904                     (ss->close_delay != port->close_delay) ||
56905 +                   (ss->closing_wait != port->closing_wait) ||
56906                     (ss->xmit_fifo_size != state->xmit_fifo_size) ||
56907                     ((ss->flags & ~ASYNC_USR_MASK) !=
56908                      (port->flags & ~ASYNC_USR_MASK))) {
56909 diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
56910 index 9f13f7d49dd7..f9f14104bd2c 100644
56911 --- a/drivers/tty/moxa.c
56912 +++ b/drivers/tty/moxa.c
56913 @@ -2040,7 +2040,7 @@ static int moxa_get_serial_info(struct tty_struct *tty,
56914         ss->line = info->port.tty->index,
56915         ss->flags = info->port.flags,
56916         ss->baud_base = 921600,
56917 -       ss->close_delay = info->port.close_delay;
56918 +       ss->close_delay = jiffies_to_msecs(info->port.close_delay) / 10;
56919         mutex_unlock(&info->port.mutex);
56920         return 0;
56922 @@ -2050,6 +2050,7 @@ static int moxa_set_serial_info(struct tty_struct *tty,
56923                 struct serial_struct *ss)
56925         struct moxa_port *info = tty->driver_data;
56926 +       unsigned int close_delay;
56928         if (tty->index == MAX_PORTS)
56929                 return -EINVAL;
56930 @@ -2061,19 +2062,24 @@ static int moxa_set_serial_info(struct tty_struct *tty,
56931                         ss->baud_base != 921600)
56932                 return -EPERM;
56934 +       close_delay = msecs_to_jiffies(ss->close_delay * 10);
56936         mutex_lock(&info->port.mutex);
56937         if (!capable(CAP_SYS_ADMIN)) {
56938 -               if (((ss->flags & ~ASYNC_USR_MASK) !=
56939 +               if (close_delay != info->port.close_delay ||
56940 +                   ss->type != info->type ||
56941 +                   ((ss->flags & ~ASYNC_USR_MASK) !=
56942                      (info->port.flags & ~ASYNC_USR_MASK))) {
56943                         mutex_unlock(&info->port.mutex);
56944                         return -EPERM;
56945                 }
56946 -       }
56947 -       info->port.close_delay = ss->close_delay * HZ / 100;
56948 +       } else {
56949 +               info->port.close_delay = close_delay;
56951 -       MoxaSetFifo(info, ss->type == PORT_16550A);
56952 +               MoxaSetFifo(info, ss->type == PORT_16550A);
56954 -       info->type = ss->type;
56955 +               info->type = ss->type;
56956 +       }
56957         mutex_unlock(&info->port.mutex);
56958         return 0;
56960 diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
56961 index 4203b64bccdb..2d8e76263a25 100644
56962 --- a/drivers/tty/mxser.c
56963 +++ b/drivers/tty/mxser.c
56964 @@ -1208,19 +1208,26 @@ static int mxser_get_serial_info(struct tty_struct *tty,
56966         struct mxser_port *info = tty->driver_data;
56967         struct tty_port *port = &info->port;
56968 +       unsigned int closing_wait, close_delay;
56970         if (tty->index == MXSER_PORTS)
56971                 return -ENOTTY;
56973         mutex_lock(&port->mutex);
56975 +       close_delay = jiffies_to_msecs(info->port.close_delay) / 10;
56976 +       closing_wait = info->port.closing_wait;
56977 +       if (closing_wait != ASYNC_CLOSING_WAIT_NONE)
56978 +               closing_wait = jiffies_to_msecs(closing_wait) / 10;
56980         ss->type = info->type,
56981         ss->line = tty->index,
56982         ss->port = info->ioaddr,
56983         ss->irq = info->board->irq,
56984         ss->flags = info->port.flags,
56985         ss->baud_base = info->baud_base,
56986 -       ss->close_delay = info->port.close_delay,
56987 -       ss->closing_wait = info->port.closing_wait,
56988 +       ss->close_delay = close_delay;
56989 +       ss->closing_wait = closing_wait;
56990         ss->custom_divisor = info->custom_divisor,
56991         mutex_unlock(&port->mutex);
56992         return 0;
56993 @@ -1233,7 +1240,7 @@ static int mxser_set_serial_info(struct tty_struct *tty,
56994         struct tty_port *port = &info->port;
56995         speed_t baud;
56996         unsigned long sl_flags;
56997 -       unsigned int flags;
56998 +       unsigned int flags, close_delay, closing_wait;
56999         int retval = 0;
57001         if (tty->index == MXSER_PORTS)
57002 @@ -1255,9 +1262,15 @@ static int mxser_set_serial_info(struct tty_struct *tty,
57004         flags = port->flags & ASYNC_SPD_MASK;
57006 +       close_delay = msecs_to_jiffies(ss->close_delay * 10);
57007 +       closing_wait = ss->closing_wait;
57008 +       if (closing_wait != ASYNC_CLOSING_WAIT_NONE)
57009 +               closing_wait = msecs_to_jiffies(closing_wait * 10);
57011         if (!capable(CAP_SYS_ADMIN)) {
57012                 if ((ss->baud_base != info->baud_base) ||
57013 -                               (ss->close_delay != info->port.close_delay) ||
57014 +                               (close_delay != info->port.close_delay) ||
57015 +                               (closing_wait != info->port.closing_wait) ||
57016                                 ((ss->flags & ~ASYNC_USR_MASK) != (info->port.flags & ~ASYNC_USR_MASK))) {
57017                         mutex_unlock(&port->mutex);
57018                         return -EPERM;
57019 @@ -1271,8 +1284,8 @@ static int mxser_set_serial_info(struct tty_struct *tty,
57020                  */
57021                 port->flags = ((port->flags & ~ASYNC_FLAGS) |
57022                                 (ss->flags & ASYNC_FLAGS));
57023 -               port->close_delay = ss->close_delay * HZ / 100;
57024 -               port->closing_wait = ss->closing_wait * HZ / 100;
57025 +               port->close_delay = close_delay;
57026 +               port->closing_wait = closing_wait;
57027                 if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST &&
57028                                 (ss->baud_base != info->baud_base ||
57029                                 ss->custom_divisor !=
57030 @@ -1284,11 +1297,11 @@ static int mxser_set_serial_info(struct tty_struct *tty,
57031                         baud = ss->baud_base / ss->custom_divisor;
57032                         tty_encode_baud_rate(tty, baud, baud);
57033                 }
57034 -       }
57036 -       info->type = ss->type;
57037 +               info->type = ss->type;
57039 -       process_txrx_fifo(info);
57040 +               process_txrx_fifo(info);
57041 +       }
57043         if (tty_port_initialized(port)) {
57044                 if (flags != (port->flags & ASYNC_SPD_MASK)) {
57045 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
57046 index 51dafc06f541..2406653d38b7 100644
57047 --- a/drivers/tty/n_gsm.c
57048 +++ b/drivers/tty/n_gsm.c
57049 @@ -2384,8 +2384,18 @@ static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
57050                 /* Don't register device 0 - this is the control channel and not
57051                    a usable tty interface */
57052                 base = mux_num_to_base(gsm); /* Base for this MUX */
57053 -               for (i = 1; i < NUM_DLCI; i++)
57054 -                       tty_register_device(gsm_tty_driver, base + i, NULL);
57055 +               for (i = 1; i < NUM_DLCI; i++) {
57056 +                       struct device *dev;
57058 +                       dev = tty_register_device(gsm_tty_driver,
57059 +                                                       base + i, NULL);
57060 +                       if (IS_ERR(dev)) {
57061 +                               for (i--; i >= 1; i--)
57062 +                                       tty_unregister_device(gsm_tty_driver,
57063 +                                                               base + i);
57064 +                               return PTR_ERR(dev);
57065 +                       }
57066 +               }
57067         }
57068         return ret;
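
The gsmld_attach_gsm() change above adds the standard partial-failure
rollback: when registering device i fails, devices i-1 down to the first are
unregistered before the error is returned. Generic shape, with placeholder
register/unregister stubs:

#include <linux/device.h>
#include <linux/err.h>

static struct device *register_one(int idx) { return NULL; }    /* placeholder */
static void unregister_one(int idx) { }                         /* placeholder */

static int register_range(int first, int last)
{
        int i;

        for (i = first; i <= last; i++) {
                struct device *dev = register_one(i);

                if (IS_ERR(dev)) {
                        while (--i >= first)    /* roll back in reverse */
                                unregister_one(i);
                        return PTR_ERR(dev);
                }
        }

        return 0;
}
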
57070 diff --git a/drivers/tty/serial/liteuart.c b/drivers/tty/serial/liteuart.c
57071 index 64842f3539e1..0b06770642cb 100644
57072 --- a/drivers/tty/serial/liteuart.c
57073 +++ b/drivers/tty/serial/liteuart.c
57074 @@ -270,8 +270,8 @@ static int liteuart_probe(struct platform_device *pdev)
57076         /* get membase */
57077         port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
57078 -       if (!port->membase)
57079 -               return -ENXIO;
57080 +       if (IS_ERR(port->membase))
57081 +               return PTR_ERR(port->membase);
57083         /* values not from device tree */
57084         port->dev = &pdev->dev;
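
devm_platform_get_and_ioremap_resource() reports failure as an
ERR_PTR()-encoded pointer, never NULL, which is what the liteuart fix above
corrects: the old "!port->membase" test could never fire. Minimal sketch of
the right check:

#include <linux/err.h>
#include <linux/platform_device.h>

static int example_map_regs(struct platform_device *pdev,
                            void __iomem **base)
{
        *base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
        if (IS_ERR(*base))              /* failure is ERR_PTR, not NULL */
                return PTR_ERR(*base);

        return 0;
}
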
57085 diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
57086 index e0c00a1b0763..51b0ecabf2ec 100644
57087 --- a/drivers/tty/serial/mvebu-uart.c
57088 +++ b/drivers/tty/serial/mvebu-uart.c
57089 @@ -818,9 +818,6 @@ static int mvebu_uart_probe(struct platform_device *pdev)
57090                 return -EINVAL;
57091         }
57093 -       if (!match)
57094 -               return -ENODEV;
57096         /* Assume that all UART ports have a DT alias or none has */
57097         id = of_alias_get_id(pdev->dev.of_node, "serial");
57098         if (!pdev->dev.of_node || id < 0)
57099 diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
57100 index 76b94d0ff586..84e8158088cd 100644
57101 --- a/drivers/tty/serial/omap-serial.c
57102 +++ b/drivers/tty/serial/omap-serial.c
57103 @@ -159,6 +159,8 @@ struct uart_omap_port {
57104         u32                     calc_latency;
57105         struct work_struct      qos_work;
57106         bool                    is_suspending;
57108 +       unsigned int            rs485_tx_filter_count;
57109  };
57111  #define to_uart_omap_port(p) ((container_of((p), struct uart_omap_port, port)))
57112 @@ -302,7 +304,8 @@ static void serial_omap_stop_tx(struct uart_port *port)
57113                         serial_out(up, UART_OMAP_SCR, up->scr);
57114                         res = (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) ?
57115                                 1 : 0;
57116 -                       if (gpiod_get_value(up->rts_gpiod) != res) {
57117 +                       if (up->rts_gpiod &&
57118 +                           gpiod_get_value(up->rts_gpiod) != res) {
57119                                 if (port->rs485.delay_rts_after_send > 0)
57120                                         mdelay(
57121                                         port->rs485.delay_rts_after_send);
57122 @@ -328,19 +331,6 @@ static void serial_omap_stop_tx(struct uart_port *port)
57123                 serial_out(up, UART_IER, up->ier);
57124         }
57126 -       if ((port->rs485.flags & SER_RS485_ENABLED) &&
57127 -           !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
57128 -               /*
57129 -                * Empty the RX FIFO, we are not interested in anything
57130 -                * received during the half-duplex transmission.
57131 -                */
57132 -               serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_RCVR);
57133 -               /* Re-enable RX interrupts */
57134 -               up->ier |= UART_IER_RLSI | UART_IER_RDI;
57135 -               up->port.read_status_mask |= UART_LSR_DR;
57136 -               serial_out(up, UART_IER, up->ier);
57137 -       }
57139         pm_runtime_mark_last_busy(up->dev);
57140         pm_runtime_put_autosuspend(up->dev);
57142 @@ -366,6 +356,10 @@ static void transmit_chars(struct uart_omap_port *up, unsigned int lsr)
57143                 serial_out(up, UART_TX, up->port.x_char);
57144                 up->port.icount.tx++;
57145                 up->port.x_char = 0;
57146 +               if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
57147 +                   !(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
57148 +                       up->rs485_tx_filter_count++;
57150                 return;
57151         }
57152         if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
57153 @@ -377,6 +371,10 @@ static void transmit_chars(struct uart_omap_port *up, unsigned int lsr)
57154                 serial_out(up, UART_TX, xmit->buf[xmit->tail]);
57155                 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
57156                 up->port.icount.tx++;
57157 +               if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
57158 +                   !(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
57159 +                       up->rs485_tx_filter_count++;
57161                 if (uart_circ_empty(xmit))
57162                         break;
57163         } while (--count > 0);
57164 @@ -411,7 +409,7 @@ static void serial_omap_start_tx(struct uart_port *port)
57166                 /* if rts not already enabled */
57167                 res = (port->rs485.flags & SER_RS485_RTS_ON_SEND) ? 1 : 0;
57168 -               if (gpiod_get_value(up->rts_gpiod) != res) {
57169 +               if (up->rts_gpiod && gpiod_get_value(up->rts_gpiod) != res) {
57170                         gpiod_set_value(up->rts_gpiod, res);
57171                         if (port->rs485.delay_rts_before_send > 0)
57172                                 mdelay(port->rs485.delay_rts_before_send);
57173 @@ -420,7 +418,7 @@ static void serial_omap_start_tx(struct uart_port *port)
57175         if ((port->rs485.flags & SER_RS485_ENABLED) &&
57176             !(port->rs485.flags & SER_RS485_RX_DURING_TX))
57177 -               serial_omap_stop_rx(port);
57178 +               up->rs485_tx_filter_count = 0;
57180         serial_omap_enable_ier_thri(up);
57181         pm_runtime_mark_last_busy(up->dev);
57182 @@ -491,8 +489,13 @@ static void serial_omap_rlsi(struct uart_omap_port *up, unsigned int lsr)
57183          * Read one data character out to avoid stalling the receiver according
57184          * to the table 23-246 of the omap4 TRM.
57185          */
57186 -       if (likely(lsr & UART_LSR_DR))
57187 +       if (likely(lsr & UART_LSR_DR)) {
57188                 serial_in(up, UART_RX);
57189 +               if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
57190 +                   !(up->port.rs485.flags & SER_RS485_RX_DURING_TX) &&
57191 +                   up->rs485_tx_filter_count)
57192 +                       up->rs485_tx_filter_count--;
57193 +       }
57195         up->port.icount.rx++;
57196         flag = TTY_NORMAL;
57197 @@ -543,6 +546,13 @@ static void serial_omap_rdi(struct uart_omap_port *up, unsigned int lsr)
57198                 return;
57200         ch = serial_in(up, UART_RX);
57201 +       if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
57202 +           !(up->port.rs485.flags & SER_RS485_RX_DURING_TX) &&
57203 +           up->rs485_tx_filter_count) {
57204 +               up->rs485_tx_filter_count--;
57205 +               return;
57206 +       }
57208         flag = TTY_NORMAL;
57209         up->port.icount.rx++;
57211 @@ -1407,18 +1417,13 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
57212         /* store new config */
57213         port->rs485 = *rs485;
57215 -       /*
57216 -        * Just as a precaution, only allow rs485
57217 -        * to be enabled if the gpio pin is valid
57218 -        */
57219         if (up->rts_gpiod) {
57220                 /* enable / disable rts */
57221                 val = (port->rs485.flags & SER_RS485_ENABLED) ?
57222                         SER_RS485_RTS_AFTER_SEND : SER_RS485_RTS_ON_SEND;
57223                 val = (port->rs485.flags & val) ? 1 : 0;
57224                 gpiod_set_value(up->rts_gpiod, val);
57225 -       } else
57226 -               port->rs485.flags &= ~SER_RS485_ENABLED;
57227 +       }
57229         /* Enable interrupts */
57230         up->ier = mode;
57231 diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
57232 index f86ec2d2635b..9adb8362578c 100644
57233 --- a/drivers/tty/serial/sc16is7xx.c
57234 +++ b/drivers/tty/serial/sc16is7xx.c
57235 @@ -1196,7 +1196,7 @@ static int sc16is7xx_probe(struct device *dev,
57236         ret = regmap_read(regmap,
57237                           SC16IS7XX_LSR_REG << SC16IS7XX_REG_SHIFT, &val);
57238         if (ret < 0)
57239 -               return ret;
57240 +               return -EPROBE_DEFER;
57242         /* Alloc port structure */
57243         s = devm_kzalloc(dev, struct_size(s, p, devtype->nr_uart), GFP_KERNEL);
57244 diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
57245 index ba31e97d3d96..43f02ed055d5 100644
57246 --- a/drivers/tty/serial/serial_core.c
57247 +++ b/drivers/tty/serial/serial_core.c
57248 @@ -1305,7 +1305,7 @@ static int uart_set_rs485_config(struct uart_port *port,
57249         unsigned long flags;
57251         if (!port->rs485_config)
57252 -               return -ENOIOCTLCMD;
57253 +               return -ENOTTY;
57255         if (copy_from_user(&rs485, rs485_user, sizeof(*rs485_user)))
57256                 return -EFAULT;
57257 @@ -1329,7 +1329,7 @@ static int uart_get_iso7816_config(struct uart_port *port,
57258         struct serial_iso7816 aux;
57260         if (!port->iso7816_config)
57261 -               return -ENOIOCTLCMD;
57262 +               return -ENOTTY;
57264         spin_lock_irqsave(&port->lock, flags);
57265         aux = port->iso7816;
57266 @@ -1349,7 +1349,7 @@ static int uart_set_iso7816_config(struct uart_port *port,
57267         unsigned long flags;
57269         if (!port->iso7816_config)
57270 -               return -ENOIOCTLCMD;
57271 +               return -ENOTTY;
57273         if (copy_from_user(&iso7816, iso7816_user, sizeof(*iso7816_user)))
57274                 return -EFAULT;
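
These serial_core hunks standardize the "feature not available" return value on -ENOTTY, the errno user space conventionally gets for an unsupported terminal ioctl (-ENOIOCTLCMD is an in-kernel value). A hedged user-space sketch of what a caller now observes:

#include <errno.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

/* Returns 0 on success, -1 if the port has no RS-485 support. */
static int set_rs485(int fd, struct serial_rs485 *conf)
{
        if (ioctl(fd, TIOCSRS485, conf) < 0) {
                if (errno == ENOTTY)
                        fprintf(stderr, "RS-485 not supported on this port\n");
                return -1;
        }
        return 0;
}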
57275 diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
57276 index b3675cf25a69..99dfa884cbef 100644
57277 --- a/drivers/tty/serial/stm32-usart.c
57278 +++ b/drivers/tty/serial/stm32-usart.c
57279 @@ -214,12 +214,14 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool threaded)
57280         struct tty_port *tport = &port->state->port;
57281         struct stm32_port *stm32_port = to_stm32_port(port);
57282         const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
57283 -       unsigned long c;
57284 +       unsigned long c, flags;
57285         u32 sr;
57286         char flag;
57288 -       if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
57289 -               pm_wakeup_event(tport->tty->dev, 0);
57290 +       if (threaded)
57291 +               spin_lock_irqsave(&port->lock, flags);
57292 +       else
57293 +               spin_lock(&port->lock);
57295         while (stm32_usart_pending_rx(port, &sr, &stm32_port->last_res,
57296                                       threaded)) {
57297 @@ -276,9 +278,12 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool threaded)
57298                 uart_insert_char(port, sr, USART_SR_ORE, c, flag);
57299         }
57301 -       spin_unlock(&port->lock);
57302 +       if (threaded)
57303 +               spin_unlock_irqrestore(&port->lock, flags);
57304 +       else
57305 +               spin_unlock(&port->lock);
57307         tty_flip_buffer_push(tport);
57308 -       spin_lock(&port->lock);
57311  static void stm32_usart_tx_dma_complete(void *arg)
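
The receive path above now takes port->lock itself: plain spin_lock() from hard-irq context (interrupts are already masked there) and spin_lock_irqsave() from the threaded handler, which runs in process context. A generic sketch of the pattern, not tied to this driver:

static void demo_receive(struct uart_port *port, bool threaded)
{
        unsigned long flags = 0;

        if (threaded)                   /* process context: mask irqs */
                spin_lock_irqsave(&port->lock, flags);
        else                            /* hard-irq context: already masked */
                spin_lock(&port->lock);

        /* ... drain the RX FIFO into the tty buffer ... */

        if (threaded)
                spin_unlock_irqrestore(&port->lock, flags);
        else
                spin_unlock(&port->lock);

        /* tty_flip_buffer_push() then runs without the port lock held. */
}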
57312 @@ -286,12 +291,16 @@ static void stm32_usart_tx_dma_complete(void *arg)
57313         struct uart_port *port = arg;
57314         struct stm32_port *stm32port = to_stm32_port(port);
57315         const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
57316 +       unsigned long flags;
57318 +       dmaengine_terminate_async(stm32port->tx_ch);
57319         stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
57320         stm32port->tx_dma_busy = false;
57322         /* Let's see if we have pending data to send */
57323 +       spin_lock_irqsave(&port->lock, flags);
57324         stm32_usart_transmit_chars(port);
57325 +       spin_unlock_irqrestore(&port->lock, flags);
57328  static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
57329 @@ -455,29 +464,34 @@ static void stm32_usart_transmit_chars(struct uart_port *port)
57330  static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
57332         struct uart_port *port = ptr;
57333 +       struct tty_port *tport = &port->state->port;
57334         struct stm32_port *stm32_port = to_stm32_port(port);
57335         const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
57336         u32 sr;
57338 -       spin_lock(&port->lock);
57340         sr = readl_relaxed(port->membase + ofs->isr);
57342         if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
57343                 writel_relaxed(USART_ICR_RTOCF,
57344                                port->membase + ofs->icr);
57346 -       if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG)
57347 +       if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
57348 +               /* Clear wake up flag and disable wake up interrupt */
57349                 writel_relaxed(USART_ICR_WUCF,
57350                                port->membase + ofs->icr);
57351 +               stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
57352 +               if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
57353 +                       pm_wakeup_event(tport->tty->dev, 0);
57354 +       }
57356         if ((sr & USART_SR_RXNE) && !(stm32_port->rx_ch))
57357                 stm32_usart_receive_chars(port, false);
57359 -       if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch))
57360 +       if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
57361 +               spin_lock(&port->lock);
57362                 stm32_usart_transmit_chars(port);
57364 -       spin_unlock(&port->lock);
57365 +               spin_unlock(&port->lock);
57366 +       }
57368         if (stm32_port->rx_ch)
57369                 return IRQ_WAKE_THREAD;
57370 @@ -490,13 +504,9 @@ static irqreturn_t stm32_usart_threaded_interrupt(int irq, void *ptr)
57371         struct uart_port *port = ptr;
57372         struct stm32_port *stm32_port = to_stm32_port(port);
57374 -       spin_lock(&port->lock);
57376         if (stm32_port->rx_ch)
57377                 stm32_usart_receive_chars(port, true);
57379 -       spin_unlock(&port->lock);
57381         return IRQ_HANDLED;
57384 @@ -505,7 +515,10 @@ static unsigned int stm32_usart_tx_empty(struct uart_port *port)
57385         struct stm32_port *stm32_port = to_stm32_port(port);
57386         const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
57388 -       return readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE;
57389 +       if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
57390 +               return TIOCSER_TEMT;
57392 +       return 0;
57395  static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
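
The tx_empty() change above distinguishes two status bits: TXE only means the transmit data register can accept another byte, while TC means both the data register and the shift register are empty. Reporting empty on TC is what TIOCSER_TEMT asks for; a condensed sketch, assuming those flag semantics:

/* "Transmitter empty" means nothing left in the shifter either. */
static unsigned int demo_tx_empty(u32 isr)
{
        return (isr & USART_SR_TC) ? TIOCSER_TEMT : 0;
}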
57396 @@ -634,6 +647,7 @@ static int stm32_usart_startup(struct uart_port *port)
57398         struct stm32_port *stm32_port = to_stm32_port(port);
57399         const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
57400 +       const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
57401         const char *name = to_platform_device(port->dev)->name;
57402         u32 val;
57403         int ret;
57404 @@ -646,21 +660,10 @@ static int stm32_usart_startup(struct uart_port *port)
57406         /* RX FIFO Flush */
57407         if (ofs->rqr != UNDEF_REG)
57408 -               stm32_usart_set_bits(port, ofs->rqr, USART_RQR_RXFRQ);
57410 -       /* Tx and RX FIFO configuration */
57411 -       if (stm32_port->fifoen) {
57412 -               val = readl_relaxed(port->membase + ofs->cr3);
57413 -               val &= ~(USART_CR3_TXFTCFG_MASK | USART_CR3_RXFTCFG_MASK);
57414 -               val |= USART_CR3_TXFTCFG_HALF << USART_CR3_TXFTCFG_SHIFT;
57415 -               val |= USART_CR3_RXFTCFG_HALF << USART_CR3_RXFTCFG_SHIFT;
57416 -               writel_relaxed(val, port->membase + ofs->cr3);
57417 -       }
57418 +               writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr);
57420 -       /* RX FIFO enabling */
57421 -       val = stm32_port->cr1_irq | USART_CR1_RE;
57422 -       if (stm32_port->fifoen)
57423 -               val |= USART_CR1_FIFOEN;
57424 +       /* RX enabling */
57425 +       val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit);
57426         stm32_usart_set_bits(port, ofs->cr1, val);
57428         return 0;
57429 @@ -691,6 +694,11 @@ static void stm32_usart_shutdown(struct uart_port *port)
57430         if (ret)
57431                 dev_err(port->dev, "Transmission is not complete\n");
57433 +       /* flush RX & TX FIFO */
57434 +       if (ofs->rqr != UNDEF_REG)
57435 +               writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
57436 +                              port->membase + ofs->rqr);
57438         stm32_usart_clr_bits(port, ofs->cr1, val);
57440         free_irq(port->irq, port);
57441 @@ -737,8 +745,9 @@ static void stm32_usart_set_termios(struct uart_port *port,
57442         unsigned int baud, bits;
57443         u32 usartdiv, mantissa, fraction, oversampling;
57444         tcflag_t cflag = termios->c_cflag;
57445 -       u32 cr1, cr2, cr3;
57446 +       u32 cr1, cr2, cr3, isr;
57447         unsigned long flags;
57448 +       int ret;
57450         if (!stm32_port->hw_flow_control)
57451                 cflag &= ~CRTSCTS;
57452 @@ -747,21 +756,36 @@ static void stm32_usart_set_termios(struct uart_port *port,
57454         spin_lock_irqsave(&port->lock, flags);
57456 +       ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
57457 +                                               isr,
57458 +                                               (isr & USART_SR_TC),
57459 +                                               10, 100000);
57461 +       /* Send the TC error message only when ISR_TC is not set. */
57462 +       if (ret)
57463 +               dev_err(port->dev, "Transmission is not complete\n");
57465         /* Stop serial port and reset value */
57466         writel_relaxed(0, port->membase + ofs->cr1);
57468         /* flush RX & TX FIFO */
57469         if (ofs->rqr != UNDEF_REG)
57470 -               stm32_usart_set_bits(port, ofs->rqr,
57471 -                                    USART_RQR_TXFRQ | USART_RQR_RXFRQ);
57472 +               writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
57473 +                              port->membase + ofs->rqr);
57475         cr1 = USART_CR1_TE | USART_CR1_RE;
57476         if (stm32_port->fifoen)
57477                 cr1 |= USART_CR1_FIFOEN;
57478         cr2 = 0;
57480 +       /* Tx and RX FIFO configuration */
57481         cr3 = readl_relaxed(port->membase + ofs->cr3);
57482 -       cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTCFG_MASK | USART_CR3_RXFTIE
57483 -               | USART_CR3_TXFTCFG_MASK;
57484 +       cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE;
57485 +       if (stm32_port->fifoen) {
57486 +               cr3 &= ~(USART_CR3_TXFTCFG_MASK | USART_CR3_RXFTCFG_MASK);
57487 +               cr3 |= USART_CR3_TXFTCFG_HALF << USART_CR3_TXFTCFG_SHIFT;
57488 +               cr3 |= USART_CR3_RXFTCFG_HALF << USART_CR3_RXFTCFG_SHIFT;
57489 +       }
57491         if (cflag & CSTOPB)
57492                 cr2 |= USART_CR2_STOP_2B;
57493 @@ -817,12 +841,6 @@ static void stm32_usart_set_termios(struct uart_port *port,
57494                 cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
57495         }
57497 -       /* Handle modem control interrupts */
57498 -       if (UART_ENABLE_MS(port, termios->c_cflag))
57499 -               stm32_usart_enable_ms(port);
57500 -       else
57501 -               stm32_usart_disable_ms(port);
57503         usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud);
57505         /*
57506 @@ -892,12 +910,24 @@ static void stm32_usart_set_termios(struct uart_port *port,
57507                 cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
57508         }
57510 +       /* Configure wake up from low power on start bit detection */
57511 +       if (stm32_port->wakeirq > 0) {
57512 +               cr3 &= ~USART_CR3_WUS_MASK;
57513 +               cr3 |= USART_CR3_WUS_START_BIT;
57514 +       }
57516         writel_relaxed(cr3, port->membase + ofs->cr3);
57517         writel_relaxed(cr2, port->membase + ofs->cr2);
57518         writel_relaxed(cr1, port->membase + ofs->cr1);
57520         stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
57521         spin_unlock_irqrestore(&port->lock, flags);
57523 +       /* Handle modem control interrupts */
57524 +       if (UART_ENABLE_MS(port, termios->c_cflag))
57525 +               stm32_usart_enable_ms(port);
57526 +       else
57527 +               stm32_usart_disable_ms(port);
57530  static const char *stm32_usart_type(struct uart_port *port)
57531 @@ -1252,10 +1282,6 @@ static int stm32_usart_serial_probe(struct platform_device *pdev)
57532                 device_set_wakeup_enable(&pdev->dev, false);
57533         }
57535 -       ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
57536 -       if (ret)
57537 -               goto err_wirq;
57539         ret = stm32_usart_of_dma_rx_probe(stm32port, pdev);
57540         if (ret)
57541                 dev_info(&pdev->dev, "interrupt mode used for rx (no dma)\n");
57542 @@ -1269,11 +1295,40 @@ static int stm32_usart_serial_probe(struct platform_device *pdev)
57543         pm_runtime_get_noresume(&pdev->dev);
57544         pm_runtime_set_active(&pdev->dev);
57545         pm_runtime_enable(&pdev->dev);
57547 +       ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
57548 +       if (ret)
57549 +               goto err_port;
57551         pm_runtime_put_sync(&pdev->dev);
57553         return 0;
57555 -err_wirq:
57556 +err_port:
57557 +       pm_runtime_disable(&pdev->dev);
57558 +       pm_runtime_set_suspended(&pdev->dev);
57559 +       pm_runtime_put_noidle(&pdev->dev);
57561 +       if (stm32port->rx_ch) {
57562 +               dmaengine_terminate_async(stm32port->rx_ch);
57563 +               dma_release_channel(stm32port->rx_ch);
57564 +       }
57566 +       if (stm32port->rx_dma_buf)
57567 +               dma_free_coherent(&pdev->dev,
57568 +                                 RX_BUF_L, stm32port->rx_buf,
57569 +                                 stm32port->rx_dma_buf);
57571 +       if (stm32port->tx_ch) {
57572 +               dmaengine_terminate_async(stm32port->tx_ch);
57573 +               dma_release_channel(stm32port->tx_ch);
57574 +       }
57576 +       if (stm32port->tx_dma_buf)
57577 +               dma_free_coherent(&pdev->dev,
57578 +                                 TX_BUF_L, stm32port->tx_buf,
57579 +                                 stm32port->tx_dma_buf);
57581         if (stm32port->wakeirq > 0)
57582                 dev_pm_clear_wake_irq(&pdev->dev);
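
Moving uart_add_one_port() to the end of probe means the new err_port label must unwind everything that already succeeded: runtime-PM state, both DMA channels, and both DMA buffers, in reverse order of setup. The general shape, sketched with hypothetical helpers (setup_dma, register_port, teardown_dma):

static int demo_probe(struct platform_device *pdev)
{
        int ret;

        ret = setup_dma(pdev);          /* hypothetical */
        if (ret)
                return ret;

        pm_runtime_enable(&pdev->dev);

        ret = register_port(pdev);      /* hypothetical; registered last */
        if (ret)
                goto err_port;

        return 0;

err_port:
        pm_runtime_disable(&pdev->dev); /* undo in reverse order */
        teardown_dma(pdev);
        return ret;
}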
57584 @@ -1295,11 +1350,20 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
57585         int err;
57587         pm_runtime_get_sync(&pdev->dev);
57588 +       err = uart_remove_one_port(&stm32_usart_driver, port);
57589 +       if (err)
57590 +               return err;
57592 +       pm_runtime_disable(&pdev->dev);
57593 +       pm_runtime_set_suspended(&pdev->dev);
57594 +       pm_runtime_put_noidle(&pdev->dev);
57596         stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
57598 -       if (stm32_port->rx_ch)
57599 +       if (stm32_port->rx_ch) {
57600 +               dmaengine_terminate_async(stm32_port->rx_ch);
57601                 dma_release_channel(stm32_port->rx_ch);
57602 +       }
57604         if (stm32_port->rx_dma_buf)
57605                 dma_free_coherent(&pdev->dev,
57606 @@ -1308,8 +1372,10 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
57608         stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
57610 -       if (stm32_port->tx_ch)
57611 +       if (stm32_port->tx_ch) {
57612 +               dmaengine_terminate_async(stm32_port->tx_ch);
57613                 dma_release_channel(stm32_port->tx_ch);
57614 +       }
57616         if (stm32_port->tx_dma_buf)
57617                 dma_free_coherent(&pdev->dev,
57618 @@ -1323,12 +1389,7 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
57620         stm32_usart_deinit_port(stm32_port);
57622 -       err = uart_remove_one_port(&stm32_usart_driver, port);
57624 -       pm_runtime_disable(&pdev->dev);
57625 -       pm_runtime_put_noidle(&pdev->dev);
57627 -       return err;
57628 +       return 0;
57631  #ifdef CONFIG_SERIAL_STM32_CONSOLE
57632 @@ -1436,23 +1497,20 @@ static void __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
57634         struct stm32_port *stm32_port = to_stm32_port(port);
57635         const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
57636 -       const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
57637 -       u32 val;
57639         if (stm32_port->wakeirq <= 0)
57640                 return;
57642 +       /*
57643 +        * Enable low-power wake-up and the wake-up irq if "enable" is
57644 +        * set; otherwise disable both.
57645 +        */
57646         if (enable) {
57647 -               stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
57648                 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
57649 -               val = readl_relaxed(port->membase + ofs->cr3);
57650 -               val &= ~USART_CR3_WUS_MASK;
57651 -               /* Enable Wake up interrupt from low power on start bit */
57652 -               val |= USART_CR3_WUS_START_BIT | USART_CR3_WUFIE;
57653 -               writel_relaxed(val, port->membase + ofs->cr3);
57654 -               stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
57655 +               stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
57656         } else {
57657                 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
57658 +               stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
57659         }
57662 diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
57663 index cb4f327c46db..94b568aa46bb 100644
57664 --- a/drivers/tty/serial/stm32-usart.h
57665 +++ b/drivers/tty/serial/stm32-usart.h
57666 @@ -127,9 +127,6 @@ struct stm32_usart_info stm32h7_info = {
57667  /* Dummy bits */
57668  #define USART_SR_DUMMY_RX      BIT(16)
57670 -/* USART_ICR (F7) */
57671 -#define USART_CR_TC            BIT(6)
57673  /* USART_DR */
57674  #define USART_DR_MASK          GENMASK(8, 0)
57676 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
57677 index 391bada4cedb..adbcbfa11b29 100644
57678 --- a/drivers/tty/tty_io.c
57679 +++ b/drivers/tty/tty_io.c
57680 @@ -2530,14 +2530,14 @@ static int send_break(struct tty_struct *tty, unsigned int duration)
57681   *     @p: pointer to result
57682   *
57683   *     Obtain the modem status bits from the tty driver if the feature
57684 - *     is supported. Return -EINVAL if it is not available.
57685 + *     is supported. Return -ENOTTY if it is not available.
57686   *
57687   *     Locking: none (up to the driver)
57688   */
57690  static int tty_tiocmget(struct tty_struct *tty, int __user *p)
57692 -       int retval = -EINVAL;
57693 +       int retval = -ENOTTY;
57695         if (tty->ops->tiocmget) {
57696                 retval = tty->ops->tiocmget(tty);
57697 @@ -2555,7 +2555,7 @@ static int tty_tiocmget(struct tty_struct *tty, int __user *p)
57698   *     @p: pointer to desired bits
57699   *
57700   *     Set the modem status bits from the tty driver if the feature
57701 - *     is supported. Return -EINVAL if it is not available.
57702 + *     is supported. Return -ENOTTY if it is not available.
57703   *
57704   *     Locking: none (up to the driver)
57705   */
57706 @@ -2567,7 +2567,7 @@ static int tty_tiocmset(struct tty_struct *tty, unsigned int cmd,
57707         unsigned int set, clear, val;
57709         if (tty->ops->tiocmset == NULL)
57710 -               return -EINVAL;
57711 +               return -ENOTTY;
57713         retval = get_user(val, p);
57714         if (retval)
57715 @@ -2607,7 +2607,7 @@ int tty_get_icount(struct tty_struct *tty,
57716         if (tty->ops->get_icount)
57717                 return tty->ops->get_icount(tty, icount);
57718         else
57719 -               return -EINVAL;
57720 +               return -ENOTTY;
57722  EXPORT_SYMBOL_GPL(tty_get_icount);
57724 diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
57725 index 4de1c6ddb8ff..803da2d111c8 100644
57726 --- a/drivers/tty/tty_ioctl.c
57727 +++ b/drivers/tty/tty_ioctl.c
57728 @@ -774,8 +774,8 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
57729         case TCSETX:
57730         case TCSETXW:
57731         case TCSETXF:
57732 -               return -EINVAL;
57733 -#endif         
57734 +               return -ENOTTY;
57735 +#endif
57736         case TIOCGSOFTCAR:
57737                 copy_termios(real_tty, &kterm);
57738                 ret = put_user((kterm.c_cflag & CLOCAL) ? 1 : 0,
57739 diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
57740 index 284b07224c55..53cbf2c3f033 100644
57741 --- a/drivers/tty/vt/vt.c
57742 +++ b/drivers/tty/vt/vt.c
57743 @@ -1171,7 +1171,7 @@ static inline int resize_screen(struct vc_data *vc, int width, int height,
57744         /* Resizes the resolution of the display adapter */
57745         int err = 0;
57747 -       if (vc->vc_mode != KD_GRAPHICS && vc->vc_sw->con_resize)
57748 +       if (vc->vc_sw->con_resize)
57749                 err = vc->vc_sw->con_resize(vc, width, height, user);
57751         return err;
57752 @@ -1381,6 +1381,7 @@ struct vc_data *vc_deallocate(unsigned int currcons)
57753                 atomic_notifier_call_chain(&vt_notifier_list, VT_DEALLOCATE, &param);
57754                 vcs_remove_sysfs(currcons);
57755                 visual_deinit(vc);
57756 +               con_free_unimap(vc);
57757                 put_pid(vc->vt_pid);
57758                 vc_uniscr_set(vc, NULL);
57759                 kfree(vc->vc_screenbuf);
57760 diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
57761 index 89aeaf3c1bca..0e0cd9e9e589 100644
57762 --- a/drivers/tty/vt/vt_ioctl.c
57763 +++ b/drivers/tty/vt/vt_ioctl.c
57764 @@ -671,21 +671,58 @@ static int vt_resizex(struct vc_data *vc, struct vt_consize __user *cs)
57765         if (copy_from_user(&v, cs, sizeof(struct vt_consize)))
57766                 return -EFAULT;
57768 -       if (v.v_vlin)
57769 -               pr_info_once("\"struct vt_consize\"->v_vlin is ignored. Please report if you need this.\n");
57770 -       if (v.v_clin)
57771 -               pr_info_once("\"struct vt_consize\"->v_clin is ignored. Please report if you need this.\n");
57772 +       /* FIXME: Should check the copies properly */
57773 +       if (!v.v_vlin)
57774 +               v.v_vlin = vc->vc_scan_lines;
57776 +       if (v.v_clin) {
57777 +               int rows = v.v_vlin / v.v_clin;
57778 +               if (v.v_rows != rows) {
57779 +                       if (v.v_rows) /* Parameters don't add up */
57780 +                               return -EINVAL;
57781 +                       v.v_rows = rows;
57782 +               }
57783 +       }
57785 +       if (v.v_vcol && v.v_ccol) {
57786 +               int cols = v.v_vcol / v.v_ccol;
57787 +               if (v.v_cols != cols) {
57788 +                       if (v.v_cols)
57789 +                               return -EINVAL;
57790 +                       v.v_cols = cols;
57791 +               }
57792 +       }
57794 +       if (v.v_clin > 32)
57795 +               return -EINVAL;
57797 -       console_lock();
57798         for (i = 0; i < MAX_NR_CONSOLES; i++) {
57799 -               vc = vc_cons[i].d;
57800 +               struct vc_data *vcp;
57802 -               if (vc) {
57803 -                       vc->vc_resize_user = 1;
57804 -                       vc_resize(vc, v.v_cols, v.v_rows);
57805 +               if (!vc_cons[i].d)
57806 +                       continue;
57807 +               console_lock();
57808 +               vcp = vc_cons[i].d;
57809 +               if (vcp) {
57810 +                       int ret;
57811 +                       int save_scan_lines = vcp->vc_scan_lines;
57812 +                       int save_cell_height = vcp->vc_cell_height;
57814 +                       if (v.v_vlin)
57815 +                               vcp->vc_scan_lines = v.v_vlin;
57816 +                       if (v.v_clin)
57817 +                               vcp->vc_cell_height = v.v_clin;
57818 +                       vcp->vc_resize_user = 1;
57819 +                       ret = vc_resize(vcp, v.v_cols, v.v_rows);
57820 +                       if (ret) {
57821 +                               vcp->vc_scan_lines = save_scan_lines;
57822 +                               vcp->vc_cell_height = save_cell_height;
57823 +                               console_unlock();
57824 +                               return ret;
57825 +                       }
57826                 }
57827 +               console_unlock();
57828         }
57829 -       console_unlock();
57831         return 0;
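
The rewritten vt_resizex() treats the console geometry as a product: rows = v_vlin / v_clin and cols = v_vcol / v_ccol. A caller may supply the quotient, the factors, or both, but if both are given they must agree. A standalone sketch of that consistency rule:

/* total: scan lines (or pixel columns); per_cell: lines (or pixels)
 * per character cell; *count: rows (or cols) requested by the caller. */
static int check_dim(unsigned int total, unsigned int per_cell,
                     unsigned int *count)
{
        if (!per_cell)
                return 0;               /* nothing to derive or check */
        if (*count && *count != total / per_cell)
                return -EINVAL;         /* parameters don't add up */
        *count = total / per_cell;      /* derive when left at zero */
        return 0;
}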
57833 diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
57834 index 0330ba99730e..652fe2547587 100644
57835 --- a/drivers/uio/uio_hv_generic.c
57836 +++ b/drivers/uio/uio_hv_generic.c
57837 @@ -291,13 +291,15 @@ hv_uio_probe(struct hv_device *dev,
57838         pdata->recv_buf = vzalloc(RECV_BUFFER_SIZE);
57839         if (pdata->recv_buf == NULL) {
57840                 ret = -ENOMEM;
57841 -               goto fail_close;
57842 +               goto fail_free_ring;
57843         }
57845         ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
57846                                     RECV_BUFFER_SIZE, &pdata->recv_gpadl);
57847 -       if (ret)
57848 +       if (ret) {
57849 +               vfree(pdata->recv_buf);
57850                 goto fail_close;
57851 +       }
57853         /* put Global Physical Address Label in name */
57854         snprintf(pdata->recv_name, sizeof(pdata->recv_name),
57855 @@ -316,8 +318,10 @@ hv_uio_probe(struct hv_device *dev,
57857         ret = vmbus_establish_gpadl(channel, pdata->send_buf,
57858                                     SEND_BUFFER_SIZE, &pdata->send_gpadl);
57859 -       if (ret)
57860 +       if (ret) {
57861 +               vfree(pdata->send_buf);
57862                 goto fail_close;
57863 +       }
57865         snprintf(pdata->send_name, sizeof(pdata->send_name),
57866                  "send:%u", pdata->send_gpadl);
57867 @@ -347,6 +351,8 @@ hv_uio_probe(struct hv_device *dev,
57869  fail_close:
57870         hv_uio_cleanup(dev, pdata);
57871 +fail_free_ring:
57872 +       vmbus_free_ring(dev->channel);
57874         return ret;
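
The uio_hv_generic hunks plug leaks on the probe error paths: a buffer from vzalloc() was never freed when vmbus_establish_gpadl() failed, and the ring buffer leaked on early exit. A fragment sketching the pairing rule the fix follows:

/* Pair every successful allocation with a release reachable from
 * every later failure point. */
buf = vzalloc(RECV_BUFFER_SIZE);
if (!buf) {
        ret = -ENOMEM;
        goto fail_free_ring;            /* ring was allocated earlier */
}

ret = vmbus_establish_gpadl(channel, buf, RECV_BUFFER_SIZE, &gpadl);
if (ret) {
        vfree(buf);                     /* gpadl failed: buf is still ours */
        goto fail_close;
}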
57876 diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c
57877 index c7d681fef198..3bb0b0075467 100644
57878 --- a/drivers/uio/uio_pci_generic.c
57879 +++ b/drivers/uio/uio_pci_generic.c
57880 @@ -82,7 +82,7 @@ static int probe(struct pci_dev *pdev,
57881         }
57883         if (pdev->irq && !pci_intx_mask_supported(pdev))
57884 -               return -ENOMEM;
57885 +               return -ENODEV;
57887         gdev = devm_kzalloc(&pdev->dev, sizeof(struct uio_pci_generic_dev), GFP_KERNEL);
57888         if (!gdev)
57889 diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
57890 index d7d4bdd57f46..56707b6b0f57 100644
57891 --- a/drivers/usb/cdns3/cdnsp-gadget.c
57892 +++ b/drivers/usb/cdns3/cdnsp-gadget.c
57893 @@ -727,7 +727,7 @@ int cdnsp_reset_device(struct cdnsp_device *pdev)
57894          * are in Disabled state.
57895          */
57896         for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i)
57897 -               pdev->eps[i].ep_state |= EP_STOPPED;
57898 +               pdev->eps[i].ep_state |= EP_STOPPED | EP_UNCONFIGURED;
57900         trace_cdnsp_handle_cmd_reset_dev(slot_ctx);
57902 @@ -942,6 +942,7 @@ static int cdnsp_gadget_ep_enable(struct usb_ep *ep,
57904         pep = to_cdnsp_ep(ep);
57905         pdev = pep->pdev;
57906 +       pep->ep_state &= ~EP_UNCONFIGURED;
57908         if (dev_WARN_ONCE(pdev->dev, pep->ep_state & EP_ENABLED,
57909                           "%s is already enabled\n", pep->name))
57910 @@ -1023,9 +1024,13 @@ static int cdnsp_gadget_ep_disable(struct usb_ep *ep)
57911                 goto finish;
57912         }
57914 -       cdnsp_cmd_stop_ep(pdev, pep);
57915         pep->ep_state |= EP_DIS_IN_RROGRESS;
57916 -       cdnsp_cmd_flush_ep(pdev, pep);
57918 +       /* Endpoint was unconfigured by Reset Device command. */
57919 +       if (!(pep->ep_state & EP_UNCONFIGURED)) {
57920 +               cdnsp_cmd_stop_ep(pdev, pep);
57921 +               cdnsp_cmd_flush_ep(pdev, pep);
57922 +       }
57924         /* Remove all queued USB requests. */
57925         while (!list_empty(&pep->pending_list)) {
57926 @@ -1043,10 +1048,12 @@ static int cdnsp_gadget_ep_disable(struct usb_ep *ep)
57928         cdnsp_endpoint_zero(pdev, pep);
57930 -       ret = cdnsp_update_eps_configuration(pdev, pep);
57931 +       if (!(pep->ep_state & EP_UNCONFIGURED))
57932 +               ret = cdnsp_update_eps_configuration(pdev, pep);
57934         cdnsp_free_endpoint_rings(pdev, pep);
57936 -       pep->ep_state &= ~EP_ENABLED;
57937 +       pep->ep_state &= ~(EP_ENABLED | EP_UNCONFIGURED);
57938         pep->ep_state |= EP_STOPPED;
57940  finish:
57941 diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h
57942 index 6bbb26548c04..783ca8ffde00 100644
57943 --- a/drivers/usb/cdns3/cdnsp-gadget.h
57944 +++ b/drivers/usb/cdns3/cdnsp-gadget.h
57945 @@ -835,6 +835,7 @@ struct cdnsp_ep {
57946  #define EP_WEDGE               BIT(4)
57947  #define EP0_HALTED_STATUS      BIT(5)
57948  #define EP_HAS_STREAMS         BIT(6)
57949 +#define EP_UNCONFIGURED                BIT(7)
57951         bool skip;
57952  };
57953 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
57954 index 3fda1ec961d7..c103961c3fae 100644
57955 --- a/drivers/usb/class/cdc-acm.c
57956 +++ b/drivers/usb/class/cdc-acm.c
57957 @@ -929,8 +929,7 @@ static int get_serial_info(struct tty_struct *tty, struct serial_struct *ss)
57959         struct acm *acm = tty->driver_data;
57961 -       ss->xmit_fifo_size = acm->writesize;
57962 -       ss->baud_base = le32_to_cpu(acm->line.dwDTERate);
57963 +       ss->line = acm->minor;
57964         ss->close_delay = jiffies_to_msecs(acm->port.close_delay) / 10;
57965         ss->closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
57966                                 ASYNC_CLOSING_WAIT_NONE :
57967 @@ -942,7 +941,6 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
57969         struct acm *acm = tty->driver_data;
57970         unsigned int closing_wait, close_delay;
57971 -       unsigned int old_closing_wait, old_close_delay;
57972         int retval = 0;
57974         close_delay = msecs_to_jiffies(ss->close_delay * 10);
57975 @@ -950,20 +948,12 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
57976                         ASYNC_CLOSING_WAIT_NONE :
57977                         msecs_to_jiffies(ss->closing_wait * 10);
57979 -       /* we must redo the rounding here, so that the values match */
57980 -       old_close_delay = jiffies_to_msecs(acm->port.close_delay) / 10;
57981 -       old_closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
57982 -                               ASYNC_CLOSING_WAIT_NONE :
57983 -                               jiffies_to_msecs(acm->port.closing_wait) / 10;
57985         mutex_lock(&acm->port.mutex);
57987         if (!capable(CAP_SYS_ADMIN)) {
57988 -               if ((ss->close_delay != old_close_delay) ||
57989 -                   (ss->closing_wait != old_closing_wait))
57990 +               if ((close_delay != acm->port.close_delay) ||
57991 +                   (closing_wait != acm->port.closing_wait))
57992                         retval = -EPERM;
57993 -               else
57994 -                       retval = -EOPNOTSUPP;
57995         } else {
57996                 acm->port.close_delay  = close_delay;
57997                 acm->port.closing_wait = closing_wait;
57998 @@ -1634,12 +1624,13 @@ static int acm_resume(struct usb_interface *intf)
57999         struct urb *urb;
58000         int rv = 0;
58002 -       acm_unpoison_urbs(acm);
58003         spin_lock_irq(&acm->write_lock);
58005         if (--acm->susp_count)
58006                 goto out;
58008 +       acm_unpoison_urbs(acm);
58010         if (tty_port_initialized(&acm->port)) {
58011                 rv = usb_submit_urb(acm->ctrlurb, GFP_ATOMIC);
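
The acm_resume() hunk moves the unpoison under the write lock and after the suspend-count decrement, so URBs are re-armed only on the last resumer. A sketch of the counting idea (demo_resume is hypothetical; the lock and fields mirror the driver):

static int demo_resume(struct acm *acm)
{
        spin_lock_irq(&acm->write_lock);

        if (--acm->susp_count) {        /* other suspenders remain */
                spin_unlock_irq(&acm->write_lock);
                return 0;
        }

        acm_unpoison_urbs(acm);         /* last resume: re-arm URBs */
        /* ... resubmit control and read URBs ... */

        spin_unlock_irq(&acm->write_lock);
        return 0;
}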
58013 diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
58014 index 508b1c3f8b73..d1e4a7379beb 100644
58015 --- a/drivers/usb/class/cdc-wdm.c
58016 +++ b/drivers/usb/class/cdc-wdm.c
58017 @@ -321,12 +321,23 @@ static void wdm_int_callback(struct urb *urb)
58021 -static void kill_urbs(struct wdm_device *desc)
58022 +static void poison_urbs(struct wdm_device *desc)
58024         /* the order here is essential */
58025 -       usb_kill_urb(desc->command);
58026 -       usb_kill_urb(desc->validity);
58027 -       usb_kill_urb(desc->response);
58028 +       usb_poison_urb(desc->command);
58029 +       usb_poison_urb(desc->validity);
58030 +       usb_poison_urb(desc->response);
58033 +static void unpoison_urbs(struct wdm_device *desc)
58035 +       /*
58036 +        *  the order here is not essential
58037 +        *  it is symmetrical just to be nice
58038 +        */
58039 +       usb_unpoison_urb(desc->response);
58040 +       usb_unpoison_urb(desc->validity);
58041 +       usb_unpoison_urb(desc->command);
58044  static void free_urbs(struct wdm_device *desc)
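
The switch from usb_kill_urb() to usb_poison_urb() matters because both cancel and wait, but a poisoned URB additionally rejects resubmission until usb_unpoison_urb(); that closes the race where deferred work re-arms an URB in the middle of teardown. A fragment of the usage pattern:

usb_poison_urb(urb);            /* cancel and block resubmission */
cancel_work_sync(&work);        /* work can no longer re-arm urb */
/* ... device is quiesced here ... */
usb_unpoison_urb(urb);          /* only on paths that resume I/O */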
58045 @@ -741,11 +752,12 @@ static int wdm_release(struct inode *inode, struct file *file)
58046         if (!desc->count) {
58047                 if (!test_bit(WDM_DISCONNECTING, &desc->flags)) {
58048                         dev_dbg(&desc->intf->dev, "wdm_release: cleanup\n");
58049 -                       kill_urbs(desc);
58050 +                       poison_urbs(desc);
58051                         spin_lock_irq(&desc->iuspin);
58052                         desc->resp_count = 0;
58053                         spin_unlock_irq(&desc->iuspin);
58054                         desc->manage_power(desc->intf, 0);
58055 +                       unpoison_urbs(desc);
58056                 } else {
58057                         /* must avoid dev_printk here as desc->intf is invalid */
58058                         pr_debug(KBUILD_MODNAME " %s: device gone - cleaning up\n", __func__);
58059 @@ -1037,9 +1049,9 @@ static void wdm_disconnect(struct usb_interface *intf)
58060         wake_up_all(&desc->wait);
58061         mutex_lock(&desc->rlock);
58062         mutex_lock(&desc->wlock);
58063 +       poison_urbs(desc);
58064         cancel_work_sync(&desc->rxwork);
58065         cancel_work_sync(&desc->service_outs_intr);
58066 -       kill_urbs(desc);
58067         mutex_unlock(&desc->wlock);
58068         mutex_unlock(&desc->rlock);
58070 @@ -1080,9 +1092,10 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
58071                 set_bit(WDM_SUSPENDING, &desc->flags);
58072                 spin_unlock_irq(&desc->iuspin);
58073                 /* callback submits work - order is essential */
58074 -               kill_urbs(desc);
58075 +               poison_urbs(desc);
58076                 cancel_work_sync(&desc->rxwork);
58077                 cancel_work_sync(&desc->service_outs_intr);
58078 +               unpoison_urbs(desc);
58079         }
58080         if (!PMSG_IS_AUTO(message)) {
58081                 mutex_unlock(&desc->wlock);
58082 @@ -1140,7 +1153,7 @@ static int wdm_pre_reset(struct usb_interface *intf)
58083         wake_up_all(&desc->wait);
58084         mutex_lock(&desc->rlock);
58085         mutex_lock(&desc->wlock);
58086 -       kill_urbs(desc);
58087 +       poison_urbs(desc);
58088         cancel_work_sync(&desc->rxwork);
58089         cancel_work_sync(&desc->service_outs_intr);
58090         return 0;
58091 @@ -1151,6 +1164,7 @@ static int wdm_post_reset(struct usb_interface *intf)
58092         struct wdm_device *desc = wdm_find_device(intf);
58093         int rv;
58095 +       unpoison_urbs(desc);
58096         clear_bit(WDM_OVERFLOW, &desc->flags);
58097         clear_bit(WDM_RESETTING, &desc->flags);
58098         rv = recover_from_urb_loss(desc);
58099 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
58100 index 7f71218cc1e5..13fe37fbbd2c 100644
58101 --- a/drivers/usb/core/hub.c
58102 +++ b/drivers/usb/core/hub.c
58103 @@ -3556,7 +3556,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
58104         u16             portchange, portstatus;
58106         if (!test_and_set_bit(port1, hub->child_usage_bits)) {
58107 -               status = pm_runtime_get_sync(&port_dev->dev);
58108 +               status = pm_runtime_resume_and_get(&port_dev->dev);
58109                 if (status < 0) {
58110                         dev_dbg(&udev->dev, "can't resume usb port, status %d\n",
58111                                         status);
58112 @@ -3593,9 +3593,6 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
58113                  * sequence.
58114                  */
58115                 status = hub_port_status(hub, port1, &portstatus, &portchange);
58117 -               /* TRSMRCY = 10 msec */
58118 -               msleep(10);
58119         }
58121   SuspendCleared:
58122 @@ -3610,6 +3607,9 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
58123                                 usb_clear_port_feature(hub->hdev, port1,
58124                                                 USB_PORT_FEAT_C_SUSPEND);
58125                 }
58127 +               /* TRSMRCY = 10 msec */
58128 +               msleep(10);
58129         }
58131         if (udev->persist_enabled)
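
The hub.c hunk swaps pm_runtime_get_sync() for pm_runtime_resume_and_get(): the former raises the usage count even when resume fails, leaving the caller to drop it, while the latter drops it on failure itself. A sketch of the two patterns, assuming standard runtime-PM semantics:

int ret;

/* Old: get_sync() keeps the reference on failure. */
ret = pm_runtime_get_sync(dev);
if (ret < 0)
        pm_runtime_put_noidle(dev);     /* caller must undo the count */

/* New: the helper already dropped the count on failure. */
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
        return ret;                     /* nothing to put here */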
58132 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
58133 index 76ac5d6555ae..21e7522655ac 100644
58134 --- a/drivers/usb/core/quirks.c
58135 +++ b/drivers/usb/core/quirks.c
58136 @@ -406,6 +406,7 @@ static const struct usb_device_id usb_quirk_list[] = {
58138         /* Realtek hub in Dell WD19 (Type-C) */
58139         { USB_DEVICE(0x0bda, 0x0487), .driver_info = USB_QUIRK_NO_LPM },
58140 +       { USB_DEVICE(0x0bda, 0x5487), .driver_info = USB_QUIRK_RESET_RESUME },
58142         /* Generic RTL8153 based ethernet adapters */
58143         { USB_DEVICE(0x0bda, 0x8153), .driver_info = USB_QUIRK_NO_LPM },
58144 @@ -438,6 +439,9 @@ static const struct usb_device_id usb_quirk_list[] = {
58145         { USB_DEVICE(0x17ef, 0xa012), .driver_info =
58146                         USB_QUIRK_DISCONNECT_SUSPEND },
58148 +       /* Lenovo ThinkPad USB-C Dock Gen2 Ethernet (RTL8153 GigE) */
58149 +       { USB_DEVICE(0x17ef, 0xa387), .driver_info = USB_QUIRK_NO_LPM },
58151         /* BUILDWIN Photo Frame */
58152         { USB_DEVICE(0x1908, 0x1315), .driver_info =
58153                         USB_QUIRK_HONOR_BNUMINTERFACES },
58154 diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
58155 index 7161344c6522..641e4251cb7f 100644
58156 --- a/drivers/usb/dwc2/core.h
58157 +++ b/drivers/usb/dwc2/core.h
58158 @@ -112,6 +112,7 @@ struct dwc2_hsotg_req;
58159   * @debugfs: File entry for debugfs file for this endpoint.
58160   * @dir_in: Set to true if this endpoint is of the IN direction, which
58161   *          means that it is sending data to the Host.
58162 + * @map_dir: Set to the value of dir_in when the DMA buffer is mapped.
58163   * @index: The index for the endpoint registers.
58164   * @mc: Multi Count - number of transactions per microframe
58165   * @interval: Interval for periodic endpoints, in frames or microframes.
58166 @@ -161,6 +162,7 @@ struct dwc2_hsotg_ep {
58167         unsigned short          fifo_index;
58169         unsigned char           dir_in;
58170 +       unsigned char           map_dir;
58171         unsigned char           index;
58172         unsigned char           mc;
58173         u16                     interval;
58174 diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
58175 index 55f1d14fc414..510fd0572feb 100644
58176 --- a/drivers/usb/dwc2/core_intr.c
58177 +++ b/drivers/usb/dwc2/core_intr.c
58178 @@ -307,6 +307,7 @@ static void dwc2_handle_conn_id_status_change_intr(struct dwc2_hsotg *hsotg)
58179  static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
58181         int ret;
58182 +       u32 hprt0;
58184         /* Clear interrupt */
58185         dwc2_writel(hsotg, GINTSTS_SESSREQINT, GINTSTS);
58186 @@ -327,6 +328,13 @@ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
58187                  * established
58188                  */
58189                 dwc2_hsotg_disconnect(hsotg);
58190 +       } else {
58191 +               /* Turn on the port power bit. */
58192 +               hprt0 = dwc2_read_hprt0(hsotg);
58193 +               hprt0 |= HPRT0_PWR;
58194 +               dwc2_writel(hsotg, hprt0, HPRT0);
58195 +               /* Connect hcd after port power is set. */
58196 +               dwc2_hcd_connect(hsotg);
58197         }
58200 @@ -652,6 +660,71 @@ static u32 dwc2_read_common_intr(struct dwc2_hsotg *hsotg)
58201                 return 0;
58204 +/**
58205 + * dwc_handle_gpwrdn_disc_det() - Handles the GPWRDN disconnect detect.
58206 + * Exits hibernation without restoring registers.
58207 + *
58208 + * @hsotg: Programming view of DWC_otg controller
58209 + * @gpwrdn: GPWRDN register
58210 + */
58211 +static inline void dwc_handle_gpwrdn_disc_det(struct dwc2_hsotg *hsotg,
58212 +                                             u32 gpwrdn)
58214 +       u32 gpwrdn_tmp;
58216 +       /* Switch-on voltage to the core */
58217 +       gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
58218 +       gpwrdn_tmp &= ~GPWRDN_PWRDNSWTCH;
58219 +       dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
58220 +       udelay(5);
58222 +       /* Reset core */
58223 +       gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
58224 +       gpwrdn_tmp &= ~GPWRDN_PWRDNRSTN;
58225 +       dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
58226 +       udelay(5);
58228 +       /* Disable Power Down Clamp */
58229 +       gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
58230 +       gpwrdn_tmp &= ~GPWRDN_PWRDNCLMP;
58231 +       dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
58232 +       udelay(5);
58234 +       /* Deassert reset core */
58235 +       gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
58236 +       gpwrdn_tmp |= GPWRDN_PWRDNRSTN;
58237 +       dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
58238 +       udelay(5);
58240 +       /* Disable PMU interrupt */
58241 +       gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
58242 +       gpwrdn_tmp &= ~GPWRDN_PMUINTSEL;
58243 +       dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
58245 +       /* De-assert Wakeup Logic */
58246 +       gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
58247 +       gpwrdn_tmp &= ~GPWRDN_PMUACTV;
58248 +       dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
58250 +       hsotg->hibernated = 0;
58251 +       hsotg->bus_suspended = 0;
58253 +       if (gpwrdn & GPWRDN_IDSTS) {
58254 +               hsotg->op_state = OTG_STATE_B_PERIPHERAL;
58255 +               dwc2_core_init(hsotg, false);
58256 +               dwc2_enable_global_interrupts(hsotg);
58257 +               dwc2_hsotg_core_init_disconnected(hsotg, false);
58258 +               dwc2_hsotg_core_connect(hsotg);
58259 +       } else {
58260 +               hsotg->op_state = OTG_STATE_A_HOST;
58262 +               /* Initialize the Core for Host mode */
58263 +               dwc2_core_init(hsotg, false);
58264 +               dwc2_enable_global_interrupts(hsotg);
58265 +               dwc2_hcd_start(hsotg);
58266 +       }
58269  /*
58270   * GPWRDN interrupt handler.
58271   *
58272 @@ -673,64 +746,14 @@ static void dwc2_handle_gpwrdn_intr(struct dwc2_hsotg *hsotg)
58274         if ((gpwrdn & GPWRDN_DISCONN_DET) &&
58275             (gpwrdn & GPWRDN_DISCONN_DET_MSK) && !linestate) {
58276 -               u32 gpwrdn_tmp;
58278                 dev_dbg(hsotg->dev, "%s: GPWRDN_DISCONN_DET\n", __func__);
58280 -               /* Switch-on voltage to the core */
58281 -               gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
58282 -               gpwrdn_tmp &= ~GPWRDN_PWRDNSWTCH;
58283 -               dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
58284 -               udelay(10);
58286 -               /* Reset core */
58287 -               gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
58288 -               gpwrdn_tmp &= ~GPWRDN_PWRDNRSTN;
58289 -               dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
58290 -               udelay(10);
58292 -               /* Disable Power Down Clamp */
58293 -               gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
58294 -               gpwrdn_tmp &= ~GPWRDN_PWRDNCLMP;
58295 -               dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
58296 -               udelay(10);
58298 -               /* Deassert reset core */
58299 -               gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
58300 -               gpwrdn_tmp |= GPWRDN_PWRDNRSTN;
58301 -               dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
58302 -               udelay(10);
58304 -               /* Disable PMU interrupt */
58305 -               gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
58306 -               gpwrdn_tmp &= ~GPWRDN_PMUINTSEL;
58307 -               dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
58309 -               /* De-assert Wakeup Logic */
58310 -               gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
58311 -               gpwrdn_tmp &= ~GPWRDN_PMUACTV;
58312 -               dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
58314 -               hsotg->hibernated = 0;
58316 -               if (gpwrdn & GPWRDN_IDSTS) {
58317 -                       hsotg->op_state = OTG_STATE_B_PERIPHERAL;
58318 -                       dwc2_core_init(hsotg, false);
58319 -                       dwc2_enable_global_interrupts(hsotg);
58320 -                       dwc2_hsotg_core_init_disconnected(hsotg, false);
58321 -                       dwc2_hsotg_core_connect(hsotg);
58322 -               } else {
58323 -                       hsotg->op_state = OTG_STATE_A_HOST;
58325 -                       /* Initialize the Core for Host mode */
58326 -                       dwc2_core_init(hsotg, false);
58327 -                       dwc2_enable_global_interrupts(hsotg);
58328 -                       dwc2_hcd_start(hsotg);
58329 -               }
58330 -       }
58332 -       if ((gpwrdn & GPWRDN_LNSTSCHG) &&
58333 -           (gpwrdn & GPWRDN_LNSTSCHG_MSK) && linestate) {
58334 +               /*
58335 +                * Call the disconnect detect function to exit from
58336 +                * hibernation.
58337 +                */
58338 +               dwc_handle_gpwrdn_disc_det(hsotg, gpwrdn);
58339 +       } else if ((gpwrdn & GPWRDN_LNSTSCHG) &&
58340 +                  (gpwrdn & GPWRDN_LNSTSCHG_MSK) && linestate) {
58341                 dev_dbg(hsotg->dev, "%s: GPWRDN_LNSTSCHG\n", __func__);
58342                 if (hsotg->hw_params.hibernation &&
58343                     hsotg->hibernated) {
58344 @@ -741,24 +764,21 @@ static void dwc2_handle_gpwrdn_intr(struct dwc2_hsotg *hsotg)
58345                                 dwc2_exit_hibernation(hsotg, 1, 0, 1);
58346                         }
58347                 }
58348 -       }
58349 -       if ((gpwrdn & GPWRDN_RST_DET) && (gpwrdn & GPWRDN_RST_DET_MSK)) {
58350 +       } else if ((gpwrdn & GPWRDN_RST_DET) &&
58351 +                  (gpwrdn & GPWRDN_RST_DET_MSK)) {
58352                 dev_dbg(hsotg->dev, "%s: GPWRDN_RST_DET\n", __func__);
58353                 if (!linestate && (gpwrdn & GPWRDN_BSESSVLD))
58354                         dwc2_exit_hibernation(hsotg, 0, 1, 0);
58355 -       }
58356 -       if ((gpwrdn & GPWRDN_STS_CHGINT) &&
58357 -           (gpwrdn & GPWRDN_STS_CHGINT_MSK) && linestate) {
58358 +       } else if ((gpwrdn & GPWRDN_STS_CHGINT) &&
58359 +                  (gpwrdn & GPWRDN_STS_CHGINT_MSK)) {
58360                 dev_dbg(hsotg->dev, "%s: GPWRDN_STS_CHGINT\n", __func__);
58361 -               if (hsotg->hw_params.hibernation &&
58362 -                   hsotg->hibernated) {
58363 -                       if (gpwrdn & GPWRDN_IDSTS) {
58364 -                               dwc2_exit_hibernation(hsotg, 0, 0, 0);
58365 -                               call_gadget(hsotg, resume);
58366 -                       } else {
58367 -                               dwc2_exit_hibernation(hsotg, 1, 0, 1);
58368 -                       }
58369 -               }
58370 +               /*
58371 +                * The GPWRDN_STS_CHGINT exit-from-hibernation flow is the
58372 +                * same as the GPWRDN_DISCONN_DET flow, so call the
58373 +                * disconnect detect helper function to exit from
58374 +                * hibernation.
58375 +                */
58376 +               dwc_handle_gpwrdn_disc_det(hsotg, gpwrdn);
58377         }
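
Two things change in the GPWRDN handler: the duplicated wake-up register sequence moves into dwc_handle_gpwrdn_disc_det(), and the independent if blocks become an if/else-if chain so one interrupt takes exactly one exit-from-hibernation path. A condensed shape, where disconn_det, lnstschg, rst_det and sts_chgint stand for the (flag && mask) tests in the real handler:

if (disconn_det && !linestate) {
        dwc_handle_gpwrdn_disc_det(hsotg, gpwrdn);
} else if (lnstschg && linestate) {
        /* exit hibernation restoring register state */
} else if (rst_det && !linestate) {
        dwc2_exit_hibernation(hsotg, 0, 1, 0);
} else if (sts_chgint) {
        dwc_handle_gpwrdn_disc_det(hsotg, gpwrdn);      /* same flow */
}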
58380 diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
58381 index ad4c94366dad..d2f623d83bf7 100644
58382 --- a/drivers/usb/dwc2/gadget.c
58383 +++ b/drivers/usb/dwc2/gadget.c
58384 @@ -422,7 +422,7 @@ static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
58386         struct usb_request *req = &hs_req->req;
58388 -       usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
58389 +       usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->map_dir);
58392  /*
58393 @@ -1242,6 +1242,7 @@ static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
58395         int ret;
58397 +       hs_ep->map_dir = hs_ep->dir_in;
58398         ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
58399         if (ret)
58400                 goto dma_error;
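
The map_dir fix snapshots the endpoint direction when the request is DMA-mapped and unmaps with that snapshot, because dir_in can be flipped (notably on ep0) between map and unmap, and the unmap must use the same direction as the map. The two halves side by side:

/* At map time: record the direction actually used for mapping. */
hs_ep->map_dir = hs_ep->dir_in;
ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);

/* At unmap time: use the recorded value, not the live field. */
usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->map_dir);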
58401 diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
58402 index 1a9789ec5847..6af1dcbc3656 100644
58403 --- a/drivers/usb/dwc2/hcd.c
58404 +++ b/drivers/usb/dwc2/hcd.c
58405 @@ -5580,7 +5580,15 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
58406                 return ret;
58407         }
58409 -       dwc2_hcd_rem_wakeup(hsotg);
58410 +       if (rem_wakeup) {
58411 +               dwc2_hcd_rem_wakeup(hsotg);
58412 +               /*
58413 +                * Set the "port_connect_status_change" flag to force
58414 +                * re-enumeration, because the port connection status is
58415 +                * not detected after exiting hibernation.
58416 +                */
58417 +               hsotg->flags.b.port_connect_status_change = 1;
58418 +       }
58420         hsotg->hibernated = 0;
58421         hsotg->bus_suspended = 0;
58422 diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
58423 index f2448d0a9d39..126f0e10b3ef 100644
58424 --- a/drivers/usb/dwc3/core.c
58425 +++ b/drivers/usb/dwc3/core.c
58426 @@ -114,6 +114,8 @@ void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
58427         dwc->current_dr_role = mode;
58430 +static int dwc3_core_soft_reset(struct dwc3 *dwc);
58432  static void __dwc3_set_mode(struct work_struct *work)
58434         struct dwc3 *dwc = work_to_dwc(work);
58435 @@ -121,6 +123,8 @@ static void __dwc3_set_mode(struct work_struct *work)
58436         int ret;
58437         u32 reg;
58439 +       mutex_lock(&dwc->mutex);
58441         pm_runtime_get_sync(dwc->dev);
58443         if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG)
58444 @@ -154,6 +158,25 @@ static void __dwc3_set_mode(struct work_struct *work)
58445                 break;
58446         }
58448 +       /* For DRD host or device mode only */
58449 +       if (dwc->desired_dr_role != DWC3_GCTL_PRTCAP_OTG) {
58450 +               reg = dwc3_readl(dwc->regs, DWC3_GCTL);
58451 +               reg |= DWC3_GCTL_CORESOFTRESET;
58452 +               dwc3_writel(dwc->regs, DWC3_GCTL, reg);
58454 +               /*
58455 +                * Wait for internal clocks to synchronize. DWC_usb31 and
58456 +                * DWC_usb32 may need at least 50ms (less for DWC_usb3). To
58457 +                * keep it consistent across different IPs, let's wait up to
58458 +                * 100ms before clearing GCTL.CORESOFTRESET.
58459 +                */
58460 +               msleep(100);
58462 +               reg = dwc3_readl(dwc->regs, DWC3_GCTL);
58463 +               reg &= ~DWC3_GCTL_CORESOFTRESET;
58464 +               dwc3_writel(dwc->regs, DWC3_GCTL, reg);
58465 +       }
58467         spin_lock_irqsave(&dwc->lock, flags);
58469         dwc3_set_prtcap(dwc, dwc->desired_dr_role);
58470 @@ -178,6 +201,8 @@ static void __dwc3_set_mode(struct work_struct *work)
58471                 }
58472                 break;
58473         case DWC3_GCTL_PRTCAP_DEVICE:
58474 +               dwc3_core_soft_reset(dwc);
58476                 dwc3_event_buffers_setup(dwc);
58478                 if (dwc->usb2_phy)
58479 @@ -200,6 +225,7 @@ static void __dwc3_set_mode(struct work_struct *work)
58480  out:
58481         pm_runtime_mark_last_busy(dwc->dev);
58482         pm_runtime_put_autosuspend(dwc->dev);
58483 +       mutex_unlock(&dwc->mutex);
58486  void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
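
Mode switching is now serialized with a mutex rather than the existing spinlock because the new soft-reset sequence sleeps for up to 100ms, which is illegal under a spinlock. A sketch of the resulting shape (demo_set_mode is hypothetical):

static void demo_set_mode(struct dwc3 *dwc)
{
        unsigned long flags;

        mutex_lock(&dwc->mutex);        /* one role switch at a time */

        msleep(100);                    /* sleeping is fine under a mutex */

        spin_lock_irqsave(&dwc->lock, flags);
        /* ... short register-level section: program the new role ... */
        spin_unlock_irqrestore(&dwc->lock, flags);

        mutex_unlock(&dwc->mutex);
}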
58487 @@ -1277,6 +1303,8 @@ static void dwc3_get_properties(struct dwc3 *dwc)
58488                                 "snps,usb3_lpm_capable");
58489         dwc->usb2_lpm_disable = device_property_read_bool(dev,
58490                                 "snps,usb2-lpm-disable");
58491 +       dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
58492 +                               "snps,usb2-gadget-lpm-disable");
58493         device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
58494                                 &rx_thr_num_pkt_prd);
58495         device_property_read_u8(dev, "snps,rx-max-burst-prd",
58496 @@ -1543,6 +1571,7 @@ static int dwc3_probe(struct platform_device *pdev)
58497         dwc3_cache_hwparams(dwc);
58499         spin_lock_init(&dwc->lock);
58500 +       mutex_init(&dwc->mutex);
58502         pm_runtime_set_active(dev);
58503         pm_runtime_use_autosuspend(dev);
58504 diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
58505 index 052b20d52651..453cfebd4d04 100644
58506 --- a/drivers/usb/dwc3/core.h
58507 +++ b/drivers/usb/dwc3/core.h
58508 @@ -13,6 +13,7 @@
58510  #include <linux/device.h>
58511  #include <linux/spinlock.h>
58512 +#include <linux/mutex.h>
58513  #include <linux/ioport.h>
58514  #include <linux/list.h>
58515  #include <linux/bitops.h>
58516 @@ -946,6 +947,7 @@ struct dwc3_scratchpad_array {
58517   * @scratch_addr: dma address of scratchbuf
58518   * @ep0_in_setup: one control transfer is completed and enter setup phase
58519   * @lock: for synchronizing
58520 + * @mutex: for mode switching
58521   * @dev: pointer to our struct device
58522   * @sysdev: pointer to the DMA-capable device
58523   * @xhci: pointer to our xHCI child
58524 @@ -1034,7 +1036,8 @@ struct dwc3_scratchpad_array {
58525   * @dis_start_transfer_quirk: set if start_transfer failure SW workaround is
58526   *                     not needed for DWC_usb31 version 1.70a-ea06 and below
58527   * @usb3_lpm_capable: set if hardware supports Link Power Management
58528 - * @usb2_lpm_disable: set to disable usb2 lpm
58529 + * @usb2_lpm_disable: set to disable usb2 lpm for host
58530 + * @usb2_gadget_lpm_disable: set to disable usb2 lpm for gadget
58531   * @disable_scramble_quirk: set if we enable the disable scramble quirk
58532   * @u2exit_lfps_quirk: set if we enable u2exit lfps quirk
58533   * @u2ss_inp3_quirk: set if we enable P3 OK for U2/SS Inactive quirk
58534 @@ -1085,6 +1088,9 @@ struct dwc3 {
58535         /* device lock */
58536         spinlock_t              lock;
58538 +       /* mode switching lock */
58539 +       struct mutex            mutex;
58541         struct device           *dev;
58542         struct device           *sysdev;
58544 @@ -1238,6 +1244,7 @@ struct dwc3 {
58545         unsigned                dis_start_transfer_quirk:1;
58546         unsigned                usb3_lpm_capable:1;
58547         unsigned                usb2_lpm_disable:1;
58548 +       unsigned                usb2_gadget_lpm_disable:1;
58550         unsigned                disable_scramble_quirk:1;
58551         unsigned                u2exit_lfps_quirk:1;
58552 diff --git a/drivers/usb/dwc3/dwc3-imx8mp.c b/drivers/usb/dwc3/dwc3-imx8mp.c
58553 index 75f0042b998b..84c1a4ac2444 100644
58554 --- a/drivers/usb/dwc3/dwc3-imx8mp.c
58555 +++ b/drivers/usb/dwc3/dwc3-imx8mp.c
58556 @@ -167,6 +167,7 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
58558         dwc3_np = of_get_child_by_name(node, "dwc3");
58559         if (!dwc3_np) {
58560 +               err = -ENODEV;
58561                 dev_err(dev, "failed to find dwc3 core child\n");
58562                 goto disable_rpm;
58563         }
58564 diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
58565 index 3db17806e92e..e196673f5c64 100644
58566 --- a/drivers/usb/dwc3/dwc3-omap.c
58567 +++ b/drivers/usb/dwc3/dwc3-omap.c
58568 @@ -437,8 +437,13 @@ static int dwc3_omap_extcon_register(struct dwc3_omap *omap)
58570                 if (extcon_get_state(edev, EXTCON_USB) == true)
58571                         dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
58572 +               else
58573 +                       dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF);
58575                 if (extcon_get_state(edev, EXTCON_USB_HOST) == true)
58576                         dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
58577 +               else
58578 +                       dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT);
58580                 omap->edev = edev;
58581         }
58582 diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
58583 index 4c5c6972124a..19789e94bbd0 100644
58584 --- a/drivers/usb/dwc3/dwc3-pci.c
58585 +++ b/drivers/usb/dwc3/dwc3-pci.c
58586 @@ -41,6 +41,7 @@
58587  #define PCI_DEVICE_ID_INTEL_TGPH               0x43ee
58588  #define PCI_DEVICE_ID_INTEL_JSP                        0x4dee
58589  #define PCI_DEVICE_ID_INTEL_ADLP               0x51ee
58590 +#define PCI_DEVICE_ID_INTEL_ADLM               0x54ee
58591  #define PCI_DEVICE_ID_INTEL_ADLS               0x7ae1
58592  #define PCI_DEVICE_ID_INTEL_TGL                        0x9a15
58594 @@ -122,6 +123,7 @@ static const struct property_entry dwc3_pci_mrfld_properties[] = {
58595         PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
58596         PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
58597         PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
58598 +       PROPERTY_ENTRY_BOOL("snps,usb2-gadget-lpm-disable"),
58599         PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
58600         {}
58601  };
58602 @@ -388,6 +390,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
58603         { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLP),
58604           (kernel_ulong_t) &dwc3_pci_intel_swnode, },
58606 +       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLM),
58607 +         (kernel_ulong_t) &dwc3_pci_intel_swnode, },
58609         { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS),
58610           (kernel_ulong_t) &dwc3_pci_intel_swnode, },
58612 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
58613 index c7ef218e7a8c..8585b56d9f2d 100644
58614 --- a/drivers/usb/dwc3/gadget.c
58615 +++ b/drivers/usb/dwc3/gadget.c
58616 @@ -308,13 +308,12 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
58617         }
58619         if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
58620 -               int             needs_wakeup;
58621 +               int link_state;
58623 -               needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
58624 -                               dwc->link_state == DWC3_LINK_STATE_U2 ||
58625 -                               dwc->link_state == DWC3_LINK_STATE_U3);
58627 -               if (unlikely(needs_wakeup)) {
58628 +               link_state = dwc3_gadget_get_link_state(dwc);
58629 +               if (link_state == DWC3_LINK_STATE_U1 ||
58630 +                   link_state == DWC3_LINK_STATE_U2 ||
58631 +                   link_state == DWC3_LINK_STATE_U3) {
58632                         ret = __dwc3_gadget_wakeup(dwc);
58633                         dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
58634                                         ret);
58635 @@ -608,12 +607,14 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
58636                 u8 bInterval_m1;
58638                 /*
58639 -                * Valid range for DEPCFG.bInterval_m1 is from 0 to 13, and it
58640 -                * must be set to 0 when the controller operates in full-speed.
58641 +                * Valid range for DEPCFG.bInterval_m1 is from 0 to 13.
58642 +                *
58643 +                * NOTE: The programming guide incorrectly states that bInterval_m1
58644 +                * must be set to 0 when operating in full speed. Internally the
58645 +                * controller does not have this limitation. See DWC_usb3x
58646 +                * programming guide section 3.2.2.1.
58647                  */
58648                 bInterval_m1 = min_t(u8, desc->bInterval - 1, 13);
58649 -               if (dwc->gadget->speed == USB_SPEED_FULL)
58650 -                       bInterval_m1 = 0;
58652                 if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
58653                     dwc->gadget->speed == USB_SPEED_FULL)
58654 @@ -1675,7 +1676,9 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
58655                 }
58656         }
58658 -       return __dwc3_gadget_kick_transfer(dep);
58659 +       __dwc3_gadget_kick_transfer(dep);
58661 +       return 0;
58664  static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
58665 @@ -1973,6 +1976,8 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
58666         case DWC3_LINK_STATE_RESET:
58667         case DWC3_LINK_STATE_RX_DET:    /* in HS, means Early Suspend */
58668         case DWC3_LINK_STATE_U3:        /* in HS, means SUSPEND */
58669 +       case DWC3_LINK_STATE_U2:        /* in HS, means Sleep (L1) */
58670 +       case DWC3_LINK_STATE_U1:
58671         case DWC3_LINK_STATE_RESUME:
58672                 break;
58673         default:
58674 @@ -2299,6 +2304,10 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
58675         if (DWC3_VER_IS_PRIOR(DWC3, 250A))
58676                 reg |= DWC3_DEVTEN_ULSTCNGEN;
58678 +       /* On 2.30a and above this bit enables U3/L2-L1 Suspend Events */
58679 +       if (!DWC3_VER_IS_PRIOR(DWC3, 230A))
58680 +               reg |= DWC3_DEVTEN_EOPFEN;
58682         dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
58685 @@ -3322,6 +3331,15 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
58687         u32                     reg;
58689 +       /*
58690 +        * Ideally, dwc3_reset_gadget() would trigger the function
58691 +        * drivers to stop any active transfers through ep disable.
58692 +        * However, for functions which defer ep disable, such as mass
58693 +        * storage, we will need to rely on the call to stop active
58694 +        * transfers here, and avoid allowing new requests to be queued.
58695 +        */
58696 +       dwc->connected = false;
58698         /*
58699          * WORKAROUND: DWC3 revisions <1.88a have an issue which
58700          * would cause a missing Disconnect Event if there's a
58701 @@ -3460,6 +3478,7 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
58702         /* Enable USB2 LPM Capability */
58704         if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A) &&
58705 +           !dwc->usb2_gadget_lpm_disable &&
58706             (speed != DWC3_DSTS_SUPERSPEED) &&
58707             (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
58708                 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
58709 @@ -3486,6 +3505,12 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
58711                 dwc3_gadget_dctl_write_safe(dwc, reg);
58712         } else {
58713 +               if (dwc->usb2_gadget_lpm_disable) {
58714 +                       reg = dwc3_readl(dwc->regs, DWC3_DCFG);
58715 +                       reg &= ~DWC3_DCFG_LPM_CAP;
58716 +                       dwc3_writel(dwc->regs, DWC3_DCFG, reg);
58717 +               }
58719                 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
58720                 reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
58721                 dwc3_gadget_dctl_write_safe(dwc, reg);
58722 @@ -3934,7 +3959,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
58723         dwc->gadget->ssp_rate           = USB_SSP_GEN_UNKNOWN;
58724         dwc->gadget->sg_supported       = true;
58725         dwc->gadget->name               = "dwc3-gadget";
58726 -       dwc->gadget->lpm_capable        = true;
58727 +       dwc->gadget->lpm_capable        = !dwc->usb2_gadget_lpm_disable;
58729         /*
58730          * FIXME We might be setting max_speed to <SUPER, however versions
58731 @@ -4005,8 +4030,9 @@ int dwc3_gadget_init(struct dwc3 *dwc)
58733  void dwc3_gadget_exit(struct dwc3 *dwc)
58735 -       usb_del_gadget_udc(dwc->gadget);
58736 +       usb_del_gadget(dwc->gadget);
58737         dwc3_gadget_free_endpoints(dwc);
58738 +       usb_put_gadget(dwc->gadget);
58739         dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
58740                           dwc->bounce_addr);
58741         kfree(dwc->setup_buf);
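The dwc3_gadget_exit() hunk above splits usb_del_gadget_udc() into usb_del_gadget() plus usb_put_gadget() so the endpoints can be freed after unregistration but before the last reference drops. A sketch of that unregister-then-release ordering with a toy refcount (plain C, not the kernel API):

#include <stdlib.h>

struct gadget {
	int refs;    /* toy refcount standing in for the embedded device */
	void *eps;   /* resources that must be freed before the final put */
};

static void put_gadget(struct gadget *g)
{
	if (--g->refs == 0)
		free(g);             /* memory survives until the last reference */
}

static void gadget_exit(struct gadget *g)
{
	/* 1: unregister, so no new user can look the gadget up */
	free(g->eps);                /* 2: release what the object still owns */
	g->eps = NULL;
	put_gadget(g);               /* 3: drop the reference that kept *g alive */
}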
58742 diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
58743 index 2d115353424c..8bb25773b61e 100644
58744 --- a/drivers/usb/gadget/config.c
58745 +++ b/drivers/usb/gadget/config.c
58746 @@ -194,9 +194,13 @@ EXPORT_SYMBOL_GPL(usb_assign_descriptors);
58747  void usb_free_all_descriptors(struct usb_function *f)
58749         usb_free_descriptors(f->fs_descriptors);
58750 +       f->fs_descriptors = NULL;
58751         usb_free_descriptors(f->hs_descriptors);
58752 +       f->hs_descriptors = NULL;
58753         usb_free_descriptors(f->ss_descriptors);
58754 +       f->ss_descriptors = NULL;
58755         usb_free_descriptors(f->ssp_descriptors);
58756 +       f->ssp_descriptors = NULL;
58758  EXPORT_SYMBOL_GPL(usb_free_all_descriptors);
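The config.c hunk clears each descriptor pointer right after freeing it, which makes usb_free_all_descriptors() safe to call twice. That is the common free-and-clear idiom; a generic form (the macro name is made up):

#include <stdlib.h>

/* Free a pointer and clear it: a repeat pass frees nothing instead of
 * double-freeing, and later NULL checks stay meaningful. */
#define free_and_null(p) do { free(p); (p) = NULL; } while (0)

void demo(void)
{
	char *buf = malloc(16);

	free_and_null(buf);  /* buf is NULL from here on */
	free_and_null(buf);  /* harmless: free(NULL) is defined as a no-op */
}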
58760 diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
58761 index 801a8b668a35..10a5d9f0f2b9 100644
58762 --- a/drivers/usb/gadget/function/f_fs.c
58763 +++ b/drivers/usb/gadget/function/f_fs.c
58764 @@ -2640,6 +2640,7 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
58766         do { /* lang_count > 0 so we can use do-while */
58767                 unsigned needed = needed_count;
58768 +               u32 str_per_lang = str_count;
58770                 if (len < 3)
58771                         goto error_free;
58772 @@ -2675,7 +2676,7 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
58774                         data += length + 1;
58775                         len -= length + 1;
58776 -               } while (--str_count);
58777 +               } while (--str_per_lang);
58779                 s->id = 0;   /* terminator */
58780                 s->s = NULL;
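The f_fs.c change introduces str_per_lang because the old code decremented the shared str_count, leaving zero iterations for every language after the first. The loop bug in isolation (a standalone model, not the ffs parser; assumes strings > 0, as the original's do-while does):

/* Process `strings` entries for each of `langs` languages. */
static int strings_processed(int langs, int strings)
{
	int done = 0;

	for (int l = 0; l < langs; l++) {
		int per_lang = strings;  /* the fix: a fresh counter per language */

		do {
			done++;
		} while (--per_lang);    /* decrementing `strings` here was the bug */
	}
	return done;                     /* langs * strings once fixed */
}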
58781 diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
58782 index 560382e0a8f3..e65f474ad7b3 100644
58783 --- a/drivers/usb/gadget/function/f_uac1.c
58784 +++ b/drivers/usb/gadget/function/f_uac1.c
58785 @@ -19,6 +19,9 @@
58786  #include "u_audio.h"
58787  #include "u_uac1.h"
58789 +/* UAC1 spec: 3.7.2.3 Audio Channel Cluster Format */
58790 +#define UAC1_CHANNEL_MASK 0x0FFF
58792  struct f_uac1 {
58793         struct g_audio g_audio;
58794         u8 ac_intf, as_in_intf, as_out_intf;
58795 @@ -30,6 +33,11 @@ static inline struct f_uac1 *func_to_uac1(struct usb_function *f)
58796         return container_of(f, struct f_uac1, g_audio.func);
58799 +static inline struct f_uac1_opts *g_audio_to_uac1_opts(struct g_audio *audio)
58800 +{
58801 +       return container_of(audio->func.fi, struct f_uac1_opts, func_inst);
58802 +}
58803 +
58804  /*
58805   * DESCRIPTORS ... most are static, but strings and full
58806   * configuration descriptors are built on demand.
58807 @@ -505,11 +513,42 @@ static void f_audio_disable(struct usb_function *f)
58809  /*-------------------------------------------------------------------------*/
58811 +static int f_audio_validate_opts(struct g_audio *audio, struct device *dev)
58812 +{
58813 +       struct f_uac1_opts *opts = g_audio_to_uac1_opts(audio);
58814 +
58815 +       if (!opts->p_chmask && !opts->c_chmask) {
58816 +               dev_err(dev, "Error: no playback and capture channels\n");
58817 +               return -EINVAL;
58818 +       } else if (opts->p_chmask & ~UAC1_CHANNEL_MASK) {
58819 +               dev_err(dev, "Error: unsupported playback channels mask\n");
58820 +               return -EINVAL;
58821 +       } else if (opts->c_chmask & ~UAC1_CHANNEL_MASK) {
58822 +               dev_err(dev, "Error: unsupported capture channels mask\n");
58823 +               return -EINVAL;
58824 +       } else if ((opts->p_ssize < 1) || (opts->p_ssize > 4)) {
58825 +               dev_err(dev, "Error: incorrect playback sample size\n");
58826 +               return -EINVAL;
58827 +       } else if ((opts->c_ssize < 1) || (opts->c_ssize > 4)) {
58828 +               dev_err(dev, "Error: incorrect capture sample size\n");
58829 +               return -EINVAL;
58830 +       } else if (!opts->p_srate) {
58831 +               dev_err(dev, "Error: incorrect playback sampling rate\n");
58832 +               return -EINVAL;
58833 +       } else if (!opts->c_srate) {
58834 +               dev_err(dev, "Error: incorrect capture sampling rate\n");
58835 +               return -EINVAL;
58836 +       }
58837 +
58838 +       return 0;
58839 +}
58840 +
58841  /* audio function driver setup/binding */
58842  static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
58844         struct usb_composite_dev        *cdev = c->cdev;
58845         struct usb_gadget               *gadget = cdev->gadget;
58846 +       struct device                   *dev = &gadget->dev;
58847         struct f_uac1                   *uac1 = func_to_uac1(f);
58848         struct g_audio                  *audio = func_to_g_audio(f);
58849         struct f_uac1_opts              *audio_opts;
58850 @@ -519,6 +558,10 @@ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
58851         int                             rate;
58852         int                             status;
58854 +       status = f_audio_validate_opts(audio, dev);
58855 +       if (status)
58856 +               return status;
58858         audio_opts = container_of(f->fi, struct f_uac1_opts, func_inst);
58860         us = usb_gstrings_attach(cdev, uac1_strings, ARRAY_SIZE(strings_uac1));
58861 diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
58862 index 6f03e944e0e3..dd960cea642f 100644
58863 --- a/drivers/usb/gadget/function/f_uac2.c
58864 +++ b/drivers/usb/gadget/function/f_uac2.c
58865 @@ -14,6 +14,9 @@
58866  #include "u_audio.h"
58867  #include "u_uac2.h"
58869 +/* UAC2 spec: 4.1 Audio Channel Cluster Descriptor */
58870 +#define UAC2_CHANNEL_MASK 0x07FFFFFF
58872  /*
58873   * The driver implements a simple UAC_2 topology.
58874   * USB-OUT -> IT_1 -> OT_3 -> ALSA_Capture
58875 @@ -604,6 +607,36 @@ static void setup_descriptor(struct f_uac2_opts *opts)
58876         hs_audio_desc[i] = NULL;
58879 +static int afunc_validate_opts(struct g_audio *agdev, struct device *dev)
58880 +{
58881 +       struct f_uac2_opts *opts = g_audio_to_uac2_opts(agdev);
58882 +
58883 +       if (!opts->p_chmask && !opts->c_chmask) {
58884 +               dev_err(dev, "Error: no playback and capture channels\n");
58885 +               return -EINVAL;
58886 +       } else if (opts->p_chmask & ~UAC2_CHANNEL_MASK) {
58887 +               dev_err(dev, "Error: unsupported playback channels mask\n");
58888 +               return -EINVAL;
58889 +       } else if (opts->c_chmask & ~UAC2_CHANNEL_MASK) {
58890 +               dev_err(dev, "Error: unsupported capture channels mask\n");
58891 +               return -EINVAL;
58892 +       } else if ((opts->p_ssize < 1) || (opts->p_ssize > 4)) {
58893 +               dev_err(dev, "Error: incorrect playback sample size\n");
58894 +               return -EINVAL;
58895 +       } else if ((opts->c_ssize < 1) || (opts->c_ssize > 4)) {
58896 +               dev_err(dev, "Error: incorrect capture sample size\n");
58897 +               return -EINVAL;
58898 +       } else if (!opts->p_srate) {
58899 +               dev_err(dev, "Error: incorrect playback sampling rate\n");
58900 +               return -EINVAL;
58901 +       } else if (!opts->c_srate) {
58902 +               dev_err(dev, "Error: incorrect capture sampling rate\n");
58903 +               return -EINVAL;
58904 +       }
58905 +
58906 +       return 0;
58907 +}
58908 +
58909  static int
58910  afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
58912 @@ -612,11 +645,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
58913         struct usb_composite_dev *cdev = cfg->cdev;
58914         struct usb_gadget *gadget = cdev->gadget;
58915         struct device *dev = &gadget->dev;
58916 -       struct f_uac2_opts *uac2_opts;
58917 +       struct f_uac2_opts *uac2_opts = g_audio_to_uac2_opts(agdev);
58918         struct usb_string *us;
58919         int ret;
58921 -       uac2_opts = container_of(fn->fi, struct f_uac2_opts, func_inst);
58922 +       ret = afunc_validate_opts(agdev, dev);
58923 +       if (ret)
58924 +               return ret;
58926         us = usb_gstrings_attach(cdev, fn_strings, ARRAY_SIZE(strings_fn));
58927         if (IS_ERR(us))
58928 diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
58929 index 44b4352a2676..f48a00e49794 100644
58930 --- a/drivers/usb/gadget/function/f_uvc.c
58931 +++ b/drivers/usb/gadget/function/f_uvc.c
58932 @@ -633,7 +633,12 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
58934         uvc_hs_streaming_ep.wMaxPacketSize =
58935                 cpu_to_le16(max_packet_size | ((max_packet_mult - 1) << 11));
58936 -       uvc_hs_streaming_ep.bInterval = opts->streaming_interval;
58938 +       /* A high-bandwidth endpoint must specify a bInterval value of 1 */
58939 +       if (max_packet_mult > 1)
58940 +               uvc_hs_streaming_ep.bInterval = 1;
58941 +       else
58942 +               uvc_hs_streaming_ep.bInterval = opts->streaming_interval;
58944         uvc_ss_streaming_ep.wMaxPacketSize = cpu_to_le16(max_packet_size);
58945         uvc_ss_streaming_ep.bInterval = opts->streaming_interval;
58946 @@ -817,6 +822,7 @@ static struct usb_function_instance *uvc_alloc_inst(void)
58947         pd->bmControls[0]               = 1;
58948         pd->bmControls[1]               = 0;
58949         pd->iProcessing                 = 0;
58950 +       pd->bmVideoStandards            = 0;
58952         od = &opts->uvc_output_terminal;
58953         od->bLength                     = UVC_DT_OUTPUT_TERMINAL_SIZE;
58954 diff --git a/drivers/usb/gadget/legacy/webcam.c b/drivers/usb/gadget/legacy/webcam.c
58955 index a9f8eb8e1c76..2c9eab2b863d 100644
58956 --- a/drivers/usb/gadget/legacy/webcam.c
58957 +++ b/drivers/usb/gadget/legacy/webcam.c
58958 @@ -125,6 +125,7 @@ static const struct uvc_processing_unit_descriptor uvc_processing = {
58959         .bmControls[0]          = 1,
58960         .bmControls[1]          = 0,
58961         .iProcessing            = 0,
58962 +       .bmVideoStandards       = 0,
58963  };
58965  static const struct uvc_output_terminal_descriptor uvc_output_terminal = {
58966 diff --git a/drivers/usb/gadget/udc/aspeed-vhub/core.c b/drivers/usb/gadget/udc/aspeed-vhub/core.c
58967 index be7bb64e3594..d11d3d14313f 100644
58968 --- a/drivers/usb/gadget/udc/aspeed-vhub/core.c
58969 +++ b/drivers/usb/gadget/udc/aspeed-vhub/core.c
58970 @@ -36,6 +36,7 @@ void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
58971                    int status)
58973         bool internal = req->internal;
58974 +       struct ast_vhub *vhub = ep->vhub;
58976         EPVDBG(ep, "completing request @%p, status %d\n", req, status);
58978 @@ -46,7 +47,7 @@ void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
58980         if (req->req.dma) {
58981                 if (!WARN_ON(!ep->dev))
58982 -                       usb_gadget_unmap_request(&ep->dev->gadget,
58983 +                       usb_gadget_unmap_request_by_dev(&vhub->pdev->dev,
58984                                                  &req->req, ep->epn.is_in);
58985                 req->req.dma = 0;
58986         }
58987 diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
58988 index 02d8bfae58fb..cb164c615e6f 100644
58989 --- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
58990 +++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
58991 @@ -376,7 +376,7 @@ static int ast_vhub_epn_queue(struct usb_ep* u_ep, struct usb_request *u_req,
58992         if (ep->epn.desc_mode ||
58993             ((((unsigned long)u_req->buf & 7) == 0) &&
58994              (ep->epn.is_in || !(u_req->length & (u_ep->maxpacket - 1))))) {
58995 -               rc = usb_gadget_map_request(&ep->dev->gadget, u_req,
58996 +               rc = usb_gadget_map_request_by_dev(&vhub->pdev->dev, u_req,
58997                                             ep->epn.is_in);
58998                 if (rc) {
58999                         dev_warn(&vhub->pdev->dev,
59000 diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
59001 index 57067763b100..5f474ffe2be1 100644
59002 --- a/drivers/usb/gadget/udc/dummy_hcd.c
59003 +++ b/drivers/usb/gadget/udc/dummy_hcd.c
59004 @@ -903,6 +903,21 @@ static int dummy_pullup(struct usb_gadget *_gadget, int value)
59005         spin_lock_irqsave(&dum->lock, flags);
59006         dum->pullup = (value != 0);
59007         set_link_state(dum_hcd);
59008 +       if (value == 0) {
59009 +               /*
59010 +                * Emulate synchronize_irq(): wait for callbacks to finish.
59011 +                * This seems to be the best place to emulate the call to
59012 +                * synchronize_irq() that's in usb_gadget_remove_driver().
59013 +                * Doing it in dummy_udc_stop() would be too late since it
59014 +                * is called after the unbind callback and unbind shouldn't
59015 +                * be invoked until all the other callbacks are finished.
59016 +                */
59017 +               while (dum->callback_usage > 0) {
59018 +                       spin_unlock_irqrestore(&dum->lock, flags);
59019 +                       usleep_range(1000, 2000);
59020 +                       spin_lock_irqsave(&dum->lock, flags);
59021 +               }
59022 +       }
59023         spin_unlock_irqrestore(&dum->lock, flags);
59025         usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
59026 @@ -1004,14 +1019,6 @@ static int dummy_udc_stop(struct usb_gadget *g)
59027         spin_lock_irq(&dum->lock);
59028         dum->ints_enabled = 0;
59029         stop_activity(dum);
59031 -       /* emulate synchronize_irq(): wait for callbacks to finish */
59032 -       while (dum->callback_usage > 0) {
59033 -               spin_unlock_irq(&dum->lock);
59034 -               usleep_range(1000, 2000);
59035 -               spin_lock_irq(&dum->lock);
59036 -       }
59038         dum->driver = NULL;
59039         spin_unlock_irq(&dum->lock);
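The dummy_hcd hunks move the "emulate synchronize_irq()" wait from dummy_udc_stop() to the pullup-off path, so it runs before unbind rather than after. The wait itself is the classic drop-the-lock/sleep/retake loop, modelled here in userspace (pthread C, hypothetical names):

#include <pthread.h>
#include <unistd.h>

struct emul {
	pthread_mutex_t lock;
	int callback_usage;   /* number of callbacks currently running */
};

/* Call with e->lock held; returns with it held and callback_usage == 0.
 * The lock must be dropped inside the loop so the callbacks being waited
 * on can themselves acquire it and finish. */
static void wait_for_callbacks(struct emul *e)
{
	while (e->callback_usage > 0) {
		pthread_mutex_unlock(&e->lock);
		usleep(1000);                 /* brief nap, like usleep_range() */
		pthread_mutex_lock(&e->lock); /* recheck only under the lock */
	}
}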
59041 diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
59042 index d6ca50f01985..75bf446f4a66 100644
59043 --- a/drivers/usb/gadget/udc/fotg210-udc.c
59044 +++ b/drivers/usb/gadget/udc/fotg210-udc.c
59045 @@ -338,15 +338,16 @@ static void fotg210_start_dma(struct fotg210_ep *ep,
59046                 } else {
59047                         buffer = req->req.buf + req->req.actual;
59048                         length = ioread32(ep->fotg210->reg +
59049 -                                       FOTG210_FIBCR(ep->epnum - 1));
59050 -                       length &= FIBCR_BCFX;
59051 +                                       FOTG210_FIBCR(ep->epnum - 1)) & FIBCR_BCFX;
59052 +                       if (length > req->req.length - req->req.actual)
59053 +                               length = req->req.length - req->req.actual;
59054                 }
59055         } else {
59056                 buffer = req->req.buf + req->req.actual;
59057                 if (req->req.length - req->req.actual > ep->ep.maxpacket)
59058                         length = ep->ep.maxpacket;
59059                 else
59060 -                       length = req->req.length;
59061 +                       length = req->req.length - req->req.actual;
59062         }
59064         d = dma_map_single(dev, buffer, length,
59065 @@ -379,8 +380,7 @@ static void fotg210_ep0_queue(struct fotg210_ep *ep,
59066         }
59067         if (ep->dir_in) { /* if IN */
59068                 fotg210_start_dma(ep, req);
59069 -               if ((req->req.length == req->req.actual) ||
59070 -                   (req->req.actual < ep->ep.maxpacket))
59071 +               if (req->req.length == req->req.actual)
59072                         fotg210_done(ep, req, 0);
59073         } else { /* OUT */
59074                 u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR0);
59075 @@ -820,7 +820,7 @@ static void fotg210_ep0in(struct fotg210_udc *fotg210)
59076                 if (req->req.length)
59077                         fotg210_start_dma(ep, req);
59079 -               if ((req->req.length - req->req.actual) < ep->ep.maxpacket)
59080 +               if (req->req.actual == req->req.length)
59081                         fotg210_done(ep, req, 0);
59082         } else {
59083                 fotg210_set_cxdone(fotg210);
59084 @@ -849,12 +849,16 @@ static void fotg210_out_fifo_handler(struct fotg210_ep *ep)
59086         struct fotg210_request *req = list_entry(ep->queue.next,
59087                                                  struct fotg210_request, queue);
59088 +       int disgr1 = ioread32(ep->fotg210->reg + FOTG210_DISGR1);
59090         fotg210_start_dma(ep, req);
59092 -       /* finish out transfer */
59093 +       /* Complete the request when it's full or a short packet arrived.
59094 +        * Like other drivers, short_not_ok isn't handled.
59095 +        */
59097         if (req->req.length == req->req.actual ||
59098 -           req->req.actual < ep->ep.maxpacket)
59099 +           (disgr1 & DISGR1_SPK_INT(ep->epnum - 1)))
59100                 fotg210_done(ep, req, 0);
59103 @@ -1027,6 +1031,12 @@ static void fotg210_init(struct fotg210_udc *fotg210)
59104         value &= ~DMCR_GLINT_EN;
59105         iowrite32(value, fotg210->reg + FOTG210_DMCR);
59107 +       /* enable only grp2 irqs we handle */
59108 +       iowrite32(~(DISGR2_DMA_ERROR | DISGR2_RX0BYTE_INT | DISGR2_TX0BYTE_INT
59109 +                   | DISGR2_ISO_SEQ_ABORT_INT | DISGR2_ISO_SEQ_ERR_INT
59110 +                   | DISGR2_RESM_INT | DISGR2_SUSP_INT | DISGR2_USBRST_INT),
59111 +                 fotg210->reg + FOTG210_DMISGR2);
59113         /* disable all fifo interrupt */
59114         iowrite32(~(u32)0, fotg210->reg + FOTG210_DMISGR1);
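The OUT-FIFO hunk above changes the completion rule: a request finishes when every requested byte has arrived or when the controller's short-packet interrupt fired, rather than inferring a short packet from actual < maxpacket. The rule on its own (simplified fields, not the driver code):

#include <stdbool.h>

struct request { unsigned length, actual; };

/* An OUT transfer is done when the buffer is full or the host ended it
 * early with a short packet (the controller's SPK irq bit). */
static bool out_request_done(const struct request *r, bool short_pkt_irq)
{
	return r->actual == r->length || short_pkt_irq;
}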
59116 diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
59117 index a3c1fc924268..fd3656d0f760 100644
59118 --- a/drivers/usb/gadget/udc/pch_udc.c
59119 +++ b/drivers/usb/gadget/udc/pch_udc.c
59120 @@ -7,12 +7,14 @@
59121  #include <linux/module.h>
59122  #include <linux/pci.h>
59123  #include <linux/delay.h>
59124 +#include <linux/dmi.h>
59125  #include <linux/errno.h>
59126 +#include <linux/gpio/consumer.h>
59127 +#include <linux/gpio/machine.h>
59128  #include <linux/list.h>
59129  #include <linux/interrupt.h>
59130  #include <linux/usb/ch9.h>
59131  #include <linux/usb/gadget.h>
59132 -#include <linux/gpio/consumer.h>
59133  #include <linux/irq.h>
59135  #define PCH_VBUS_PERIOD                3000    /* VBUS polling period (msec) */
59136 @@ -596,18 +598,22 @@ static void pch_udc_reconnect(struct pch_udc_dev *dev)
59137  static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
59138                                           int is_active)
59140 +       unsigned long           iflags;
59142 +       spin_lock_irqsave(&dev->lock, iflags);
59143         if (is_active) {
59144                 pch_udc_reconnect(dev);
59145                 dev->vbus_session = 1;
59146         } else {
59147                 if (dev->driver && dev->driver->disconnect) {
59148 -                       spin_lock(&dev->lock);
59149 +                       spin_unlock_irqrestore(&dev->lock, iflags);
59150                         dev->driver->disconnect(&dev->gadget);
59151 -                       spin_unlock(&dev->lock);
59152 +                       spin_lock_irqsave(&dev->lock, iflags);
59153                 }
59154                 pch_udc_set_disconnect(dev);
59155                 dev->vbus_session = 0;
59156         }
59157 +       spin_unlock_irqrestore(&dev->lock, iflags);
59160  /**
59161 @@ -1166,20 +1172,25 @@ static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
59162  static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
59164         struct pch_udc_dev      *dev;
59165 +       unsigned long           iflags;
59167         if (!gadget)
59168                 return -EINVAL;
59170         dev = container_of(gadget, struct pch_udc_dev, gadget);
59172 +       spin_lock_irqsave(&dev->lock, iflags);
59173         if (is_on) {
59174                 pch_udc_reconnect(dev);
59175         } else {
59176                 if (dev->driver && dev->driver->disconnect) {
59177 -                       spin_lock(&dev->lock);
59178 +                       spin_unlock_irqrestore(&dev->lock, iflags);
59179                         dev->driver->disconnect(&dev->gadget);
59180 -                       spin_unlock(&dev->lock);
59181 +                       spin_lock_irqsave(&dev->lock, iflags);
59182                 }
59183                 pch_udc_set_disconnect(dev);
59184         }
59185 +       spin_unlock_irqrestore(&dev->lock, iflags);
59187         return 0;
59189 @@ -1350,6 +1361,43 @@ static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
59190         return IRQ_HANDLED;
59193 +static struct gpiod_lookup_table minnowboard_udc_gpios = {
59194 +       .dev_id         = "0000:02:02.4",
59195 +       .table          = {
59196 +               GPIO_LOOKUP("sch_gpio.33158", 12, NULL, GPIO_ACTIVE_HIGH),
59197 +               {}
59198 +       },
59199 +};
59200 +
59201 +static const struct dmi_system_id pch_udc_gpio_dmi_table[] = {
59202 +       {
59203 +               .ident = "MinnowBoard",
59204 +               .matches = {
59205 +                       DMI_MATCH(DMI_BOARD_NAME, "MinnowBoard"),
59206 +               },
59207 +               .driver_data = &minnowboard_udc_gpios,
59208 +       },
59209 +       { }
59210 +};
59211 +
59212 +static void pch_vbus_gpio_remove_table(void *table)
59213 +{
59214 +       gpiod_remove_lookup_table(table);
59215 +}
59216 +
59217 +static int pch_vbus_gpio_add_table(struct pch_udc_dev *dev)
59218 +{
59219 +       struct device *d = &dev->pdev->dev;
59220 +       const struct dmi_system_id *dmi;
59221 +
59222 +       dmi = dmi_first_match(pch_udc_gpio_dmi_table);
59223 +       if (!dmi)
59224 +               return 0;
59225 +
59226 +       gpiod_add_lookup_table(dmi->driver_data);
59227 +       return devm_add_action_or_reset(d, pch_vbus_gpio_remove_table, dmi->driver_data);
59228 +}
59229 +
59230  /**
59231   * pch_vbus_gpio_init() - This API initializes GPIO port detecting VBUS.
59232   * @dev:               Reference to the driver structure
59233 @@ -1360,6 +1408,7 @@ static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
59234   */
59235  static int pch_vbus_gpio_init(struct pch_udc_dev *dev)
59237 +       struct device *d = &dev->pdev->dev;
59238         int err;
59239         int irq_num = 0;
59240         struct gpio_desc *gpiod;
59241 @@ -1367,8 +1416,12 @@ static int pch_vbus_gpio_init(struct pch_udc_dev *dev)
59242         dev->vbus_gpio.port = NULL;
59243         dev->vbus_gpio.intr = 0;
59245 +       err = pch_vbus_gpio_add_table(dev);
59246 +       if (err)
59247 +               return err;
59249         /* Retrieve the GPIO line from the USB gadget device */
59250 -       gpiod = devm_gpiod_get(dev->gadget.dev.parent, NULL, GPIOD_IN);
59251 +       gpiod = devm_gpiod_get_optional(d, NULL, GPIOD_IN);
59252         if (IS_ERR(gpiod))
59253                 return PTR_ERR(gpiod);
59254         gpiod_set_consumer_name(gpiod, "pch_vbus");
59255 @@ -1756,7 +1809,7 @@ static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
59256         }
59257         /* prevent from using desc. - set HOST BUSY */
59258         dma_desc->status |= PCH_UDC_BS_HST_BSY;
59259 -       dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID);
59260 +       dma_desc->dataptr = lower_32_bits(DMA_ADDR_INVALID);
59261         req->td_data = dma_desc;
59262         req->td_data_last = dma_desc;
59263         req->chain_len = 1;
59264 @@ -2298,6 +2351,21 @@ static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
59265                 pch_udc_set_dma(dev, DMA_DIR_RX);
59268 +static int pch_udc_gadget_setup(struct pch_udc_dev *dev)
59269 +       __must_hold(&dev->lock)
59270 +{
59271 +       int rc;
59272 +
59273 +       /* In some cases we can get an interrupt before driver gets setup */
59274 +       if (!dev->driver)
59275 +               return -ESHUTDOWN;
59276 +
59277 +       spin_unlock(&dev->lock);
59278 +       rc = dev->driver->setup(&dev->gadget, &dev->setup_data);
59279 +       spin_lock(&dev->lock);
59280 +       return rc;
59281 +}
59282 +
59283  /**
59284   * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts
59285   * @dev:       Reference to the device structure
59286 @@ -2369,15 +2437,12 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
59287                         dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
59288                 else /* OUT */
59289                         dev->gadget.ep0 = &ep->ep;
59290 -               spin_lock(&dev->lock);
59291                 /* If Mass storage Reset */
59292                 if ((dev->setup_data.bRequestType == 0x21) &&
59293                     (dev->setup_data.bRequest == 0xFF))
59294                         dev->prot_stall = 0;
59295                 /* call gadget with setup data received */
59296 -               setup_supported = dev->driver->setup(&dev->gadget,
59297 -                                                    &dev->setup_data);
59298 -               spin_unlock(&dev->lock);
59299 +               setup_supported = pch_udc_gadget_setup(dev);
59301                 if (dev->setup_data.bRequestType & USB_DIR_IN) {
59302                         ep->td_data->status = (ep->td_data->status &
59303 @@ -2625,9 +2690,7 @@ static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
59304                 dev->ep[i].halted = 0;
59305         }
59306         dev->stall = 0;
59307 -       spin_unlock(&dev->lock);
59308 -       dev->driver->setup(&dev->gadget, &dev->setup_data);
59309 -       spin_lock(&dev->lock);
59310 +       pch_udc_gadget_setup(dev);
59313  /**
59314 @@ -2662,9 +2725,7 @@ static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
59315         dev->stall = 0;
59317         /* call gadget zero with setup data received */
59318 -       spin_unlock(&dev->lock);
59319 -       dev->driver->setup(&dev->gadget, &dev->setup_data);
59320 -       spin_lock(&dev->lock);
59321 +       pch_udc_gadget_setup(dev);
59324  /**
59325 @@ -2870,14 +2931,20 @@ static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
59326   * @dev:       Reference to the driver structure
59327   *
59328   * Return codes:
59329 - *     0: Success
59330 + *     0:              Success
59331 + *     -%ERRNO:        All kinds of errors when retrieving VBUS GPIO
59332   */
59333  static int pch_udc_pcd_init(struct pch_udc_dev *dev)
59335 +       int ret;
59337         pch_udc_init(dev);
59338         pch_udc_pcd_reinit(dev);
59339 -       pch_vbus_gpio_init(dev);
59340 -       return 0;
59342 +       ret = pch_vbus_gpio_init(dev);
59343 +       if (ret)
59344 +               pch_udc_exit(dev);
59345 +       return ret;
59348  /**
59349 @@ -2938,7 +3005,7 @@ static int init_dma_pools(struct pch_udc_dev *dev)
59350         dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf,
59351                                        UDC_EP0OUT_BUFF_SIZE * 4,
59352                                        DMA_FROM_DEVICE);
59353 -       return 0;
59354 +       return dma_mapping_error(&dev->pdev->dev, dev->dma_addr);
59357  static int pch_udc_start(struct usb_gadget *g,
59358 @@ -3063,6 +3130,7 @@ static int pch_udc_probe(struct pci_dev *pdev,
59359         if (retval)
59360                 return retval;
59362 +       dev->pdev = pdev;
59363         pci_set_drvdata(pdev, dev);
59365         /* Determine BAR based on PCI ID */
59366 @@ -3078,16 +3146,10 @@ static int pch_udc_probe(struct pci_dev *pdev,
59368         dev->base_addr = pcim_iomap_table(pdev)[bar];
59370 -       /*
59371 -        * FIXME: add a GPIO descriptor table to pdev.dev using
59372 -        * gpiod_add_descriptor_table() from <linux/gpio/machine.h> based on
59373 -        * the PCI subsystem ID. The system-dependent GPIO is necessary for
59374 -        * VBUS operation.
59375 -        */
59377         /* initialize the hardware */
59378 -       if (pch_udc_pcd_init(dev))
59379 -               return -ENODEV;
59380 +       retval = pch_udc_pcd_init(dev);
59381 +       if (retval)
59382 +               return retval;
59384         pci_enable_msi(pdev);
59386 @@ -3104,7 +3166,6 @@ static int pch_udc_probe(struct pci_dev *pdev,
59388         /* device struct setup */
59389         spin_lock_init(&dev->lock);
59390 -       dev->pdev = pdev;
59391         dev->gadget.ops = &pch_udc_ops;
59393         retval = init_dma_pools(dev);
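Several pch_udc hunks follow one pattern: never call into the gadget driver (->setup(), ->disconnect()) while holding the UDC spinlock, since the callback may re-enter the UDC and take the same lock. A compact userspace model of that call-out discipline (pthread C, hypothetical names):

#include <pthread.h>

struct udc {
	pthread_mutex_t lock;
	int (*setup)(struct udc *u);  /* models dev->driver->setup */
};

/* Call with u->lock held; returns with it held again. */
static int call_setup_locked(struct udc *u)
{
	int rc;

	if (!u->setup)                /* an irq can arrive before bind */
		return -1;

	pthread_mutex_unlock(&u->lock);
	rc = u->setup(u);             /* the callback may recurse into the UDC */
	pthread_mutex_lock(&u->lock);
	return rc;
}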
59394 diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
59395 index 896c1a016d55..65cae4883454 100644
59396 --- a/drivers/usb/gadget/udc/r8a66597-udc.c
59397 +++ b/drivers/usb/gadget/udc/r8a66597-udc.c
59398 @@ -1849,6 +1849,8 @@ static int r8a66597_probe(struct platform_device *pdev)
59399                 return PTR_ERR(reg);
59401         ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
59402 +       if (!ires)
59403 +               return -EINVAL;
59404         irq = ires->start;
59405         irq_trigger = ires->flags & IRQF_TRIGGER_MASK;
59407 diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
59408 index 1d3ebb07ccd4..b154b62abefa 100644
59409 --- a/drivers/usb/gadget/udc/s3c2410_udc.c
59410 +++ b/drivers/usb/gadget/udc/s3c2410_udc.c
59411 @@ -54,8 +54,6 @@ static struct clk             *udc_clock;
59412  static struct clk              *usb_bus_clock;
59413  static void __iomem            *base_addr;
59414  static int                     irq_usbd;
59415 -static u64                     rsrc_start;
59416 -static u64                     rsrc_len;
59417  static struct dentry           *s3c2410_udc_debugfs_root;
59419  static inline u32 udc_read(u32 reg)
59420 @@ -1752,7 +1750,8 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
59421         udc_clock = clk_get(NULL, "usb-device");
59422         if (IS_ERR(udc_clock)) {
59423                 dev_err(dev, "failed to get udc clock source\n");
59424 -               return PTR_ERR(udc_clock);
59425 +               retval = PTR_ERR(udc_clock);
59426 +               goto err_usb_bus_clk;
59427         }
59429         clk_prepare_enable(udc_clock);
59430 @@ -1775,7 +1774,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
59431         base_addr = devm_platform_ioremap_resource(pdev, 0);
59432         if (IS_ERR(base_addr)) {
59433                 retval = PTR_ERR(base_addr);
59434 -               goto err_mem;
59435 +               goto err_udc_clk;
59436         }
59438         the_controller = udc;
59439 @@ -1793,7 +1792,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
59440         if (retval != 0) {
59441                 dev_err(dev, "cannot get irq %i, err %d\n", irq_usbd, retval);
59442                 retval = -EBUSY;
59443 -               goto err_map;
59444 +               goto err_udc_clk;
59445         }
59447         dev_dbg(dev, "got irq %i\n", irq_usbd);
59448 @@ -1864,10 +1863,14 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
59449                 gpio_free(udc_info->vbus_pin);
59450  err_int:
59451         free_irq(irq_usbd, udc);
59452 -err_map:
59453 -       iounmap(base_addr);
59454 -err_mem:
59455 -       release_mem_region(rsrc_start, rsrc_len);
59456 +err_udc_clk:
59457 +       clk_disable_unprepare(udc_clock);
59458 +       clk_put(udc_clock);
59459 +       udc_clock = NULL;
59460 +err_usb_bus_clk:
59461 +       clk_disable_unprepare(usb_bus_clock);
59462 +       clk_put(usb_bus_clock);
59463 +       usb_bus_clock = NULL;
59465         return retval;
59467 @@ -1899,9 +1902,6 @@ static int s3c2410_udc_remove(struct platform_device *pdev)
59469         free_irq(irq_usbd, udc);
59471 -       iounmap(base_addr);
59472 -       release_mem_region(rsrc_start, rsrc_len);
59474         if (!IS_ERR(udc_clock) && udc_clock != NULL) {
59475                 clk_disable_unprepare(udc_clock);
59476                 clk_put(udc_clock);
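The s3c2410 probe hunks are a goto-unwind repair: every failure jumps to a label that releases exactly what was acquired so far, in reverse order, instead of to labels left over from the removed iomap code. The general shape of the idiom, with hypothetical acquire/release helpers:

/* Hypothetical helpers standing in for clk_get/clk_put and friends. */
int get_bus_clock(void);
int get_udc_clock(void);
int request_irq_line(void);
void put_udc_clock(void);
void put_bus_clock(void);

int probe_like(void)
{
	int err;

	err = get_bus_clock();
	if (err)
		return err;           /* nothing to undo yet */

	err = get_udc_clock();
	if (err)
		goto err_bus_clk;

	err = request_irq_line();
	if (err)
		goto err_udc_clk;

	return 0;

err_udc_clk:
	put_udc_clock();              /* undo in reverse acquisition order */
err_bus_clk:
	put_bus_clock();
	return err;
}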
59477 diff --git a/drivers/usb/gadget/udc/snps_udc_plat.c b/drivers/usb/gadget/udc/snps_udc_plat.c
59478 index 32f1d3e90c26..99805d60a7ab 100644
59479 --- a/drivers/usb/gadget/udc/snps_udc_plat.c
59480 +++ b/drivers/usb/gadget/udc/snps_udc_plat.c
59481 @@ -114,8 +114,8 @@ static int udc_plat_probe(struct platform_device *pdev)
59483         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
59484         udc->virt_addr = devm_ioremap_resource(dev, res);
59485 -       if (IS_ERR(udc->regs))
59486 -               return PTR_ERR(udc->regs);
59487 +       if (IS_ERR(udc->virt_addr))
59488 +               return PTR_ERR(udc->virt_addr);
59490         /* udc csr registers base */
59491         udc->csr = udc->virt_addr + UDC_CSR_ADDR;
59492 diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
59493 index 580bef8eb4cb..2319c9737c2b 100644
59494 --- a/drivers/usb/gadget/udc/tegra-xudc.c
59495 +++ b/drivers/usb/gadget/udc/tegra-xudc.c
59496 @@ -3883,7 +3883,7 @@ static int tegra_xudc_remove(struct platform_device *pdev)
59498         pm_runtime_get_sync(xudc->dev);
59500 -       cancel_delayed_work(&xudc->plc_reset_work);
59501 +       cancel_delayed_work_sync(&xudc->plc_reset_work);
59502         cancel_work_sync(&xudc->usb_role_sw_work);
59504         usb_del_gadget_udc(&xudc->gadget);
59505 diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
59506 index b94f2a070c05..df9428f1dc5e 100644
59507 --- a/drivers/usb/host/Kconfig
59508 +++ b/drivers/usb/host/Kconfig
59509 @@ -272,6 +272,7 @@ config USB_EHCI_TEGRA
59510         select USB_CHIPIDEA
59511         select USB_CHIPIDEA_HOST
59512         select USB_CHIPIDEA_TEGRA
59513 +       select USB_GADGET
59514         help
59515           This option is deprecated now and the driver was removed, use
59516           USB_CHIPIDEA_TEGRA instead.
59517 diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
59518 index 5617ef30530a..f0e4a315cc81 100644
59519 --- a/drivers/usb/host/fotg210-hcd.c
59520 +++ b/drivers/usb/host/fotg210-hcd.c
59521 @@ -5568,7 +5568,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
59522         struct usb_hcd *hcd;
59523         struct resource *res;
59524         int irq;
59525 -       int retval = -ENODEV;
59526 +       int retval;
59527         struct fotg210_hcd *fotg210;
59529         if (usb_disabled())
59530 @@ -5588,7 +5588,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
59531         hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev,
59532                         dev_name(dev));
59533         if (!hcd) {
59534 -               dev_err(dev, "failed to create hcd with err %d\n", retval);
59535 +               dev_err(dev, "failed to create hcd\n");
59536                 retval = -ENOMEM;
59537                 goto fail_create_hcd;
59538         }
59539 diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
59540 index 115ced0d93e1..1be692d3cf90 100644
59541 --- a/drivers/usb/host/sl811-hcd.c
59542 +++ b/drivers/usb/host/sl811-hcd.c
59543 @@ -1287,11 +1287,10 @@ sl811h_hub_control(
59544                         goto error;
59545                 put_unaligned_le32(sl811->port1, buf);
59547 -#ifndef        VERBOSE
59548 -       if (*(u16*)(buf+2))     /* only if wPortChange is interesting */
59549 -#endif
59550 -               dev_dbg(hcd->self.controller, "GetPortStatus %08x\n",
59551 -                       sl811->port1);
59552 +               if (__is_defined(VERBOSE) ||
59553 +                   *(u16*)(buf+2)) /* only if wPortChange is interesting */
59554 +                       dev_dbg(hcd->self.controller, "GetPortStatus %08x\n",
59555 +                               sl811->port1);
59556                 break;
59557         case SetPortFeature:
59558                 if (wIndex != 1 || wLength != 0)
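The sl811 hunk replaces an #ifndef wrapped around half a statement with __is_defined() from <linux/kconfig.h>, turning the condition into ordinary C. The macro relies on token pasting: a symbol defined as 1 expands a placeholder into an extra argument, shifting a 1 into the picked slot. A miniature of the same trick, renamed so it does not clash with the kernel's:

/* If FOO is defined as 1, PLACEHOLDER_##FOO becomes "0," and the picker
 * sees (0, 1, 0) and selects 1; if FOO is undefined it sees
 * (PLACEHOLDER_FOO 1, 0) and selects 0. Works in plain C expressions. */
#define PLACEHOLDER_1 0,
#define take_second_arg(ignored, val, ...) val
#define is_defined(x)  is_defined_(x)
#define is_defined_(v) is_defined__(PLACEHOLDER_##v)
#define is_defined__(arg1_or_junk) take_second_arg(arg1_or_junk 1, 0)

#define VERBOSE 1
_Static_assert(is_defined(VERBOSE) == 1, "defined-as-1 evaluates to 1");
_Static_assert(is_defined(NOT_SET) == 0, "undefined evaluates to 0");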
59559 diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
59560 index fa59b242cd51..e8af0a125f84 100644
59561 --- a/drivers/usb/host/xhci-ext-caps.h
59562 +++ b/drivers/usb/host/xhci-ext-caps.h
59563 @@ -7,8 +7,9 @@
59564   * Author: Sarah Sharp
59565   * Some code borrowed from the Linux EHCI driver.
59566   */
59567 -/* Up to 16 ms to halt an HC */
59568 -#define XHCI_MAX_HALT_USEC     (16*1000)
59570 +/* HC should halt within 16 ms, but use 32 ms as some hosts take longer */
59571 +#define XHCI_MAX_HALT_USEC     (32 * 1000)
59572  /* HC not running - set to 1 when run/stop bit is cleared. */
59573  #define XHCI_STS_HALT          (1<<0)
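Doubling XHCI_MAX_HALT_USEC only widens the budget of the handshake loop that polls STS_HALT. A generic poll-until-timeout sketch in the same spirit (caller-supplied stand-ins for the register read and delay; not the xhci handshake code itself):

#include <stdbool.h>

#define MAX_HALT_USEC (32 * 1000)  /* spec allows 16 ms; slow hosts get 32 */

static bool wait_halted(bool (*ready)(void), void (*delay_us)(unsigned))
{
	for (unsigned spent = 0; spent < MAX_HALT_USEC; spent++) {
		if (ready())
			return true;   /* controller reported halt in time */
		delay_us(1);
	}
	return false;                  /* timed out: treat the HC as dead */
}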
59575 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
59576 index f2c4ee7c4786..717c122f9449 100644
59577 --- a/drivers/usb/host/xhci-mem.c
59578 +++ b/drivers/usb/host/xhci-mem.c
59579 @@ -2129,6 +2129,15 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
59581         if (major_revision == 0x03) {
59582                 rhub = &xhci->usb3_rhub;
59583 +               /*
59584 +                * Some hosts incorrectly use sub-minor version for minor
59585 +                * version (i.e. 0x02 instead of 0x20 for bcdUSB 0x320 and 0x01
59586 +                * for bcdUSB 0x310). Since there is no USB release with bcdUSB
59587 +                * 0x301 to 0x309, we can assume that they are
59588 +                * incorrect and fix it here.
59589 +                */
59590 +               if (minor_revision > 0x00 && minor_revision < 0x10)
59591 +                       minor_revision <<= 4;
59592         } else if (major_revision <= 0x02) {
59593                 rhub = &xhci->usb2_rhub;
59594         } else {
59595 @@ -2240,6 +2249,9 @@ static void xhci_create_rhub_port_array(struct xhci_hcd *xhci,
59596                 return;
59597         rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports),
59598                         flags, dev_to_node(dev));
59599 +       if (!rhub->ports)
59600 +               return;
59602         for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
59603                 if (xhci->hw_ports[i].rhub != rhub ||
59604                     xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
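The xhci-mem.c quirk promotes a bogus sub-minor nibble into the minor nibble: bcdUSB 0x310 should yield minor revision 0x10, but broken hosts report 0x01. The arithmetic in isolation, with a small self-check:

#include <assert.h>

/* 0x01..0x09 can never be legitimate minor revisions, so shifting them
 * up one nibble is safe. */
unsigned char fix_minor_revision(unsigned char minor)
{
	if (minor > 0x00 && minor < 0x10)
		minor <<= 4;  /* 0x01 -> 0x10 (USB 3.1), 0x02 -> 0x20 (USB 3.2) */
	return minor;
}

void fix_minor_revision_check(void)
{
	assert(fix_minor_revision(0x01) == 0x10);
	assert(fix_minor_revision(0x02) == 0x20);
	assert(fix_minor_revision(0x20) == 0x20);  /* sane values pass through */
}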
59605 diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
59606 index b45e5bf08997..8950d1f10a7f 100644
59607 --- a/drivers/usb/host/xhci-mtk-sch.c
59608 +++ b/drivers/usb/host/xhci-mtk-sch.c
59609 @@ -378,6 +378,31 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
59610         sch_ep->allocated = used;
59613 +static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
59614 +{
59615 +       struct mu3h_sch_tt *tt = sch_ep->sch_tt;
59616 +       u32 num_esit, tmp;
59617 +       int base;
59618 +       int i, j;
59619 +
59620 +       num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
59621 +       for (i = 0; i < num_esit; i++) {
59622 +               base = offset + i * sch_ep->esit;
59624 +               /*
59625 +                * Compared with the HS bus, regardless of ep type,
59626 +                * the hub always delays one uframe before sending data
59627 +                */
59628 +               for (j = 0; j < sch_ep->cs_count; j++) {
59629 +                       tmp = tt->fs_bus_bw[base + j] + sch_ep->bw_cost_per_microframe;
59630 +                       if (tmp > FS_PAYLOAD_MAX)
59631 +                               return -ERANGE;
59632 +               }
59633 +       }
59634 +
59635 +       return 0;
59636 +}
59637 +
59638  static int check_sch_tt(struct usb_device *udev,
59639         struct mu3h_sch_ep_info *sch_ep, u32 offset)
59641 @@ -402,7 +427,7 @@ static int check_sch_tt(struct usb_device *udev,
59642                         return -ERANGE;
59644                 for (i = 0; i < sch_ep->cs_count; i++)
59645 -                       if (test_bit(offset + i, tt->split_bit_map))
59646 +                       if (test_bit(offset + i, tt->ss_bit_map))
59647                                 return -ERANGE;
59649         } else {
59650 @@ -432,7 +457,7 @@ static int check_sch_tt(struct usb_device *udev,
59651                         cs_count = 7; /* HW limit */
59653                 for (i = 0; i < cs_count + 2; i++) {
59654 -                       if (test_bit(offset + i, tt->split_bit_map))
59655 +                       if (test_bit(offset + i, tt->ss_bit_map))
59656                                 return -ERANGE;
59657                 }
59659 @@ -448,24 +473,44 @@ static int check_sch_tt(struct usb_device *udev,
59660                         sch_ep->num_budget_microframes = sch_ep->esit;
59661         }
59663 -       return 0;
59664 +       return check_fs_bus_bw(sch_ep, offset);
59667  static void update_sch_tt(struct usb_device *udev,
59668 -       struct mu3h_sch_ep_info *sch_ep)
59669 +       struct mu3h_sch_ep_info *sch_ep, bool used)
59671         struct mu3h_sch_tt *tt = sch_ep->sch_tt;
59672         u32 base, num_esit;
59673 +       int bw_updated;
59674 +       int bits;
59675         int i, j;
59677         num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
59678 +       bits = (sch_ep->ep_type == ISOC_OUT_EP) ? sch_ep->cs_count : 1;
59680 +       if (used)
59681 +               bw_updated = sch_ep->bw_cost_per_microframe;
59682 +       else
59683 +               bw_updated = -sch_ep->bw_cost_per_microframe;
59685         for (i = 0; i < num_esit; i++) {
59686                 base = sch_ep->offset + i * sch_ep->esit;
59687 -               for (j = 0; j < sch_ep->num_budget_microframes; j++)
59688 -                       set_bit(base + j, tt->split_bit_map);
59690 +               for (j = 0; j < bits; j++) {
59691 +                       if (used)
59692 +                               set_bit(base + j, tt->ss_bit_map);
59693 +                       else
59694 +                               clear_bit(base + j, tt->ss_bit_map);
59695 +               }
59697 +               for (j = 0; j < sch_ep->cs_count; j++)
59698 +                       tt->fs_bus_bw[base + j] += bw_updated;
59699         }
59701 -       list_add_tail(&sch_ep->tt_endpoint, &tt->ep_list);
59702 +       if (used)
59703 +               list_add_tail(&sch_ep->tt_endpoint, &tt->ep_list);
59704 +       else
59705 +               list_del(&sch_ep->tt_endpoint);
59708  static int check_sch_bw(struct usb_device *udev,
59709 @@ -535,7 +580,7 @@ static int check_sch_bw(struct usb_device *udev,
59710                 if (!tt_offset_ok)
59711                         return -ERANGE;
59713 -               update_sch_tt(udev, sch_ep);
59714 +               update_sch_tt(udev, sch_ep, 1);
59715         }
59717         /* update bus bandwidth info */
59718 @@ -548,15 +593,16 @@ static void destroy_sch_ep(struct usb_device *udev,
59719         struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep)
59721         /* only release ep bw check passed by check_sch_bw() */
59722 -       if (sch_ep->allocated)
59723 +       if (sch_ep->allocated) {
59724                 update_bus_bw(sch_bw, sch_ep, 0);
59725 +               if (sch_ep->sch_tt)
59726 +                       update_sch_tt(udev, sch_ep, 0);
59727 +       }
59729 -       list_del(&sch_ep->endpoint);
59731 -       if (sch_ep->sch_tt) {
59732 -               list_del(&sch_ep->tt_endpoint);
59733 +       if (sch_ep->sch_tt)
59734                 drop_tt(udev);
59735 -       }
59737 +       list_del(&sch_ep->endpoint);
59738         kfree(sch_ep);
59741 @@ -643,7 +689,7 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
59742                  */
59743                 if (usb_endpoint_xfer_int(&ep->desc)
59744                         || usb_endpoint_xfer_isoc(&ep->desc))
59745 -                       ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(1));
59746 +                       ep_ctx->reserved[0] = cpu_to_le32(EP_BPKTS(1));
59748                 return 0;
59749         }
59750 @@ -730,10 +776,10 @@ int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
59751                 list_move_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
59753                 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
59754 -               ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
59755 +               ep_ctx->reserved[0] = cpu_to_le32(EP_BPKTS(sch_ep->pkts)
59756                         | EP_BCSCOUNT(sch_ep->cs_count)
59757                         | EP_BBM(sch_ep->burst_mode));
59758 -               ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
59759 +               ep_ctx->reserved[1] = cpu_to_le32(EP_BOFFSET(sch_ep->offset)
59760                         | EP_BREPEAT(sch_ep->repeat));
59762                 xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
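The scheduler hunks above add per-microframe byte accounting for each TT: an endpoint is rejected when its cost would push any microframe in its service interval past the full-speed payload limit. The accounting check in isolation (flattened to one window, illustrative names):

#include <stdbool.h>

#define MAX_ESIT       64    /* microframe window, as in XHCI_MTK_MAX_ESIT */
#define FS_PAYLOAD_MAX 188   /* max FS bytes per microframe */

/* Would adding `cost` bytes at every esit-spaced slot from `offset`
 * overflow the per-microframe budget? */
static bool fs_bw_fits(const unsigned bw[MAX_ESIT], unsigned esit,
		       unsigned offset, unsigned cost)
{
	for (unsigned i = offset; i < MAX_ESIT; i += esit)
		if (bw[i] + cost > FS_PAYLOAD_MAX)
			return false;
	return true;
}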
59763 diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
59764 index 2f27dc0d9c6b..1c331577fca9 100644
59765 --- a/drivers/usb/host/xhci-mtk.c
59766 +++ b/drivers/usb/host/xhci-mtk.c
59767 @@ -397,6 +397,8 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci)
59768         xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
59769         if (mtk->lpm_support)
59770                 xhci->quirks |= XHCI_LPM_SUPPORT;
59771 +       if (mtk->u2_lpm_disable)
59772 +               xhci->quirks |= XHCI_HW_LPM_DISABLE;
59774         /*
59775          * MTK xHCI 0.96: PSA is 1 by default even if doesn't support stream,
59776 @@ -469,6 +471,7 @@ static int xhci_mtk_probe(struct platform_device *pdev)
59777                 return ret;
59779         mtk->lpm_support = of_property_read_bool(node, "usb3-lpm-capable");
59780 +       mtk->u2_lpm_disable = of_property_read_bool(node, "usb2-lpm-disable");
59781         /* optional property, ignore the error if it does not exist */
59782         of_property_read_u32(node, "mediatek,u3p-dis-msk",
59783                              &mtk->u3p_dis_msk);
59784 diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
59785 index cbb09dfea62e..2fc0568ba054 100644
59786 --- a/drivers/usb/host/xhci-mtk.h
59787 +++ b/drivers/usb/host/xhci-mtk.h
59788 @@ -20,13 +20,15 @@
59789  #define XHCI_MTK_MAX_ESIT      64
59791  /**
59792 - * @split_bit_map: used to avoid split microframes overlay
59793 + * @ss_bit_map: used to avoid start-split microframes overlapping
59794 + * @fs_bus_bw: array to keep track of bandwidth already used for FS
59795   * @ep_list: Endpoints using this TT
59796   * @usb_tt: usb TT related
59797   * @tt_port: TT port number
59798   */
59799  struct mu3h_sch_tt {
59800 -       DECLARE_BITMAP(split_bit_map, XHCI_MTK_MAX_ESIT);
59801 +       DECLARE_BITMAP(ss_bit_map, XHCI_MTK_MAX_ESIT);
59802 +       u32 fs_bus_bw[XHCI_MTK_MAX_ESIT];
59803         struct list_head ep_list;
59804         struct usb_tt *usb_tt;
59805         int tt_port;
59806 @@ -150,6 +152,7 @@ struct xhci_hcd_mtk {
59807         struct phy **phys;
59808         int num_phys;
59809         bool lpm_support;
59810 +       bool u2_lpm_disable;
59811         /* usb remote wakeup */
59812         bool uwk_en;
59813         struct regmap *uwk;
59814 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
59815 index 5bbccc9a0179..7bc18cf8042c 100644
59816 --- a/drivers/usb/host/xhci-pci.c
59817 +++ b/drivers/usb/host/xhci-pci.c
59818 @@ -57,6 +57,7 @@
59819  #define PCI_DEVICE_ID_INTEL_CML_XHCI                   0xa3af
59820  #define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI            0x9a13
59821  #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI           0x1138
59822 +#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI            0x461e
59824  #define PCI_DEVICE_ID_AMD_PROMONTORYA_4                        0x43b9
59825  #define PCI_DEVICE_ID_AMD_PROMONTORYA_3                        0x43ba
59826 @@ -166,8 +167,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
59827             (pdev->device == 0x15e0 || pdev->device == 0x15e1))
59828                 xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND;
59830 -       if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5)
59831 +       if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5) {
59832                 xhci->quirks |= XHCI_DISABLE_SPARSE;
59833 +               xhci->quirks |= XHCI_RESET_ON_RESUME;
59834 +       }
59836         if (pdev->vendor == PCI_VENDOR_ID_AMD)
59837                 xhci->quirks |= XHCI_TRUST_TX_LENGTH;
59838 @@ -243,7 +246,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
59839              pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI ||
59840              pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI ||
59841              pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI ||
59842 -            pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI))
59843 +            pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI ||
59844 +            pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI))
59845                 xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
59847         if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
59848 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
59849 index ce38076901e2..6cdea0d00d19 100644
59850 --- a/drivers/usb/host/xhci-ring.c
59851 +++ b/drivers/usb/host/xhci-ring.c
59852 @@ -863,7 +863,7 @@ static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
59853         return ret;
59856 -static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
59857 +static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
59858                                 struct xhci_virt_ep *ep, unsigned int stream_id,
59859                                 struct xhci_td *td,
59860                                 enum xhci_ep_reset_type reset_type)
59861 @@ -876,7 +876,7 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
59862          * Device will be reset soon to recover the link so don't do anything
59863          */
59864         if (ep->vdev->flags & VDEV_PORT_ERROR)
59865 -               return;
59866 +               return -ENODEV;
59868         /* add td to cancelled list and let reset ep handler take care of it */
59869         if (reset_type == EP_HARD_RESET) {
59870 @@ -889,16 +889,18 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
59872         if (ep->ep_state & EP_HALTED) {
59873                 xhci_dbg(xhci, "Reset ep command already pending\n");
59874 -               return;
59875 +               return 0;
59876         }
59878         err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
59879         if (err)
59880 -               return;
59881 +               return err;
59883         ep->ep_state |= EP_HALTED;
59885         xhci_ring_cmd_db(xhci);
59887 +       return 0;
59890  /*
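
The change above turns a void helper into one that returns an error code, so callers can stop instead of carrying on after a reset that never happened. A minimal sketch of the same shape (hypothetical state, not the real xhci structures):

#include <errno.h>
#include <stdio.h>

/* Hypothetical endpoint state for illustration. */
struct ep { int port_error; int halted; };

/* Was 'void'; returning int lets the caller bail out early. */
static int handle_halted_ep(struct ep *ep)
{
        if (ep->port_error)
                return -ENODEV;   /* device going away, nothing was queued */
        if (ep->halted)
                return 0;         /* reset already pending, nothing to do */
        ep->halted = 1;           /* queue the reset command */
        return 0;
}

int main(void)
{
        struct ep ep = { .port_error = 1 };
        int err = handle_halted_ep(&ep);

        if (err)
                printf("bail out early: %d\n", err);
        return 0;
}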
59891 @@ -1015,6 +1017,7 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
59892         struct xhci_td *td = NULL;
59893         enum xhci_ep_reset_type reset_type;
59894         struct xhci_command *command;
59895 +       int err;
59897         if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
59898                 if (!xhci->devs[slot_id])
59899 @@ -1059,7 +1062,10 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
59900                                         td->status = -EPROTO;
59901                         }
59902                         /* reset ep, reset handler cleans up cancelled tds */
59903 -                       xhci_handle_halted_endpoint(xhci, ep, 0, td, reset_type);
59904 +                       err = xhci_handle_halted_endpoint(xhci, ep, 0, td,
59905 +                                                         reset_type);
59906 +                       if (err)
59907 +                               break;
59908                         xhci_stop_watchdog_timer_in_irq(xhci, ep);
59909                         return;
59910                 case EP_STATE_RUNNING:
59911 @@ -2129,16 +2135,13 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
59912         return 0;
59915 -static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
59916 -       struct xhci_transfer_event *event, struct xhci_virt_ep *ep)
59917 +static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
59918 +                    struct xhci_ring *ep_ring, struct xhci_td *td,
59919 +                    u32 trb_comp_code)
59921         struct xhci_ep_ctx *ep_ctx;
59922 -       struct xhci_ring *ep_ring;
59923 -       u32 trb_comp_code;
59925 -       ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
59926         ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
59927 -       trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
59929         switch (trb_comp_code) {
59930         case COMP_STOPPED_LENGTH_INVALID:
59931 @@ -2234,9 +2237,9 @@ static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
59932  /*
59933   * Process control tds, update urb status and actual_length.
59934   */
59935 -static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
59936 -       union xhci_trb *ep_trb, struct xhci_transfer_event *event,
59937 -       struct xhci_virt_ep *ep)
59938 +static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
59939 +               struct xhci_ring *ep_ring, struct xhci_td *td,
59940 +               union xhci_trb *ep_trb, struct xhci_transfer_event *event)
59942         struct xhci_ep_ctx *ep_ctx;
59943         u32 trb_comp_code;
59944 @@ -2324,15 +2327,15 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
59945                 td->urb->actual_length = requested;
59947  finish_td:
59948 -       return finish_td(xhci, td, event, ep);
59949 +       return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
59952  /*
59953   * Process isochronous tds, update urb packet status and actual_length.
59954   */
59955 -static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
59956 -       union xhci_trb *ep_trb, struct xhci_transfer_event *event,
59957 -       struct xhci_virt_ep *ep)
59958 +static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
59959 +               struct xhci_ring *ep_ring, struct xhci_td *td,
59960 +               union xhci_trb *ep_trb, struct xhci_transfer_event *event)
59962         struct urb_priv *urb_priv;
59963         int idx;
59964 @@ -2409,7 +2412,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
59966         td->urb->actual_length += frame->actual_length;
59968 -       return finish_td(xhci, td, event, ep);
59969 +       return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
59972  static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
59973 @@ -2441,17 +2444,15 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
59974  /*
59975   * Process bulk and interrupt tds, update urb status and actual_length.
59976   */
59977 -static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
59978 -       union xhci_trb *ep_trb, struct xhci_transfer_event *event,
59979 -       struct xhci_virt_ep *ep)
59980 +static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
59981 +               struct xhci_ring *ep_ring, struct xhci_td *td,
59982 +               union xhci_trb *ep_trb, struct xhci_transfer_event *event)
59984         struct xhci_slot_ctx *slot_ctx;
59985 -       struct xhci_ring *ep_ring;
59986         u32 trb_comp_code;
59987         u32 remaining, requested, ep_trb_len;
59989         slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
59990 -       ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
59991         trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
59992         remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
59993         ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
59994 @@ -2511,7 +2512,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
59995                           remaining);
59996                 td->urb->actual_length = 0;
59997         }
59998 -       return finish_td(xhci, td, event, ep);
60000 +       return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
60003  /*
60004 @@ -2854,11 +2856,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
60006                 /* update the urb's actual_length and give back to the core */
60007                 if (usb_endpoint_xfer_control(&td->urb->ep->desc))
60008 -                       process_ctrl_td(xhci, td, ep_trb, event, ep);
60009 +                       process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event);
60010                 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
60011 -                       process_isoc_td(xhci, td, ep_trb, event, ep);
60012 +                       process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event);
60013                 else
60014 -                       process_bulk_intr_td(xhci, td, ep_trb, event, ep);
60015 +                       process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event);
60016  cleanup:
60017                 handling_skipped_tds = ep->skip &&
60018                         trb_comp_code != COMP_MISSED_SERVICE_ERROR &&
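
The finish_td() rework above follows one refactoring idea: instead of each process_*_td() helper re-deriving ep_ring and the completion code from the raw event, the caller decodes the event once and passes the results down. A tiny sketch of that hoisting (toy decode, not the real GET_COMP_CODE macro):

#include <stdint.h>
#include <stdio.h>

struct event { uint32_t transfer_len; };

/* Hypothetical decode helper, modelling GET_COMP_CODE(). */
static uint32_t comp_code(const struct event *ev)
{
        return (ev->transfer_len >> 24) & 0xff;
}

/* After the refactor: the helper receives the decoded value... */
static int finish_td(uint32_t trb_comp_code)
{
        printf("finishing TD, comp code %u\n", trb_comp_code);
        return 0;
}

/* ...and the caller decodes the event exactly once for all paths. */
int main(void)
{
        struct event ev = { .transfer_len = 13u << 24 };

        return finish_td(comp_code(&ev));
}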
60019 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
60020 index 1975016f46bf..0d2f1c37ab74 100644
60021 --- a/drivers/usb/host/xhci.c
60022 +++ b/drivers/usb/host/xhci.c
60023 @@ -228,6 +228,7 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
60024         struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
60025         int err, i;
60026         u64 val;
60027 +       u32 intrs;
60029         /*
60030          * Some Renesas controllers get into a weird state if they are
60031 @@ -266,7 +267,10 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
60032         if (upper_32_bits(val))
60033                 xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
60035 -       for (i = 0; i < HCS_MAX_INTRS(xhci->hcs_params1); i++) {
60036 +       intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
60037 +                     ARRAY_SIZE(xhci->run_regs->ir_set));
60039 +       for (i = 0; i < intrs; i++) {
60040                 struct xhci_intr_reg __iomem *ir;
60042                 ir = &xhci->run_regs->ir_set[i];
60043 @@ -1510,7 +1514,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
60044   * we need to issue an evaluate context command and wait on it.
60045   */
60046  static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
60047 -               unsigned int ep_index, struct urb *urb)
60048 +               unsigned int ep_index, struct urb *urb, gfp_t mem_flags)
60050         struct xhci_container_ctx *out_ctx;
60051         struct xhci_input_control_ctx *ctrl_ctx;
60052 @@ -1541,7 +1545,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
60053                  * changes max packet sizes.
60054                  */
60056 -               command = xhci_alloc_command(xhci, true, GFP_KERNEL);
60057 +               command = xhci_alloc_command(xhci, true, mem_flags);
60058                 if (!command)
60059                         return -ENOMEM;
60061 @@ -1635,7 +1639,7 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
60062                  */
60063                 if (urb->dev->speed == USB_SPEED_FULL) {
60064                         ret = xhci_check_maxpacket(xhci, slot_id,
60065 -                                       ep_index, urb);
60066 +                                       ep_index, urb, mem_flags);
60067                         if (ret < 0) {
60068                                 xhci_urb_free_priv(urb_priv);
60069                                 urb->hcpriv = NULL;
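
The hunk above threads the caller's mem_flags into xhci_check_maxpacket() instead of hard-coding GFP_KERNEL, so an URB submitted from atomic context does not trigger a sleeping allocation. A minimal userspace sketch of propagating allocation-context flags (the flag names are invented analogs, not the kernel's gfp.h):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical allocation-context flags, modelling gfp_t. */
typedef unsigned int alloc_flags_t;
#define MAY_SLEEP  (1u << 0)   /* like GFP_KERNEL */
#define NO_SLEEP   (1u << 1)   /* like GFP_ATOMIC */

/* The fix: take the caller's flags instead of hard-coding MAY_SLEEP. */
static void *alloc_command(size_t size, alloc_flags_t flags)
{
        if (flags & NO_SLEEP)
                fprintf(stderr, "atomic-safe allocation\n");
        return malloc(size);
}

int main(void)
{
        void *cmd = alloc_command(64, NO_SLEEP); /* flags flow from the URB path */

        free(cmd);
        return 0;
}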
60070 @@ -3269,6 +3273,14 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
60072         /* config ep command clears toggle if add and drop ep flags are set */
60073         ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
60074 +       if (!ctrl_ctx) {
60075 +               spin_unlock_irqrestore(&xhci->lock, flags);
60076 +               xhci_free_command(xhci, cfg_cmd);
60077 +               xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
60078 +                               __func__);
60079 +               goto cleanup;
60080 +       }
60082         xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
60083                                            ctrl_ctx, ep_flag, ep_flag);
60084         xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
60085 diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
60086 index eebeadd26946..6b92d037d8fc 100644
60087 --- a/drivers/usb/musb/mediatek.c
60088 +++ b/drivers/usb/musb/mediatek.c
60089 @@ -518,8 +518,8 @@ static int mtk_musb_probe(struct platform_device *pdev)
60091         glue->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
60092         if (IS_ERR(glue->xceiv)) {
60093 -               dev_err(dev, "fail to getting usb-phy %d\n", ret);
60094                 ret = PTR_ERR(glue->xceiv);
60095 +               dev_err(dev, "failed to get usb-phy %d\n", ret);
60096                 goto err_unregister_usb_phy;
60097         }
60099 diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
60100 index fc0457db62e1..8f09a387b773 100644
60101 --- a/drivers/usb/musb/musb_core.c
60102 +++ b/drivers/usb/musb/musb_core.c
60103 @@ -2070,7 +2070,7 @@ static void musb_irq_work(struct work_struct *data)
60104         struct musb *musb = container_of(data, struct musb, irq_work.work);
60105         int error;
60107 -       error = pm_runtime_get_sync(musb->controller);
60108 +       error = pm_runtime_resume_and_get(musb->controller);
60109         if (error < 0) {
60110                 dev_err(musb->controller, "Could not enable: %i\n", error);
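
The musb change above swaps pm_runtime_get_sync() for pm_runtime_resume_and_get(): the former bumps the usage counter even when resume fails, leaking a reference unless the caller remembers to drop it. A toy model of the two behaviours (plain counters, not the real runtime-PM API):

#include <stdio.h>

static int usage_count;
static int resume_fails = 1;

static int get_sync(void)            /* pm_runtime_get_sync() analog */
{
        usage_count++;               /* counter bumped even on failure */
        return resume_fails ? -1 : 0;
}

static int resume_and_get(void)      /* pm_runtime_resume_and_get() analog */
{
        usage_count++;
        if (resume_fails) {
                usage_count--;       /* failure path drops the reference */
                return -1;
        }
        return 0;
}

int main(void)
{
        if (get_sync() < 0)
                printf("get_sync failed, count leaked at %d\n", usage_count);
        usage_count = 0;
        if (resume_and_get() < 0)
                printf("resume_and_get failed, count balanced at %d\n", usage_count);
        return 0;
}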
60112 diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
60113 index 97f37077b7f9..33b637d0d8d9 100644
60114 --- a/drivers/usb/roles/class.c
60115 +++ b/drivers/usb/roles/class.c
60116 @@ -189,6 +189,8 @@ usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
60117                 return NULL;
60119         dev = class_find_device_by_fwnode(role_class, fwnode);
60120 +       if (dev)
60121 +               WARN_ON(!try_module_get(dev->parent->driver->owner));
60123         return dev ? to_role_switch(dev) : NULL;
60125 diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
60126 index 7252b0ce75a6..fe1c13a8849c 100644
60127 --- a/drivers/usb/serial/ti_usb_3410_5052.c
60128 +++ b/drivers/usb/serial/ti_usb_3410_5052.c
60129 @@ -1418,14 +1418,19 @@ static int ti_set_serial_info(struct tty_struct *tty,
60130         struct serial_struct *ss)
60132         struct usb_serial_port *port = tty->driver_data;
60133 -       struct ti_port *tport = usb_get_serial_port_data(port);
60134 +       struct tty_port *tport = &port->port;
60135         unsigned cwait;
60137         cwait = ss->closing_wait;
60138         if (cwait != ASYNC_CLOSING_WAIT_NONE)
60139                 cwait = msecs_to_jiffies(10 * ss->closing_wait);
60141 -       tport->tp_port->port.closing_wait = cwait;
60142 +       if (!capable(CAP_SYS_ADMIN)) {
60143 +               if (cwait != tport->closing_wait)
60144 +                       return -EPERM;
60145 +       }
60147 +       tport->closing_wait = cwait;
60149         return 0;
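
The ti_usb hunk above gates changes to closing_wait behind CAP_SYS_ADMIN while still letting unprivileged callers write back the existing value. A small sketch of that permission pattern (the admin flag stands in for capable()):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for capable(CAP_SYS_ADMIN). */
static bool is_admin;

static int set_closing_wait(unsigned int *stored, unsigned int requested)
{
        /* Unprivileged users may "set" the value only if it is unchanged;
         * actually changing it needs CAP_SYS_ADMIN. */
        if (!is_admin && requested != *stored)
                return -1;      /* -EPERM in the kernel */

        *stored = requested;
        return 0;
}

int main(void)
{
        unsigned int closing_wait = 3000;

        if (set_closing_wait(&closing_wait, 5000) < 0)
                printf("rejected: not privileged\n");
        return 0;
}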
60151 diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
60152 index 46d46a4f99c9..4e9c994a972a 100644
60153 --- a/drivers/usb/serial/usb_wwan.c
60154 +++ b/drivers/usb/serial/usb_wwan.c
60155 @@ -140,10 +140,10 @@ int usb_wwan_get_serial_info(struct tty_struct *tty,
60156         ss->line            = port->minor;
60157         ss->port            = port->port_number;
60158         ss->baud_base       = tty_get_baud_rate(port->port.tty);
60159 -       ss->close_delay     = port->port.close_delay / 10;
60160 +       ss->close_delay     = jiffies_to_msecs(port->port.close_delay) / 10;
60161         ss->closing_wait    = port->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
60162                                  ASYNC_CLOSING_WAIT_NONE :
60163 -                                port->port.closing_wait / 10;
60164 +                                jiffies_to_msecs(port->port.closing_wait) / 10;
60165         return 0;
60167  EXPORT_SYMBOL(usb_wwan_get_serial_info);
60168 @@ -155,9 +155,10 @@ int usb_wwan_set_serial_info(struct tty_struct *tty,
60169         unsigned int closing_wait, close_delay;
60170         int retval = 0;
60172 -       close_delay = ss->close_delay * 10;
60173 +       close_delay = msecs_to_jiffies(ss->close_delay * 10);
60174         closing_wait = ss->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
60175 -                       ASYNC_CLOSING_WAIT_NONE : ss->closing_wait * 10;
60176 +                       ASYNC_CLOSING_WAIT_NONE :
60177 +                       msecs_to_jiffies(ss->closing_wait * 10);
60179         mutex_lock(&port->port.mutex);
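
The usb_wwan hunks above fix a unit mismatch: close_delay and closing_wait are stored in jiffies internally, but the serial ioctl ABI expresses them in 10 ms units, so every crossing of that boundary needs an explicit conversion. A runnable sketch of the round trip (assuming HZ = 250 for the example; the real value is the kernel's HZ):

#include <stdio.h>

#define HZ 250

static unsigned int msecs_to_jiffies(unsigned int ms) { return ms * HZ / 1000; }
static unsigned int jiffies_to_msecs(unsigned int j)  { return j * 1000 / HZ; }

int main(void)
{
        /* close_delay arrives from user space in 10 ms units... */
        unsigned int user_close_delay = 50;               /* 500 ms */

        /* ...is stored internally in jiffies... */
        unsigned int stored = msecs_to_jiffies(user_close_delay * 10);

        /* ...and must be converted back before being reported. */
        printf("stored %u jiffies, reported %u (x10 ms)\n",
               stored, jiffies_to_msecs(stored) / 10);   /* 125 jiffies, 50 */
        return 0;
}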
60181 diff --git a/drivers/usb/serial/xr_serial.c b/drivers/usb/serial/xr_serial.c
60182 index 0ca04906da4b..c59c8b47a120 100644
60183 --- a/drivers/usb/serial/xr_serial.c
60184 +++ b/drivers/usb/serial/xr_serial.c
60185 @@ -467,6 +467,11 @@ static void xr_set_termios(struct tty_struct *tty,
60186                 termios->c_cflag &= ~CSIZE;
60187                 if (old_termios)
60188                         termios->c_cflag |= old_termios->c_cflag & CSIZE;
60189 +               else
60190 +                       termios->c_cflag |= CS8;
60192 +               if (C_CSIZE(tty) == CS7)
60193 +                       bits |= XR21V141X_UART_DATA_7;
60194                 else
60195                         bits |= XR21V141X_UART_DATA_8;
60196                 break;
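
The xr_serial hunk above closes two gaps in the CSIZE handling: with no old termios to inherit from, the character size now defaults to CS8, and CS7 is mapped to the 7-bit register value instead of silently becoming 8 bits. A compact sketch of that selection logic (the constants are illustrative, not the real termios/XR21V141X values):

#include <stdio.h>

#define CSIZE 0x30
#define CS7   0x20
#define CS8   0x30
#define UART_DATA_7 7
#define UART_DATA_8 8

static unsigned int pick_data_bits(unsigned int cflag)
{
        if ((cflag & CSIZE) == 0)     /* nothing requested: default to CS8 */
                cflag |= CS8;

        return (cflag & CSIZE) == CS7 ? UART_DATA_7 : UART_DATA_8;
}

int main(void)
{
        printf("no CSIZE -> %u data bits\n", pick_data_bits(0));    /* 8 */
        printf("CS7      -> %u data bits\n", pick_data_bits(CS7));  /* 7 */
        return 0;
}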
60197 diff --git a/drivers/usb/typec/stusb160x.c b/drivers/usb/typec/stusb160x.c
60198 index d21750bbbb44..6eaeba9b096e 100644
60199 --- a/drivers/usb/typec/stusb160x.c
60200 +++ b/drivers/usb/typec/stusb160x.c
60201 @@ -682,8 +682,8 @@ static int stusb160x_probe(struct i2c_client *client)
60202         }
60204         fwnode = device_get_named_child_node(chip->dev, "connector");
60205 -       if (IS_ERR(fwnode))
60206 -               return PTR_ERR(fwnode);
60207 +       if (!fwnode)
60208 +               return -ENODEV;
60210         /*
60211          * When both VDD and VSYS power supplies are present, the low power
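
The stusb160x fix above (and the identical tps6598x fix later in this patch) hinges on a convention mismatch: device_get_named_child_node() reports a missing node with NULL, not with an ERR_PTR, so IS_ERR() never fires and the absence was treated as success. A minimal model of why the wrong check passes:

#include <errno.h>
#include <stdio.h>

/* Minimal ERR_PTR/IS_ERR model for the sketch. */
#define ERR_PTR(err)  ((void *)(long)(err))
#define IS_ERR(p)     ((unsigned long)(p) >= (unsigned long)-4095)

/* Lookup that signals "not found" with NULL, like the fwnode API here. */
static void *get_named_child_node(const char *name)
{
        (void)name;
        return NULL;            /* "connector" node absent */
}

int main(void)
{
        void *fwnode = get_named_child_node("connector");

        if (IS_ERR(fwnode))
                printf("never reached: NULL is not an ERR_PTR\n");
        if (!fwnode)
                printf("correct check: return -ENODEV (%d)\n", -ENODEV);
        return 0;
}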
60212 diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
60213 index a27deb0b5f03..027afd7dfdce 100644
60214 --- a/drivers/usb/typec/tcpm/tcpci.c
60215 +++ b/drivers/usb/typec/tcpm/tcpci.c
60216 @@ -24,6 +24,15 @@
60217  #define        AUTO_DISCHARGE_PD_HEADROOM_MV           850
60218  #define        AUTO_DISCHARGE_PPS_HEADROOM_MV          1250
60220 +#define tcpc_presenting_cc1_rd(reg) \
60221 +       (!(TCPC_ROLE_CTRL_DRP & (reg)) && \
60222 +        (((reg) & (TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT)) == \
60223 +         (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT)))
60224 +#define tcpc_presenting_cc2_rd(reg) \
60225 +       (!(TCPC_ROLE_CTRL_DRP & (reg)) && \
60226 +        (((reg) & (TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT)) == \
60227 +         (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT)))
60229  struct tcpci {
60230         struct device *dev;
60232 @@ -178,19 +187,25 @@ static int tcpci_get_cc(struct tcpc_dev *tcpc,
60233                         enum typec_cc_status *cc1, enum typec_cc_status *cc2)
60235         struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
60236 -       unsigned int reg;
60237 +       unsigned int reg, role_control;
60238         int ret;
60240 +       ret = regmap_read(tcpci->regmap, TCPC_ROLE_CTRL, &role_control);
60241 +       if (ret < 0)
60242 +               return ret;
60244         ret = regmap_read(tcpci->regmap, TCPC_CC_STATUS, &reg);
60245         if (ret < 0)
60246                 return ret;
60248         *cc1 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC1_SHIFT) &
60249                                  TCPC_CC_STATUS_CC1_MASK,
60250 -                                reg & TCPC_CC_STATUS_TERM);
60251 +                                reg & TCPC_CC_STATUS_TERM ||
60252 +                                tcpc_presenting_cc1_rd(role_control));
60253         *cc2 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC2_SHIFT) &
60254                                  TCPC_CC_STATUS_CC2_MASK,
60255 -                                reg & TCPC_CC_STATUS_TERM);
60256 +                                reg & TCPC_CC_STATUS_TERM ||
60257 +                                tcpc_presenting_cc2_rd(role_control));
60259         return 0;
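
The tcpci change above consults ROLE_CONTROL because the CC_STATUS TERM bit alone cannot say whether the port itself is presenting Rd on a given CC line. A standalone sketch of the mask-and-compare macro style used by the new tcpc_presenting_cc*_rd() helpers (register layout here is illustrative, not the exact TCPC spec values):

#include <stdio.h>

/* Hypothetical register layout mirroring the ROLE_CONTROL fields. */
#define ROLE_CTRL_DRP        (1u << 6)
#define ROLE_CTRL_CC1_SHIFT  0
#define ROLE_CTRL_CC1_MASK   0x3
#define ROLE_CTRL_CC_RD      0x2

/* True when the port is not toggling (DRP clear) and CC1 is set to Rd. */
#define presenting_cc1_rd(reg) \
        (!((reg) & ROLE_CTRL_DRP) && \
         (((reg) & (ROLE_CTRL_CC1_MASK << ROLE_CTRL_CC1_SHIFT)) == \
          (ROLE_CTRL_CC_RD << ROLE_CTRL_CC1_SHIFT)))

int main(void)
{
        unsigned int role_control = ROLE_CTRL_CC_RD;  /* DRP off, CC1 = Rd */

        printf("cc1 presents Rd: %s\n",
               presenting_cc1_rd(role_control) ? "yes" : "no");
        return 0;
}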
60261 diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
60262 index ce7af398c7c1..52acc884a61f 100644
60263 --- a/drivers/usb/typec/tcpm/tcpm.c
60264 +++ b/drivers/usb/typec/tcpm/tcpm.c
60265 @@ -268,12 +268,27 @@ struct pd_mode_data {
60266         struct typec_altmode_desc altmode_desc[ALTMODE_DISCOVERY_MAX];
60267  };
60270 + * @min_volt: Actual min voltage at the local port
60271 + * @req_min_volt: Requested min voltage to the port partner
60272 + * @max_volt: Actual max voltage at the local port
60273 + * @req_max_volt: Requested max voltage to the port partner
60274 + * @max_curr: Actual max current at the local port
60275 + * @req_max_curr: Requested max current of the port partner
60276 + * @req_out_volt: Requested output voltage to the port partner
60277 + * @req_op_curr: Requested operating current to the port partner
60278 + * @supported: Partner has at least one APDO, hence supports PPS
60279 + * @active: PPS mode is active
60280 + */
60281  struct pd_pps_data {
60282         u32 min_volt;
60283 +       u32 req_min_volt;
60284         u32 max_volt;
60285 +       u32 req_max_volt;
60286         u32 max_curr;
60287 -       u32 out_volt;
60288 -       u32 op_curr;
60289 +       u32 req_max_curr;
60290 +       u32 req_out_volt;
60291 +       u32 req_op_curr;
60292         bool supported;
60293         bool active;
60294  };
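
The pd_pps_data rework above separates what the sink has *requested* from the partner (req_*) from what is actually in effect, committing the requested values only once the partner accepts. A small sketch of that commit-on-accept split (field names abbreviated from the struct above):

#include <stdint.h>
#include <stdio.h>

struct pps_state {
        uint32_t supply_voltage, current_limit;   /* actual, in effect */
        uint32_t req_out_volt, req_op_curr;       /* merely requested */
};

/* Runs only when the partner sends Accept. */
static void on_accept(struct pps_state *s)
{
        s->supply_voltage = s->req_out_volt;
        s->current_limit  = s->req_op_curr;
}

int main(void)
{
        struct pps_state s = { .supply_voltage = 5000, .current_limit = 3000,
                               .req_out_volt = 9000, .req_op_curr = 2000 };

        /* Until Accept arrives, a rejected request leaves the actual
         * values untouched instead of clobbering them. */
        on_accept(&s);
        printf("now at %u mV / %u mA\n", s.supply_voltage, s.current_limit);
        return 0;
}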
60295 @@ -389,7 +404,10 @@ struct tcpm_port {
60296         unsigned int operating_snk_mw;
60297         bool update_sink_caps;
60299 -       /* Requested current / voltage */
60300 +       /* Requested current / voltage to the port partner */
60301 +       u32 req_current_limit;
60302 +       u32 req_supply_voltage;
60303 +       /* Actual current / voltage limit of the local port */
60304         u32 current_limit;
60305         u32 supply_voltage;
60307 @@ -438,6 +456,9 @@ struct tcpm_port {
60308         enum tcpm_ams next_ams;
60309         bool in_ams;
60311 +       /* Auto vbus discharge status */
60312 +       bool auto_vbus_discharge_enabled;
60314  #ifdef CONFIG_DEBUG_FS
60315         struct dentry *dentry;
60316         struct mutex logbuffer_lock;    /* log buffer access lock */
60317 @@ -507,6 +528,9 @@ static const char * const pd_rev[] = {
60318         (tcpm_port_is_sink(port) && \
60319         ((port)->cc1 == TYPEC_CC_RP_3_0 || (port)->cc2 == TYPEC_CC_RP_3_0))
60321 +#define tcpm_wait_for_discharge(port) \
60322 +       (((port)->auto_vbus_discharge_enabled && !(port)->vbus_vsafe0v) ? PD_T_SAFE_0V : 0)
60324  static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
60326         if (port->port_type == TYPEC_PORT_DRP) {
60327 @@ -1853,7 +1877,6 @@ static void vdm_run_state_machine(struct tcpm_port *port)
60328                         }
60330                         if (res < 0) {
60331 -                               port->vdm_sm_running = false;
60332                                 return;
60333                         }
60334                 }
60335 @@ -1869,6 +1892,7 @@ static void vdm_run_state_machine(struct tcpm_port *port)
60336                 port->vdo_data[0] = port->vdo_retry;
60337                 port->vdo_count = 1;
60338                 port->vdm_state = VDM_STATE_READY;
60339 +               tcpm_ams_finish(port);
60340                 break;
60341         case VDM_STATE_BUSY:
60342                 port->vdm_state = VDM_STATE_ERR_TMOUT;
60343 @@ -1934,7 +1958,7 @@ static void vdm_state_machine_work(struct kthread_work *work)
60344                  port->vdm_state != VDM_STATE_BUSY &&
60345                  port->vdm_state != VDM_STATE_SEND_MESSAGE);
60347 -       if (port->vdm_state == VDM_STATE_ERR_TMOUT)
60348 +       if (port->vdm_state < VDM_STATE_READY)
60349                 port->vdm_sm_running = false;
60351         mutex_unlock(&port->lock);
60352 @@ -2363,7 +2387,7 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
60353                 port->nr_sink_caps = cnt;
60354                 port->sink_cap_done = true;
60355                 if (port->ams == GET_SINK_CAPABILITIES)
60356 -                       tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
60357 +                       tcpm_set_state(port, ready_state(port), 0);
60358                 /* Unexpected Sink Capabilities */
60359                 else
60360                         tcpm_pd_handle_msg(port,
60361 @@ -2432,8 +2456,8 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
60362                 case SNK_TRANSITION_SINK:
60363                         if (port->vbus_present) {
60364                                 tcpm_set_current_limit(port,
60365 -                                                      port->current_limit,
60366 -                                                      port->supply_voltage);
60367 +                                                      port->req_current_limit,
60368 +                                                      port->req_supply_voltage);
60369                                 port->explicit_contract = true;
60370                                 tcpm_set_auto_vbus_discharge_threshold(port,
60371                                                                        TYPEC_PWR_MODE_PD,
60372 @@ -2492,8 +2516,8 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
60373                         break;
60374                 case SNK_NEGOTIATE_PPS_CAPABILITIES:
60375                         /* Revert data back from any requested PPS updates */
60376 -                       port->pps_data.out_volt = port->supply_voltage;
60377 -                       port->pps_data.op_curr = port->current_limit;
60378 +                       port->pps_data.req_out_volt = port->supply_voltage;
60379 +                       port->pps_data.req_op_curr = port->current_limit;
60380                         port->pps_status = (type == PD_CTRL_WAIT ?
60381                                             -EAGAIN : -EOPNOTSUPP);
60383 @@ -2525,6 +2549,16 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
60384                         port->sink_cap_done = true;
60385                         tcpm_set_state(port, ready_state(port), 0);
60386                         break;
60387 +               case SRC_READY:
60388 +               case SNK_READY:
60389 +                       if (port->vdm_state > VDM_STATE_READY) {
60390 +                               port->vdm_state = VDM_STATE_DONE;
60391 +                               if (tcpm_vdm_ams(port))
60392 +                                       tcpm_ams_finish(port);
60393 +                               mod_vdm_delayed_work(port, 0);
60394 +                               break;
60395 +                       }
60396 +                       fallthrough;
60397                 default:
60398                         tcpm_pd_handle_state(port,
60399                                              port->pwr_role == TYPEC_SOURCE ?
60400 @@ -2542,8 +2576,12 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
60401                         break;
60402                 case SNK_NEGOTIATE_PPS_CAPABILITIES:
60403                         port->pps_data.active = true;
60404 -                       port->supply_voltage = port->pps_data.out_volt;
60405 -                       port->current_limit = port->pps_data.op_curr;
60406 +                       port->pps_data.min_volt = port->pps_data.req_min_volt;
60407 +                       port->pps_data.max_volt = port->pps_data.req_max_volt;
60408 +                       port->pps_data.max_curr = port->pps_data.req_max_curr;
60409 +                       port->req_supply_voltage = port->pps_data.req_out_volt;
60410 +                       port->req_current_limit = port->pps_data.req_op_curr;
60411 +                       power_supply_changed(port->psy);
60412                         tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
60413                         break;
60414                 case SOFT_RESET_SEND:
60415 @@ -3102,17 +3140,16 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
60416                 src = port->source_caps[src_pdo];
60417                 snk = port->snk_pdo[snk_pdo];
60419 -               port->pps_data.min_volt = max(pdo_pps_apdo_min_voltage(src),
60420 -                                             pdo_pps_apdo_min_voltage(snk));
60421 -               port->pps_data.max_volt = min(pdo_pps_apdo_max_voltage(src),
60422 -                                             pdo_pps_apdo_max_voltage(snk));
60423 -               port->pps_data.max_curr = min_pps_apdo_current(src, snk);
60424 -               port->pps_data.out_volt = min(port->pps_data.max_volt,
60425 -                                             max(port->pps_data.min_volt,
60426 -                                                 port->pps_data.out_volt));
60427 -               port->pps_data.op_curr = min(port->pps_data.max_curr,
60428 -                                            port->pps_data.op_curr);
60429 -               power_supply_changed(port->psy);
60430 +               port->pps_data.req_min_volt = max(pdo_pps_apdo_min_voltage(src),
60431 +                                                 pdo_pps_apdo_min_voltage(snk));
60432 +               port->pps_data.req_max_volt = min(pdo_pps_apdo_max_voltage(src),
60433 +                                                 pdo_pps_apdo_max_voltage(snk));
60434 +               port->pps_data.req_max_curr = min_pps_apdo_current(src, snk);
60435 +               port->pps_data.req_out_volt = min(port->pps_data.req_max_volt,
60436 +                                                 max(port->pps_data.req_min_volt,
60437 +                                                     port->pps_data.req_out_volt));
60438 +               port->pps_data.req_op_curr = min(port->pps_data.req_max_curr,
60439 +                                                port->pps_data.req_op_curr);
60440         }
60442         return src_pdo;
60443 @@ -3192,8 +3229,8 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
60444                          flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
60445         }
60447 -       port->current_limit = ma;
60448 -       port->supply_voltage = mv;
60449 +       port->req_current_limit = ma;
60450 +       port->req_supply_voltage = mv;
60452         return 0;
60454 @@ -3239,10 +3276,10 @@ static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
60455                         tcpm_log(port, "Invalid APDO selected!");
60456                         return -EINVAL;
60457                 }
60458 -               max_mv = port->pps_data.max_volt;
60459 -               max_ma = port->pps_data.max_curr;
60460 -               out_mv = port->pps_data.out_volt;
60461 -               op_ma = port->pps_data.op_curr;
60462 +               max_mv = port->pps_data.req_max_volt;
60463 +               max_ma = port->pps_data.req_max_curr;
60464 +               out_mv = port->pps_data.req_out_volt;
60465 +               op_ma = port->pps_data.req_op_curr;
60466                 break;
60467         default:
60468                 tcpm_log(port, "Invalid PDO selected!");
60469 @@ -3289,8 +3326,8 @@ static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
60470         tcpm_log(port, "Requesting APDO %d: %u mV, %u mA",
60471                  src_pdo_index, out_mv, op_ma);
60473 -       port->pps_data.op_curr = op_ma;
60474 -       port->pps_data.out_volt = out_mv;
60475 +       port->pps_data.req_op_curr = op_ma;
60476 +       port->pps_data.req_out_volt = out_mv;
60478         return 0;
60480 @@ -3418,6 +3455,8 @@ static int tcpm_src_attach(struct tcpm_port *port)
60481         if (port->tcpc->enable_auto_vbus_discharge) {
60482                 ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, true);
60483                 tcpm_log_force(port, "enable vbus discharge ret:%d", ret);
60484 +               if (!ret)
60485 +                       port->auto_vbus_discharge_enabled = true;
60486         }
60488         ret = tcpm_set_roles(port, true, TYPEC_SOURCE, tcpm_data_role_for_source(port));
60489 @@ -3500,6 +3539,8 @@ static void tcpm_reset_port(struct tcpm_port *port)
60490         if (port->tcpc->enable_auto_vbus_discharge) {
60491                 ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, false);
60492                 tcpm_log_force(port, "Disable vbus discharge ret:%d", ret);
60493 +               if (!ret)
60494 +                       port->auto_vbus_discharge_enabled = false;
60495         }
60496         port->in_ams = false;
60497         port->ams = NONE_AMS;
60498 @@ -3533,8 +3574,6 @@ static void tcpm_reset_port(struct tcpm_port *port)
60499         port->sink_cap_done = false;
60500         if (port->tcpc->enable_frs)
60501                 port->tcpc->enable_frs(port->tcpc, false);
60503 -       power_supply_changed(port->psy);
60506  static void tcpm_detach(struct tcpm_port *port)
60507 @@ -3574,6 +3613,8 @@ static int tcpm_snk_attach(struct tcpm_port *port)
60508                 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
60509                 ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, true);
60510                 tcpm_log_force(port, "enable vbus discharge ret:%d", ret);
60511 +               if (!ret)
60512 +                       port->auto_vbus_discharge_enabled = true;
60513         }
60515         ret = tcpm_set_roles(port, true, TYPEC_SINK, tcpm_data_role_for_sink(port));
60516 @@ -4103,6 +4144,23 @@ static void run_state_machine(struct tcpm_port *port)
60517                 }
60518                 break;
60519         case SNK_TRANSITION_SINK:
60520 +               /* From the USB PD spec:
60521 +                * "The Sink Shall transition to Sink Standby before a positive or
60522 +                * negative voltage transition of VBUS. During Sink Standby
60523 +                * the Sink Shall reduce its power draw to pSnkStdby."
60524 +                *
60525 +                * This is not applicable to PPS though as the port can continue
60526 +                * to draw negotiated power without switching to standby.
60527 +                */
60528 +               if (port->supply_voltage != port->req_supply_voltage && !port->pps_data.active &&
60529 +                   port->current_limit * port->supply_voltage / 1000 > PD_P_SNK_STDBY_MW) {
60530 +                       u32 stdby_ma = PD_P_SNK_STDBY_MW * 1000 / port->supply_voltage;
60532 +                       tcpm_log(port, "Setting standby current %u mV @ %u mA",
60533 +                                port->supply_voltage, stdby_ma);
60534 +                       tcpm_set_current_limit(port, stdby_ma, port->supply_voltage);
60535 +               }
60536 +               fallthrough;
60537         case SNK_TRANSITION_SINK_VBUS:
60538                 tcpm_set_state(port, hard_reset_state(port),
60539                                PD_T_PS_TRANSITION);
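
The Sink Standby hunk above computes the standby current with plain integer arithmetic: mW * 1000 / mV = mA. A worked example of the same formula (PD_P_SNK_STDBY_MW is 2.5 W per the PD spec):

#include <stdio.h>

#define PD_P_SNK_STDBY_MW 2500   /* pSnkStdby */

/* mW * 1000 / mV = mA; the same arithmetic as the hunk above. */
static unsigned int stdby_ma(unsigned int supply_mv)
{
        return PD_P_SNK_STDBY_MW * 1000 / supply_mv;
}

int main(void)
{
        /* A 5 V contract must drop to 500 mA before VBUS transitions;
         * at 20 V the same 2.5 W budget allows only 125 mA. */
        printf("5 V  -> %u mA\n", stdby_ma(5000));
        printf("20 V -> %u mA\n", stdby_ma(20000));
        return 0;
}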
60540 @@ -4676,9 +4734,9 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
60541                 if (tcpm_port_is_disconnected(port) ||
60542                     !tcpm_port_is_source(port)) {
60543                         if (port->port_type == TYPEC_PORT_SRC)
60544 -                               tcpm_set_state(port, SRC_UNATTACHED, 0);
60545 +                               tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
60546                         else
60547 -                               tcpm_set_state(port, SNK_UNATTACHED, 0);
60548 +                               tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
60549                 }
60550                 break;
60551         case SNK_UNATTACHED:
60552 @@ -4709,7 +4767,23 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
60553                         tcpm_set_state(port, SNK_DEBOUNCED, 0);
60554                 break;
60555         case SNK_READY:
60556 -               if (tcpm_port_is_disconnected(port))
60557 +               /*
60558 +                * EXIT condition is based primarily on vbus disconnect and CC is secondary.
60559 +                * "A port that has entered into USB PD communications with the Source and
60560 +                * has seen the CC voltage exceed vRd-USB may monitor the CC pin to detect
60561 +                * cable disconnect in addition to monitoring VBUS.
60562 +                *
60563 +                * A port that is monitoring the CC voltage for disconnect (but is not in
60564 +                * the process of a USB PD PR_Swap or USB PD FR_Swap) shall transition to
60565 +                * Unattached.SNK within tSinkDisconnect after the CC voltage remains below
60566 +                * vRd-USB for tPDDebounce."
60567 +                *
60568 +                * When set_auto_vbus_discharge_threshold is enabled, CC pins go
60569 +                * away before vbus decays to disconnect threshold. Allow
60570 +                * disconnect to be driven by vbus disconnect when auto vbus
60571 +                * discharge is enabled.
60572 +                */
60573 +               if (!port->auto_vbus_discharge_enabled && tcpm_port_is_disconnected(port))
60574                         tcpm_set_state(port, unattached_state(port), 0);
60575                 else if (!port->pd_capable &&
60576                          (cc1 != old_cc1 || cc2 != old_cc2))
60577 @@ -4808,9 +4882,13 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
60578                  * Ignore CC changes here.
60579                  */
60580                 break;
60582         default:
60583 -               if (tcpm_port_is_disconnected(port))
60584 +               /*
60585 +                * While acting as sink and auto vbus discharge is enabled, allow disconnect
60586 +                * to be driven by vbus disconnect.
60587 +                */
60588 +               if (tcpm_port_is_disconnected(port) && !(port->pwr_role == TYPEC_SINK &&
60589 +                                                        port->auto_vbus_discharge_enabled))
60590                         tcpm_set_state(port, unattached_state(port), 0);
60591                 break;
60592         }
60593 @@ -4974,8 +5052,16 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
60594         case SRC_TRANSITION_SUPPLY:
60595         case SRC_READY:
60596         case SRC_WAIT_NEW_CAPABILITIES:
60597 -               /* Force to unattached state to re-initiate connection */
60598 -               tcpm_set_state(port, SRC_UNATTACHED, 0);
60599 +               /*
60600 +                * Force to unattached state to re-initiate connection.
60601 +                * DRP port should move to Unattached.SNK instead of Unattached.SRC if
60602 +                * sink removed. Although sink removal here is due to source's vbus collapse,
60603 +                * treat it the same way for consistency.
60604 +                */
60605 +               if (port->port_type == TYPEC_PORT_SRC)
60606 +                       tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
60607 +               else
60608 +                       tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
60609                 break;
60611         case PORT_RESET:
60612 @@ -4994,9 +5080,8 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
60613                 break;
60615         default:
60616 -               if (port->pwr_role == TYPEC_SINK &&
60617 -                   port->attached)
60618 -                       tcpm_set_state(port, SNK_UNATTACHED, 0);
60619 +               if (port->pwr_role == TYPEC_SINK && port->attached)
60620 +                       tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
60621                 break;
60622         }
60624 @@ -5018,7 +5103,23 @@ static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
60625                         tcpm_set_state(port, tcpm_try_snk(port) ? SNK_TRY : SRC_ATTACHED,
60626                                        PD_T_CC_DEBOUNCE);
60627                 break;
60628 +       case SRC_STARTUP:
60629 +       case SRC_SEND_CAPABILITIES:
60630 +       case SRC_SEND_CAPABILITIES_TIMEOUT:
60631 +       case SRC_NEGOTIATE_CAPABILITIES:
60632 +       case SRC_TRANSITION_SUPPLY:
60633 +       case SRC_READY:
60634 +       case SRC_WAIT_NEW_CAPABILITIES:
60635 +               if (port->auto_vbus_discharge_enabled) {
60636 +                       if (port->port_type == TYPEC_PORT_SRC)
60637 +                               tcpm_set_state(port, SRC_UNATTACHED, 0);
60638 +                       else
60639 +                               tcpm_set_state(port, SNK_UNATTACHED, 0);
60640 +               }
60641 +               break;
60642         default:
60643 +               if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
60644 +                       tcpm_set_state(port, SNK_UNATTACHED, 0);
60645                 break;
60646         }
60648 @@ -5374,7 +5475,7 @@ static int tcpm_try_role(struct typec_port *p, int role)
60649         return ret;
60652 -static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
60653 +static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 req_op_curr)
60655         unsigned int target_mw;
60656         int ret;
60657 @@ -5392,12 +5493,12 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
60658                 goto port_unlock;
60659         }
60661 -       if (op_curr > port->pps_data.max_curr) {
60662 +       if (req_op_curr > port->pps_data.max_curr) {
60663                 ret = -EINVAL;
60664                 goto port_unlock;
60665         }
60667 -       target_mw = (op_curr * port->pps_data.out_volt) / 1000;
60668 +       target_mw = (req_op_curr * port->supply_voltage) / 1000;
60669         if (target_mw < port->operating_snk_mw) {
60670                 ret = -EINVAL;
60671                 goto port_unlock;
60672 @@ -5411,10 +5512,10 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
60673         }
60675         /* Round down operating current to align with PPS valid steps */
60676 -       op_curr = op_curr - (op_curr % RDO_PROG_CURR_MA_STEP);
60677 +       req_op_curr = req_op_curr - (req_op_curr % RDO_PROG_CURR_MA_STEP);
60679         reinit_completion(&port->pps_complete);
60680 -       port->pps_data.op_curr = op_curr;
60681 +       port->pps_data.req_op_curr = req_op_curr;
60682         port->pps_status = 0;
60683         port->pps_pending = true;
60684         mutex_unlock(&port->lock);
60685 @@ -5435,7 +5536,7 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
60686         return ret;
60689 -static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
60690 +static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 req_out_volt)
60692         unsigned int target_mw;
60693         int ret;
60694 @@ -5453,13 +5554,13 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
60695                 goto port_unlock;
60696         }
60698 -       if (out_volt < port->pps_data.min_volt ||
60699 -           out_volt > port->pps_data.max_volt) {
60700 +       if (req_out_volt < port->pps_data.min_volt ||
60701 +           req_out_volt > port->pps_data.max_volt) {
60702                 ret = -EINVAL;
60703                 goto port_unlock;
60704         }
60706 -       target_mw = (port->pps_data.op_curr * out_volt) / 1000;
60707 +       target_mw = (port->current_limit * req_out_volt) / 1000;
60708         if (target_mw < port->operating_snk_mw) {
60709                 ret = -EINVAL;
60710                 goto port_unlock;
60711 @@ -5473,10 +5574,10 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
60712         }
60714         /* Round down output voltage to align with PPS valid steps */
60715 -       out_volt = out_volt - (out_volt % RDO_PROG_VOLT_MV_STEP);
60716 +       req_out_volt = req_out_volt - (req_out_volt % RDO_PROG_VOLT_MV_STEP);
60718         reinit_completion(&port->pps_complete);
60719 -       port->pps_data.out_volt = out_volt;
60720 +       port->pps_data.req_out_volt = req_out_volt;
60721         port->pps_status = 0;
60722         port->pps_pending = true;
60723         mutex_unlock(&port->lock);
60724 @@ -5534,8 +5635,8 @@ static int tcpm_pps_activate(struct tcpm_port *port, bool activate)
60726         /* Trigger PPS request or move back to standard PDO contract */
60727         if (activate) {
60728 -               port->pps_data.out_volt = port->supply_voltage;
60729 -               port->pps_data.op_curr = port->current_limit;
60730 +               port->pps_data.req_out_volt = port->supply_voltage;
60731 +               port->pps_data.req_op_curr = port->current_limit;
60732         }
60733         mutex_unlock(&port->lock);
60735 diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
60736 index 29bd1c5a283c..4038104568f5 100644
60737 --- a/drivers/usb/typec/tps6598x.c
60738 +++ b/drivers/usb/typec/tps6598x.c
60739 @@ -614,8 +614,8 @@ static int tps6598x_probe(struct i2c_client *client)
60740                 return ret;
60742         fwnode = device_get_named_child_node(&client->dev, "connector");
60743 -       if (IS_ERR(fwnode))
60744 -               return PTR_ERR(fwnode);
60745 +       if (!fwnode)
60746 +               return -ENODEV;
60748         tps->role_sw = fwnode_usb_role_switch_get(fwnode);
60749         if (IS_ERR(tps->role_sw)) {
60750 diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
60751 index 244270755ae6..1e266f083bf8 100644
60752 --- a/drivers/usb/typec/ucsi/ucsi.c
60753 +++ b/drivers/usb/typec/ucsi/ucsi.c
60754 @@ -495,7 +495,8 @@ static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient)
60755         }
60758 -static void ucsi_get_pdos(struct ucsi_connector *con, int is_partner)
60759 +static int ucsi_get_pdos(struct ucsi_connector *con, int is_partner,
60760 +                        u32 *pdos, int offset, int num_pdos)
60762         struct ucsi *ucsi = con->ucsi;
60763         u64 command;
60764 @@ -503,17 +504,39 @@ static void ucsi_get_pdos(struct ucsi_connector *con, int is_partner)
60766         command = UCSI_COMMAND(UCSI_GET_PDOS) | UCSI_CONNECTOR_NUMBER(con->num);
60767         command |= UCSI_GET_PDOS_PARTNER_PDO(is_partner);
60768 -       command |= UCSI_GET_PDOS_NUM_PDOS(UCSI_MAX_PDOS - 1);
60769 +       command |= UCSI_GET_PDOS_PDO_OFFSET(offset);
60770 +       command |= UCSI_GET_PDOS_NUM_PDOS(num_pdos - 1);
60771         command |= UCSI_GET_PDOS_SRC_PDOS;
60772 -       ret = ucsi_send_command(ucsi, command, con->src_pdos,
60773 -                              sizeof(con->src_pdos));
60774 -       if (ret < 0) {
60775 +       ret = ucsi_send_command(ucsi, command, pdos + offset,
60776 +                               num_pdos * sizeof(u32));
60777 +       if (ret < 0)
60778                 dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret);
60779 +       if (ret == 0 && offset == 0)
60780 +               dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
60782 +       return ret;
60785 +static void ucsi_get_src_pdos(struct ucsi_connector *con, int is_partner)
60787 +       int ret;
60789 +       /* UCSI max payload means only getting at most 4 PDOs at a time */
60790 +       ret = ucsi_get_pdos(con, 1, con->src_pdos, 0, UCSI_MAX_PDOS);
60791 +       if (ret < 0)
60792                 return;
60793 -       }
60795         con->num_pdos = ret / sizeof(u32); /* number of bytes to 32-bit PDOs */
60796 -       if (ret == 0)
60797 -               dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
60798 +       if (con->num_pdos < UCSI_MAX_PDOS)
60799 +               return;
60801 +       /* get the remaining PDOs, if any */
60802 +       ret = ucsi_get_pdos(con, 1, con->src_pdos, UCSI_MAX_PDOS,
60803 +                           PDO_MAX_OBJECTS - UCSI_MAX_PDOS);
60804 +       if (ret < 0)
60805 +               return;
60807 +       con->num_pdos += ret / sizeof(u32);
60810  static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
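
The ucsi_get_src_pdos() rework above exists because the UCSI message payload caps a single GET_PDOS at four PDOs, while a PD source may expose up to seven, so a second command with an offset fetches the remainder. A runnable sketch of the two-step fetch (the transport is faked; the real command returns a byte count like ucsi_send_command()):

#include <stdio.h>

#define MAX_PDOS_PER_CMD 4    /* UCSI payload limit */
#define PDO_MAX_OBJECTS  7    /* PD allows up to 7 source PDOs */

/* Hypothetical transport: pretends the partner has 6 PDOs. */
static int get_pdos(unsigned int *pdos, int offset, int num)
{
        int avail = 6 - offset;
        int n = num < avail ? num : avail;

        for (int i = 0; i < n; i++)
                pdos[offset + i] = 0x1234u + offset + i;
        return n * 4;         /* bytes, as the UCSI command reports */
}

int main(void)
{
        unsigned int pdos[PDO_MAX_OBJECTS];
        int num = get_pdos(pdos, 0, MAX_PDOS_PER_CMD) / 4;

        /* First command maxed out, so there may be more to fetch. */
        if (num == MAX_PDOS_PER_CMD)
                num += get_pdos(pdos, MAX_PDOS_PER_CMD,
                                PDO_MAX_OBJECTS - MAX_PDOS_PER_CMD) / 4;

        printf("fetched %d PDOs\n", num);   /* 6 */
        return 0;
}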
60811 @@ -522,7 +545,7 @@ static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
60812         case UCSI_CONSTAT_PWR_OPMODE_PD:
60813                 con->rdo = con->status.request_data_obj;
60814                 typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_PD);
60815 -               ucsi_get_pdos(con, 1);
60816 +               ucsi_get_src_pdos(con, 1);
60817                 break;
60818         case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
60819                 con->rdo = 0;
60820 @@ -999,6 +1022,7 @@ static const struct typec_operations ucsi_ops = {
60821         .pr_set = ucsi_pr_swap
60822  };
60824 +/* Caller must call fwnode_handle_put() after use */
60825  static struct fwnode_handle *ucsi_find_fwnode(struct ucsi_connector *con)
60827         struct fwnode_handle *fwnode;
60828 @@ -1033,7 +1057,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
60829         command |= UCSI_CONNECTOR_NUMBER(con->num);
60830         ret = ucsi_send_command(ucsi, command, &con->cap, sizeof(con->cap));
60831         if (ret < 0)
60832 -               goto out;
60833 +               goto out_unlock;
60835         if (con->cap.op_mode & UCSI_CONCAP_OPMODE_DRP)
60836                 cap->data = TYPEC_PORT_DRD;
60837 @@ -1151,6 +1175,8 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
60838         trace_ucsi_register_port(con->num, &con->status);
60840  out:
60841 +       fwnode_handle_put(cap->fwnode);
60842 +out_unlock:
60843         mutex_unlock(&con->lock);
60844         return ret;
60846 diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
60847 index 3920e20a9e9e..cee666790907 100644
60848 --- a/drivers/usb/typec/ucsi/ucsi.h
60849 +++ b/drivers/usb/typec/ucsi/ucsi.h
60850 @@ -8,6 +8,7 @@
60851  #include <linux/power_supply.h>
60852  #include <linux/types.h>
60853  #include <linux/usb/typec.h>
60854 +#include <linux/usb/pd.h>
60855  #include <linux/usb/role.h>
60857  /* -------------------------------------------------------------------------- */
60858 @@ -134,7 +135,9 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
60860  /* GET_PDOS command bits */
60861  #define UCSI_GET_PDOS_PARTNER_PDO(_r_)         ((u64)(_r_) << 23)
60862 +#define UCSI_GET_PDOS_PDO_OFFSET(_r_)          ((u64)(_r_) << 24)
60863  #define UCSI_GET_PDOS_NUM_PDOS(_r_)            ((u64)(_r_) << 32)
60864 +#define UCSI_MAX_PDOS                          (4)
60865  #define UCSI_GET_PDOS_SRC_PDOS                 ((u64)1 << 34)
60867  /* -------------------------------------------------------------------------- */
60868 @@ -302,7 +305,6 @@ struct ucsi {
60870  #define UCSI_MAX_SVID          5
60871  #define UCSI_MAX_ALTMODES      (UCSI_MAX_SVID * 6)
60872 -#define UCSI_MAX_PDOS          (4)
60874  #define UCSI_TYPEC_VSAFE5V     5000
60875  #define UCSI_TYPEC_1_5_CURRENT 1500
60876 @@ -330,7 +332,7 @@ struct ucsi_connector {
60877         struct power_supply *psy;
60878         struct power_supply_desc psy_desc;
60879         u32 rdo;
60880 -       u32 src_pdos[UCSI_MAX_PDOS];
60881 +       u32 src_pdos[PDO_MAX_OBJECTS];
60882         int num_pdos;
60884         struct usb_role_switch *usb_role_sw;
60885 diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
60886 index f7633ee655a1..d1cf6b51bf85 100644
60887 --- a/drivers/usb/usbip/vudc_sysfs.c
60888 +++ b/drivers/usb/usbip/vudc_sysfs.c
60889 @@ -156,12 +156,14 @@ static ssize_t usbip_sockfd_store(struct device *dev,
60890                 tcp_rx = kthread_create(&v_rx_loop, &udc->ud, "vudc_rx");
60891                 if (IS_ERR(tcp_rx)) {
60892                         sockfd_put(socket);
60893 +                       mutex_unlock(&udc->ud.sysfs_lock);
60894                         return -EINVAL;
60895                 }
60896                 tcp_tx = kthread_create(&v_tx_loop, &udc->ud, "vudc_tx");
60897                 if (IS_ERR(tcp_tx)) {
60898                         kthread_stop(tcp_rx);
60899                         sockfd_put(socket);
60900 +                       mutex_unlock(&udc->ud.sysfs_lock);
60901                         return -EINVAL;
60902                 }
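
The vudc_sysfs hunk above is a classic lock-leak fix: two early returns between lock and unlock left sysfs_lock held, deadlocking the next writer. A compilable sketch of the pattern (pthread mutex standing in for the kernel mutex):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sysfs_lock = PTHREAD_MUTEX_INITIALIZER;

/* Every early return between lock and unlock must release the lock;
 * the fix adds the unlocks that were missing on the error paths. */
static int store(int make_rx_fail)
{
        pthread_mutex_lock(&sysfs_lock);

        if (make_rx_fail) {
                pthread_mutex_unlock(&sysfs_lock);  /* was missing */
                return -1;
        }

        pthread_mutex_unlock(&sysfs_lock);
        return 0;
}

int main(void)
{
        store(1);
        return store(0);     /* would hang here without the unlock above */
}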
60904 diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
60905 index f27e25112c40..8722f5effacd 100644
60906 --- a/drivers/vfio/fsl-mc/vfio_fsl_mc.c
60907 +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
60908 @@ -568,23 +568,39 @@ static int vfio_fsl_mc_init_device(struct vfio_fsl_mc_device *vdev)
60909                 dev_err(&mc_dev->dev, "VFIO_FSL_MC: Failed to setup DPRC (%d)\n", ret);
60910                 goto out_nc_unreg;
60911         }
60912 +       return 0;
60914 +out_nc_unreg:
60915 +       bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
60916 +       return ret;
60919 +static int vfio_fsl_mc_scan_container(struct fsl_mc_device *mc_dev)
60921 +       int ret;
60923 +       /* non-DPRC devices do not scan for other devices */
60924 +       if (!is_fsl_mc_bus_dprc(mc_dev))
60925 +               return 0;
60926         ret = dprc_scan_container(mc_dev, false);
60927         if (ret) {
60928 -               dev_err(&mc_dev->dev, "VFIO_FSL_MC: Container scanning failed (%d)\n", ret);
60929 -               goto out_dprc_cleanup;
60930 +               dev_err(&mc_dev->dev,
60931 +                       "VFIO_FSL_MC: Container scanning failed (%d)\n", ret);
60932 +               dprc_remove_devices(mc_dev, NULL, 0);
60933 +               return ret;
60934         }
60936         return 0;
60939 +static void vfio_fsl_uninit_device(struct vfio_fsl_mc_device *vdev)
60941 +       struct fsl_mc_device *mc_dev = vdev->mc_dev;
60943 +       if (!is_fsl_mc_bus_dprc(mc_dev))
60944 +               return;
60946 -out_dprc_cleanup:
60947 -       dprc_remove_devices(mc_dev, NULL, 0);
60948         dprc_cleanup(mc_dev);
60949 -out_nc_unreg:
60950         bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
60951 -       vdev->nb.notifier_call = NULL;
60953 -       return ret;
60956  static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
60957 @@ -607,29 +623,39 @@ static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
60958         }
60960         vdev->mc_dev = mc_dev;
60962 -       ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev);
60963 -       if (ret) {
60964 -               dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
60965 -               goto out_group_put;
60966 -       }
60967 +       mutex_init(&vdev->igate);
60969         ret = vfio_fsl_mc_reflck_attach(vdev);
60970         if (ret)
60971 -               goto out_group_dev;
60972 +               goto out_group_put;
60974         ret = vfio_fsl_mc_init_device(vdev);
60975         if (ret)
60976                 goto out_reflck;
60978 -       mutex_init(&vdev->igate);
60979 +       ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev);
60980 +       if (ret) {
60981 +               dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
60982 +               goto out_device;
60983 +       }
60985 +       /*
60986 +        * This triggers recursion into vfio_fsl_mc_probe() on another device
60987 +        * and the vfio_fsl_mc_reflck_attach() must succeed, which relies on the
60988 +        * vfio_add_group_dev() above. It has no impact on this vdev, so it is
60989 +        * safe to be after the vfio device is made live.
60990 +        */
60991 +       ret = vfio_fsl_mc_scan_container(mc_dev);
60992 +       if (ret)
60993 +               goto out_group_dev;
60994         return 0;
60996 -out_reflck:
60997 -       vfio_fsl_mc_reflck_put(vdev->reflck);
60998  out_group_dev:
60999         vfio_del_group_dev(dev);
61000 +out_device:
61001 +       vfio_fsl_uninit_device(vdev);
61002 +out_reflck:
61003 +       vfio_fsl_mc_reflck_put(vdev->reflck);
61004  out_group_put:
61005         vfio_iommu_group_put(group, dev);
61006         return ret;
61007 @@ -646,16 +672,10 @@ static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
61009         mutex_destroy(&vdev->igate);
61011 +       dprc_remove_devices(mc_dev, NULL, 0);
61012 +       vfio_fsl_uninit_device(vdev);
61013         vfio_fsl_mc_reflck_put(vdev->reflck);
61015 -       if (is_fsl_mc_bus_dprc(mc_dev)) {
61016 -               dprc_remove_devices(mc_dev, NULL, 0);
61017 -               dprc_cleanup(mc_dev);
61018 -       }
61020 -       if (vdev->nb.notifier_call)
61021 -               bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
61023         vfio_iommu_group_put(mc_dev->dev.iommu_group, dev);
61025         return 0;
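
The vfio_fsl_mc rework above reorders probe so the container scan (which can recurse into probe for child devices) runs last, and it factors init/uninit into helpers so probe's error labels and remove() unwind the very same steps in reverse. A compact sketch of that mirrored-unwind shape:

#include <stdio.h>

static int step(const char *name) { printf("init %s\n", name); return 0; }
static void undo(const char *name) { printf("undo %s\n", name); }

static int probe(void)
{
        if (step("reflck"))
                goto out;
        if (step("device"))
                goto out_reflck;
        if (step("group_dev"))
                goto out_device;
        if (step("scan_container"))   /* last: may recurse into probe() */
                goto out_group_dev;
        return 0;

out_group_dev:
        undo("group_dev");
out_device:
        undo("device");
out_reflck:
        undo("reflck");
out:
        return -1;
}

int main(void) { return probe(); }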
61026 diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
61027 index 917fd84c1c6f..367ff5412a38 100644
61028 --- a/drivers/vfio/mdev/mdev_sysfs.c
61029 +++ b/drivers/vfio/mdev/mdev_sysfs.c
61030 @@ -105,6 +105,7 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
61031                 return ERR_PTR(-ENOMEM);
61033         type->kobj.kset = parent->mdev_types_kset;
61034 +       type->parent = parent;
61036         ret = kobject_init_and_add(&type->kobj, &mdev_type_ktype, NULL,
61037                                    "%s-%s", dev_driver_string(parent->dev),
61038 @@ -132,7 +133,6 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
61039         }
61041         type->group = group;
61042 -       type->parent = parent;
61043         return type;
61045  attrs_failed:
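
The one-line move of type->parent above follows from kobject lifetime rules: once kobject_init_and_add() has run, any error path may drop the last reference and invoke the release callback, so every field that callback reads must already be valid. A plain C analogue, all names hypothetical:

#include <stdio.h>
#include <stdlib.h>

struct parent { const char *name; };
struct type {
        struct parent *parent;
        void (*release)(struct type *t);
};

static void type_release(struct type *t)
{
        /* would dereference an unset pointer if parent were assigned later */
        printf("releasing type under parent %s\n", t->parent->name);
        free(t);
}

int main(void)
{
        struct parent p = { .name = "mdev-parent" };
        struct type *t = calloc(1, sizeof(*t));

        if (!t)
                return 1;
        t->parent = &p;          /* set before anything can release t */
        t->release = type_release;
        t->release(t);           /* simulate an early error path firing release */
        return 0;
}
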
61046 diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
61047 index 5023e23db3bc..cb7f2dc09e9d 100644
61048 --- a/drivers/vfio/pci/vfio_pci.c
61049 +++ b/drivers/vfio/pci/vfio_pci.c
61050 @@ -1924,6 +1924,68 @@ static int vfio_pci_bus_notifier(struct notifier_block *nb,
61051         return 0;
61054 +static int vfio_pci_vf_init(struct vfio_pci_device *vdev)
61056 +       struct pci_dev *pdev = vdev->pdev;
61057 +       int ret;
61059 +       if (!pdev->is_physfn)
61060 +               return 0;
61062 +       vdev->vf_token = kzalloc(sizeof(*vdev->vf_token), GFP_KERNEL);
61063 +       if (!vdev->vf_token)
61064 +               return -ENOMEM;
61066 +       mutex_init(&vdev->vf_token->lock);
61067 +       uuid_gen(&vdev->vf_token->uuid);
61069 +       vdev->nb.notifier_call = vfio_pci_bus_notifier;
61070 +       ret = bus_register_notifier(&pci_bus_type, &vdev->nb);
61071 +       if (ret) {
61072 +               kfree(vdev->vf_token);
61073 +               return ret;
61074 +       }
61075 +       return 0;
61078 +static void vfio_pci_vf_uninit(struct vfio_pci_device *vdev)
61080 +       if (!vdev->vf_token)
61081 +               return;
61083 +       bus_unregister_notifier(&pci_bus_type, &vdev->nb);
61084 +       WARN_ON(vdev->vf_token->users);
61085 +       mutex_destroy(&vdev->vf_token->lock);
61086 +       kfree(vdev->vf_token);
61089 +static int vfio_pci_vga_init(struct vfio_pci_device *vdev)
61091 +       struct pci_dev *pdev = vdev->pdev;
61092 +       int ret;
61094 +       if (!vfio_pci_is_vga(pdev))
61095 +               return 0;
61097 +       ret = vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
61098 +       if (ret)
61099 +               return ret;
61100 +       vga_set_legacy_decoding(pdev, vfio_pci_set_vga_decode(vdev, false));
61101 +       return 0;
61104 +static void vfio_pci_vga_uninit(struct vfio_pci_device *vdev)
61106 +       struct pci_dev *pdev = vdev->pdev;
61108 +       if (!vfio_pci_is_vga(pdev))
61109 +               return;
61110 +       vga_client_register(pdev, NULL, NULL, NULL);
61111 +       vga_set_legacy_decoding(pdev, VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
61112 +                                             VGA_RSRC_LEGACY_IO |
61113 +                                             VGA_RSRC_LEGACY_MEM);
61116  static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
61118         struct vfio_pci_device *vdev;
61119 @@ -1970,35 +2032,15 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
61120         INIT_LIST_HEAD(&vdev->vma_list);
61121         init_rwsem(&vdev->memory_lock);
61123 -       ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
61124 +       ret = vfio_pci_reflck_attach(vdev);
61125         if (ret)
61126                 goto out_free;
61128 -       ret = vfio_pci_reflck_attach(vdev);
61129 +       ret = vfio_pci_vf_init(vdev);
61130         if (ret)
61131 -               goto out_del_group_dev;
61133 -       if (pdev->is_physfn) {
61134 -               vdev->vf_token = kzalloc(sizeof(*vdev->vf_token), GFP_KERNEL);
61135 -               if (!vdev->vf_token) {
61136 -                       ret = -ENOMEM;
61137 -                       goto out_reflck;
61138 -               }
61140 -               mutex_init(&vdev->vf_token->lock);
61141 -               uuid_gen(&vdev->vf_token->uuid);
61143 -               vdev->nb.notifier_call = vfio_pci_bus_notifier;
61144 -               ret = bus_register_notifier(&pci_bus_type, &vdev->nb);
61145 -               if (ret)
61146 -                       goto out_vf_token;
61147 -       }
61149 -       if (vfio_pci_is_vga(pdev)) {
61150 -               vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
61151 -               vga_set_legacy_decoding(pdev,
61152 -                                       vfio_pci_set_vga_decode(vdev, false));
61153 -       }
61154 +               goto out_reflck;
61155 +       ret = vfio_pci_vga_init(vdev);
61156 +       if (ret)
61157 +               goto out_vf;
61159         vfio_pci_probe_power_state(vdev);
61161 @@ -2016,15 +2058,20 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
61162                 vfio_pci_set_power_state(vdev, PCI_D3hot);
61163         }
61165 -       return ret;
61166 +       ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
61167 +       if (ret)
61168 +               goto out_power;
61169 +       return 0;
61171 -out_vf_token:
61172 -       kfree(vdev->vf_token);
61173 +out_power:
61174 +       if (!disable_idle_d3)
61175 +               vfio_pci_set_power_state(vdev, PCI_D0);
61176 +out_vf:
61177 +       vfio_pci_vf_uninit(vdev);
61178  out_reflck:
61179         vfio_pci_reflck_put(vdev->reflck);
61180 -out_del_group_dev:
61181 -       vfio_del_group_dev(&pdev->dev);
61182  out_free:
61183 +       kfree(vdev->pm_save);
61184         kfree(vdev);
61185  out_group_put:
61186         vfio_iommu_group_put(group, &pdev->dev);
61187 @@ -2041,33 +2088,19 @@ static void vfio_pci_remove(struct pci_dev *pdev)
61188         if (!vdev)
61189                 return;
61191 -       if (vdev->vf_token) {
61192 -               WARN_ON(vdev->vf_token->users);
61193 -               mutex_destroy(&vdev->vf_token->lock);
61194 -               kfree(vdev->vf_token);
61195 -       }
61197 -       if (vdev->nb.notifier_call)
61198 -               bus_unregister_notifier(&pci_bus_type, &vdev->nb);
61200 +       vfio_pci_vf_uninit(vdev);
61201         vfio_pci_reflck_put(vdev->reflck);
61202 +       vfio_pci_vga_uninit(vdev);
61204         vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
61205 -       kfree(vdev->region);
61206 -       mutex_destroy(&vdev->ioeventfds_lock);
61208         if (!disable_idle_d3)
61209                 vfio_pci_set_power_state(vdev, PCI_D0);
61211 +       mutex_destroy(&vdev->ioeventfds_lock);
61212 +       kfree(vdev->region);
61213         kfree(vdev->pm_save);
61214         kfree(vdev);
61216 -       if (vfio_pci_is_vga(pdev)) {
61217 -               vga_client_register(pdev, NULL, NULL, NULL);
61218 -               vga_set_legacy_decoding(pdev,
61219 -                               VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
61220 -                               VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
61221 -       }
61224  static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
61225 diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
61226 index bfa4c6ef554e..c79d2f2387aa 100644
61227 --- a/drivers/vhost/vdpa.c
61228 +++ b/drivers/vhost/vdpa.c
61229 @@ -993,6 +993,7 @@ static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
61230         if (vma->vm_end - vma->vm_start != notify.size)
61231                 return -ENOTSUPP;
61233 +       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
61234         vma->vm_ops = &vhost_vdpa_vm_ops;
61235         return 0;
61237 diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
61238 index 091f07e7c145..e9fbe2483844 100644
61239 --- a/drivers/video/backlight/qcom-wled.c
61240 +++ b/drivers/video/backlight/qcom-wled.c
61241 @@ -336,19 +336,19 @@ static int wled3_sync_toggle(struct wled *wled)
61242         unsigned int mask = GENMASK(wled->max_string_count - 1, 0);
61244         rc = regmap_update_bits(wled->regmap,
61245 -                               wled->ctrl_addr + WLED3_SINK_REG_SYNC,
61246 +                               wled->sink_addr + WLED3_SINK_REG_SYNC,
61247                                 mask, mask);
61248         if (rc < 0)
61249                 return rc;
61251         rc = regmap_update_bits(wled->regmap,
61252 -                               wled->ctrl_addr + WLED3_SINK_REG_SYNC,
61253 +                               wled->sink_addr + WLED3_SINK_REG_SYNC,
61254                                 mask, WLED3_SINK_REG_SYNC_CLEAR);
61256         return rc;
61259 -static int wled5_sync_toggle(struct wled *wled)
61260 +static int wled5_mod_sync_toggle(struct wled *wled)
61262         int rc;
61263         u8 val;
61264 @@ -445,10 +445,23 @@ static int wled_update_status(struct backlight_device *bl)
61265                         goto unlock_mutex;
61266                 }
61268 -               rc = wled->wled_sync_toggle(wled);
61269 -               if (rc < 0) {
61270 -                       dev_err(wled->dev, "wled sync failed rc:%d\n", rc);
61271 -                       goto unlock_mutex;
61272 +               if (wled->version < 5) {
61273 +                       rc = wled->wled_sync_toggle(wled);
61274 +                       if (rc < 0) {
61275 +                               dev_err(wled->dev, "wled sync failed rc:%d\n", rc);
61276 +                               goto unlock_mutex;
61277 +                       }
61278 +               } else {
61279 +                       /*
61280 +                        * For WLED5 toggling the MOD_SYNC_BIT updates the
61281 +                        * brightness
61282 +                        */
61283 +                       rc = wled5_mod_sync_toggle(wled);
61284 +                       if (rc < 0) {
61285 +                               dev_err(wled->dev, "wled mod sync failed rc:%d\n",
61286 +                                       rc);
61287 +                               goto unlock_mutex;
61288 +                       }
61289                 }
61290         }
61292 @@ -1459,7 +1472,7 @@ static int wled_configure(struct wled *wled)
61293                 size = ARRAY_SIZE(wled5_opts);
61294                 *cfg = wled5_config_defaults;
61295                 wled->wled_set_brightness = wled5_set_brightness;
61296 -               wled->wled_sync_toggle = wled5_sync_toggle;
61297 +               wled->wled_sync_toggle = wled3_sync_toggle;
61298                 wled->wled_cabc_config = wled5_cabc_config;
61299                 wled->wled_ovp_delay = wled5_ovp_delay;
61300                 wled->wled_auto_detection_required =
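
The qcom-wled hunks both correct the register base for the WLED3 sync toggle (sink block, not control block) and split brightness latching by hardware generation: pre-WLED5 parts toggle the sink SYNC bits, WLED5 toggles a separate MOD_SYNC bit. An illustrative sketch of that dispatch; register addresses and values below are placeholders, not real WLED registers:

#include <stdio.h>

#define SINK_SYNC_REG  0x47      /* placeholder offsets */
#define MOD_SYNC_REG   0x65

static int write_reg(unsigned int reg, unsigned int val)
{
        printf("reg 0x%02x <- 0x%02x\n", reg, val);
        return 0;
}

static int sync_toggle(void)      /* WLED3/WLED4 path */
{
        return write_reg(SINK_SYNC_REG, 0xff) ||
               write_reg(SINK_SYNC_REG, 0x00);
}

static int mod_sync_toggle(void)  /* WLED5 path */
{
        return write_reg(MOD_SYNC_REG, 0x80) ||
               write_reg(MOD_SYNC_REG, 0x00);
}

static int update_brightness(int version)
{
        return version < 5 ? sync_toggle() : mod_sync_toggle();
}

int main(void)
{
        return update_brightness(5);
}
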
61301 diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
61302 index 962c12be9774..631eb918f8e1 100644
61303 --- a/drivers/video/console/vgacon.c
61304 +++ b/drivers/video/console/vgacon.c
61305 @@ -383,7 +383,7 @@ static void vgacon_init(struct vc_data *c, int init)
61306                 vc_resize(c, vga_video_num_columns, vga_video_num_lines);
61308         c->vc_scan_lines = vga_scan_lines;
61309 -       c->vc_font.height = vga_video_font_height;
61310 +       c->vc_font.height = c->vc_cell_height = vga_video_font_height;
61311         c->vc_complement_mask = 0x7700;
61312         if (vga_512_chars)
61313                 c->vc_hi_font_mask = 0x0800;
61314 @@ -518,32 +518,32 @@ static void vgacon_cursor(struct vc_data *c, int mode)
61315                 switch (CUR_SIZE(c->vc_cursor_type)) {
61316                 case CUR_UNDERLINE:
61317                         vgacon_set_cursor_size(c->state.x,
61318 -                                              c->vc_font.height -
61319 -                                              (c->vc_font.height <
61320 +                                              c->vc_cell_height -
61321 +                                              (c->vc_cell_height <
61322                                                 10 ? 2 : 3),
61323 -                                              c->vc_font.height -
61324 -                                              (c->vc_font.height <
61325 +                                              c->vc_cell_height -
61326 +                                              (c->vc_cell_height <
61327                                                 10 ? 1 : 2));
61328                         break;
61329                 case CUR_TWO_THIRDS:
61330                         vgacon_set_cursor_size(c->state.x,
61331 -                                              c->vc_font.height / 3,
61332 -                                              c->vc_font.height -
61333 -                                              (c->vc_font.height <
61334 +                                              c->vc_cell_height / 3,
61335 +                                              c->vc_cell_height -
61336 +                                              (c->vc_cell_height <
61337                                                 10 ? 1 : 2));
61338                         break;
61339                 case CUR_LOWER_THIRD:
61340                         vgacon_set_cursor_size(c->state.x,
61341 -                                              (c->vc_font.height * 2) / 3,
61342 -                                              c->vc_font.height -
61343 -                                              (c->vc_font.height <
61344 +                                              (c->vc_cell_height * 2) / 3,
61345 +                                              c->vc_cell_height -
61346 +                                              (c->vc_cell_height <
61347                                                 10 ? 1 : 2));
61348                         break;
61349                 case CUR_LOWER_HALF:
61350                         vgacon_set_cursor_size(c->state.x,
61351 -                                              c->vc_font.height / 2,
61352 -                                              c->vc_font.height -
61353 -                                              (c->vc_font.height <
61354 +                                              c->vc_cell_height / 2,
61355 +                                              c->vc_cell_height -
61356 +                                              (c->vc_cell_height <
61357                                                 10 ? 1 : 2));
61358                         break;
61359                 case CUR_NONE:
61360 @@ -554,7 +554,7 @@ static void vgacon_cursor(struct vc_data *c, int mode)
61361                         break;
61362                 default:
61363                         vgacon_set_cursor_size(c->state.x, 1,
61364 -                                              c->vc_font.height);
61365 +                                              c->vc_cell_height);
61366                         break;
61367                 }
61368                 break;
61369 @@ -565,13 +565,13 @@ static int vgacon_doresize(struct vc_data *c,
61370                 unsigned int width, unsigned int height)
61372         unsigned long flags;
61373 -       unsigned int scanlines = height * c->vc_font.height;
61374 +       unsigned int scanlines = height * c->vc_cell_height;
61375         u8 scanlines_lo = 0, r7 = 0, vsync_end = 0, mode, max_scan;
61377         raw_spin_lock_irqsave(&vga_lock, flags);
61379         vgacon_xres = width * VGA_FONTWIDTH;
61380 -       vgacon_yres = height * c->vc_font.height;
61381 +       vgacon_yres = height * c->vc_cell_height;
61382         if (vga_video_type >= VIDEO_TYPE_VGAC) {
61383                 outb_p(VGA_CRTC_MAX_SCAN, vga_video_port_reg);
61384                 max_scan = inb_p(vga_video_port_val);
61385 @@ -626,9 +626,9 @@ static int vgacon_doresize(struct vc_data *c,
61386  static int vgacon_switch(struct vc_data *c)
61388         int x = c->vc_cols * VGA_FONTWIDTH;
61389 -       int y = c->vc_rows * c->vc_font.height;
61390 +       int y = c->vc_rows * c->vc_cell_height;
61391         int rows = screen_info.orig_video_lines * vga_default_font_height/
61392 -               c->vc_font.height;
61393 +               c->vc_cell_height;
61394         /*
61395          * We need to save screen size here as it's the only way
61396          * we can spot the screen has been resized and we need to
61397 @@ -1041,7 +1041,7 @@ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight)
61398                                 cursor_size_lastto = 0;
61399                                 c->vc_sw->con_cursor(c, CM_DRAW);
61400                         }
61401 -                       c->vc_font.height = fontheight;
61402 +                       c->vc_font.height = c->vc_cell_height = fontheight;
61403                         vc_resize(c, 0, rows);  /* Adjust console size */
61404                 }
61405         }
61406 @@ -1089,12 +1089,20 @@ static int vgacon_resize(struct vc_data *c, unsigned int width,
61407         if ((width << 1) * height > vga_vram_size)
61408                 return -EINVAL;
61410 +       if (user) {
61411 +               /*
61412 +                * Ho ho!  Someone (svgatextmode, eh?) may have reprogrammed
61413 +                * the video mode!  Set the new defaults then and go away.
61414 +                */
61415 +               screen_info.orig_video_cols = width;
61416 +               screen_info.orig_video_lines = height;
61417 +               vga_default_font_height = c->vc_cell_height;
61418 +               return 0;
61419 +       }
61420         if (width % 2 || width > screen_info.orig_video_cols ||
61421             height > (screen_info.orig_video_lines * vga_default_font_height)/
61422 -           c->vc_font.height)
61423 -               /* let svgatextmode tinker with video timings and
61424 -                  return success */
61425 -               return (user) ? 0 : -EINVAL;
61426 +           c->vc_cell_height)
61427 +               return -EINVAL;
61429         if (con_is_visible(c) && !vga_is_gfx) /* who knows */
61430                 vgacon_doresize(c, width, height);
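
The vgacon hunks above introduce vc_cell_height so that all scanline geometry (cursor shape, resize math) is derived from the character cell height, which can differ from the loaded font height when the hardware pads each row. A minimal sketch of the underline-cursor arithmetic the driver uses, with example cell heights:

#include <stdio.h>

static void underline_cursor(unsigned int cell_height,
                             unsigned int *from, unsigned int *to)
{
        /* mirrors the CUR_UNDERLINE case in vgacon_cursor() */
        *from = cell_height - (cell_height < 10 ? 2 : 3);
        *to   = cell_height - (cell_height < 10 ? 1 : 2);
}

int main(void)
{
        unsigned int from, to;

        underline_cursor(16, &from, &to);   /* typical 8x16 cell */
        printf("cursor scanlines %u..%u of 16\n", from, to);
        underline_cursor(8, &from, &to);    /* small 8x8 cell */
        printf("cursor scanlines %u..%u of 8\n", from, to);
        return 0;
}
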
61431 diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
61432 index 757d5c3f620b..ff09e57f3c38 100644
61433 --- a/drivers/video/fbdev/core/fbcmap.c
61434 +++ b/drivers/video/fbdev/core/fbcmap.c
61435 @@ -101,17 +101,17 @@ int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags)
61436                 if (!len)
61437                         return 0;
61439 -               cmap->red = kmalloc(size, flags);
61440 +               cmap->red = kzalloc(size, flags);
61441                 if (!cmap->red)
61442                         goto fail;
61443 -               cmap->green = kmalloc(size, flags);
61444 +               cmap->green = kzalloc(size, flags);
61445                 if (!cmap->green)
61446                         goto fail;
61447 -               cmap->blue = kmalloc(size, flags);
61448 +               cmap->blue = kzalloc(size, flags);
61449                 if (!cmap->blue)
61450                         goto fail;
61451                 if (transp) {
61452 -                       cmap->transp = kmalloc(size, flags);
61453 +                       cmap->transp = kzalloc(size, flags);
61454                         if (!cmap->transp)
61455                                 goto fail;
61456                 } else {
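
The kmalloc-to-kzalloc switch above exists because the colormap arrays can be copied out before every entry has been written; starting them zeroed prevents stale heap bytes from leaking. A userspace analogue of the same idea:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        size_t len = 16;
        unsigned short *red = calloc(len, sizeof(*red)); /* like kzalloc */

        if (!red)
                return 1;
        /* only entries 0..7 get real data... */
        for (size_t i = 0; i < 8; i++)
                red[i] = (unsigned short)(i * 0x1111);
        /* ...but exposing all 16 is still safe: the rest read as zero */
        for (size_t i = 0; i < len; i++)
                printf("%04x%c", (unsigned)red[i], i + 1 == len ? '\n' : ' ');
        free(red);
        return 0;
}
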
61457 diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
61458 index 3406067985b1..22bb3892f6bd 100644
61459 --- a/drivers/video/fbdev/core/fbcon.c
61460 +++ b/drivers/video/fbdev/core/fbcon.c
61461 @@ -2019,7 +2019,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
61462                         return -EINVAL;
61464                 pr_debug("resize now %ix%i\n", var.xres, var.yres);
61465 -               if (con_is_visible(vc)) {
61466 +               if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) {
61467                         var.activate = FB_ACTIVATE_NOW |
61468                                 FB_ACTIVATE_FORCE;
61469                         fb_set_var(info, &var);
61470 diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c
61471 index 8bbac7182ad3..bd3d07aa4f0e 100644
61472 --- a/drivers/video/fbdev/hgafb.c
61473 +++ b/drivers/video/fbdev/hgafb.c
61474 @@ -286,7 +286,7 @@ static int hga_card_detect(void)
61476         hga_vram = ioremap(0xb0000, hga_vram_len);
61477         if (!hga_vram)
61478 -               goto error;
61479 +               return -ENOMEM;
61481         if (request_region(0x3b0, 12, "hgafb"))
61482                 release_io_ports = 1;
61483 @@ -346,13 +346,18 @@ static int hga_card_detect(void)
61484                         hga_type_name = "Hercules";
61485                         break;
61486         }
61487 -       return 1;
61488 +       return 0;
61489  error:
61490         if (release_io_ports)
61491                 release_region(0x3b0, 12);
61492         if (release_io_port)
61493                 release_region(0x3bf, 1);
61494 -       return 0;
61496 +       iounmap(hga_vram);
61498 +       pr_err("hgafb: HGA card not detected.\n");
61500 +       return -EINVAL;
61503  /**
61504 @@ -550,13 +555,11 @@ static const struct fb_ops hgafb_ops = {
61505  static int hgafb_probe(struct platform_device *pdev)
61507         struct fb_info *info;
61508 +       int ret;
61510 -       if (! hga_card_detect()) {
61511 -               printk(KERN_INFO "hgafb: HGA card not detected.\n");
61512 -               if (hga_vram)
61513 -                       iounmap(hga_vram);
61514 -               return -EINVAL;
61515 -       }
61516 +       ret = hga_card_detect();
61517 +       if (ret)
61518 +               return ret;
61520         printk(KERN_INFO "hgafb: %s with %ldK of memory detected.\n",
61521                 hga_type_name, hga_vram_len/1024);
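
The hgafb change converts hga_card_detect() from a boolean to the usual zero-or-negative-errno convention, so the probe function can forward the precise failure reason instead of collapsing everything to -EINVAL. A sketch of that calling convention, with hypothetical failure inputs:

#include <errno.h>
#include <stdio.h>

static int card_detect(int have_vram, int is_hga)
{
        if (!have_vram)
                return -ENOMEM;  /* mapping video memory failed */
        if (!is_hga)
                return -EINVAL;  /* probed registers don't match */
        return 0;
}

static int probe(void)
{
        int ret = card_detect(1, 0);

        if (ret)
                return ret;      /* propagate the exact errno */
        puts("card detected");
        return 0;
}

int main(void)
{
        int ret = probe();

        if (ret)
                fprintf(stderr, "probe failed: %d\n", ret);
        return ret ? 1 : 0;
}
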
61522 diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
61523 index 3ac053b88495..e04411701ec8 100644
61524 --- a/drivers/video/fbdev/imsttfb.c
61525 +++ b/drivers/video/fbdev/imsttfb.c
61526 @@ -1512,11 +1512,6 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
61527         info->fix.smem_start = addr;
61528         info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
61529                                             0x400000 : 0x800000);
61530 -       if (!info->screen_base) {
61531 -               release_mem_region(addr, size);
61532 -               framebuffer_release(info);
61533 -               return -ENOMEM;
61534 -       }
61535         info->fix.mmio_start = addr + 0x800000;
61536         par->dc_regs = ioremap(addr + 0x800000, 0x1000);
61537         par->cmap_regs_phys = addr + 0x840000;
61538 diff --git a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c
61539 index cfe63932f825..71c00ef772a3 100644
61540 --- a/drivers/video/fbdev/omap/hwa742.c
61541 +++ b/drivers/video/fbdev/omap/hwa742.c
61542 @@ -913,7 +913,7 @@ static void hwa742_resume(void)
61543                 if (hwa742_read_reg(HWA742_PLL_DIV_REG) & (1 << 7))
61544                         break;
61545                 set_current_state(TASK_UNINTERRUPTIBLE);
61546 -               schedule_timeout(msecs_to_jiffies(5));
61547 +               schedule_msec_hrtimeout((5));
61548         }
61549         hwa742_set_update_mode(hwa742.update_mode_before_suspend);
61551 diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
61552 index f1551e00eb12..f0f651e92504 100644
61553 --- a/drivers/video/fbdev/pxafb.c
61554 +++ b/drivers/video/fbdev/pxafb.c
61555 @@ -1287,7 +1287,7 @@ static int pxafb_smart_thread(void *arg)
61556                 mutex_unlock(&fbi->ctrlr_lock);
61558                 set_current_state(TASK_INTERRUPTIBLE);
61559 -               schedule_timeout(msecs_to_jiffies(30));
61560 +               schedule_msec_hrtimeout((30));
61561         }
61563         pr_debug("%s(): task ending\n", __func__);
61564 diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev.c b/drivers/virt/nitro_enclaves/ne_misc_dev.c
61565 index f1964ea4b826..e21e1e86ad15 100644
61566 --- a/drivers/virt/nitro_enclaves/ne_misc_dev.c
61567 +++ b/drivers/virt/nitro_enclaves/ne_misc_dev.c
61568 @@ -1524,7 +1524,8 @@ static const struct file_operations ne_enclave_fops = {
61569   *                       enclave file descriptor to be further used for enclave
61570   *                       resources handling e.g. memory regions and CPUs.
61571   * @ne_pci_dev :       Private data associated with the PCI device.
61572 - * @slot_uid:          Generated unique slot id associated with an enclave.
61573 + * @slot_uid:          User pointer to store the generated unique slot id
61574 + *                     associated with an enclave to.
61575   *
61576   * Context: Process context. This function is called with the ne_pci_dev enclave
61577   *         mutex held.
61578 @@ -1532,7 +1533,7 @@ static const struct file_operations ne_enclave_fops = {
61579   * * Enclave fd on success.
61580   * * Negative return value on failure.
61581   */
61582 -static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 *slot_uid)
61583 +static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 __user *slot_uid)
61585         struct ne_pci_dev_cmd_reply cmd_reply = {};
61586         int enclave_fd = -1;
61587 @@ -1634,7 +1635,18 @@ static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 *slot_uid)
61589         list_add(&ne_enclave->enclave_list_entry, &ne_pci_dev->enclaves_list);
61591 -       *slot_uid = ne_enclave->slot_uid;
61592 +       if (copy_to_user(slot_uid, &ne_enclave->slot_uid, sizeof(ne_enclave->slot_uid))) {
61593 +               /*
61594 +                * As we're holding the only reference to 'enclave_file', fput()
61595 +                * will call ne_enclave_release() which will do a proper cleanup
61596 +                * of all so far allocated resources, leaving only the unused fd
61597 +                * for us to free.
61598 +                */
61599 +               fput(enclave_file);
61600 +               put_unused_fd(enclave_fd);
61602 +               return -EFAULT;
61603 +       }
61605         fd_install(enclave_fd, enclave_file);
61607 @@ -1671,34 +1683,13 @@ static long ne_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
61608         switch (cmd) {
61609         case NE_CREATE_VM: {
61610                 int enclave_fd = -1;
61611 -               struct file *enclave_file = NULL;
61612                 struct ne_pci_dev *ne_pci_dev = ne_devs.ne_pci_dev;
61613 -               int rc = -EINVAL;
61614 -               u64 slot_uid = 0;
61615 +               u64 __user *slot_uid = (void __user *)arg;
61617                 mutex_lock(&ne_pci_dev->enclaves_list_mutex);
61619 -               enclave_fd = ne_create_vm_ioctl(ne_pci_dev, &slot_uid);
61620 -               if (enclave_fd < 0) {
61621 -                       rc = enclave_fd;
61623 -                       mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
61625 -                       return rc;
61626 -               }
61628 +               enclave_fd = ne_create_vm_ioctl(ne_pci_dev, slot_uid);
61629                 mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
61631 -               if (copy_to_user((void __user *)arg, &slot_uid, sizeof(slot_uid))) {
61632 -                       enclave_file = fget(enclave_fd);
61633 -                       /* Decrement file refs to have release() called. */
61634 -                       fput(enclave_file);
61635 -                       fput(enclave_file);
61636 -                       put_unused_fd(enclave_fd);
61638 -                       return -EFAULT;
61639 -               }
61641                 return enclave_fd;
61642         }
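
The nitro_enclaves rework enforces the standard fd publication rule: report results to userspace while the new file is still private, when fput() plus put_unused_fd() can still undo everything, and only then fd_install() it, after which the descriptor belongs to userspace and cannot be recalled. A stubbed sketch of that ordering; the helper functions below are stand-ins, not the kernel API:

#include <errno.h>
#include <stdio.h>

static int copy_result_to_user(int fail) { return fail ? -1 : 0; }
static void fput_file(void)        { puts("fput: release private file"); }
static void put_unused_fd(void)    { puts("put_unused_fd: recycle fd"); }
static void fd_install_file(void)  { puts("fd_install: fd now visible"); }

static int create_vm(int simulate_fault)
{
        /* ...allocate the fd and file, set up the enclave... */
        if (copy_result_to_user(simulate_fault)) {
                /* still private: a full rollback is possible */
                fput_file();
                put_unused_fd();
                return -EFAULT;
        }
        fd_install_file();       /* point of no return */
        return 3;                /* the enclave fd */
}

int main(void)
{
        printf("ok path -> %d\n", create_vm(0));
        printf("fault path -> %d\n", create_vm(1));
        return 0;
}

This is exactly why the old code's fget()/double-fput() dance after a failed copy_to_user() was unsafe: by then the fd had already been installed.
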
61644 diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
61645 index f01d58c7a042..a3e7be96527d 100644
61646 --- a/drivers/xen/gntdev.c
61647 +++ b/drivers/xen/gntdev.c
61648 @@ -1017,8 +1017,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
61649                 err = mmu_interval_notifier_insert_locked(
61650                         &map->notifier, vma->vm_mm, vma->vm_start,
61651                         vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
61652 -               if (err)
61653 +               if (err) {
61654 +                       map->vma = NULL;
61655                         goto out_unlock_put;
61656 +               }
61657         }
61658         mutex_unlock(&priv->lock);
61660 diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c
61661 index e64e6befc63b..87e6b7db892f 100644
61662 --- a/drivers/xen/unpopulated-alloc.c
61663 +++ b/drivers/xen/unpopulated-alloc.c
61664 @@ -39,8 +39,10 @@ static int fill_list(unsigned int nr_pages)
61665         }
61667         pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
61668 -       if (!pgmap)
61669 +       if (!pgmap) {
61670 +               ret = -ENOMEM;
61671                 goto err_pgmap;
61672 +       }
61674         pgmap->type = MEMORY_DEVICE_GENERIC;
61675         pgmap->range = (struct range) {
61676 diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c
61677 index 5447b5ab7c76..1221cfd914cb 100644
61678 --- a/drivers/xen/xen-pciback/vpci.c
61679 +++ b/drivers/xen/xen-pciback/vpci.c
61680 @@ -70,7 +70,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
61681                                    struct pci_dev *dev, int devid,
61682                                    publish_pci_dev_cb publish_cb)
61684 -       int err = 0, slot, func = -1;
61685 +       int err = 0, slot, func = PCI_FUNC(dev->devfn);
61686         struct pci_dev_entry *t, *dev_entry;
61687         struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
61689 @@ -95,22 +95,25 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
61691         /*
61692          * Keep multi-function devices together on the virtual PCI bus, except
61693 -        * virtual functions.
61694 +        * that we want to keep virtual functions at func 0 on their own. They
61695 +        * aren't multi-function devices and hence their presence at func 0
61696 +        * may cause guests to not scan the other functions.
61697          */
61698 -       if (!dev->is_virtfn) {
61699 +       if (!dev->is_virtfn || func) {
61700                 for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
61701                         if (list_empty(&vpci_dev->dev_list[slot]))
61702                                 continue;
61704                         t = list_entry(list_first(&vpci_dev->dev_list[slot]),
61705                                        struct pci_dev_entry, list);
61706 +                       if (t->dev->is_virtfn && !PCI_FUNC(t->dev->devfn))
61707 +                               continue;
61709                         if (match_slot(dev, t->dev)) {
61710                                 dev_info(&dev->dev, "vpci: assign to virtual slot %d func %d\n",
61711 -                                        slot, PCI_FUNC(dev->devfn));
61712 +                                        slot, func);
61713                                 list_add_tail(&dev_entry->list,
61714                                               &vpci_dev->dev_list[slot]);
61715 -                               func = PCI_FUNC(dev->devfn);
61716                                 goto unlock;
61717                         }
61718                 }
61719 @@ -123,7 +126,6 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
61720                                  slot);
61721                         list_add_tail(&dev_entry->list,
61722                                       &vpci_dev->dev_list[slot]);
61723 -                       func = dev->is_virtfn ? 0 : PCI_FUNC(dev->devfn);
61724                         goto unlock;
61725                 }
61726         }
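
The vpci hunk changes slot placement so that a virtual function sitting at func 0 keeps a slot to itself: a func-0 VF is single-function, and sharing its slot would make guests stop scanning the remaining functions. A simplified model of the sharing test, with hypothetical types:

#include <stdio.h>

struct entry { int is_virtfn; int func; };

static int slot_may_share(const struct entry *first_in_slot)
{
        /* mirrors the "t->dev->is_virtfn && !PCI_FUNC(...)" skip above */
        return !(first_in_slot->is_virtfn && first_in_slot->func == 0);
}

int main(void)
{
        struct entry vf0 = { .is_virtfn = 1, .func = 0 };
        struct entry pf3 = { .is_virtfn = 0, .func = 3 };

        printf("slot led by func-0 VF shareable? %d\n", slot_may_share(&vf0));
        printf("slot led by PF func 3 shareable? %d\n", slot_may_share(&pf3));
        return 0;
}
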
61727 diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
61728 index 5188f02e75fb..c09c7ebd6968 100644
61729 --- a/drivers/xen/xen-pciback/xenbus.c
61730 +++ b/drivers/xen/xen-pciback/xenbus.c
61731 @@ -359,7 +359,8 @@ static int xen_pcibk_publish_pci_root(struct xen_pcibk_device *pdev,
61732         return err;
61735 -static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
61736 +static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev,
61737 +                                enum xenbus_state state)
61739         int err = 0;
61740         int num_devs;
61741 @@ -373,9 +374,7 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
61742         dev_dbg(&pdev->xdev->dev, "Reconfiguring device ...\n");
61744         mutex_lock(&pdev->dev_lock);
61745 -       /* Make sure we only reconfigure once */
61746 -       if (xenbus_read_driver_state(pdev->xdev->nodename) !=
61747 -           XenbusStateReconfiguring)
61748 +       if (xenbus_read_driver_state(pdev->xdev->nodename) != state)
61749                 goto out;
61751         err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d",
61752 @@ -500,6 +499,10 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
61753                 }
61754         }
61756 +       if (state != XenbusStateReconfiguring)
61757 +               /* Make sure we only reconfigure once. */
61758 +               goto out;
61760         err = xenbus_switch_state(pdev->xdev, XenbusStateReconfigured);
61761         if (err) {
61762                 xenbus_dev_fatal(pdev->xdev, err,
61763 @@ -525,7 +528,7 @@ static void xen_pcibk_frontend_changed(struct xenbus_device *xdev,
61764                 break;
61766         case XenbusStateReconfiguring:
61767 -               xen_pcibk_reconfigure(pdev);
61768 +               xen_pcibk_reconfigure(pdev, XenbusStateReconfiguring);
61769                 break;
61771         case XenbusStateConnected:
61772 @@ -664,6 +667,15 @@ static void xen_pcibk_be_watch(struct xenbus_watch *watch,
61773                 xen_pcibk_setup_backend(pdev);
61774                 break;
61776 +       case XenbusStateInitialised:
61777 +               /*
61778 +                * We typically move to Initialised when the first device was
61779 +                * added. Hence subsequent devices getting added may need
61780 +                * reconfiguring.
61781 +                */
61782 +               xen_pcibk_reconfigure(pdev, XenbusStateInitialised);
61783 +               break;
61785         default:
61786                 break;
61787         }
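
The xenbus hunks let the same reconfigure routine run from two states, Reconfiguring and the new Initialised case, while preserving the original run-once behaviour by advancing the xenbus state only on the Reconfiguring path. A compact sketch of that gating; the enum values are illustrative:

#include <stdio.h>

enum state { INITIALISED, RECONFIGURING, RECONFIGURED };

static enum state reconfigure(enum state entered_from)
{
        /* ...rescan num_devs and export newly added devices here... */
        if (entered_from != RECONFIGURING)
                return entered_from;       /* no state transition */
        return RECONFIGURED;               /* reconfigure once */
}

int main(void)
{
        printf("from Initialised   -> %d\n", reconfigure(INITIALISED));
        printf("from Reconfiguring -> %d\n", reconfigure(RECONFIGURING));
        return 0;
}
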
61788 diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
61789 index 649f04f112dc..59c32c9b799f 100644
61790 --- a/fs/9p/vfs_file.c
61791 +++ b/fs/9p/vfs_file.c
61792 @@ -86,8 +86,8 @@ int v9fs_file_open(struct inode *inode, struct file *file)
61793                  * to work.
61794                  */
61795                 writeback_fid = v9fs_writeback_fid(file_dentry(file));
61796 -               if (IS_ERR(fid)) {
61797 -                       err = PTR_ERR(fid);
61798 +               if (IS_ERR(writeback_fid)) {
61799 +                       err = PTR_ERR(writeback_fid);
61800                         mutex_unlock(&v9inode->v_mutex);
61801                         goto out_error;
61802                 }
61803 diff --git a/fs/Kconfig b/fs/Kconfig
61804 index a55bda4233bb..f61330e4efc0 100644
61805 --- a/fs/Kconfig
61806 +++ b/fs/Kconfig
61807 @@ -145,6 +145,7 @@ menu "DOS/FAT/EXFAT/NT Filesystems"
61808  source "fs/fat/Kconfig"
61809  source "fs/exfat/Kconfig"
61810  source "fs/ntfs/Kconfig"
61811 +source "fs/ntfs3/Kconfig"
61813  endmenu
61814  endif # BLOCK
61815 diff --git a/fs/Makefile b/fs/Makefile
61816 index 3215fe205256..6bdfcf712cb1 100644
61817 --- a/fs/Makefile
61818 +++ b/fs/Makefile
61819 @@ -99,6 +99,7 @@ obj-$(CONFIG_SYSV_FS)         += sysv/
61820  obj-$(CONFIG_CIFS)             += cifs/
61821  obj-$(CONFIG_HPFS_FS)          += hpfs/
61822  obj-$(CONFIG_NTFS_FS)          += ntfs/
61823 +obj-$(CONFIG_NTFS3_FS)         += ntfs3/
61824  obj-$(CONFIG_UFS_FS)           += ufs/
61825  obj-$(CONFIG_EFS_FS)           += efs/
61826  obj-$(CONFIG_JFFS2_FS)         += jffs2/
61827 diff --git a/fs/afs/dir.c b/fs/afs/dir.c
61828 index 17548c1faf02..31251d11d576 100644
61829 --- a/fs/afs/dir.c
61830 +++ b/fs/afs/dir.c
61831 @@ -1342,6 +1342,7 @@ static int afs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
61833         afs_op_set_vnode(op, 0, dvnode);
61834         op->file[0].dv_delta = 1;
61835 +       op->file[0].modification = true;
61836         op->file[0].update_ctime = true;
61837         op->dentry      = dentry;
61838         op->create.mode = S_IFDIR | mode;
61839 @@ -1423,6 +1424,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
61841         afs_op_set_vnode(op, 0, dvnode);
61842         op->file[0].dv_delta = 1;
61843 +       op->file[0].modification = true;
61844         op->file[0].update_ctime = true;
61846         op->dentry      = dentry;
61847 @@ -1559,6 +1561,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
61849         afs_op_set_vnode(op, 0, dvnode);
61850         op->file[0].dv_delta = 1;
61851 +       op->file[0].modification = true;
61852         op->file[0].update_ctime = true;
61854         /* Try to make sure we have a callback promise on the victim. */
61855 @@ -1641,6 +1644,7 @@ static int afs_create(struct user_namespace *mnt_userns, struct inode *dir,
61857         afs_op_set_vnode(op, 0, dvnode);
61858         op->file[0].dv_delta = 1;
61859 +       op->file[0].modification = true;
61860         op->file[0].update_ctime = true;
61862         op->dentry      = dentry;
61863 @@ -1715,6 +1719,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
61864         afs_op_set_vnode(op, 0, dvnode);
61865         afs_op_set_vnode(op, 1, vnode);
61866         op->file[0].dv_delta = 1;
61867 +       op->file[0].modification = true;
61868         op->file[0].update_ctime = true;
61869         op->file[1].update_ctime = true;
61871 @@ -1910,6 +1915,8 @@ static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
61872         afs_op_set_vnode(op, 1, new_dvnode); /* May be same as orig_dvnode */
61873         op->file[0].dv_delta = 1;
61874         op->file[1].dv_delta = 1;
61875 +       op->file[0].modification = true;
61876 +       op->file[1].modification = true;
61877         op->file[0].update_ctime = true;
61878         op->file[1].update_ctime = true;
61880 diff --git a/fs/afs/dir_silly.c b/fs/afs/dir_silly.c
61881 index 04f75a44f243..dae9a57d7ec0 100644
61882 --- a/fs/afs/dir_silly.c
61883 +++ b/fs/afs/dir_silly.c
61884 @@ -73,6 +73,8 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode
61885         afs_op_set_vnode(op, 1, dvnode);
61886         op->file[0].dv_delta = 1;
61887         op->file[1].dv_delta = 1;
61888 +       op->file[0].modification = true;
61889 +       op->file[1].modification = true;
61890         op->file[0].update_ctime = true;
61891         op->file[1].update_ctime = true;
61893 @@ -201,6 +203,7 @@ static int afs_do_silly_unlink(struct afs_vnode *dvnode, struct afs_vnode *vnode
61894         afs_op_set_vnode(op, 0, dvnode);
61895         afs_op_set_vnode(op, 1, vnode);
61896         op->file[0].dv_delta = 1;
61897 +       op->file[0].modification = true;
61898         op->file[0].update_ctime = true;
61899         op->file[1].op_unlinked = true;
61900         op->file[1].update_ctime = true;
61901 diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c
61902 index 71c58723763d..a82515b47350 100644
61903 --- a/fs/afs/fs_operation.c
61904 +++ b/fs/afs/fs_operation.c
61905 @@ -118,6 +118,8 @@ static void afs_prepare_vnode(struct afs_operation *op, struct afs_vnode_param *
61906                 vp->cb_break_before     = afs_calc_vnode_cb_break(vnode);
61907                 if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
61908                         op->flags       |= AFS_OPERATION_CUR_ONLY;
61909 +               if (vp->modification)
61910 +                       set_bit(AFS_VNODE_MODIFYING, &vnode->flags);
61911         }
61913         if (vp->fid.vnode)
61914 @@ -223,6 +225,10 @@ int afs_put_operation(struct afs_operation *op)
61916         if (op->ops && op->ops->put)
61917                 op->ops->put(op);
61918 +       if (op->file[0].modification)
61919 +               clear_bit(AFS_VNODE_MODIFYING, &op->file[0].vnode->flags);
61920 +       if (op->file[1].modification && op->file[1].vnode != op->file[0].vnode)
61921 +               clear_bit(AFS_VNODE_MODIFYING, &op->file[1].vnode->flags);
61922         if (op->file[0].put_vnode)
61923                 iput(&op->file[0].vnode->vfs_inode);
61924         if (op->file[1].put_vnode)
61925 diff --git a/fs/afs/inode.c b/fs/afs/inode.c
61926 index 12be88716e4c..fddf7d54e0b7 100644
61927 --- a/fs/afs/inode.c
61928 +++ b/fs/afs/inode.c
61929 @@ -102,13 +102,13 @@ static int afs_inode_init_from_status(struct afs_operation *op,
61931         switch (status->type) {
61932         case AFS_FTYPE_FILE:
61933 -               inode->i_mode   = S_IFREG | status->mode;
61934 +               inode->i_mode   = S_IFREG | (status->mode & S_IALLUGO);
61935                 inode->i_op     = &afs_file_inode_operations;
61936                 inode->i_fop    = &afs_file_operations;
61937                 inode->i_mapping->a_ops = &afs_fs_aops;
61938                 break;
61939         case AFS_FTYPE_DIR:
61940 -               inode->i_mode   = S_IFDIR | status->mode;
62941 +               inode->i_mode   = S_IFDIR | (status->mode & S_IALLUGO);
61942                 inode->i_op     = &afs_dir_inode_operations;
61943                 inode->i_fop    = &afs_dir_file_operations;
61944                 inode->i_mapping->a_ops = &afs_dir_aops;
61945 @@ -198,7 +198,7 @@ static void afs_apply_status(struct afs_operation *op,
61946         if (status->mode != vnode->status.mode) {
61947                 mode = inode->i_mode;
61948                 mode &= ~S_IALLUGO;
61949 -               mode |= status->mode;
61950 +               mode |= status->mode & S_IALLUGO;
61951                 WRITE_ONCE(inode->i_mode, mode);
61952         }
61954 @@ -293,8 +293,9 @@ void afs_vnode_commit_status(struct afs_operation *op, struct afs_vnode_param *v
61955                         op->flags &= ~AFS_OPERATION_DIR_CONFLICT;
61956                 }
61957         } else if (vp->scb.have_status) {
61958 -               if (vp->dv_before + vp->dv_delta != vp->scb.status.data_version &&
61959 -                   vp->speculative)
61960 +               if (vp->speculative &&
61961 +                   (test_bit(AFS_VNODE_MODIFYING, &vnode->flags) ||
61962 +                    vp->dv_before != vnode->status.data_version))
61963                         /* Ignore the result of a speculative bulk status fetch
61964                          * if it splits around a modification op, thereby
61965                          * appearing to regress the data version.
61966 @@ -910,6 +911,7 @@ int afs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
61967         }
61968         op->ctime = attr->ia_ctime;
61969         op->file[0].update_ctime = 1;
61970 +       op->file[0].modification = true;
61972         op->ops = &afs_setattr_operation;
61973         ret = afs_do_sync_operation(op);
61974 diff --git a/fs/afs/internal.h b/fs/afs/internal.h
61975 index 1627b1872812..be981a9a1add 100644
61976 --- a/fs/afs/internal.h
61977 +++ b/fs/afs/internal.h
61978 @@ -640,6 +640,7 @@ struct afs_vnode {
61979  #define AFS_VNODE_PSEUDODIR    7               /* set if Vnode is a pseudo directory */
61980  #define AFS_VNODE_NEW_CONTENT  8               /* Set if file has new content (create/trunc-0) */
61981  #define AFS_VNODE_SILLY_DELETED        9               /* Set if file has been silly-deleted */
61982 +#define AFS_VNODE_MODIFYING    10              /* Set if we're performing a modification op */
61984         struct list_head        wb_keys;        /* List of keys available for writeback */
61985         struct list_head        pending_locks;  /* locks waiting to be granted */
61986 @@ -756,6 +757,7 @@ struct afs_vnode_param {
61987         bool                    set_size:1;     /* Must update i_size */
61988         bool                    op_unlinked:1;  /* True if file was unlinked by op */
61989         bool                    speculative:1;  /* T if speculative status fetch (no vnode lock) */
61990 +       bool                    modification:1; /* Set if the content gets modified */
61991  };
61993  /*
61994 diff --git a/fs/afs/write.c b/fs/afs/write.c
61995 index eb737ed63afb..ebe3b6493fce 100644
61996 --- a/fs/afs/write.c
61997 +++ b/fs/afs/write.c
61998 @@ -450,6 +450,7 @@ static int afs_store_data(struct address_space *mapping,
61999         afs_op_set_vnode(op, 0, vnode);
62000         op->file[0].dv_delta = 1;
62001         op->store.mapping = mapping;
62002 +       op->file[0].modification = true;
62003         op->store.first = first;
62004         op->store.last = last;
62005         op->store.first_offset = offset;
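
The afs hunks add a per-vnode "modifying" bit that is set for the duration of any mutating operation and cleared in afs_put_operation(); a speculative status fetch that completes while the bit is set, or whose data version no longer matches, is discarded rather than applied. A simplified single-threaded model of the acceptance check, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

struct vnode {
        bool modifying;                    /* AFS_VNODE_MODIFYING analogue */
        unsigned long long data_version;
};

static bool accept_speculative(const struct vnode *v,
                               unsigned long long dv_before)
{
        if (v->modifying || dv_before != v->data_version)
                return false;    /* fetch raced a modification: ignore it */
        return true;
}

int main(void)
{
        struct vnode v = { .data_version = 7 };

        printf("idle vnode: %d\n", accept_speculative(&v, 7));
        v.modifying = true;      /* set_bit() when the op is prepared */
        printf("during op:  %d\n", accept_speculative(&v, 7));
        v.modifying = false;     /* clear_bit() in afs_put_operation() */
        return 0;
}
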
62006 diff --git a/fs/block_dev.c b/fs/block_dev.c
62007 index 09d6f7229db9..a5a6a7930e5e 100644
62008 --- a/fs/block_dev.c
62009 +++ b/fs/block_dev.c
62010 @@ -1684,6 +1684,7 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
62011         struct inode *bd_inode = bdev_file_inode(file);
62012         loff_t size = i_size_read(bd_inode);
62013         struct blk_plug plug;
62014 +       size_t shorted = 0;
62015         ssize_t ret;
62017         if (bdev_read_only(I_BDEV(bd_inode)))
62018 @@ -1701,12 +1702,17 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
62019         if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
62020                 return -EOPNOTSUPP;
62022 -       iov_iter_truncate(from, size - iocb->ki_pos);
62023 +       size -= iocb->ki_pos;
62024 +       if (iov_iter_count(from) > size) {
62025 +               shorted = iov_iter_count(from) - size;
62026 +               iov_iter_truncate(from, size);
62027 +       }
62029         blk_start_plug(&plug);
62030         ret = __generic_file_write_iter(iocb, from);
62031         if (ret > 0)
62032                 ret = generic_write_sync(iocb, ret);
62033 +       iov_iter_reexpand(from, iov_iter_count(from) + shorted);
62034         blk_finish_plug(&plug);
62035         return ret;
62037 @@ -1718,13 +1724,21 @@ ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
62038         struct inode *bd_inode = bdev_file_inode(file);
62039         loff_t size = i_size_read(bd_inode);
62040         loff_t pos = iocb->ki_pos;
62041 +       size_t shorted = 0;
62042 +       ssize_t ret;
62044         if (pos >= size)
62045                 return 0;
62047         size -= pos;
62048 -       iov_iter_truncate(to, size);
62049 -       return generic_file_read_iter(iocb, to);
62050 +       if (iov_iter_count(to) > size) {
62051 +               shorted = iov_iter_count(to) - size;
62052 +               iov_iter_truncate(to, size);
62053 +       }
62055 +       ret = generic_file_read_iter(iocb, to);
62056 +       iov_iter_reexpand(to, iov_iter_count(to) + shorted);
62057 +       return ret;
62059  EXPORT_SYMBOL_GPL(blkdev_read_iter);
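
The block_dev hunks replace a bare iov_iter_truncate() with a truncate/reexpand pair: the iterator is clipped to what fits before end of device, the I/O runs, and the clipped-off amount is added back so the caller's residual count reflects the original request. A minimal iov_iter stand-in showing the arithmetic:

#include <stdio.h>

struct iter { size_t count; };   /* bytes remaining in the request */

static size_t do_io(struct iter *it, size_t done)
{
        it->count -= done;       /* the I/O consumed 'done' bytes */
        return done;
}

int main(void)
{
        struct iter it = { .count = 4096 };
        size_t size = 1024;      /* bytes left before end of device */
        size_t shorted = 0;
        size_t ret;

        if (it.count > size) {
                shorted = it.count - size;
                it.count = size;         /* iov_iter_truncate() */
        }
        ret = do_io(&it, size);          /* partial I/O up to EOF */
        it.count += shorted;             /* iov_iter_reexpand() */
        printf("wrote %zu, %zu bytes of the request untouched\n",
               ret, it.count);
        return 0;
}

Without the reexpand, a caller inspecting the iterator after a short write at end of device would wrongly conclude the whole request had been consumed.
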
62061 diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
62062 index 744b99ddc28c..a7d9e147dee6 100644
62063 --- a/fs/btrfs/block-group.c
62064 +++ b/fs/btrfs/block-group.c
62065 @@ -3269,6 +3269,7 @@ static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
62066   */
62067  void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
62069 +       struct btrfs_transaction *cur_trans = trans->transaction;
62070         struct btrfs_fs_info *fs_info = trans->fs_info;
62071         struct btrfs_space_info *info;
62072         u64 left;
62073 @@ -3283,6 +3284,7 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
62074         lockdep_assert_held(&fs_info->chunk_mutex);
62076         info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
62077 +again:
62078         spin_lock(&info->lock);
62079         left = info->total_bytes - btrfs_space_info_used(info, true);
62080         spin_unlock(&info->lock);
62081 @@ -3301,6 +3303,58 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
62083         if (left < thresh) {
62084                 u64 flags = btrfs_system_alloc_profile(fs_info);
62085 +               u64 reserved = atomic64_read(&cur_trans->chunk_bytes_reserved);
62087 +               /*
62088 +                * If there's not available space for the chunk tree (system
62089 +                * space) and there are other tasks that reserved space for
62090 +                * creating a new system block group, wait for them to complete
62091 +                * the creation of their system block group and release excess
62092 +                * reserved space. We do this because:
62093 +                *
62094 +                * *) We can end up allocating more system chunks than necessary
62095 +                *    when there are multiple tasks that are concurrently
62096 +                *    allocating block groups, which can lead to exhaustion of
62097 +                *    the system array in the superblock;
62098 +                *
62099 +                * *) If we allocate extra and unnecessary system block groups,
62100 +                *    despite being empty for a long time, and possibly forever,
62101 +                *    they end not being added to the list of unused block groups
62102 +                *    they end up not being added to the list of unused block groups
62103 +                *    last extent from a block group - which never happens since
62104 +                *    we never allocate from them in the first place. The few
62105 +                *    exceptions are when mounting a filesystem or running scrub,
62106 +                *    which add unused block groups to the list of unused block
62107 +                *    groups, to be deleted by the cleaner kthread.
62108 +                *    And even when they are added to the list of unused block
62109 +                *    groups, it can take a long time until they get deleted,
62110 +                *    since the cleaner kthread might be sleeping or busy with
62111 +                *    other work (deleting subvolumes, running delayed iputs,
62112 +                *    defrag scheduling, etc);
62113 +                *
62114 +                * This is rare in practice, but can happen when too many tasks
62115 +                * are allocating block groups in parallel (via fallocate())
62116 +                * and before the one that reserved space for a new system block
62117 +                * group finishes the block group creation and releases the space
62118 +                * reserved in excess (at btrfs_create_pending_block_groups()),
62119 +                * other tasks end up here and see free system space temporarily
62120 +                * not enough for updating the chunk tree.
62121 +                *
62122 +                * We unlock the chunk mutex before waiting for such tasks and
62123 +                * lock it again after the wait, otherwise we would deadlock.
62124 +                * It is safe to do so because allocating a system chunk is the
62125 +                * first thing done while allocating a new block group.
62126 +                */
62127 +               if (reserved > trans->chunk_bytes_reserved) {
62128 +                       const u64 min_needed = reserved - thresh;
62130 +                       mutex_unlock(&fs_info->chunk_mutex);
62131 +                       wait_event(cur_trans->chunk_reserve_wait,
62132 +                          atomic64_read(&cur_trans->chunk_bytes_reserved) <=
62133 +                          min_needed);
62134 +                       mutex_lock(&fs_info->chunk_mutex);
62135 +                       goto again;
62136 +               }
62138                 /*
62139                  * Ignore failure to create system chunk. We might end up not
62140 @@ -3315,8 +3369,10 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
62141                 ret = btrfs_block_rsv_add(fs_info->chunk_root,
62142                                           &fs_info->chunk_block_rsv,
62143                                           thresh, BTRFS_RESERVE_NO_FLUSH);
62144 -               if (!ret)
62145 +               if (!ret) {
62146 +                       atomic64_add(thresh, &cur_trans->chunk_bytes_reserved);
62147                         trans->chunk_bytes_reserved += thresh;
62148 +               }
62149         }
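
The check_system_chunk() change tracks, per transaction, the bytes reserved by in-flight block group creations; a task that finds system space short first waits for that counter to drain before allocating yet another system chunk. A userspace sketch of the throttle, using a pthread condition variable as a stand-in for wait_event()/wake_up(); all names are illustrative:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  drained = PTHREAD_COND_INITIALIZER;
static long reserved;            /* cur_trans->chunk_bytes_reserved analogue */

static void reserve(long bytes)
{
        pthread_mutex_lock(&lock);
        reserved += bytes;
        pthread_mutex_unlock(&lock);
}

static void release_excess(long bytes)   /* after block group creation */
{
        pthread_mutex_lock(&lock);
        reserved -= bytes;
        pthread_cond_broadcast(&drained); /* wake_up(&chunk_reserve_wait) */
        pthread_mutex_unlock(&lock);
}

static void wait_for_reservations(long min_needed)
{
        pthread_mutex_lock(&lock);
        while (reserved > min_needed)     /* wait_event(...) condition */
                pthread_cond_wait(&drained, &lock);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        reserve(4096);
        release_excess(4096);    /* another task finishing its block group */
        wait_for_reservations(0);
        puts("system space settled; safe to re-check");
        return 0;
}

As the in-diff comment explains, the kernel drops the chunk mutex around the wait to avoid deadlocking against the task it is waiting for.
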
62152 diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
62153 index 28e202e89660..418903604936 100644
62154 --- a/fs/btrfs/btrfs_inode.h
62155 +++ b/fs/btrfs/btrfs_inode.h
62156 @@ -299,6 +299,21 @@ static inline void btrfs_mod_outstanding_extents(struct btrfs_inode *inode,
62157                                                   mod);
62161 + * Called every time after doing a buffered, direct IO or memory mapped write.
62162 + *
62163 + * This is to ensure that if we write to a file that was previously fsynced in
62164 + * the current transaction, then try to fsync it again in the same transaction,
62165 + * we will know that there were changes in the file and that it needs to be
62166 + * logged.
62167 + */
62168 +static inline void btrfs_set_inode_last_sub_trans(struct btrfs_inode *inode)
62170 +       spin_lock(&inode->lock);
62171 +       inode->last_sub_trans = inode->root->log_transid;
62172 +       spin_unlock(&inode->lock);
62175  static inline int btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation)
62177         int ret = 0;
62178 diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
62179 index 3f4c832abfed..81387cdf334d 100644
62180 --- a/fs/btrfs/compression.c
62181 +++ b/fs/btrfs/compression.c
62182 @@ -80,10 +80,15 @@ static int compression_compress_pages(int type, struct list_head *ws,
62183         case BTRFS_COMPRESS_NONE:
62184         default:
62185                 /*
62186 -                * This can't happen, the type is validated several times
62187 -                * before we get here. As a sane fallback, return what the
62188 -                * callers will understand as 'no compression happened'.
62189 +                * This can happen when compression races with remount setting
62190 +                * it to 'no compress', while caller doesn't call
62191 +                * inode_need_compress() to check if we really need to
62192 +                * compress.
62193 +                *
62194 +                * Not a big deal, just need to inform caller that we
62195 +                * haven't allocated any pages yet.
62196                  */
62197 +               *out_pages = 0;
62198                 return -E2BIG;
62199         }
62201 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
62202 index 34b929bd5c1a..f43ce82a6aed 100644
62203 --- a/fs/btrfs/ctree.c
62204 +++ b/fs/btrfs/ctree.c
62205 @@ -1365,10 +1365,30 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
62206                                    "failed to read tree block %llu from get_old_root",
62207                                    logical);
62208                 } else {
62209 +                       struct tree_mod_elem *tm2;
62211                         btrfs_tree_read_lock(old);
62212                         eb = btrfs_clone_extent_buffer(old);
62213 +                       /*
62214 +                        * After the lookup for the most recent tree mod operation
62215 +                        * above and before we locked and cloned the extent buffer
62216 +                        * 'old', a new tree mod log operation may have been added.
62217 +                        * So lookup for a more recent one to make sure the number
62218 +                        * of mod log operations we replay is consistent with the
62219 +                        * number of items we have in the cloned extent buffer,
62220 +                        * otherwise we can hit a BUG_ON when rewinding the extent
62221 +                        * buffer.
62222 +                        */
62223 +                       tm2 = tree_mod_log_search(fs_info, logical, time_seq);
62224                         btrfs_tree_read_unlock(old);
62225                         free_extent_buffer(old);
62226 +                       ASSERT(tm2);
62227 +                       ASSERT(tm2 == tm || tm2->seq > tm->seq);
62228 +                       if (!tm2 || tm2->seq < tm->seq) {
62229 +                               free_extent_buffer(eb);
62230 +                               return NULL;
62231 +                       }
62232 +                       tm = tm2;
62233                 }
62234         } else if (old_root) {
62235                 eb_root_owner = btrfs_header_owner(eb_root);
62236 diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
62237 index 9ae776ab3967..29ef969035df 100644
62238 --- a/fs/btrfs/ctree.h
62239 +++ b/fs/btrfs/ctree.h
62240 @@ -3110,7 +3110,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
62241                                struct btrfs_inode *inode, u64 new_size,
62242                                u32 min_type);
62244 -int btrfs_start_delalloc_snapshot(struct btrfs_root *root);
62245 +int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context);
62246  int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
62247                                bool in_reclaim_context);
62248  int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
62249 diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
62250 index 56642ca7af10..fa1c3bc93ccf 100644
62251 --- a/fs/btrfs/delalloc-space.c
62252 +++ b/fs/btrfs/delalloc-space.c
62253 @@ -311,7 +311,7 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
62254                         flush = BTRFS_RESERVE_FLUSH_LIMIT;
62256                 if (btrfs_transaction_in_commit(fs_info))
62257 -                       schedule_timeout(1);
62258 +                       schedule_min_hrtimeout();
62259         }
62261         num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
62262 diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
62263 index bf25401c9768..c1d2b6786129 100644
62264 --- a/fs/btrfs/delayed-inode.c
62265 +++ b/fs/btrfs/delayed-inode.c
62266 @@ -1589,8 +1589,8 @@ bool btrfs_readdir_get_delayed_items(struct inode *inode,
62267          * We can only do one readdir with delayed items at a time because of
62268          * item->readdir_list.
62269          */
62270 -       inode_unlock_shared(inode);
62271 -       inode_lock(inode);
62272 +       btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
62273 +       btrfs_inode_lock(inode, 0);
62275         mutex_lock(&delayed_node->mutex);
62276         item = __btrfs_first_delayed_insertion_item(delayed_node);
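
Here and in the later file.c/ioctl.c hunks, raw inode_lock()/inode_unlock() calls are replaced by btrfs helpers that take a flags word (0 for exclusive, BTRFS_ILOCK_SHARED for shared). A small pthread-based model of that single-entry-point design (ilock/iunlock are hypothetical stand-ins; build with -lpthread):

#include <pthread.h>
#include <stdio.h>

#define ILOCK_SHARED 0x1   /* models BTRFS_ILOCK_SHARED */

static pthread_rwlock_t lk = PTHREAD_RWLOCK_INITIALIZER;

/* One entry point; the flag picks shared vs. exclusive, as in the helpers. */
static void ilock(unsigned flags)
{
        if (flags & ILOCK_SHARED)
                pthread_rwlock_rdlock(&lk);
        else
                pthread_rwlock_wrlock(&lk);
}

static void iunlock(unsigned flags)
{
        (void)flags;                    /* same unlock either way here */
        pthread_rwlock_unlock(&lk);
}

int main(void)
{
        ilock(ILOCK_SHARED);            /* readdir-style shared section */
        iunlock(ILOCK_SHARED);
        ilock(0);                       /* then retake exclusively */
        printf("relocked exclusively\n");
        iunlock(0);
        return 0;
}
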
62277 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
62278 index 36a3c973fda1..5b82050b871a 100644
62279 --- a/fs/btrfs/extent-tree.c
62280 +++ b/fs/btrfs/extent-tree.c
62281 @@ -1340,12 +1340,16 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
62282                 stripe = bbio->stripes;
62283                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
62284                         u64 bytes;
62285 +                       struct btrfs_device *device = stripe->dev;
62287 -                       if (!stripe->dev->bdev) {
62288 +                       if (!device->bdev) {
62289                                 ASSERT(btrfs_test_opt(fs_info, DEGRADED));
62290                                 continue;
62291                         }
62293 +                       if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
62294 +                               continue;
62296                         ret = do_discard_extent(stripe, &bytes);
62297                         if (!ret) {
62298                                 discarded_bytes += bytes;
62299 diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
62300 index 0e155f013839..abee4b62741d 100644
62301 --- a/fs/btrfs/file.c
62302 +++ b/fs/btrfs/file.c
62303 @@ -2014,14 +2014,8 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
62304         else
62305                 num_written = btrfs_buffered_write(iocb, from);
62307 -       /*
62308 -        * We also have to set last_sub_trans to the current log transid,
62309 -        * otherwise subsequent syncs to a file that's been synced in this
62310 -        * transaction will appear to have already occurred.
62311 -        */
62312 -       spin_lock(&inode->lock);
62313 -       inode->last_sub_trans = inode->root->log_transid;
62314 -       spin_unlock(&inode->lock);
62315 +       btrfs_set_inode_last_sub_trans(inode);
62317         if (num_written > 0)
62318                 num_written = generic_write_sync(iocb, num_written);
62320 @@ -2073,6 +2067,30 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
62321         return ret;
62324 +static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
62325 +{
62326 +       struct btrfs_inode *inode = BTRFS_I(ctx->inode);
62327 +       struct btrfs_fs_info *fs_info = inode->root->fs_info;
62329 +       if (btrfs_inode_in_log(inode, fs_info->generation) &&
62330 +           list_empty(&ctx->ordered_extents))
62331 +               return true;
62333 +       /*
62334 +        * If we are doing a fast fsync we can not bail out if the inode's
62335 +        * last_trans is <= the last committed transaction, because we only
62336 +        * update the last_trans of the inode during ordered extent completion,
62337 +        * and for a fast fsync we don't wait for that, we only wait for the
62338 +        * writeback to complete.
62339 +        */
62340 +       if (inode->last_trans <= fs_info->last_trans_committed &&
62341 +           (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) ||
62342 +            list_empty(&ctx->ordered_extents)))
62343 +               return true;
62345 +       return false;
62346 +}
62348  /*
62349   * fsync call for both files and directories.  This logs the inode into
62350   * the tree log instead of forcing full commits whenever possible.
62351 @@ -2122,7 +2140,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
62352         if (ret)
62353                 goto out;
62355 -       inode_lock(inode);
62356 +       btrfs_inode_lock(inode, 0);
62358         atomic_inc(&root->log_batch);
62360 @@ -2154,7 +2172,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
62361          */
62362         ret = start_ordered_ops(inode, start, end);
62363         if (ret) {
62364 -               inode_unlock(inode);
62365 +               btrfs_inode_unlock(inode, 0);
62366                 goto out;
62367         }
62369 @@ -2191,17 +2209,8 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
62371         atomic_inc(&root->log_batch);
62373 -       /*
62374 -        * If we are doing a fast fsync we can not bail out if the inode's
62375 -        * last_trans is <= then the last committed transaction, because we only
62376 -        * update the last_trans of the inode during ordered extent completion,
62377 -        * and for a fast fsync we don't wait for that, we only wait for the
62378 -        * writeback to complete.
62379 -        */
62380         smp_mb();
62381 -       if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) ||
62382 -           (BTRFS_I(inode)->last_trans <= fs_info->last_trans_committed &&
62383 -            (full_sync || list_empty(&ctx.ordered_extents)))) {
62384 +       if (skip_inode_logging(&ctx)) {
62385                 /*
62386                  * We've had everything committed since the last time we were
62387                  * modified so clear this flag in case it was set for whatever
62388 @@ -2255,7 +2264,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
62389          * file again, but that will end up using the synchronization
62390          * inside btrfs_sync_log to keep things safe.
62391          */
62392 -       inode_unlock(inode);
62393 +       btrfs_inode_unlock(inode, 0);
62395         if (ret != BTRFS_NO_LOG_SYNC) {
62396                 if (!ret) {
62397 @@ -2285,7 +2294,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
62399  out_release_extents:
62400         btrfs_release_log_ctx_extents(&ctx);
62401 -       inode_unlock(inode);
62402 +       btrfs_inode_unlock(inode, 0);
62403         goto out;
62406 @@ -2735,8 +2744,6 @@ int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path,
62407                         extent_info->file_offset += replace_len;
62408                 }
62410 -               cur_offset = drop_args.drop_end;
62412                 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
62413                 if (ret)
62414                         break;
62415 @@ -2756,7 +2763,9 @@ int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path,
62416                 BUG_ON(ret);    /* shouldn't happen */
62417                 trans->block_rsv = rsv;
62419 -               if (!extent_info) {
62420 +               cur_offset = drop_args.drop_end;
62421 +               len = end - cur_offset;
62422 +               if (!extent_info && len) {
62423                         ret = find_first_non_hole(BTRFS_I(inode), &cur_offset,
62424                                                   &len);
62425                         if (unlikely(ret < 0))
62426 @@ -2868,7 +2877,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
62427         if (ret)
62428                 return ret;
62430 -       inode_lock(inode);
62431 +       btrfs_inode_lock(inode, 0);
62432         ino_size = round_up(inode->i_size, fs_info->sectorsize);
62433         ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
62434         if (ret < 0)
62435 @@ -2908,7 +2917,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
62436                 truncated_block = true;
62437                 ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
62438                 if (ret) {
62439 -                       inode_unlock(inode);
62440 +                       btrfs_inode_unlock(inode, 0);
62441                         return ret;
62442                 }
62443         }
62444 @@ -3009,7 +3018,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
62445                                 ret = ret2;
62446                 }
62447         }
62448 -       inode_unlock(inode);
62449 +       btrfs_inode_unlock(inode, 0);
62450         return ret;
62453 @@ -3377,7 +3386,7 @@ static long btrfs_fallocate(struct file *file, int mode,
62455         if (mode & FALLOC_FL_ZERO_RANGE) {
62456                 ret = btrfs_zero_range(inode, offset, len, mode);
62457 -               inode_unlock(inode);
62458 +               btrfs_inode_unlock(inode, 0);
62459                 return ret;
62460         }
62462 @@ -3487,7 +3496,7 @@ static long btrfs_fallocate(struct file *file, int mode,
62463         unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
62464                              &cached_state);
62465  out:
62466 -       inode_unlock(inode);
62467 +       btrfs_inode_unlock(inode, 0);
62468         /* Let go of our reservation. */
62469         if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
62470                 btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
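
The skip_inode_logging() helper introduced above factors two early-exit conditions out of btrfs_sync_file(). A user-space model of just that predicate, with booleans standing in for the inode and log-context state it reads:

#include <stdbool.h>
#include <stdio.h>

/* Model of the two early-exit conditions skip_inode_logging() factors out. */
struct ctx_model {
        bool inode_in_log;        /* btrfs_inode_in_log(...) */
        bool ordered_empty;       /* list_empty(&ctx->ordered_extents) */
        bool needs_full_sync;     /* BTRFS_INODE_NEEDS_FULL_SYNC set */
        bool trans_committed;     /* last_trans <= last_trans_committed */
};

static bool skip_logging(const struct ctx_model *c)
{
        if (c->inode_in_log && c->ordered_empty)
                return true;
        /* A fast fsync may still have pending ordered extents to log. */
        if (c->trans_committed && (c->needs_full_sync || c->ordered_empty))
                return true;
        return false;
}

int main(void)
{
        struct ctx_model fast = { false, false, false, true };

        /* committed, but a fast fsync with ordered extents must still log */
        printf("skip=%d\n", skip_logging(&fast)); /* prints skip=0 */
        return 0;
}
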
62471 diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
62472 index 9988decd5717..ac9c2691376d 100644
62473 --- a/fs/btrfs/free-space-cache.c
62474 +++ b/fs/btrfs/free-space-cache.c
62475 @@ -3942,7 +3942,7 @@ static int cleanup_free_space_cache_v1(struct btrfs_fs_info *fs_info,
62477         struct btrfs_block_group *block_group;
62478         struct rb_node *node;
62479 -       int ret;
62480 +       int ret = 0;
62482         btrfs_info(fs_info, "cleaning free space cache v1");
62484 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
62485 index a520775949a0..81b93c9c659b 100644
62486 --- a/fs/btrfs/inode.c
62487 +++ b/fs/btrfs/inode.c
62488 @@ -3253,6 +3253,7 @@ void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
62489                 inode = list_first_entry(&fs_info->delayed_iputs,
62490                                 struct btrfs_inode, delayed_iput);
62491                 run_delayed_iput_locked(fs_info, inode);
62492 +               cond_resched_lock(&fs_info->delayed_iput_lock);
62493         }
62494         spin_unlock(&fs_info->delayed_iput_lock);
62496 @@ -8619,9 +8620,7 @@ vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
62497         set_page_dirty(page);
62498         SetPageUptodate(page);
62500 -       BTRFS_I(inode)->last_trans = fs_info->generation;
62501 -       BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
62502 -       BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
62503 +       btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
62505         unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
62507 @@ -9674,7 +9673,7 @@ static int start_delalloc_inodes(struct btrfs_root *root,
62508         return ret;
62511 -int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
62512 +int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
62514         struct writeback_control wbc = {
62515                 .nr_to_write = LONG_MAX,
62516 @@ -9687,7 +9686,7 @@ int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
62517         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
62518                 return -EROFS;
62520 -       return start_delalloc_inodes(root, &wbc, true, false);
62521 +       return start_delalloc_inodes(root, &wbc, true, in_reclaim_context);
62524  int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
62525 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
62526 index e8d53fea4c61..f9ecb6c0bf15 100644
62527 --- a/fs/btrfs/ioctl.c
62528 +++ b/fs/btrfs/ioctl.c
62529 @@ -226,7 +226,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
62530         if (ret)
62531                 return ret;
62533 -       inode_lock(inode);
62534 +       btrfs_inode_lock(inode, 0);
62535         fsflags = btrfs_mask_fsflags_for_type(inode, fsflags);
62536         old_fsflags = btrfs_inode_flags_to_fsflags(binode->flags);
62538 @@ -353,7 +353,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
62539   out_end_trans:
62540         btrfs_end_transaction(trans);
62541   out_unlock:
62542 -       inode_unlock(inode);
62543 +       btrfs_inode_unlock(inode, 0);
62544         mnt_drop_write_file(file);
62545         return ret;
62547 @@ -449,7 +449,7 @@ static int btrfs_ioctl_fssetxattr(struct file *file, void __user *arg)
62548         if (ret)
62549                 return ret;
62551 -       inode_lock(inode);
62552 +       btrfs_inode_lock(inode, 0);
62554         old_flags = binode->flags;
62555         old_i_flags = inode->i_flags;
62556 @@ -501,7 +501,7 @@ static int btrfs_ioctl_fssetxattr(struct file *file, void __user *arg)
62557                 inode->i_flags = old_i_flags;
62558         }
62560 -       inode_unlock(inode);
62561 +       btrfs_inode_unlock(inode, 0);
62562         mnt_drop_write_file(file);
62564         return ret;
62565 @@ -697,8 +697,6 @@ static noinline int create_subvol(struct inode *dir,
62566         btrfs_set_root_otransid(root_item, trans->transid);
62568         btrfs_tree_unlock(leaf);
62569 -       free_extent_buffer(leaf);
62570 -       leaf = NULL;
62572         btrfs_set_root_dirid(root_item, BTRFS_FIRST_FREE_OBJECTID);
62574 @@ -707,8 +705,22 @@ static noinline int create_subvol(struct inode *dir,
62575         key.type = BTRFS_ROOT_ITEM_KEY;
62576         ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
62577                                 root_item);
62578 -       if (ret)
62579 +       if (ret) {
62580 +               /*
62581 +                * Since we don't abort the transaction in this case, free the
62582 +                * tree block so that we don't leak space and leave the
62583 +                * filesystem in an inconsistent state (an extent item in the
62584 +                * extent tree without backreferences). Also no need to have
62585 +                * the tree block locked since it is not in any tree at this
62586 +                * point, so no other task can find it and use it.
62587 +                */
62588 +               btrfs_free_tree_block(trans, root, leaf, 0, 1);
62589 +               free_extent_buffer(leaf);
62590                 goto fail;
62591 +       }
62593 +       free_extent_buffer(leaf);
62594 +       leaf = NULL;
62596         key.offset = (u64)-1;
62597         new_root = btrfs_get_new_fs_root(fs_info, objectid, anon_dev);
62598 @@ -1014,7 +1026,7 @@ static noinline int btrfs_mksubvol(const struct path *parent,
62599  out_dput:
62600         dput(dentry);
62601  out_unlock:
62602 -       inode_unlock(dir);
62603 +       btrfs_inode_unlock(dir, 0);
62604         return error;
62607 @@ -1034,7 +1046,7 @@ static noinline int btrfs_mksnapshot(const struct path *parent,
62608          */
62609         btrfs_drew_read_lock(&root->snapshot_lock);
62611 -       ret = btrfs_start_delalloc_snapshot(root);
62612 +       ret = btrfs_start_delalloc_snapshot(root, false);
62613         if (ret)
62614                 goto out;
62616 @@ -1612,7 +1624,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
62617                         ra_index += cluster;
62618                 }
62620 -               inode_lock(inode);
62621 +               btrfs_inode_lock(inode, 0);
62622                 if (IS_SWAPFILE(inode)) {
62623                         ret = -ETXTBSY;
62624                 } else {
62625 @@ -1621,13 +1633,13 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
62626                         ret = cluster_pages_for_defrag(inode, pages, i, cluster);
62627                 }
62628                 if (ret < 0) {
62629 -                       inode_unlock(inode);
62630 +                       btrfs_inode_unlock(inode, 0);
62631                         goto out_ra;
62632                 }
62634                 defrag_count += ret;
62635                 balance_dirty_pages_ratelimited(inode->i_mapping);
62636 -               inode_unlock(inode);
62637 +               btrfs_inode_unlock(inode, 0);
62639                 if (newer_than) {
62640                         if (newer_off == (u64)-1)
62641 @@ -1675,9 +1687,9 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
62643  out_ra:
62644         if (do_compress) {
62645 -               inode_lock(inode);
62646 +               btrfs_inode_lock(inode, 0);
62647                 BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE;
62648 -               inode_unlock(inode);
62649 +               btrfs_inode_unlock(inode, 0);
62650         }
62651         if (!file)
62652                 kfree(ra);
62653 @@ -3112,9 +3124,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
62654                 goto out_dput;
62655         }
62657 -       inode_lock(inode);
62658 +       btrfs_inode_lock(inode, 0);
62659         err = btrfs_delete_subvolume(dir, dentry);
62660 -       inode_unlock(inode);
62661 +       btrfs_inode_unlock(inode, 0);
62662         if (!err) {
62663                 fsnotify_rmdir(dir, dentry);
62664                 d_delete(dentry);
62665 @@ -3123,7 +3135,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
62666  out_dput:
62667         dput(dentry);
62668  out_unlock_dir:
62669 -       inode_unlock(dir);
62670 +       btrfs_inode_unlock(dir, 0);
62671  free_subvol_name:
62672         kfree(subvol_name_ptr);
62673  free_parent:
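
The create_subvol() change above illustrates a general rule: an object allocated but not yet linked into any shared structure must be freed by hand on failure, since neither a transaction abort nor later tree cleanup will ever find it. A tiny model of that error path (insert_root_model is a made-up stand-in):

#include <stdio.h>
#include <stdlib.h>

/* Model: an object allocated but not yet linked anywhere must be freed by
 * hand on failure; no abort/rollback machinery will find it. */
static int insert_root_model(int fail) { return fail ? -5 /* -EIO */ : 0; }

int main(void)
{
        char *leaf = malloc(4096);      /* stands in for the new tree block */
        if (!leaf)
                return 1;

        int ret = insert_root_model(1);
        if (ret) {
                free(leaf);             /* would otherwise leak: not in a tree */
                fprintf(stderr, "insert failed: %d\n", ret);
                return 1;
        }
        free(leaf);
        return 0;
}
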
62674 diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
62675 index 985a21558437..043e3fa961e0 100644
62676 --- a/fs/btrfs/ordered-data.c
62677 +++ b/fs/btrfs/ordered-data.c
62678 @@ -995,7 +995,7 @@ int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
62680         if (pre)
62681                 ret = clone_ordered_extent(ordered, 0, pre);
62682 -       if (post)
62683 +       if (ret == 0 && post)
62684                 ret = clone_ordered_extent(ordered, pre + ordered->disk_num_bytes,
62685                                            post);
62687 diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
62688 index f0b9ef13153a..2991287a71a8 100644
62689 --- a/fs/btrfs/qgroup.c
62690 +++ b/fs/btrfs/qgroup.c
62691 @@ -3579,7 +3579,7 @@ static int try_flush_qgroup(struct btrfs_root *root)
62692                 return 0;
62693         }
62695 -       ret = btrfs_start_delalloc_snapshot(root);
62696 +       ret = btrfs_start_delalloc_snapshot(root, true);
62697         if (ret < 0)
62698                 goto out;
62699         btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
62700 diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
62701 index 762881b777b3..0abbf050580d 100644
62702 --- a/fs/btrfs/reflink.c
62703 +++ b/fs/btrfs/reflink.c
62704 @@ -833,7 +833,7 @@ loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
62705                 return -EINVAL;
62707         if (same_inode)
62708 -               inode_lock(src_inode);
62709 +               btrfs_inode_lock(src_inode, 0);
62710         else
62711                 lock_two_nondirectories(src_inode, dst_inode);
62713 @@ -849,7 +849,7 @@ loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
62715  out_unlock:
62716         if (same_inode)
62717 -               inode_unlock(src_inode);
62718 +               btrfs_inode_unlock(src_inode, 0);
62719         else
62720                 unlock_two_nondirectories(src_inode, dst_inode);
62722 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
62723 index 232d5da7b7be..829dc8dcc151 100644
62724 --- a/fs/btrfs/relocation.c
62725 +++ b/fs/btrfs/relocation.c
62726 @@ -733,10 +733,12 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
62727         struct extent_buffer *eb;
62728         struct btrfs_root_item *root_item;
62729         struct btrfs_key root_key;
62730 -       int ret;
62731 +       int ret = 0;
62732 +       bool must_abort = false;
62734         root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
62735 -       BUG_ON(!root_item);
62736 +       if (!root_item)
62737 +               return ERR_PTR(-ENOMEM);
62739         root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
62740         root_key.type = BTRFS_ROOT_ITEM_KEY;
62741 @@ -748,7 +750,9 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
62742                 /* called by btrfs_init_reloc_root */
62743                 ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
62744                                       BTRFS_TREE_RELOC_OBJECTID);
62745 -               BUG_ON(ret);
62746 +               if (ret)
62747 +                       goto fail;
62749                 /*
62750                  * Set the last_snapshot field to the generation of the commit
62751                  * root - like this ctree.c:btrfs_block_can_be_shared() behaves
62752 @@ -769,9 +773,16 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
62753                  */
62754                 ret = btrfs_copy_root(trans, root, root->node, &eb,
62755                                       BTRFS_TREE_RELOC_OBJECTID);
62756 -               BUG_ON(ret);
62757 +               if (ret)
62758 +                       goto fail;
62759         }
62761 +       /*
62762 +        * We have changed references at this point, we must abort the
62763 +        * transaction if anything fails.
62764 +        */
62765 +       must_abort = true;
62767         memcpy(root_item, &root->root_item, sizeof(*root_item));
62768         btrfs_set_root_bytenr(root_item, eb->start);
62769         btrfs_set_root_level(root_item, btrfs_header_level(eb));
62770 @@ -789,14 +800,25 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
62772         ret = btrfs_insert_root(trans, fs_info->tree_root,
62773                                 &root_key, root_item);
62774 -       BUG_ON(ret);
62775 +       if (ret)
62776 +               goto fail;
62778         kfree(root_item);
62780         reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
62781 -       BUG_ON(IS_ERR(reloc_root));
62782 +       if (IS_ERR(reloc_root)) {
62783 +               ret = PTR_ERR(reloc_root);
62784 +               goto abort;
62785 +       }
62786         set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
62787         reloc_root->last_trans = trans->transid;
62788         return reloc_root;
62789 +fail:
62790 +       kfree(root_item);
62791 +abort:
62792 +       if (must_abort)
62793 +               btrfs_abort_transaction(trans, ret);
62794 +       return ERR_PTR(ret);
62797  /*
62798 @@ -875,7 +897,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
62799         int ret;
62801         if (!have_reloc_root(root))
62802 -               goto out;
62803 +               return 0;
62805         reloc_root = root->reloc_root;
62806         root_item = &reloc_root->root_item;
62807 @@ -908,10 +930,8 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
62809         ret = btrfs_update_root(trans, fs_info->tree_root,
62810                                 &reloc_root->root_key, root_item);
62811 -       BUG_ON(ret);
62812         btrfs_put_root(reloc_root);
62813 -out:
62814 -       return 0;
62815 +       return ret;
62818  /*
62819 @@ -1185,8 +1205,8 @@ int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
62820         int ret;
62821         int slot;
62823 -       BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
62824 -       BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
62825 +       ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
62826 +       ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
62828         last_snapshot = btrfs_root_last_snapshot(&src->root_item);
62829  again:
62830 @@ -1217,7 +1237,7 @@ int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
62831         parent = eb;
62832         while (1) {
62833                 level = btrfs_header_level(parent);
62834 -               BUG_ON(level < lowest_level);
62835 +               ASSERT(level >= lowest_level);
62837                 ret = btrfs_bin_search(parent, &key, &slot);
62838                 if (ret < 0)
62839 @@ -2578,7 +2598,7 @@ static noinline_for_stack int prealloc_file_extent_cluster(
62840                 return btrfs_end_transaction(trans);
62841         }
62843 -       inode_lock(&inode->vfs_inode);
62844 +       btrfs_inode_lock(&inode->vfs_inode, 0);
62845         for (nr = 0; nr < cluster->nr; nr++) {
62846                 start = cluster->boundary[nr] - offset;
62847                 if (nr + 1 < cluster->nr)
62848 @@ -2596,7 +2616,7 @@ static noinline_for_stack int prealloc_file_extent_cluster(
62849                 if (ret)
62850                         break;
62851         }
62852 -       inode_unlock(&inode->vfs_inode);
62853 +       btrfs_inode_unlock(&inode->vfs_inode, 0);
62855         if (cur_offset < prealloc_end)
62856                 btrfs_free_reserved_data_space_noquota(inode->root->fs_info,
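
The create_reloc_root() rework above hinges on the must_abort flag: failures before any shared references change simply unwind, while failures after that point must abort the whole transaction. A compact model of that two-phase error handling (step() is a placeholder):

#include <stdbool.h>
#include <stdio.h>

static int step(int fail) { return fail ? -12 /* -ENOMEM */ : 0; }

/* Model: before 'must_abort' is set, failures just unwind; after shared
 * state has been touched, failure must abort the whole transaction. */
static int create_model(int fail_early, int fail_late)
{
        bool must_abort = false;
        int ret;

        ret = step(fail_early);         /* nothing shared changed yet */
        if (ret)
                goto fail;

        must_abort = true;              /* references were modified */

        ret = step(fail_late);
        if (ret)
                goto fail;
        return 0;
fail:
        if (must_abort)
                fprintf(stderr, "aborting transaction: %d\n", ret);
        return ret;
}

int main(void)
{
        create_model(1, 0);             /* plain unwind */
        create_model(0, 1);             /* prints the abort message */
        return 0;
}
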
62857 diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
62858 index 3d9088eab2fc..b9202a1f1af1 100644
62859 --- a/fs/btrfs/scrub.c
62860 +++ b/fs/btrfs/scrub.c
62861 @@ -3682,8 +3682,8 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
62862                         spin_lock(&cache->lock);
62863                         if (!cache->to_copy) {
62864                                 spin_unlock(&cache->lock);
62865 -                               ro_set = 0;
62866 -                               goto done;
62867 +                               btrfs_put_block_group(cache);
62868 +                               goto skip;
62869                         }
62870                         spin_unlock(&cache->lock);
62871                 }
62872 @@ -3841,7 +3841,6 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
62873                                                       cache, found_key.offset))
62874                         ro_set = 0;
62876 -done:
62877                 down_write(&dev_replace->rwsem);
62878                 dev_replace->cursor_left = dev_replace->cursor_right;
62879                 dev_replace->item_needs_writeback = 1;
62880 diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
62881 index 8f323859156b..8ae8f1732fd2 100644
62882 --- a/fs/btrfs/send.c
62883 +++ b/fs/btrfs/send.c
62884 @@ -7139,7 +7139,7 @@ static int flush_delalloc_roots(struct send_ctx *sctx)
62885         int i;
62887         if (root) {
62888 -               ret = btrfs_start_delalloc_snapshot(root);
62889 +               ret = btrfs_start_delalloc_snapshot(root, false);
62890                 if (ret)
62891                         return ret;
62892                 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
62893 @@ -7147,7 +7147,7 @@ static int flush_delalloc_roots(struct send_ctx *sctx)
62895         for (i = 0; i < sctx->clone_roots_cnt; i++) {
62896                 root = sctx->clone_roots[i].root;
62897 -               ret = btrfs_start_delalloc_snapshot(root);
62898 +               ret = btrfs_start_delalloc_snapshot(root, false);
62899                 if (ret)
62900                         return ret;
62901                 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
62902 diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
62903 index acff6bb49a97..d56d3e7ca324 100644
62904 --- a/fs/btrfs/transaction.c
62905 +++ b/fs/btrfs/transaction.c
62906 @@ -260,6 +260,7 @@ static inline int extwriter_counter_read(struct btrfs_transaction *trans)
62907  void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
62909         struct btrfs_fs_info *fs_info = trans->fs_info;
62910 +       struct btrfs_transaction *cur_trans = trans->transaction;
62912         if (!trans->chunk_bytes_reserved)
62913                 return;
62914 @@ -268,6 +269,8 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
62916         btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv,
62917                                 trans->chunk_bytes_reserved, NULL);
62918 +       atomic64_sub(trans->chunk_bytes_reserved, &cur_trans->chunk_bytes_reserved);
62919 +       cond_wake_up(&cur_trans->chunk_reserve_wait);
62920         trans->chunk_bytes_reserved = 0;
62923 @@ -383,6 +386,8 @@ static noinline int join_transaction(struct btrfs_fs_info *fs_info,
62924         spin_lock_init(&cur_trans->dropped_roots_lock);
62925         INIT_LIST_HEAD(&cur_trans->releasing_ebs);
62926         spin_lock_init(&cur_trans->releasing_ebs_lock);
62927 +       atomic64_set(&cur_trans->chunk_bytes_reserved, 0);
62928 +       init_waitqueue_head(&cur_trans->chunk_reserve_wait);
62929         list_add_tail(&cur_trans->list, &fs_info->trans_list);
62930         extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
62931                         IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode);
62932 @@ -1961,7 +1966,6 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
62933          */
62934         BUG_ON(list_empty(&cur_trans->list));
62936 -       list_del_init(&cur_trans->list);
62937         if (cur_trans == fs_info->running_transaction) {
62938                 cur_trans->state = TRANS_STATE_COMMIT_DOING;
62939                 spin_unlock(&fs_info->trans_lock);
62940 @@ -1970,6 +1974,17 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
62942                 spin_lock(&fs_info->trans_lock);
62943         }
62945 +       /*
62946 +        * Now that we know no one else is still using the transaction we can
62947 +        * remove the transaction from the list of transactions. This avoids
62948 +        * the transaction kthread from cleaning up the transaction while some
62949 +        * other task is still using it, which could result in a use-after-free
62950 +        * on things like log trees, as it forces the transaction kthread to
62951 +        * wait for this transaction to be cleaned up by us.
62952 +        */
62953 +       list_del_init(&cur_trans->list);
62955         spin_unlock(&fs_info->trans_lock);
62957         btrfs_cleanup_one_transaction(trans->transaction, fs_info);
62958 diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
62959 index 6335716e513f..364cfbb4c5c5 100644
62960 --- a/fs/btrfs/transaction.h
62961 +++ b/fs/btrfs/transaction.h
62962 @@ -96,6 +96,13 @@ struct btrfs_transaction {
62964         spinlock_t releasing_ebs_lock;
62965         struct list_head releasing_ebs;
62967 +       /*
62968 +        * The number of bytes currently reserved, by all transaction handles
62969 +        * attached to this transaction, for metadata extents of the chunk tree.
62970 +        */
62971 +       atomic64_t chunk_bytes_reserved;
62972 +       wait_queue_head_t chunk_reserve_wait;
62973  };
62975  #define __TRANS_FREEZABLE      (1U << 0)
62976 @@ -175,7 +182,7 @@ static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
62977         spin_lock(&inode->lock);
62978         inode->last_trans = trans->transaction->transid;
62979         inode->last_sub_trans = inode->root->log_transid;
62980 -       inode->last_log_commit = inode->root->last_log_commit;
62981 +       inode->last_log_commit = inode->last_sub_trans - 1;
62982         spin_unlock(&inode->lock);
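
The chunk_bytes_reserved counter and chunk_reserve_wait queue added above follow a reserve/release pattern in which the final release wakes any waiters. A user-space sketch with C11 atomics standing in for atomic64_t and the wait queue (the printf marks where cond_wake_up() would fire):

#include <stdatomic.h>
#include <stdio.h>

/* Model of the per-transaction counter added here: handles add their
 * reservation, release subtracts it and wakes anyone waiting for zero. */
static atomic_long chunk_bytes_reserved;

static void reserve(long bytes)
{
        atomic_fetch_add(&chunk_bytes_reserved, bytes);
}

static void release(long bytes)
{
        /* fetch_sub returns the old value; old == bytes means we hit zero */
        if (atomic_fetch_sub(&chunk_bytes_reserved, bytes) == bytes)
                printf("counter hit zero: wake chunk_reserve_wait\n");
}

int main(void)
{
        reserve(1 << 20);
        release(1 << 20);               /* last release triggers the wakeup */
        return 0;
}
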
62985 diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
62986 index 92a368627791..53624fca0747 100644
62987 --- a/fs/btrfs/tree-log.c
62988 +++ b/fs/btrfs/tree-log.c
62989 @@ -3165,20 +3165,22 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
62990          */
62991         mutex_unlock(&root->log_mutex);
62993 -       btrfs_init_log_ctx(&root_log_ctx, NULL);
62995 -       mutex_lock(&log_root_tree->log_mutex);
62997         if (btrfs_is_zoned(fs_info)) {
62998 +               mutex_lock(&fs_info->tree_root->log_mutex);
62999                 if (!log_root_tree->node) {
63000                         ret = btrfs_alloc_log_tree_node(trans, log_root_tree);
63001                         if (ret) {
63002 -                               mutex_unlock(&log_root_tree->log_mutex);
62189 63003 +                               mutex_unlock(&fs_info->tree_root->log_mutex);
63004                                 goto out;
63005                         }
63006                 }
63007 +               mutex_unlock(&fs_info->tree_root->log_mutex);
63008         }
63010 +       btrfs_init_log_ctx(&root_log_ctx, NULL);
63012 +       mutex_lock(&log_root_tree->log_mutex);
63014         index2 = log_root_tree->log_transid % 2;
63015         list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
63016         root_log_ctx.log_transid = log_root_tree->log_transid;
63017 @@ -6058,7 +6060,8 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
63018          * (since logging them is pointless, a link count of 0 means they
63019          * will never be accessible).
63020          */
63021 -       if (btrfs_inode_in_log(inode, trans->transid) ||
63022 +       if ((btrfs_inode_in_log(inode, trans->transid) &&
63023 +            list_empty(&ctx->ordered_extents)) ||
63024             inode->vfs_inode.i_nlink == 0) {
63025                 ret = BTRFS_NO_LOG_SYNC;
63026                 goto end_no_trans;
63027 @@ -6454,6 +6457,24 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
63028             (!old_dir || old_dir->logged_trans < trans->transid))
63029                 return;
63031 +       /*
63032 +        * If we are doing a rename (old_dir is not NULL) from a directory that
63033 +        * was previously logged, make sure the next log attempt on the directory
63034 +        * is not skipped and logs the inode again. This is because the log may
63035 +        * not currently be authoritative for a range including the old
63036 +        * BTRFS_DIR_ITEM_KEY and BTRFS_DIR_INDEX_KEY keys, so we want to make
63037 +        * sure after a log replay we do not end up with both the new and old
63038 +        * dentries around (in case the inode is a directory we would have a
63039 +        * directory with two hard links and 2 inode references for different
63040 +        * parents). The next log attempt of old_dir will happen at
63041 +        * btrfs_log_all_parents(), called through btrfs_log_inode_parent()
63042 +        * below, because we have previously set inode->last_unlink_trans to the
63043 +        * current transaction ID, either here or at btrfs_record_unlink_dir() in
63044 +        * case inode is a directory.
63045 +        */
63046 +       if (old_dir)
63047 +               old_dir->logged_trans = 0;
63049         btrfs_init_log_ctx(&ctx, &inode->vfs_inode);
63050         ctx.logging_new_name = true;
63051         /*
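
The btrfs_log_new_name() hunk above forces the old parent directory to be logged again by clearing its logged_trans stamp. A minimal model of the stamp check that clearing defeats (dir_model and dir_needs_logging are illustrative, not btrfs code):

#include <stdio.h>

/* Model: zeroing the "already logged in this transaction" stamp forces the
 * next fsync to log the old parent directory again after a rename. */
struct dir_model { unsigned long long logged_trans; };

static int dir_needs_logging(const struct dir_model *d,
                             unsigned long long transid)
{
        return d->logged_trans < transid;
}

int main(void)
{
        unsigned long long transid = 42;
        struct dir_model old_dir = { .logged_trans = 42 };

        old_dir.logged_trans = 0;       /* what the rename path now does */
        printf("re-log old_dir: %d\n", dir_needs_logging(&old_dir, transid));
        return 0;
}
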
63052 diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
63053 index 1c6810bbaf8b..3912eda7905f 100644
63054 --- a/fs/btrfs/volumes.c
63055 +++ b/fs/btrfs/volumes.c
63056 @@ -4989,6 +4989,8 @@ static void init_alloc_chunk_ctl_policy_zoned(
63057                 ctl->max_chunk_size = 2 * ctl->max_stripe_size;
63058                 ctl->devs_max = min_t(int, ctl->devs_max,
63059                                       BTRFS_MAX_DEVS_SYS_CHUNK);
63060 +       } else {
63061 +               BUG();
63062         }
63064         /* We don't want a chunk larger than 10% of writable space */
63065 diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
63066 index eeb3ebe11d7a..304ce64c70a4 100644
63067 --- a/fs/btrfs/zoned.c
63068 +++ b/fs/btrfs/zoned.c
63069 @@ -342,6 +342,13 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
63070         if (!IS_ALIGNED(nr_sectors, zone_sectors))
63071                 zone_info->nr_zones++;
63073 +       if (bdev_is_zoned(bdev) && zone_info->max_zone_append_size == 0) {
63074 +               btrfs_err(fs_info, "zoned: device %pg does not support zone append",
63075 +                         bdev);
63076 +               ret = -EINVAL;
63077 +               goto out;
63078 +       }
63080         zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
63081         if (!zone_info->seq_zones) {
63082                 ret = -ENOMEM;
63083 @@ -1119,6 +1126,11 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
63084                         goto out;
63085                 }
63087 +               if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
63088 +                       ret = -EIO;
63089 +                       goto out;
63090 +               }
63092                 switch (zone.cond) {
63093                 case BLK_ZONE_COND_OFFLINE:
63094                 case BLK_ZONE_COND_READONLY:
63095 diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
63096 index 8e9626d63976..14418b02c189 100644
63097 --- a/fs/btrfs/zstd.c
63098 +++ b/fs/btrfs/zstd.c
63099 @@ -28,10 +28,10 @@
63100  /* 307s to avoid pathologically clashing with transaction commit */
63101  #define ZSTD_BTRFS_RECLAIM_JIFFIES (307 * HZ)
63103 -static ZSTD_parameters zstd_get_btrfs_parameters(unsigned int level,
63104 +static zstd_parameters zstd_get_btrfs_parameters(unsigned int level,
63105                                                  size_t src_len)
63107 -       ZSTD_parameters params = ZSTD_getParams(level, src_len, 0);
63108 +       zstd_parameters params = zstd_get_params(level, src_len);
63110         if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG)
63111                 params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG;
63112 @@ -48,8 +48,8 @@ struct workspace {
63113         unsigned long last_used; /* jiffies */
63114         struct list_head list;
63115         struct list_head lru_list;
63116 -       ZSTD_inBuffer in_buf;
63117 -       ZSTD_outBuffer out_buf;
63118 +       zstd_in_buffer in_buf;
63119 +       zstd_out_buffer out_buf;
63120  };
63122  /*
63123 @@ -155,12 +155,12 @@ static void zstd_calc_ws_mem_sizes(void)
63124         unsigned int level;
63126         for (level = 1; level <= ZSTD_BTRFS_MAX_LEVEL; level++) {
63127 -               ZSTD_parameters params =
63128 +               zstd_parameters params =
63129                         zstd_get_btrfs_parameters(level, ZSTD_BTRFS_MAX_INPUT);
63130                 size_t level_size =
63131                         max_t(size_t,
63132 -                             ZSTD_CStreamWorkspaceBound(params.cParams),
63133 -                             ZSTD_DStreamWorkspaceBound(ZSTD_BTRFS_MAX_INPUT));
63134 +                             zstd_cstream_workspace_bound(&params.cParams),
63135 +                             zstd_dstream_workspace_bound(ZSTD_BTRFS_MAX_INPUT));
63137                 max_size = max_t(size_t, max_size, level_size);
63138                 zstd_ws_mem_sizes[level - 1] = max_size;
63139 @@ -371,7 +371,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
63140                 unsigned long *total_in, unsigned long *total_out)
63142         struct workspace *workspace = list_entry(ws, struct workspace, list);
63143 -       ZSTD_CStream *stream;
63144 +       zstd_cstream *stream;
63145         int ret = 0;
63146         int nr_pages = 0;
63147         struct page *in_page = NULL;  /* The current page to read */
63148 @@ -381,7 +381,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
63149         unsigned long len = *total_out;
63150         const unsigned long nr_dest_pages = *out_pages;
63151         unsigned long max_out = nr_dest_pages * PAGE_SIZE;
63152 -       ZSTD_parameters params = zstd_get_btrfs_parameters(workspace->req_level,
63153 +       zstd_parameters params = zstd_get_btrfs_parameters(workspace->req_level,
63154                                                            len);
63156         *out_pages = 0;
63157 @@ -389,10 +389,10 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
63158         *total_in = 0;
63160         /* Initialize the stream */
63161 -       stream = ZSTD_initCStream(params, len, workspace->mem,
63162 +       stream = zstd_init_cstream(&params, len, workspace->mem,
63163                         workspace->size);
63164         if (!stream) {
63165 -               pr_warn("BTRFS: ZSTD_initCStream failed\n");
63166 +               pr_warn("BTRFS: zstd_init_cstream failed\n");
63167                 ret = -EIO;
63168                 goto out;
63169         }
63170 @@ -418,11 +418,11 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
63171         while (1) {
63172                 size_t ret2;
63174 -               ret2 = ZSTD_compressStream(stream, &workspace->out_buf,
63175 +               ret2 = zstd_compress_stream(stream, &workspace->out_buf,
63176                                 &workspace->in_buf);
63177 -               if (ZSTD_isError(ret2)) {
63178 -                       pr_debug("BTRFS: ZSTD_compressStream returned %d\n",
63179 -                                       ZSTD_getErrorCode(ret2));
63180 +               if (zstd_is_error(ret2)) {
63181 +                       pr_debug("BTRFS: zstd_compress_stream returned %d\n",
63182 +                                       zstd_get_error_code(ret2));
63183                         ret = -EIO;
63184                         goto out;
63185                 }
63186 @@ -487,10 +487,10 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
63187         while (1) {
63188                 size_t ret2;
63190 -               ret2 = ZSTD_endStream(stream, &workspace->out_buf);
63191 -               if (ZSTD_isError(ret2)) {
63192 -                       pr_debug("BTRFS: ZSTD_endStream returned %d\n",
63193 -                                       ZSTD_getErrorCode(ret2));
63194 +               ret2 = zstd_end_stream(stream, &workspace->out_buf);
63195 +               if (zstd_is_error(ret2)) {
63196 +                       pr_debug("BTRFS: zstd_end_stream returned %d\n",
63197 +                                       zstd_get_error_code(ret2));
63198                         ret = -EIO;
63199                         goto out;
63200                 }
63201 @@ -550,17 +550,17 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
63202         u64 disk_start = cb->start;
63203         struct bio *orig_bio = cb->orig_bio;
63204         size_t srclen = cb->compressed_len;
63205 -       ZSTD_DStream *stream;
63206 +       zstd_dstream *stream;
63207         int ret = 0;
63208         unsigned long page_in_index = 0;
63209         unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
63210         unsigned long buf_start;
63211         unsigned long total_out = 0;
63213 -       stream = ZSTD_initDStream(
63214 +       stream = zstd_init_dstream(
63215                         ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
63216         if (!stream) {
63217 -               pr_debug("BTRFS: ZSTD_initDStream failed\n");
63218 +               pr_debug("BTRFS: zstd_init_dstream failed\n");
63219                 ret = -EIO;
63220                 goto done;
63221         }
63222 @@ -576,11 +576,11 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
63223         while (1) {
63224                 size_t ret2;
63226 -               ret2 = ZSTD_decompressStream(stream, &workspace->out_buf,
63227 +               ret2 = zstd_decompress_stream(stream, &workspace->out_buf,
63228                                 &workspace->in_buf);
63229 -               if (ZSTD_isError(ret2)) {
63230 -                       pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
63231 -                                       ZSTD_getErrorCode(ret2));
63232 +               if (zstd_is_error(ret2)) {
63233 +                       pr_debug("BTRFS: zstd_decompress_stream returned %d\n",
63234 +                                       zstd_get_error_code(ret2));
63235                         ret = -EIO;
63236                         goto done;
63237                 }
63238 @@ -626,17 +626,17 @@ int zstd_decompress(struct list_head *ws, unsigned char *data_in,
63239                 size_t destlen)
63241         struct workspace *workspace = list_entry(ws, struct workspace, list);
63242 -       ZSTD_DStream *stream;
63243 +       zstd_dstream *stream;
63244         int ret = 0;
63245         size_t ret2;
63246         unsigned long total_out = 0;
63247         unsigned long pg_offset = 0;
63248         char *kaddr;
63250 -       stream = ZSTD_initDStream(
63251 +       stream = zstd_init_dstream(
63252                         ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
63253         if (!stream) {
63254 -               pr_warn("BTRFS: ZSTD_initDStream failed\n");
63255 +               pr_warn("BTRFS: zstd_init_dstream failed\n");
63256                 ret = -EIO;
63257                 goto finish;
63258         }
63259 @@ -660,15 +660,15 @@ int zstd_decompress(struct list_head *ws, unsigned char *data_in,
63261                 /* Check if the frame is over and we still need more input */
63262                 if (ret2 == 0) {
63263 -                       pr_debug("BTRFS: ZSTD_decompressStream ended early\n");
63264 +                       pr_debug("BTRFS: zstd_decompress_stream ended early\n");
63265                         ret = -EIO;
63266                         goto finish;
63267                 }
63268 -               ret2 = ZSTD_decompressStream(stream, &workspace->out_buf,
63269 +               ret2 = zstd_decompress_stream(stream, &workspace->out_buf,
63270                                 &workspace->in_buf);
63271 -               if (ZSTD_isError(ret2)) {
63272 -                       pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
63273 -                                       ZSTD_getErrorCode(ret2));
63274 +               if (zstd_is_error(ret2)) {
63275 +                       pr_debug("BTRFS: zstd_decompress_stream returned %d\n",
63276 +                                       zstd_get_error_code(ret2));
63277                         ret = -EIO;
63278                         goto finish;
63279                 }
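
The zstd.c hunks above are a mechanical rename from upstream libzstd names to the kernel's snake_case wrappers; the streaming call structure is unchanged. For reference, the same loop shape in user space against libzstd (build with -lzstd); the kernel wrappers mirror these calls one-to-one but take a caller-supplied workspace instead of allocating internally:

#include <stdio.h>
#include <string.h>
#include <zstd.h>

int main(void)
{
        const char *src = "hello hello hello hello hello hello";
        char dst[256];
        ZSTD_CStream *cs = ZSTD_createCStream();
        ZSTD_inBuffer in = { src, strlen(src), 0 };
        ZSTD_outBuffer out = { dst, sizeof(dst), 0 };

        if (!cs || ZSTD_isError(ZSTD_initCStream(cs, 3)))
                return 1;
        if (ZSTD_isError(ZSTD_compressStream(cs, &out, &in)) ||
            ZSTD_isError(ZSTD_endStream(cs, &out))) /* tiny input: one call */
                return 1;
        printf("%zu -> %zu bytes\n", in.size, out.pos);
        ZSTD_freeCStream(cs);
        return 0;
}
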
63280 diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
63281 index 570731c4d019..d405ba801492 100644
63282 --- a/fs/ceph/caps.c
63283 +++ b/fs/ceph/caps.c
63284 @@ -1867,6 +1867,7 @@ static int try_nonblocking_invalidate(struct inode *inode)
63285         u32 invalidating_gen = ci->i_rdcache_gen;
63287         spin_unlock(&ci->i_ceph_lock);
63288 +       ceph_fscache_invalidate(inode);
63289         invalidate_mapping_pages(&inode->i_data, 0, -1);
63290         spin_lock(&ci->i_ceph_lock);
63292 diff --git a/fs/ceph/export.c b/fs/ceph/export.c
63293 index e088843a7734..042bb4a02c0a 100644
63294 --- a/fs/ceph/export.c
63295 +++ b/fs/ceph/export.c
63296 @@ -129,6 +129,10 @@ static struct inode *__lookup_inode(struct super_block *sb, u64 ino)
63298         vino.ino = ino;
63299         vino.snap = CEPH_NOSNAP;
63301 +       if (ceph_vino_is_reserved(vino))
63302 +               return ERR_PTR(-ESTALE);
63304         inode = ceph_find_inode(sb, vino);
63305         if (!inode) {
63306                 struct ceph_mds_request *req;
63307 @@ -178,8 +182,10 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
63308                 return ERR_CAST(inode);
63309         /* We need LINK caps to reliably check i_nlink */
63310         err = ceph_do_getattr(inode, CEPH_CAP_LINK_SHARED, false);
63311 -       if (err)
63312 +       if (err) {
63313 +               iput(inode);
63314                 return ERR_PTR(err);
63315 +       }
63316         /* -ESTALE if inode as been unlinked and no file is open */
63317         if ((inode->i_nlink == 0) && (atomic_read(&inode->i_count) == 1)) {
63318                 iput(inode);
63319 @@ -212,6 +218,10 @@ static struct dentry *__snapfh_to_dentry(struct super_block *sb,
63320                 vino.ino = sfh->ino;
63321                 vino.snap = sfh->snapid;
63322         }
63324 +       if (ceph_vino_is_reserved(vino))
63325 +               return ERR_PTR(-ESTALE);
63327         inode = ceph_find_inode(sb, vino);
63328         if (inode)
63329                 return d_obtain_alias(inode);
63330 diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
63331 index 156f849f5385..179d2ef69a24 100644
63332 --- a/fs/ceph/inode.c
63333 +++ b/fs/ceph/inode.c
63334 @@ -56,6 +56,9 @@ struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
63336         struct inode *inode;
63338 +       if (ceph_vino_is_reserved(vino))
63339 +               return ERR_PTR(-EREMOTEIO);
63341         inode = iget5_locked(sb, (unsigned long)vino.ino, ceph_ino_compare,
63342                              ceph_set_ino_cb, &vino);
63343         if (!inode)
63344 @@ -87,14 +90,15 @@ struct inode *ceph_get_snapdir(struct inode *parent)
63345         inode->i_mtime = parent->i_mtime;
63346         inode->i_ctime = parent->i_ctime;
63347         inode->i_atime = parent->i_atime;
63348 -       inode->i_op = &ceph_snapdir_iops;
63349 -       inode->i_fop = &ceph_snapdir_fops;
63350 -       ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
63351         ci->i_rbytes = 0;
63352         ci->i_btime = ceph_inode(parent)->i_btime;
63354 -       if (inode->i_state & I_NEW)
63355 +       if (inode->i_state & I_NEW) {
63356 +               inode->i_op = &ceph_snapdir_iops;
63357 +               inode->i_fop = &ceph_snapdir_fops;
63358 +               ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
63359                 unlock_new_inode(inode);
63360 +       }
63362         return inode;
63364 @@ -1863,6 +1867,7 @@ static void ceph_do_invalidate_pages(struct inode *inode)
63365         orig_gen = ci->i_rdcache_gen;
63366         spin_unlock(&ci->i_ceph_lock);
63368 +       ceph_fscache_invalidate(inode);
63369         if (invalidate_inode_pages2(inode->i_mapping) < 0) {
63370                 pr_err("invalidate_pages %p fails\n", inode);
63371         }
63372 diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
63373 index d87bd852ed96..298cb0b3d28c 100644
63374 --- a/fs/ceph/mds_client.c
63375 +++ b/fs/ceph/mds_client.c
63376 @@ -433,6 +433,13 @@ static int ceph_parse_deleg_inos(void **p, void *end,
63378                 ceph_decode_64_safe(p, end, start, bad);
63379                 ceph_decode_64_safe(p, end, len, bad);
63381 +               /* Don't accept a delegation of system inodes */
63382 +               if (start < CEPH_INO_SYSTEM_BASE) {
63383 +                       pr_warn_ratelimited("ceph: ignoring reserved inode range delegation (start=0x%llx len=0x%llx)\n",
63384 +                                       start, len);
63385 +                       continue;
63386 +               }
63387                 while (len--) {
63388                         int err = xa_insert(&s->s_delegated_inos, ino = start++,
63389                                             DELEGATED_INO_AVAILABLE,
63390 diff --git a/fs/ceph/super.h b/fs/ceph/super.h
63391 index c48bb30c8d70..1d2fe70439bd 100644
63392 --- a/fs/ceph/super.h
63393 +++ b/fs/ceph/super.h
63394 @@ -529,10 +529,34 @@ static inline int ceph_ino_compare(struct inode *inode, void *data)
63395                 ci->i_vino.snap == pvino->snap;
63396  }
63398 +/*
63399 + * The MDS reserves a set of inodes for its own usage. These should never
63400 + * be accessible by clients, and so the MDS has no reason to ever hand these
63401 + * out. The range is CEPH_MDS_INO_MDSDIR_OFFSET..CEPH_INO_SYSTEM_BASE.
63402 + *
63403 + * These come from src/mds/mdstypes.h in the ceph sources.
63404 + */
63405 +#define CEPH_MAX_MDS           0x100
63406 +#define CEPH_NUM_STRAY         10
63407 +#define CEPH_MDS_INO_MDSDIR_OFFSET     (1 * CEPH_MAX_MDS)
63408 +#define CEPH_INO_SYSTEM_BASE           ((6*CEPH_MAX_MDS) + (CEPH_MAX_MDS * CEPH_NUM_STRAY))
63410 +static inline bool ceph_vino_is_reserved(const struct ceph_vino vino)
63411 +{
63412 +       if (vino.ino < CEPH_INO_SYSTEM_BASE &&
63413 +           vino.ino >= CEPH_MDS_INO_MDSDIR_OFFSET) {
63414 +               WARN_RATELIMIT(1, "Attempt to access reserved inode number 0x%llx", vino.ino);
63415 +               return true;
63416 +       }
63417 +       return false;
63418 +}
63420  static inline struct inode *ceph_find_inode(struct super_block *sb,
63421                                             struct ceph_vino vino)
63423 +       if (ceph_vino_is_reserved(vino))
63424 +               return NULL;
63426         /*
63427          * NB: The hashval will be run through the fs/inode.c hash function
63428          * anyway, so there is no need to squash the inode number down to
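
With the constants above, the reserved window works out to [0x100, 0x1000): CEPH_MDS_INO_MDSDIR_OFFSET is 1 * 0x100, and CEPH_INO_SYSTEM_BASE is 6 * 0x100 + 0x100 * 10 = 0x1000. A standalone check of that arithmetic (vino_is_reserved models the new helper on a bare integer, without the WARN):

#include <stdbool.h>
#include <stdio.h>

#define CEPH_MAX_MDS                    0x100
#define CEPH_NUM_STRAY                  10
#define CEPH_MDS_INO_MDSDIR_OFFSET      (1 * CEPH_MAX_MDS)
#define CEPH_INO_SYSTEM_BASE \
        ((6 * CEPH_MAX_MDS) + (CEPH_MAX_MDS * CEPH_NUM_STRAY))

static bool vino_is_reserved(unsigned long long ino)
{
        return ino >= CEPH_MDS_INO_MDSDIR_OFFSET &&
               ino < CEPH_INO_SYSTEM_BASE;
}

int main(void)
{
        /* reserved window is [0x100, 0x1000) with these constants */
        printf("0x200: %d\n", vino_is_reserved(0x200));   /* 1 */
        printf("0x1000: %d\n", vino_is_reserved(0x1000)); /* 0 */
        printf("1: %d\n", vino_is_reserved(1));           /* 0, root inode */
        return 0;
}
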
63429 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
63430 index 5ddd20b62484..fa896a1c8b07 100644
63431 --- a/fs/cifs/cifsfs.c
63432 +++ b/fs/cifs/cifsfs.c
63433 @@ -834,7 +834,7 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
63434                 goto out;
63435         }
63437 -       rc = cifs_setup_volume_info(cifs_sb->ctx, NULL, old_ctx->UNC);
63438 +       rc = cifs_setup_volume_info(cifs_sb->ctx, NULL, NULL);
63439         if (rc) {
63440                 root = ERR_PTR(rc);
63441                 goto out;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 24668eb006c6..3d62d52d730b 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -488,6 +488,7 @@ server_unresponsive(struct TCP_Server_Info *server)
         */
        if ((server->tcpStatus == CifsGood ||
            server->tcpStatus == CifsNeedNegotiate) &&
+           (!server->ops->can_echo || server->ops->can_echo(server)) &&
            time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
                cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n",
                         (3 * server->echo_interval) / HZ);
@@ -3175,17 +3176,29 @@ static int do_dfs_failover(const char *path, const char *full_path, struct cifs_
 int
 cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const char *devname)
 {
-       int rc = 0;
+       int rc;
 
-       smb3_parse_devname(devname, ctx);
+       if (devname) {
+               cifs_dbg(FYI, "%s: devname=%s\n", __func__, devname);
+               rc = smb3_parse_devname(devname, ctx);
+               if (rc) {
+                       cifs_dbg(VFS, "%s: failed to parse %s: %d\n", __func__, devname, rc);
+                       return rc;
+               }
+       }
 
        if (mntopts) {
                char *ip;
 
-               cifs_dbg(FYI, "%s: mntopts=%s\n", __func__, mntopts);
                rc = smb3_parse_opt(mntopts, "ip", &ip);
-               if (!rc && !cifs_convert_address((struct sockaddr *)&ctx->dstaddr, ip,
-                                                strlen(ip))) {
+               if (rc) {
+                       cifs_dbg(VFS, "%s: failed to parse ip options: %d\n", __func__, rc);
+                       return rc;
+               }
+
+               rc = cifs_convert_address((struct sockaddr *)&ctx->dstaddr, ip, strlen(ip));
+               kfree(ip);
+               if (!rc) {
                        cifs_dbg(VFS, "%s: failed to convert ip address\n", __func__);
                        return -EINVAL;
                }
@@ -3205,7 +3218,7 @@ cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const c
                return -EINVAL;
        }
 
-       return rc;
+       return 0;
 }
 
 static int
diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
index 78889024a7ed..a7253eb2e955 100644
--- a/fs/cifs/fs_context.c
+++ b/fs/cifs/fs_context.c
@@ -475,6 +475,7 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
 
        /* move "pos" up to delimiter or NULL */
        pos += len;
+       kfree(ctx->UNC);
        ctx->UNC = kstrndup(devname, pos - devname, GFP_KERNEL);
        if (!ctx->UNC)
                return -ENOMEM;
@@ -485,6 +486,9 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
        if (*pos == '/' || *pos == '\\')
                pos++;
 
+       kfree(ctx->prepath);
+       ctx->prepath = NULL;
+
        /* If pos is NULL then no prepath */
        if (!*pos)
                return 0;
@@ -995,6 +999,9 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
                        goto cifs_parse_mount_err;
                }
                ctx->max_channels = result.uint_32;
+               /* If more than one channel requested ... they want multichan */
+               if (result.uint_32 > 1)
+                       ctx->multichannel = true;
                break;
        case Opt_handletimeout:
                ctx->handle_timeout = result.uint_32;
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 63d517b9f2ff..a92a1fb7cb52 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -97,6 +97,12 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
                return 0;
        }
 
+       if (!(ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
+               cifs_dbg(VFS, "server %s does not support multichannel\n", ses->server->hostname);
+               ses->chan_max = 1;
+               return 0;
+       }
+
        /*
         * Make a copy of the iface list at the time and use that
         * instead so as to not hold the iface spinlock for opening
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index f703204fb185..e9a530da4255 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -1763,18 +1763,14 @@ smb2_ioctl_query_info(const unsigned int xid,
        }
 
  iqinf_exit:
-       kfree(vars);
-       kfree(buffer);
-       SMB2_open_free(&rqst[0]);
-       if (qi.flags & PASSTHRU_FSCTL)
-               SMB2_ioctl_free(&rqst[1]);
-       else
-               SMB2_query_info_free(&rqst[1]);
-
-       SMB2_close_free(&rqst[2]);
+       cifs_small_buf_release(rqst[0].rq_iov[0].iov_base);
+       cifs_small_buf_release(rqst[1].rq_iov[0].iov_base);
+       cifs_small_buf_release(rqst[2].rq_iov[0].iov_base);
        free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
        free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
        free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
+       kfree(vars);
+       kfree(buffer);
        return rc;
 
  e_fault:
@@ -1826,6 +1822,8 @@ smb2_copychunk_range(const unsigned int xid,
                        cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
 
                /* Request server copy to target from src identified by key */
+               kfree(retbuf);
+               retbuf = NULL;
                rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
                        trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
                        true /* is_fsctl */, (char *)pcchunk,
@@ -2232,7 +2230,7 @@ smb3_notify(const unsigned int xid, struct file *pfile,
 
        cifs_sb = CIFS_SB(inode->i_sb);
 
-       utf16_path = cifs_convert_path_to_utf16(path + 1, cifs_sb);
+       utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
        if (utf16_path == NULL) {
                rc = -ENOMEM;
                goto notify_exit;
@@ -4178,7 +4176,7 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
        }
        spin_unlock(&cifs_tcp_ses_lock);
 
-       return 1;
+       return -EAGAIN;
 }
 /*
  * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 2199a9bfae8f..29272d99102c 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -841,6 +841,8 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
                req->SecurityMode = 0;
 
        req->Capabilities = cpu_to_le32(server->vals->req_capabilities);
+       if (ses->chan_max > 1)
+               req->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
 
        /* ClientGUID must be zero for SMB2.02 dialect */
        if (server->vals->protocol_id == SMB20_PROT_ID)
@@ -1032,6 +1034,9 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
 
        pneg_inbuf->Capabilities =
                        cpu_to_le32(server->vals->req_capabilities);
+       if (tcon->ses->chan_max > 1)
+               pneg_inbuf->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
+
        memcpy(pneg_inbuf->Guid, server->client_guid,
                                        SMB2_CLIENT_GUID_SIZE);
 
diff --git a/fs/dax.c b/fs/dax.c
index b3d27fdc6775..df5485b4bddf 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -144,6 +144,16 @@ struct wait_exceptional_entry_queue {
        struct exceptional_entry_key key;
 };
 
+/**
+ * enum dax_wake_mode: waitqueue wakeup behaviour
+ * @WAKE_ALL: wake all waiters in the waitqueue
+ * @WAKE_NEXT: wake only the first waiter in the waitqueue
+ */
+enum dax_wake_mode {
+       WAKE_ALL,
+       WAKE_NEXT,
+};
+
 static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
                void *entry, struct exceptional_entry_key *key)
 {
@@ -182,7 +192,8 @@ static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
  * The important information it's conveying is whether the entry at
  * this index used to be a PMD entry.
  */
-static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
+static void dax_wake_entry(struct xa_state *xas, void *entry,
+                          enum dax_wake_mode mode)
 {
        struct exceptional_entry_key key;
        wait_queue_head_t *wq;
@@ -196,7 +207,7 @@ static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
         * must be in the waitqueue and the following check will see them.
         */
        if (waitqueue_active(wq))
-               __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
+               __wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
 }
 
 /*
@@ -264,11 +275,11 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
        finish_wait(wq, &ewait.wait);
 }
 
-static void put_unlocked_entry(struct xa_state *xas, void *entry)
+static void put_unlocked_entry(struct xa_state *xas, void *entry,
+                              enum dax_wake_mode mode)
 {
-       /* If we were the only waiter woken, wake the next one */
        if (entry && !dax_is_conflict(entry))
-               dax_wake_entry(xas, entry, false);
+               dax_wake_entry(xas, entry, mode);
 }
 
 /*
@@ -286,7 +297,7 @@ static void dax_unlock_entry(struct xa_state *xas, void *entry)
        old = xas_store(xas, entry);
        xas_unlock_irq(xas);
        BUG_ON(!dax_is_locked(old));
-       dax_wake_entry(xas, entry, false);
+       dax_wake_entry(xas, entry, WAKE_NEXT);
 }
 
 /*
@@ -524,7 +535,7 @@ static void *grab_mapping_entry(struct xa_state *xas,
 
                dax_disassociate_entry(entry, mapping, false);
                xas_store(xas, NULL);   /* undo the PMD join */
-               dax_wake_entry(xas, entry, true);
+               dax_wake_entry(xas, entry, WAKE_ALL);
                mapping->nrexceptional--;
                entry = NULL;
                xas_set(xas, index);
@@ -622,7 +633,7 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping,
                        entry = get_unlocked_entry(&xas, 0);
                if (entry)
                        page = dax_busy_page(entry);
-               put_unlocked_entry(&xas, entry);
+               put_unlocked_entry(&xas, entry, WAKE_NEXT);
                if (page)
                        break;
                if (++scanned % XA_CHECK_SCHED)
@@ -664,7 +675,7 @@ static int __dax_invalidate_entry(struct address_space *mapping,
        mapping->nrexceptional--;
        ret = 1;
 out:
-       put_unlocked_entry(&xas, entry);
+       put_unlocked_entry(&xas, entry, WAKE_ALL);
        xas_unlock_irq(&xas);
        return ret;
 }
@@ -937,13 +948,13 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
        xas_lock_irq(xas);
        xas_store(xas, entry);
        xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
-       dax_wake_entry(xas, entry, false);
+       dax_wake_entry(xas, entry, WAKE_NEXT);
 
        trace_dax_writeback_one(mapping->host, index, count);
        return ret;
 
 put_unlocked:
-       put_unlocked_entry(xas, entry);
+       put_unlocked_entry(xas, entry, WAKE_NEXT);
        return ret;
 }
 
@@ -1684,7 +1695,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
        /* Did we race with someone splitting entry or so? */
        if (!entry || dax_is_conflict(entry) ||
            (order == 0 && !dax_is_pte_entry(entry))) {
-               put_unlocked_entry(&xas, entry);
+               put_unlocked_entry(&xas, entry, WAKE_NEXT);
                xas_unlock_irq(&xas);
                trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
                                                      VM_FAULT_NOPAGE);
diff --git a/fs/dcache.c b/fs/dcache.c
index 7d24ff7eb206..9deb97404201 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -71,7 +71,7 @@
  * If no ancestor relationship:
  * arbitrary, since it's serialized on rename_lock
  */
-int sysctl_vfs_cache_pressure __read_mostly = 100;
+int sysctl_vfs_cache_pressure __read_mostly = 50;
 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
 
 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 22e86ae4dd5a..1d252164d97b 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -35,7 +35,7 @@
 static struct vfsmount *debugfs_mount;
 static int debugfs_mount_count;
 static bool debugfs_registered;
-static unsigned int debugfs_allow = DEFAULT_DEBUGFS_ALLOW_BITS;
+static unsigned int debugfs_allow __ro_after_init = DEFAULT_DEBUGFS_ALLOW_BITS;
 
 /*
  * Don't allow access attributes to be changed whilst the kernel is locked down
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index 49c5f9407098..88d95d96e36c 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -125,7 +125,7 @@ static ssize_t cluster_cluster_name_store(struct config_item *item,
 CONFIGFS_ATTR(cluster_, cluster_name);
 
 static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field,
-                          int *info_field, bool (*check_cb)(unsigned int x),
+                          int *info_field, int (*check_cb)(unsigned int x),
                           const char *buf, size_t len)
 {
        unsigned int x;
@@ -137,8 +137,11 @@ static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field,
        if (rc)
                return rc;
 
-       if (check_cb && check_cb(x))
-               return -EINVAL;
+       if (check_cb) {
+               rc = check_cb(x);
+               if (rc)
+                       return rc;
+       }
 
        *cl_field = x;
        *info_field = x;
@@ -161,17 +164,53 @@ static ssize_t cluster_##name##_show(struct config_item *item, char *buf)     \
 }                                                                             \
 CONFIGFS_ATTR(cluster_, name);
 
-static bool dlm_check_zero(unsigned int x)
+static int dlm_check_protocol_and_dlm_running(unsigned int x)
+{
+       switch (x) {
+       case 0:
+               /* TCP */
+               break;
+       case 1:
+               /* SCTP */
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (dlm_allow_conn)
+               return -EBUSY;
+
+       return 0;
+}
+
+static int dlm_check_zero_and_dlm_running(unsigned int x)
+{
+       if (!x)
+               return -EINVAL;
+
+       if (dlm_allow_conn)
+               return -EBUSY;
+
+       return 0;
+}
+
+static int dlm_check_zero(unsigned int x)
 {
-       return !x;
+       if (!x)
+               return -EINVAL;
+
+       return 0;
 }
 
-static bool dlm_check_buffer_size(unsigned int x)
+static int dlm_check_buffer_size(unsigned int x)
 {
-       return (x < DEFAULT_BUFFER_SIZE);
+       if (x < DEFAULT_BUFFER_SIZE)
+               return -EINVAL;
+
+       return 0;
 }
 
-CLUSTER_ATTR(tcp_port, dlm_check_zero);
+CLUSTER_ATTR(tcp_port, dlm_check_zero_and_dlm_running);
 CLUSTER_ATTR(buffer_size, dlm_check_buffer_size);
 CLUSTER_ATTR(rsbtbl_size, dlm_check_zero);
 CLUSTER_ATTR(recover_timer, dlm_check_zero);
@@ -179,7 +218,7 @@ CLUSTER_ATTR(toss_secs, dlm_check_zero);
 CLUSTER_ATTR(scan_secs, dlm_check_zero);
 CLUSTER_ATTR(log_debug, NULL);
 CLUSTER_ATTR(log_info, NULL);
-CLUSTER_ATTR(protocol, NULL);
+CLUSTER_ATTR(protocol, dlm_check_protocol_and_dlm_running);
 CLUSTER_ATTR(mark, NULL);
 CLUSTER_ATTR(timewarn_cs, dlm_check_zero);
 CLUSTER_ATTR(waitwarn_us, NULL);
@@ -688,6 +727,7 @@ static ssize_t comm_mark_show(struct config_item *item, char *buf)
 static ssize_t comm_mark_store(struct config_item *item, const char *buf,
                               size_t len)
 {
+       struct dlm_comm *comm;
        unsigned int mark;
        int rc;
 
@@ -695,7 +735,15 @@ static ssize_t comm_mark_store(struct config_item *item, const char *buf,
        if (rc)
                return rc;
 
-       config_item_to_comm(item)->mark = mark;
+       if (mark == 0)
+               mark = dlm_config.ci_mark;
+
+       comm = config_item_to_comm(item);
+       rc = dlm_lowcomms_nodes_set_mark(comm->nodeid, mark);
+       if (rc)
+               return rc;
+
+       comm->mark = mark;
        return len;
 }
 
@@ -870,24 +918,6 @@ int dlm_comm_seq(int nodeid, uint32_t *seq)
        return 0;
 }
 
-void dlm_comm_mark(int nodeid, unsigned int *mark)
-{
-       struct dlm_comm *cm;
-
-       cm = get_comm(nodeid);
-       if (!cm) {
-               *mark = dlm_config.ci_mark;
-               return;
-       }
-
-       if (cm->mark)
-               *mark = cm->mark;
-       else
-               *mark = dlm_config.ci_mark;
-
-       put_comm(cm);
-}
-
 int dlm_our_nodeid(void)
 {
        return local_comm ? local_comm->nodeid : 0;
diff --git a/fs/dlm/config.h b/fs/dlm/config.h
index c210250a2581..d2cd4bd20313 100644
--- a/fs/dlm/config.h
+++ b/fs/dlm/config.h
@@ -48,7 +48,6 @@ void dlm_config_exit(void);
 int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
                     int *count_out);
 int dlm_comm_seq(int nodeid, uint32_t *seq);
-void dlm_comm_mark(int nodeid, unsigned int *mark);
 int dlm_our_nodeid(void);
 int dlm_our_addr(struct sockaddr_storage *addr, int num);
 
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index d6bbccb0ed15..d5bd990bcab8 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -542,6 +542,7 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
 
                if (bucket >= ls->ls_rsbtbl_size) {
                        kfree(ri);
+                       ++*pos;
                        return NULL;
                }
                tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 561dcad08ad6..c14cf2b7faab 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -404,12 +404,6 @@ static int threads_start(void)
        return error;
 }
 
-static void threads_stop(void)
-{
-       dlm_scand_stop();
-       dlm_lowcomms_stop();
-}
-
 static int new_lockspace(const char *name, const char *cluster,
                         uint32_t flags, int lvblen,
                         const struct dlm_lockspace_ops *ops, void *ops_arg,
@@ -702,8 +696,11 @@ int dlm_new_lockspace(const char *name, const char *cluster,
                ls_count++;
        if (error > 0)
                error = 0;
-       if (!ls_count)
-               threads_stop();
+       if (!ls_count) {
+               dlm_scand_stop();
+               dlm_lowcomms_shutdown();
+               dlm_lowcomms_stop();
+       }
  out:
        mutex_unlock(&ls_lock);
        return error;
@@ -788,6 +785,11 @@ static int release_lockspace(struct dlm_ls *ls, int force)
 
        dlm_recoverd_stop(ls);
 
+       if (ls_count == 1) {
+               dlm_scand_stop();
+               dlm_lowcomms_shutdown();
+       }
+
        dlm_callback_stop(ls);
 
        remove_lockspace(ls);
@@ -880,7 +882,7 @@ int dlm_release_lockspace(void *lockspace, int force)
        if (!error)
                ls_count--;
        if (!ls_count)
-               threads_stop();
+               dlm_lowcomms_stop();
        mutex_unlock(&ls_lock);
 
        return error;
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 372c34ff8594..45c2fdaf34c4 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -116,6 +116,7 @@ struct writequeue_entry {
 struct dlm_node_addr {
        struct list_head list;
        int nodeid;
+       int mark;
        int addr_count;
        int curr_addr_index;
        struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
@@ -134,7 +135,7 @@ static DEFINE_SPINLOCK(dlm_node_addrs_spin);
 static struct listen_connection listen_con;
 static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
 static int dlm_local_count;
-static int dlm_allow_conn;
+int dlm_allow_conn;
 
 /* Work queues */
 static struct workqueue_struct *recv_workqueue;
@@ -303,7 +304,8 @@ static int addr_compare(const struct sockaddr_storage *x,
 }
 
 static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
-                         struct sockaddr *sa_out, bool try_new_addr)
+                         struct sockaddr *sa_out, bool try_new_addr,
+                         unsigned int *mark)
 {
        struct sockaddr_storage sas;
        struct dlm_node_addr *na;
@@ -331,6 +333,8 @@ static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
        if (!na->addr_count)
                return -ENOENT;
 
+       *mark = na->mark;
+
        if (sas_out)
                memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));
 
@@ -350,7 +354,8 @@ static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
        return 0;
 }
 
-static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
+static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid,
+                         unsigned int *mark)
 {
        struct dlm_node_addr *na;
        int rv = -EEXIST;
@@ -364,6 +369,7 @@ static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
                for (addr_i = 0; addr_i < na->addr_count; addr_i++) {
                        if (addr_compare(na->addr[addr_i], addr)) {
                                *nodeid = na->nodeid;
+                               *mark = na->mark;
                                rv = 0;
                                goto unlock;
                        }
@@ -412,6 +418,7 @@ int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
                new_node->nodeid = nodeid;
                new_node->addr[0] = new_addr;
                new_node->addr_count = 1;
+               new_node->mark = dlm_config.ci_mark;
                list_add(&new_node->list, &dlm_node_addrs);
                spin_unlock(&dlm_node_addrs_spin);
                return 0;
@@ -519,6 +526,23 @@ int dlm_lowcomms_connect_node(int nodeid)
        return 0;
 }
 
+int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark)
+{
+       struct dlm_node_addr *na;
+
+       spin_lock(&dlm_node_addrs_spin);
+       na = find_node_addr(nodeid);
+       if (!na) {
+               spin_unlock(&dlm_node_addrs_spin);
+               return -ENOENT;
+       }
+
+       na->mark = mark;
+       spin_unlock(&dlm_node_addrs_spin);
+
+       return 0;
+}
+
 static void lowcomms_error_report(struct sock *sk)
 {
        struct connection *con;
@@ -685,10 +709,7 @@ static void shutdown_connection(struct connection *con)
 {
        int ret;
 
-       if (cancel_work_sync(&con->swork)) {
-               log_print("canceled swork for node %d", con->nodeid);
-               clear_bit(CF_WRITE_PENDING, &con->flags);
-       }
+       flush_work(&con->swork);
 
        mutex_lock(&con->sock_mutex);
        /* nothing to shutdown */
@@ -867,7 +888,7 @@ static int accept_from_sock(struct listen_connection *con)
 
        /* Get the new node's NODEID */
        make_sockaddr(&peeraddr, 0, &len);
-       if (addr_to_nodeid(&peeraddr, &nodeid)) {
+       if (addr_to_nodeid(&peeraddr, &nodeid, &mark)) {
                unsigned char *b=(unsigned char *)&peeraddr;
                log_print("connect from non cluster node");
                print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE, 
@@ -876,9 +897,6 @@ static int accept_from_sock(struct listen_connection *con)
                return -1;
        }
 
-       dlm_comm_mark(nodeid, &mark);
-       sock_set_mark(newsock->sk, mark);
-
        log_print("got connection from %d", nodeid);
 
        /*  Check to see if we already have a connection to this node. This
@@ -892,6 +910,8 @@ static int accept_from_sock(struct listen_connection *con)
                goto accept_err;
        }
 
+       sock_set_mark(newsock->sk, mark);
+
        mutex_lock(&newcon->sock_mutex);
        if (newcon->sock) {
                struct connection *othercon = newcon->othercon;
@@ -908,6 +928,7 @@ static int accept_from_sock(struct listen_connection *con)
                        result = dlm_con_init(othercon, nodeid);
                        if (result < 0) {
                                kfree(othercon);
+                               mutex_unlock(&newcon->sock_mutex);
                                goto accept_err;
                        }
 
@@ -1015,8 +1036,6 @@ static void sctp_connect_to_sock(struct connection *con)
        struct socket *sock;
        unsigned int mark;
 
-       dlm_comm_mark(con->nodeid, &mark);
-
        mutex_lock(&con->sock_mutex);
 
        /* Some odd races can cause double-connects, ignore them */
@@ -1029,7 +1048,7 @@ static void sctp_connect_to_sock(struct connection *con)
        }
 
        memset(&daddr, 0, sizeof(daddr));
-       result = nodeid_to_addr(con->nodeid, &daddr, NULL, true);
+       result = nodeid_to_addr(con->nodeid, &daddr, NULL, true, &mark);
        if (result < 0) {
                log_print("no address for nodeid %d", con->nodeid);
                goto out;
@@ -1104,13 +1123,11 @@ static void sctp_connect_to_sock(struct connection *con)
 static void tcp_connect_to_sock(struct connection *con)
 {
        struct sockaddr_storage saddr, src_addr;
+       unsigned int mark;
        int addr_len;
        struct socket *sock = NULL;
-       unsigned int mark;
        int result;
 
-       dlm_comm_mark(con->nodeid, &mark);
-
        mutex_lock(&con->sock_mutex);
        if (con->retries++ > MAX_CONNECT_RETRIES)
                goto out;
@@ -1125,15 +1142,15 @@ static void tcp_connect_to_sock(struct connection *con)
        if (result < 0)
                goto out_err;
 
-       sock_set_mark(sock->sk, mark);
-
        memset(&saddr, 0, sizeof(saddr));
-       result = nodeid_to_addr(con->nodeid, &saddr, NULL, false);
+       result = nodeid_to_addr(con->nodeid, &saddr, NULL, false, &mark);
        if (result < 0) {
                log_print("no address for nodeid %d", con->nodeid);
                goto out_err;
        }
 
+       sock_set_mark(sock->sk, mark);
+
        add_sock(sock, con);
 
        /* Bind to our cluster-known address connecting to avoid
@@ -1355,9 +1372,11 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
        struct writequeue_entry *e;
        int offset = 0;
 
-       if (len > LOWCOMMS_MAX_TX_BUFFER_LEN) {
-               BUILD_BUG_ON(PAGE_SIZE < LOWCOMMS_MAX_TX_BUFFER_LEN);
+       if (len > DEFAULT_BUFFER_SIZE ||
+           len < sizeof(struct dlm_header)) {
+               BUILD_BUG_ON(PAGE_SIZE < DEFAULT_BUFFER_SIZE);
                log_print("failed to allocate a buffer of size %d", len);
+               WARN_ON(1);
                return NULL;
        }
 
@@ -1589,6 +1608,29 @@ static int work_start(void)
        return 0;
 }
 
+static void shutdown_conn(struct connection *con)
+{
+       if (con->shutdown_action)
+               con->shutdown_action(con);
+}
+
+void dlm_lowcomms_shutdown(void)
+{
+       /* Set all the flags to prevent any
+        * socket activity.
+        */
+       dlm_allow_conn = 0;
+
+       if (recv_workqueue)
+               flush_workqueue(recv_workqueue);
+       if (send_workqueue)
+               flush_workqueue(send_workqueue);
+
+       dlm_close_sock(&listen_con.sock);
+
+       foreach_conn(shutdown_conn);
+}
+
 static void _stop_conn(struct connection *con, bool and_other)
 {
        mutex_lock(&con->sock_mutex);
@@ -1610,12 +1652,6 @@ static void stop_conn(struct connection *con)
        _stop_conn(con, true);
 }
 
-static void shutdown_conn(struct connection *con)
-{
-       if (con->shutdown_action)
-               con->shutdown_action(con);
-}
-
 static void connection_release(struct rcu_head *rcu)
 {
        struct connection *con = container_of(rcu, struct connection, rcu);
@@ -1672,19 +1708,6 @@ static void work_flush(void)
 
 void dlm_lowcomms_stop(void)
 {
-       /* Set all the flags to prevent any
-          socket activity.
-       */
-       dlm_allow_conn = 0;
-
-       if (recv_workqueue)
-               flush_workqueue(recv_workqueue);
-       if (send_workqueue)
-               flush_workqueue(send_workqueue);
-
-       dlm_close_sock(&listen_con.sock);
-
-       foreach_conn(shutdown_conn);
        work_flush();
        foreach_conn(free_conn);
        work_stop();
diff --git a/fs/dlm/lowcomms.h b/fs/dlm/lowcomms.h
index 0918f9376489..48bbc4e18761 100644
--- a/fs/dlm/lowcomms.h
+++ b/fs/dlm/lowcomms.h
@@ -14,13 +14,18 @@
 
 #define LOWCOMMS_MAX_TX_BUFFER_LEN     4096
 
+/* switch to check if dlm is running */
+extern int dlm_allow_conn;
+
 int dlm_lowcomms_start(void);
+void dlm_lowcomms_shutdown(void);
 void dlm_lowcomms_stop(void);
 void dlm_lowcomms_exit(void);
 int dlm_lowcomms_close(int nodeid);
 void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc);
 void dlm_lowcomms_commit_buffer(void *mh);
 int dlm_lowcomms_connect_node(int nodeid);
+int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark);
 int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len);
 
 #endif                         /* __LOWCOMMS_DOT_H__ */
diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
index fde3a6afe4be..0bedfa8606a2 100644
--- a/fs/dlm/midcomms.c
+++ b/fs/dlm/midcomms.c
@@ -49,9 +49,10 @@ int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int len)
                 * cannot deliver this message to upper layers
                 */
                msglen = get_unaligned_le16(&hd->h_length);
-               if (msglen > DEFAULT_BUFFER_SIZE) {
-                       log_print("received invalid length header: %u, will abort message parsing",
-                                 msglen);
+               if (msglen > DEFAULT_BUFFER_SIZE ||
+                   msglen < sizeof(struct dlm_header)) {
+                       log_print("received invalid length header: %u from node %d, will abort message parsing",
+                                 msglen, nodeid);
                        return -EBADMSG;
                }
 
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 943e523f4c9d..3d8623139538 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -296,10 +296,8 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
        struct extent_crypt_result ecr;
        int rc = 0;
 
-       if (!crypt_stat || !crypt_stat->tfm
-              || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED))
-               return -EINVAL;
-
+       BUG_ON(!crypt_stat || !crypt_stat->tfm
+              || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED));
        if (unlikely(ecryptfs_verbosity > 0)) {
                ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n",
                                crypt_stat->key_size);
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index cdf40a54a35d..cf772c72ab2b 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -492,6 +492,12 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
                goto out;
        }
 
+       if (!dev_name) {
+               rc = -EINVAL;
+               err = "Device name cannot be null";
+               goto out;
+       }
+
        rc = ecryptfs_parse_options(sbi, raw_data, &check_ruid);
        if (rc) {
                err = "Error parsing options";
diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h
index 9ad1615f4474..e8d04d808fa6 100644
--- a/fs/erofs/erofs_fs.h
+++ b/fs/erofs/erofs_fs.h
@@ -75,6 +75,9 @@ static inline bool erofs_inode_is_data_compressed(unsigned int datamode)
 #define EROFS_I_VERSION_BIT             0
 #define EROFS_I_DATALAYOUT_BIT          1
 
+#define EROFS_I_ALL    \
+       ((1 << (EROFS_I_DATALAYOUT_BIT + EROFS_I_DATALAYOUT_BITS)) - 1)
+
 /* 32-byte reduced form of an ondisk inode */
 struct erofs_inode_compact {
        __le16 i_format;        /* inode format hints */
diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
index 119fdce1b520..7ed2d7391692 100644
--- a/fs/erofs/inode.c
+++ b/fs/erofs/inode.c
@@ -44,6 +44,13 @@ static struct page *erofs_read_inode(struct inode *inode,
        dic = page_address(page) + *ofs;
        ifmt = le16_to_cpu(dic->i_format);
 
+       if (ifmt & ~EROFS_I_ALL) {
+               erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu",
+                         ifmt, vi->nid);
+               err = -EOPNOTSUPP;
+               goto err_out;
+       }
+
        vi->datalayout = erofs_inode_datalayout(ifmt);
        if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
                erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 3196474cbe24..e42477fcbfa0 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -657,6 +657,12 @@ static void ep_done_scan(struct eventpoll *ep,
         */
        list_splice(txlist, &ep->rdllist);
        __pm_relax(ep->ws);
+
+       if (!list_empty(&ep->rdllist)) {
+               if (waitqueue_active(&ep->wq))
+                       wake_up(&ep->wq);
+       }
+
        write_unlock_irq(&ep->lock);
 }
 
diff --git a/fs/exec.c b/fs/exec.c
index 18594f11c31f..c691d4d7720c 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1008,6 +1008,7 @@ static int exec_mmap(struct mm_struct *mm)
        active_mm = tsk->active_mm;
        tsk->active_mm = mm;
        tsk->mm = mm;
+       lru_gen_add_mm(mm);
        /*
         * This prevents preemption while active_mm is being loaded and
         * it and mm are being updated, which could cause problems for
@@ -1018,6 +1019,7 @@ static int exec_mmap(struct mm_struct *mm)
        if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
                local_irq_enable();
        activate_mm(active_mm, mm);
+       lru_gen_switch_mm(active_mm, mm);
        if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
                local_irq_enable();
        tsk->mm->vmacache_seqnum = 0;
diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
index 761c79c3a4ba..411fb0a8da10 100644
--- a/fs/exfat/balloc.c
+++ b/fs/exfat/balloc.c
@@ -141,10 +141,6 @@ void exfat_free_bitmap(struct exfat_sb_info *sbi)
        kfree(sbi->vol_amap);
 }
 
-/*
- * If the value of "clu" is 0, it means cluster 2 which is the first cluster of
- * the cluster heap.
- */
 int exfat_set_bitmap(struct inode *inode, unsigned int clu)
 {
        int i, b;
@@ -162,10 +158,6 @@ int exfat_set_bitmap(struct inode *inode, unsigned int clu)
        return 0;
 }
 
-/*
- * If the value of "clu" is 0, it means cluster 2 which is the first cluster of
- * the cluster heap.
- */
 void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync)
 {
        int i, b;
@@ -186,8 +178,7 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync)
        int ret_discard;
 
        ret_discard = sb_issue_discard(sb,
-                       exfat_cluster_to_sector(sbi, clu +
-                                               EXFAT_RESERVED_CLUSTERS),
+                       exfat_cluster_to_sector(sbi, clu),
                        (1 << sbi->sect_per_clus_bits), GFP_NOFS, 0);
 
                if (ret_discard == -EOPNOTSUPP) {
diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
index 7541d0b5d706..eda14f630def 100644
--- a/fs/ext4/fast_commit.c
+++ b/fs/ext4/fast_commit.c
@@ -1088,8 +1088,10 @@ static int ext4_fc_perform_commit(journal_t *journal)
                head.fc_tid = cpu_to_le32(
                        sbi->s_journal->j_running_transaction->t_tid);
                if (!ext4_fc_add_tlv(sb, EXT4_FC_TAG_HEAD, sizeof(head),
-                       (u8 *)&head, &crc))
+                       (u8 *)&head, &crc)) {
+                       ret = -ENOSPC;
                        goto out;
+               }
        }
 
        spin_lock(&sbi->s_fc_lock);
@@ -1734,7 +1736,7 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
                }
 
                /* Range is mapped and needs a state change */
-               jbd_debug(1, "Converting from %d to %d %lld",
+               jbd_debug(1, "Converting from %ld to %d %lld",
                                map.m_flags & EXT4_MAP_UNWRITTEN,
                        ext4_ext_is_unwritten(ex), map.m_pblk);
                ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 194f5d00fa32..7924634ab0bf 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -371,15 +371,32 @@ static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
 static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
                                 int error, unsigned int flags)
 {
-       loff_t offset = iocb->ki_pos;
+       loff_t pos = iocb->ki_pos;
        struct inode *inode = file_inode(iocb->ki_filp);
 
        if (error)
                return error;
 
-       if (size && flags & IOMAP_DIO_UNWRITTEN)
-               return ext4_convert_unwritten_extents(NULL, inode,
-                                                     offset, size);
+       if (size && flags & IOMAP_DIO_UNWRITTEN) {
+               error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
+               if (error < 0)
+                       return error;
+       }
+       /*
+        * If we are extending the file, we have to update i_size here before
+        * page cache gets invalidated in iomap_dio_rw(). Otherwise racing
+        * buffered reads could zero out too much from page cache pages. Update
+        * of on-disk size will happen later in ext4_dio_write_iter() where
+        * we have enough information to also perform orphan list handling etc.
+        * Note that we perform all extending writes synchronously under
+        * i_rwsem held exclusively so i_size update is safe here in that case.
+        * If the write was not extending, we cannot see pos > i_size here
+        * because operations reducing i_size like truncate wait for all
+        * outstanding DIO before updating i_size.
+        */
+       pos += size;
+       if (pos > i_size_read(inode))
+               i_size_write(inode, pos);
 
        return 0;
 }
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 633ae7becd61..71d321b3b984 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1292,7 +1292,8 @@ struct inode *__ext4_new_inode(struct user_namespace *mnt_userns,
 
        ei->i_extra_isize = sbi->s_want_extra_isize;
        ei->i_inline_off = 0;
-       if (ext4_has_feature_inline_data(sb))
+       if (ext4_has_feature_inline_data(sb) &&
+           (!(ei->i_flags & EXT4_DAX_FL) || S_ISDIR(mode)))
                ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
        ret = inode;
        err = dquot_alloc_inode(inode);
@@ -1513,6 +1514,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
        handle_t *handle;
        ext4_fsblk_t blk;
        int num, ret = 0, used_blks = 0;
+       unsigned long used_inos = 0;
 
        /* This should not happen, but just to be sure check this */
        if (sb_rdonly(sb)) {
@@ -1543,22 +1545,37 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
         * used inodes so we need to skip blocks with used inodes in
         * inode table.
         */
-       if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
-               used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
-                           ext4_itable_unused_count(sb, gdp)),
-                           sbi->s_inodes_per_block);
-
-       if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
-           ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
-                              ext4_itable_unused_count(sb, gdp)) <
-                             EXT4_FIRST_INO(sb)))) {
-               ext4_error(sb, "Something is wrong with group %u: "
-                          "used itable blocks: %d; "
-                          "itable unused count: %u",
-                          group, used_blks,
-                          ext4_itable_unused_count(sb, gdp));
-               ret = 1;
-               goto err_out;
+       if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
+               used_inos = EXT4_INODES_PER_GROUP(sb) -
+                           ext4_itable_unused_count(sb, gdp);
+               used_blks = DIV_ROUND_UP(used_inos, sbi->s_inodes_per_block);
+
+               /* Bogus inode unused count? */
+               if (used_blks < 0 || used_blks > sbi->s_itb_per_group) {
+                       ext4_error(sb, "Something is wrong with group %u: "
+                                  "used itable blocks: %d; "
+                                  "itable unused count: %u",
+                                  group, used_blks,
+                                  ext4_itable_unused_count(sb, gdp));
+                       ret = 1;
+                       goto err_out;
+               }
+
+               used_inos += group * EXT4_INODES_PER_GROUP(sb);
+               /*
+                * Are there some uninitialized inodes in the inode table
+                * before the first normal inode?
+                */
+               if ((used_blks != sbi->s_itb_per_group) &&
+                    (used_inos < EXT4_FIRST_INO(sb))) {
+                       ext4_error(sb, "Something is wrong with group %u: "
+                                  "itable unused count: %u; "
+                                  "itables initialized count: %ld",
+                                  group, ext4_itable_unused_count(sb, gdp),
+                                  used_inos);
+                       ret = 1;
+                       goto err_out;
+               }
        }
 
        blk = ext4_inode_table(sb, gdp) + used_blks;
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index a2cf35066f46..0796bfa72829 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -315,6 +315,12 @@ static void ext4_dax_dontcache(struct inode *inode, unsigned int flags)
 static bool dax_compatible(struct inode *inode, unsigned int oldflags,
                           unsigned int flags)
 {
+       /* Allow the DAX flag to be changed on inline directories */
+       if (S_ISDIR(inode->i_mode)) {
+               flags &= ~EXT4_INLINE_DATA_FL;
+               oldflags &= ~EXT4_INLINE_DATA_FL;
+       }
+
        if (flags & EXT4_DAX_FL) {
                if ((oldflags & EXT4_DAX_MUT_EXCL) ||
                     ext4_test_inode_state(inode,
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 795c3ff2907c..68fbeedd627b 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -56,7 +56,7 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
        wait_on_buffer(bh);
        sb_end_write(sb);
        if (unlikely(!buffer_uptodate(bh)))
-               return 1;
+               return -EIO;
 
        return 0;
 }
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b9693680463a..77c1cb258262 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -667,9 +667,6 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
                        ext4_commit_super(sb);
        }
 
-       if (sb_rdonly(sb) || continue_fs)
-               return;
-
        /*
         * We force ERRORS_RO behavior when system is rebooting. Otherwise we
         * could panic during 'reboot -f' as the underlying device got already
@@ -679,6 +676,10 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
                panic("EXT4-fs (device %s): panic forced after error\n",
                        sb->s_id);
        }
+
+       if (sb_rdonly(sb) || continue_fs)
+               return;
+
        ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
        /*
         * Make sure updated value of ->s_mount_flags will be visible before
@@ -3023,9 +3024,6 @@ static void ext4_orphan_cleanup(struct super_block *sb,
                sb->s_flags &= ~SB_RDONLY;
        }
 #ifdef CONFIG_QUOTA
-       /* Needed for iput() to work correctly and not trash data */
-       sb->s_flags |= SB_ACTIVE;
-
        /*
         * Turn on quotas which were not enabled for read-only mounts if
         * filesystem has quota feature, so that they are updated correctly.
@@ -5561,8 +5559,10 @@ static int ext4_commit_super(struct super_block *sb)
        struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
        int error = 0;
 
-       if (!sbh || block_device_ejected(sb))
-               return error;
+       if (!sbh)
+               return -EINVAL;
+       if (block_device_ejected(sb))
+               return -ENODEV;
 
        ext4_update_super(sb);
 
64655 diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
64656 index 77fa342de38f..582b11afb0d5 100644
64657 --- a/fs/f2fs/compress.c
64658 +++ b/fs/f2fs/compress.c
64659 @@ -123,19 +123,6 @@ static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
64660         f2fs_drop_rpages(cc, len, true);
64663 -static void f2fs_put_rpages_mapping(struct address_space *mapping,
64664 -                               pgoff_t start, int len)
64666 -       int i;
64668 -       for (i = 0; i < len; i++) {
64669 -               struct page *page = find_get_page(mapping, start + i);
64671 -               put_page(page);
64672 -               put_page(page);
64673 -       }
64676  static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
64677                 struct writeback_control *wbc, bool redirty, int unlock)
64679 @@ -164,13 +151,14 @@ int f2fs_init_compress_ctx(struct compress_ctx *cc)
64680         return cc->rpages ? 0 : -ENOMEM;
64683 -void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
64684 +void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
64686         page_array_free(cc->inode, cc->rpages, cc->cluster_size);
64687         cc->rpages = NULL;
64688         cc->nr_rpages = 0;
64689         cc->nr_cpages = 0;
64690 -       cc->cluster_idx = NULL_CLUSTER;
64691 +       if (!reuse)
64692 +               cc->cluster_idx = NULL_CLUSTER;
64695  void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
64696 @@ -351,8 +339,8 @@ static const struct f2fs_compress_ops f2fs_lz4_ops = {
64698  static int zstd_init_compress_ctx(struct compress_ctx *cc)
64700 -       ZSTD_parameters params;
64701 -       ZSTD_CStream *stream;
64702 +       zstd_parameters params;
64703 +       zstd_cstream *stream;
64704         void *workspace;
64705         unsigned int workspace_size;
64706         unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
64707 @@ -361,17 +349,17 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc)
64708         if (!level)
64709                 level = F2FS_ZSTD_DEFAULT_CLEVEL;
64711 -       params = ZSTD_getParams(level, cc->rlen, 0);
64712 -       workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);
64713 +       params = zstd_get_params(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen);
64714 +       workspace_size = zstd_cstream_workspace_bound(&params.cParams);
64716         workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
64717                                         workspace_size, GFP_NOFS);
64718         if (!workspace)
64719                 return -ENOMEM;
64721 -       stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
64722 +       stream = zstd_init_cstream(&params, 0, workspace, workspace_size);
64723         if (!stream) {
64724 -               printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
64725 +               printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_cstream failed\n",
64726                                 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
64727                                 __func__);
64728                 kvfree(workspace);
64729 @@ -394,9 +382,9 @@ static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
64731  static int zstd_compress_pages(struct compress_ctx *cc)
64733 -       ZSTD_CStream *stream = cc->private2;
64734 -       ZSTD_inBuffer inbuf;
64735 -       ZSTD_outBuffer outbuf;
64736 +       zstd_cstream *stream = cc->private2;
64737 +       zstd_in_buffer inbuf;
64738 +       zstd_out_buffer outbuf;
64739         int src_size = cc->rlen;
64740         int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
64741         int ret;
64742 @@ -409,19 +397,19 @@ static int zstd_compress_pages(struct compress_ctx *cc)
64743         outbuf.dst = cc->cbuf->cdata;
64744         outbuf.size = dst_size;
64746 -       ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
64747 -       if (ZSTD_isError(ret)) {
64748 -               printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
64749 +       ret = zstd_compress_stream(stream, &outbuf, &inbuf);
64750 +       if (zstd_is_error(ret)) {
64751 +               printk_ratelimited("%sF2FS-fs (%s): %s zstd_compress_stream failed, ret: %d\n",
64752                                 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
64753 -                               __func__, ZSTD_getErrorCode(ret));
64754 +                               __func__, zstd_get_error_code(ret));
64755                 return -EIO;
64756         }
64758 -       ret = ZSTD_endStream(stream, &outbuf);
64759 -       if (ZSTD_isError(ret)) {
64760 -               printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
64761 +       ret = zstd_end_stream(stream, &outbuf);
64762 +       if (zstd_is_error(ret)) {
64763 +               printk_ratelimited("%sF2FS-fs (%s): %s zstd_end_stream returned %d\n",
64764                                 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
64765 -                               __func__, ZSTD_getErrorCode(ret));
64766 +                               __func__, zstd_get_error_code(ret));
64767                 return -EIO;
64768         }
64770 @@ -438,22 +426,22 @@ static int zstd_compress_pages(struct compress_ctx *cc)
64772  static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
64774 -       ZSTD_DStream *stream;
64775 +       zstd_dstream *stream;
64776         void *workspace;
64777         unsigned int workspace_size;
64778         unsigned int max_window_size =
64779                         MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);
64781 -       workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);
64782 +       workspace_size = zstd_dstream_workspace_bound(max_window_size);
64784         workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
64785                                         workspace_size, GFP_NOFS);
64786         if (!workspace)
64787                 return -ENOMEM;
64789 -       stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
64790 +       stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
64791         if (!stream) {
64792 -               printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
64793 +               printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_dstream failed\n",
64794                                 KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
64795                                 __func__);
64796                 kvfree(workspace);
64797 @@ -475,9 +463,9 @@ static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
64799  static int zstd_decompress_pages(struct decompress_io_ctx *dic)
64801 -       ZSTD_DStream *stream = dic->private2;
64802 -       ZSTD_inBuffer inbuf;
64803 -       ZSTD_outBuffer outbuf;
64804 +       zstd_dstream *stream = dic->private2;
64805 +       zstd_in_buffer inbuf;
64806 +       zstd_out_buffer outbuf;
64807         int ret;
64809         inbuf.pos = 0;
64810 @@ -488,11 +476,11 @@ static int zstd_decompress_pages(struct decompress_io_ctx *dic)
64811         outbuf.dst = dic->rbuf;
64812         outbuf.size = dic->rlen;
64814 -       ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
64815 -       if (ZSTD_isError(ret)) {
64816 -               printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
64817 +       ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
64818 +       if (zstd_is_error(ret)) {
64819 +               printk_ratelimited("%sF2FS-fs (%s): %s zstd_decompress_stream failed, ret: %d\n",
64820                                 KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
64821 -                               __func__, ZSTD_getErrorCode(ret));
64822 +                               __func__, zstd_get_error_code(ret));
64823                 return -EIO;
64824         }
64826 @@ -1048,7 +1036,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
64827                 }
64829                 if (PageUptodate(page))
64830 -                       unlock_page(page);
64831 +                       f2fs_put_page(page, 1);
64832                 else
64833                         f2fs_compress_ctx_add_page(cc, page);
64834         }
64835 @@ -1058,33 +1046,35 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
64837                 ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
64838                                         &last_block_in_bio, false, true);
64839 -               f2fs_destroy_compress_ctx(cc);
64840 +               f2fs_put_rpages(cc);
64841 +               f2fs_destroy_compress_ctx(cc, true);
64842                 if (ret)
64843 -                       goto release_pages;
64844 +                       goto out;
64845                 if (bio)
64846                         f2fs_submit_bio(sbi, bio, DATA);
64848                 ret = f2fs_init_compress_ctx(cc);
64849                 if (ret)
64850 -                       goto release_pages;
64851 +                       goto out;
64852         }
64854         for (i = 0; i < cc->cluster_size; i++) {
64855                 f2fs_bug_on(sbi, cc->rpages[i]);
64857                 page = find_lock_page(mapping, start_idx + i);
64858 -               f2fs_bug_on(sbi, !page);
64859 +               if (!page) {
64860 +                       /* page can be truncated */
64861 +                       goto release_and_retry;
64862 +               }
64864                 f2fs_wait_on_page_writeback(page, DATA, true, true);
64866                 f2fs_compress_ctx_add_page(cc, page);
64867 -               f2fs_put_page(page, 0);
64869                 if (!PageUptodate(page)) {
64870 +release_and_retry:
64871 +                       f2fs_put_rpages(cc);
64872                         f2fs_unlock_rpages(cc, i + 1);
64873 -                       f2fs_put_rpages_mapping(mapping, start_idx,
64874 -                                       cc->cluster_size);
64875 -                       f2fs_destroy_compress_ctx(cc);
64876 +                       f2fs_destroy_compress_ctx(cc, true);
64877                         goto retry;
64878                 }
64879         }
64880 @@ -1115,10 +1105,10 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
64881         }
64883  unlock_pages:
64884 +       f2fs_put_rpages(cc);
64885         f2fs_unlock_rpages(cc, i);
64886 -release_pages:
64887 -       f2fs_put_rpages_mapping(mapping, start_idx, i);
64888 -       f2fs_destroy_compress_ctx(cc);
64889 +       f2fs_destroy_compress_ctx(cc, true);
64890 +out:
64891         return ret;
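
Aside, for illustration only: the reworked error paths above converge on one pattern, drop every page reference first, then retry the whole cluster from scratch when a page was truncated underneath us. A self-contained toy with the same shape (all names hypothetical):

#include <stdbool.h>
#include <stdio.h>

#define CLUSTER 4

/* toy stand-ins for find_lock_page()/f2fs_put_rpages() */
static bool page_present[CLUSTER] = { true, true, true, true };
static int grabbed;

static bool grab(int i) { if (!page_present[i]) return false; grabbed++; return true; }
static void put_all(void) { grabbed = 0; }

static int collect_cluster(void)
{
        int i;
retry:
        for (i = 0; i < CLUSTER; i++) {
                if (!grab(i)) {
                        /* a page was truncated under us: release and retry */
                        put_all();
                        page_present[i] = true;   /* pretend it came back */
                        goto retry;
                }
        }
        return 0;
}

int main(void)
{
        page_present[2] = false;          /* simulate a truncated page */
        collect_cluster();
        printf("held %d pages\n", grabbed);
        return 0;
}
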
64894 @@ -1153,7 +1143,7 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
64895                 set_cluster_dirty(&cc);
64897         f2fs_put_rpages_wbc(&cc, NULL, false, 1);
64898 -       f2fs_destroy_compress_ctx(&cc);
64899 +       f2fs_destroy_compress_ctx(&cc, false);
64901         return first_index;
64903 @@ -1372,7 +1362,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
64904         f2fs_put_rpages(cc);
64905         page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
64906         cc->cpages = NULL;
64907 -       f2fs_destroy_compress_ctx(cc);
64908 +       f2fs_destroy_compress_ctx(cc, false);
64909         return 0;
64911  out_destroy_crypt:
64912 @@ -1383,7 +1373,8 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
64913         for (i = 0; i < cc->nr_cpages; i++) {
64914                 if (!cc->cpages[i])
64915                         continue;
64916 -               f2fs_put_page(cc->cpages[i], 1);
64917 +               f2fs_compress_free_page(cc->cpages[i]);
64918 +               cc->cpages[i] = NULL;
64919         }
64920  out_put_cic:
64921         kmem_cache_free(cic_entry_slab, cic);
64922 @@ -1533,7 +1524,7 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
64923         err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
64924         f2fs_put_rpages_wbc(cc, wbc, false, 0);
64925  destroy_out:
64926 -       f2fs_destroy_compress_ctx(cc);
64927 +       f2fs_destroy_compress_ctx(cc, false);
64928         return err;
64931 diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
64932 index 4e5257c763d0..8804a5d51380 100644
64933 --- a/fs/f2fs/data.c
64934 +++ b/fs/f2fs/data.c
64935 @@ -2276,7 +2276,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
64936                                                         max_nr_pages,
64937                                                         &last_block_in_bio,
64938                                                         rac != NULL, false);
64939 -                               f2fs_destroy_compress_ctx(&cc);
64940 +                               f2fs_destroy_compress_ctx(&cc, false);
64941                                 if (ret)
64942                                         goto set_error_page;
64943                         }
64944 @@ -2321,7 +2321,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
64945                                                         max_nr_pages,
64946                                                         &last_block_in_bio,
64947                                                         rac != NULL, false);
64948 -                               f2fs_destroy_compress_ctx(&cc);
64949 +                               f2fs_destroy_compress_ctx(&cc, false);
64950                         }
64951                 }
64952  #endif
64953 @@ -3022,7 +3022,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
64954                 }
64955         }
64956         if (f2fs_compressed_file(inode))
64957 -               f2fs_destroy_compress_ctx(&cc);
64958 +               f2fs_destroy_compress_ctx(&cc, false);
64959  #endif
64960         if (retry) {
64961                 index = 0;
64962 diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
64963 index e2d302ae3a46..f3fabb1edfe9 100644
64964 --- a/fs/f2fs/f2fs.h
64965 +++ b/fs/f2fs/f2fs.h
64966 @@ -3376,6 +3376,7 @@ block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
64967  int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
64968  void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
64969  int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
64970 +bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
64971  void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
64972  void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
64973  void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
64974 @@ -3383,7 +3384,7 @@ void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
64975                         unsigned int *newseg, bool new_sec, int dir);
64976  void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
64977                                         unsigned int start, unsigned int end);
64978 -void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type);
64979 +void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type);
64980  void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
64981  int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
64982  bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
64983 @@ -3547,7 +3548,7 @@ void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
64984  int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
64985  void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
64986  block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
64987 -int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background,
64988 +int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, bool force,
64989                         unsigned int segno);
64990  void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
64991  int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
64992 @@ -3949,7 +3950,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
64993  void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
64994  void f2fs_put_page_dic(struct page *page);
64995  int f2fs_init_compress_ctx(struct compress_ctx *cc);
64996 -void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
64997 +void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
64998  void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
64999  int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
65000  void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
65001 diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
65002 index d26ff2ae3f5e..dc79694e512c 100644
65003 --- a/fs/f2fs/file.c
65004 +++ b/fs/f2fs/file.c
65005 @@ -1619,9 +1619,10 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
65006         struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
65007                         .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
65008                         .m_may_create = true };
65009 -       pgoff_t pg_end;
65010 +       pgoff_t pg_start, pg_end;
65011         loff_t new_size = i_size_read(inode);
65012         loff_t off_end;
65013 +       block_t expanded = 0;
65014         int err;
65016         err = inode_newsize_ok(inode, (len + offset));
65017 @@ -1634,11 +1635,12 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
65019         f2fs_balance_fs(sbi, true);
65021 +       pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
65022         pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
65023         off_end = (offset + len) & (PAGE_SIZE - 1);
65025 -       map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
65026 -       map.m_len = pg_end - map.m_lblk;
65027 +       map.m_lblk = pg_start;
65028 +       map.m_len = pg_end - pg_start;
65029         if (off_end)
65030                 map.m_len++;
65032 @@ -1646,19 +1648,15 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
65033                 return 0;
65035         if (f2fs_is_pinned_file(inode)) {
65036 -               block_t len = (map.m_len >> sbi->log_blocks_per_seg) <<
65037 -                                       sbi->log_blocks_per_seg;
65038 -               block_t done = 0;
65040 -               if (map.m_len % sbi->blocks_per_seg)
65041 -                       len += sbi->blocks_per_seg;
65042 +               block_t sec_blks = BLKS_PER_SEC(sbi);
65043 +               block_t sec_len = roundup(map.m_len, sec_blks);
65045 -               map.m_len = sbi->blocks_per_seg;
65046 +               map.m_len = sec_blks;
65047  next_alloc:
65048                 if (has_not_enough_free_secs(sbi, 0,
65049                         GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
65050                         down_write(&sbi->gc_lock);
65051 -                       err = f2fs_gc(sbi, true, false, NULL_SEGNO);
65052 +                       err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
65053                         if (err && err != -ENODATA && err != -EAGAIN)
65054                                 goto out_err;
65055                 }
65056 @@ -1666,7 +1664,7 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
65057                 down_write(&sbi->pin_sem);
65059                 f2fs_lock_op(sbi);
65060 -               f2fs_allocate_new_segment(sbi, CURSEG_COLD_DATA_PINNED);
65061 +               f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED);
65062                 f2fs_unlock_op(sbi);
65064                 map.m_seg_type = CURSEG_COLD_DATA_PINNED;
65065 @@ -1674,24 +1672,25 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
65067                 up_write(&sbi->pin_sem);
65069 -               done += map.m_len;
65070 -               len -= map.m_len;
65071 +               expanded += map.m_len;
65072 +               sec_len -= map.m_len;
65073                 map.m_lblk += map.m_len;
65074 -               if (!err && len)
65075 +               if (!err && sec_len)
65076                         goto next_alloc;
65078 -               map.m_len = done;
65079 +               map.m_len = expanded;
65080         } else {
65081                 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
65082 +               expanded = map.m_len;
65083         }
65084  out_err:
65085         if (err) {
65086                 pgoff_t last_off;
65088 -               if (!map.m_len)
65089 +               if (!expanded)
65090                         return err;
65092 -               last_off = map.m_lblk + map.m_len - 1;
65093 +               last_off = pg_start + expanded - 1;
65095                 /* update new size to the failed position */
65096                 new_size = (last_off == pg_end) ? offset + len :
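
Aside, for illustration only: the pinned-file branch above now allocates in whole sections, so roundup() lifts the requested block count to the next multiple of BLKS_PER_SEC(sbi) before the next_alloc loop carves it off section by section. The arithmetic in miniature (512 blocks per section is just an assumed example figure):

#include <stdio.h>

/* same semantics as the kernel's roundup() macro */
static unsigned long roundup_ul(unsigned long x, unsigned long y)
{
        return ((x + y - 1) / y) * y;
}

int main(void)
{
        unsigned long sec_blks = 512;           /* example section size */
        unsigned long want = 1000;              /* map.m_len */

        /* 1000 blocks round up to 1024, i.e. two full sections */
        printf("%lu -> %lu\n", want, roundup_ul(want, sec_blks));
        return 0;
}
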
65097 @@ -2489,7 +2488,7 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
65098                 down_write(&sbi->gc_lock);
65099         }
65101 -       ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
65102 +       ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
65103  out:
65104         mnt_drop_write_file(filp);
65105         return ret;
65106 @@ -2525,7 +2524,8 @@ static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
65107                 down_write(&sbi->gc_lock);
65108         }
65110 -       ret = f2fs_gc(sbi, range->sync, true, GET_SEGNO(sbi, range->start));
65111 +       ret = f2fs_gc(sbi, range->sync, true, false,
65112 +                               GET_SEGNO(sbi, range->start));
65113         if (ret) {
65114                 if (ret == -EBUSY)
65115                         ret = -EAGAIN;
65116 @@ -2978,7 +2978,7 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
65117                 sm->last_victim[GC_CB] = end_segno + 1;
65118                 sm->last_victim[GC_GREEDY] = end_segno + 1;
65119                 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
65120 -               ret = f2fs_gc(sbi, true, true, start_segno);
65121 +               ret = f2fs_gc(sbi, true, true, true, start_segno);
65122                 if (ret == -EAGAIN)
65123                         ret = 0;
65124                 else if (ret < 0)
65125 diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
65126 index 39330ad3c44e..a8567cb47621 100644
65127 --- a/fs/f2fs/gc.c
65128 +++ b/fs/f2fs/gc.c
65129 @@ -112,7 +112,7 @@ static int gc_thread_func(void *data)
65130                 sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;
65132                 /* if return value is not zero, no victim was selected */
65133 -               if (f2fs_gc(sbi, sync_mode, true, NULL_SEGNO))
65134 +               if (f2fs_gc(sbi, sync_mode, true, false, NULL_SEGNO))
65135                         wait_ms = gc_th->no_gc_sleep_time;
65137                 trace_f2fs_background_gc(sbi->sb, wait_ms,
65138 @@ -392,10 +392,6 @@ static void add_victim_entry(struct f2fs_sb_info *sbi,
65139                 if (p->gc_mode == GC_AT &&
65140                         get_valid_blocks(sbi, segno, true) == 0)
65141                         return;
65143 -               if (p->alloc_mode == AT_SSR &&
65144 -                       get_seg_entry(sbi, segno)->ckpt_valid_blocks == 0)
65145 -                       return;
65146         }
65148         for (i = 0; i < sbi->segs_per_sec; i++)
65149 @@ -728,11 +724,27 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
65151                 if (sec_usage_check(sbi, secno))
65152                         goto next;
65154                 /* Don't touch checkpointed data */
65155 -               if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
65156 -                                       get_ckpt_valid_blocks(sbi, segno) &&
65157 -                                       p.alloc_mode == LFS))
65158 -                       goto next;
65159 +               if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
65160 +                       if (p.alloc_mode == LFS) {
65161 +                               /*
65162 +                                * LFS is set to find a source section during GC.
65163 +                                * The victim should have no checkpointed data.
65164 +                                */
65165 +                               if (get_ckpt_valid_blocks(sbi, segno, true))
65166 +                                       goto next;
65167 +                       } else {
65168 +                               /*
65169 +                                * SSR | AT_SSR are set to find target segment
65170 +                                * SSR | AT_SSR are set to find a target segment
65171 +                                * for writes, which can be filled by checkpointed
65172 +                                * and newly written blocks.
65173 +                               if (!f2fs_segment_has_free_slot(sbi, segno))
65174 +                                       goto next;
65175 +                       }
65176 +               }
65178                 if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
65179                         goto next;
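
Aside, for illustration only: with checkpointing disabled, the branch above splits victim filtering by role, an LFS GC source must hold no checkpointed data, while an SSR/AT_SSR write target merely needs one free slot. A tiny predicate with the same shape (the two helpers are hypothetical stand-ins for get_ckpt_valid_blocks() and f2fs_segment_has_free_slot()):

#include <stdbool.h>
#include <stdio.h>

enum alloc_mode { LFS, SSR };

/* toy stand-ins for the real segment queries */
static bool has_ckpt_blocks(int segno) { return segno % 2 == 0; }
static bool has_free_slot(int segno)   { return segno != 3; }

/* mirrors the goto-next filtering above, as a predicate */
static bool victim_usable(enum alloc_mode mode, int segno)
{
        if (mode == LFS)
                /* GC source: must carry no checkpointed data */
                return !has_ckpt_blocks(segno);
        /* SSR/AT_SSR target: only needs one writable slot left */
        return has_free_slot(segno);
}

int main(void)
{
        printf("seg1 as LFS victim: %d\n", victim_usable(LFS, 1));
        printf("seg3 as SSR target: %d\n", victim_usable(SSR, 3));
        return 0;
}
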
65181 @@ -1354,7 +1366,8 @@ static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
65182   * the victim data block is ignored.
65183   */
65184  static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
65185 -               struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
65186 +               struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
65187 +               bool force_migrate)
65189         struct super_block *sb = sbi->sb;
65190         struct f2fs_summary *entry;
65191 @@ -1383,8 +1396,8 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
65192                  * race condition along with SSR block allocation.
65193                  */
65194                 if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
65195 -                               get_valid_blocks(sbi, segno, true) ==
65196 -                                                       BLKS_PER_SEC(sbi))
65197 +                       (!force_migrate && get_valid_blocks(sbi, segno, true) ==
65198 +                                                       BLKS_PER_SEC(sbi)))
65199                         return submitted;
65201                 if (check_valid_map(sbi, segno, off) == 0)
65202 @@ -1519,7 +1532,8 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
65204  static int do_garbage_collect(struct f2fs_sb_info *sbi,
65205                                 unsigned int start_segno,
65206 -                               struct gc_inode_list *gc_list, int gc_type)
65207 +                               struct gc_inode_list *gc_list, int gc_type,
65208 +                               bool force_migrate)
65210         struct page *sum_page;
65211         struct f2fs_summary_block *sum;
65212 @@ -1606,7 +1620,8 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
65213                                                                 gc_type);
65214                 else
65215                         submitted += gc_data_segment(sbi, sum->entries, gc_list,
65216 -                                                       segno, gc_type);
65217 +                                                       segno, gc_type,
65218 +                                                       force_migrate);
65220                 stat_inc_seg_count(sbi, type, gc_type);
65221                 migrated++;
65222 @@ -1634,7 +1649,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
65225  int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
65226 -                       bool background, unsigned int segno)
65227 +                       bool background, bool force, unsigned int segno)
65229         int gc_type = sync ? FG_GC : BG_GC;
65230         int sec_freed = 0, seg_freed = 0, total_freed = 0;
65231 @@ -1696,7 +1711,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
65232         if (ret)
65233                 goto stop;
65235 -       seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
65236 +       seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, force);
65237         if (gc_type == FG_GC &&
65238                 seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
65239                 sec_freed++;
65240 @@ -1835,7 +1850,7 @@ static int free_segment_range(struct f2fs_sb_info *sbi,
65241                         .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
65242                 };
65244 -               do_garbage_collect(sbi, segno, &gc_list, FG_GC);
65245 +               do_garbage_collect(sbi, segno, &gc_list, FG_GC, true);
65246                 put_gc_inode(&gc_list);
65248                 if (!gc_only && get_valid_blocks(sbi, segno, true)) {
65249 @@ -1974,7 +1989,20 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
65251         /* stop CP to protect MAIN_SEC in free_segment_range */
65252         f2fs_lock_op(sbi);
65254 +       spin_lock(&sbi->stat_lock);
65255 +       if (shrunk_blocks + valid_user_blocks(sbi) +
65256 +               sbi->current_reserved_blocks + sbi->unusable_block_count +
65257 +               F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
65258 +               err = -ENOSPC;
65259 +       spin_unlock(&sbi->stat_lock);
65261 +       if (err)
65262 +               goto out_unlock;
65264         err = free_segment_range(sbi, secs, true);
65266 +out_unlock:
65267         f2fs_unlock_op(sbi);
65268         up_write(&sbi->gc_lock);
65269         if (err)
65270 diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
65271 index 993caefcd2bb..92652ca7a7c8 100644
65272 --- a/fs/f2fs/inline.c
65273 +++ b/fs/f2fs/inline.c
65274 @@ -219,7 +219,8 @@ int f2fs_convert_inline_inode(struct inode *inode)
65276         f2fs_put_page(page, 1);
65278 -       f2fs_balance_fs(sbi, dn.node_changed);
65279 +       if (!err)
65280 +               f2fs_balance_fs(sbi, dn.node_changed);
65282         return err;
65284 diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
65285 index 4b0e2e3c2c88..45c8cf1afe66 100644
65286 --- a/fs/f2fs/node.c
65287 +++ b/fs/f2fs/node.c
65288 @@ -2785,6 +2785,9 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
65289                 struct f2fs_nat_entry raw_ne;
65290                 nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
65292 +               if (f2fs_check_nid_range(sbi, nid))
65293 +                       continue;
65295                 raw_ne = nat_in_journal(journal, i);
65297                 ne = __lookup_nat_cache(nm_i, nid);
65298 diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
65299 index c2866561263e..bb6d86255741 100644
65300 --- a/fs/f2fs/segment.c
65301 +++ b/fs/f2fs/segment.c
65302 @@ -186,7 +186,10 @@ void f2fs_register_inmem_page(struct inode *inode, struct page *page)
65304         struct inmem_pages *new;
65306 -       f2fs_set_page_private(page, ATOMIC_WRITTEN_PAGE);
65307 +       if (PagePrivate(page))
65308 +               set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
65309 +       else
65310 +               f2fs_set_page_private(page, ATOMIC_WRITTEN_PAGE);
65312         new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
65314 @@ -324,23 +327,27 @@ void f2fs_drop_inmem_pages(struct inode *inode)
65315         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
65316         struct f2fs_inode_info *fi = F2FS_I(inode);
65318 -       while (!list_empty(&fi->inmem_pages)) {
65319 +       do {
65320                 mutex_lock(&fi->inmem_lock);
65321 +               if (list_empty(&fi->inmem_pages)) {
65322 +                       fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
65324 +                       spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
65325 +                       if (!list_empty(&fi->inmem_ilist))
65326 +                               list_del_init(&fi->inmem_ilist);
65327 +                       if (f2fs_is_atomic_file(inode)) {
65328 +                               clear_inode_flag(inode, FI_ATOMIC_FILE);
65329 +                               sbi->atomic_files--;
65330 +                       }
65331 +                       spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
65333 +                       mutex_unlock(&fi->inmem_lock);
65334 +                       break;
65335 +               }
65336                 __revoke_inmem_pages(inode, &fi->inmem_pages,
65337                                                 true, false, true);
65338                 mutex_unlock(&fi->inmem_lock);
65339 -       }
65341 -       fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
65343 -       spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
65344 -       if (!list_empty(&fi->inmem_ilist))
65345 -               list_del_init(&fi->inmem_ilist);
65346 -       if (f2fs_is_atomic_file(inode)) {
65347 -               clear_inode_flag(inode, FI_ATOMIC_FILE);
65348 -               sbi->atomic_files--;
65349 -       }
65350 -       spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
65351 +       } while (1);
65354  void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
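
Aside, for illustration only: the rewritten loop above makes the emptiness test and the final ATOMIC_FILE bookkeeping happen under the very same inmem_lock hold, closing the window where new in-memory pages could slip in between the old while-condition and the cleanup. The locking discipline in miniature (a pthread mutex standing in for the kernel mutex):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int pending = 3;                  /* stand-in for fi->inmem_pages */

static void drain(void)
{
        for (;;) {
                pthread_mutex_lock(&lock);
                if (pending == 0) {
                        /* final cleanup under the same lock hold that
                         * observed emptiness: no window for new entries */
                        printf("done, cleaning up\n");
                        pthread_mutex_unlock(&lock);
                        break;
                }
                pending--;               /* revoke one batch */
                pthread_mutex_unlock(&lock);
        }
}

int main(void) { drain(); return 0; }
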
65355 @@ -504,7 +511,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
65356          */
65357         if (has_not_enough_free_secs(sbi, 0, 0)) {
65358                 down_write(&sbi->gc_lock);
65359 -               f2fs_gc(sbi, false, false, NULL_SEGNO);
65360 +               f2fs_gc(sbi, false, false, false, NULL_SEGNO);
65361         }
65364 @@ -861,7 +868,7 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
65365         mutex_lock(&dirty_i->seglist_lock);
65367         valid_blocks = get_valid_blocks(sbi, segno, false);
65368 -       ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno);
65369 +       ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false);
65371         if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
65372                 ckpt_valid_blocks == usable_blocks)) {
65373 @@ -946,7 +953,7 @@ static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
65374         for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
65375                 if (get_valid_blocks(sbi, segno, false))
65376                         continue;
65377 -               if (get_ckpt_valid_blocks(sbi, segno))
65378 +               if (get_ckpt_valid_blocks(sbi, segno, false))
65379                         continue;
65380                 mutex_unlock(&dirty_i->seglist_lock);
65381                 return segno;
65382 @@ -2636,6 +2643,23 @@ static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
65383                 seg->next_blkoff++;
65386 +bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
65388 +       struct seg_entry *se = get_seg_entry(sbi, segno);
65389 +       int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
65390 +       unsigned long *target_map = SIT_I(sbi)->tmp_map;
65391 +       unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
65392 +       unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
65393 +       int i, pos;
65395 +       for (i = 0; i < entries; i++)
65396 +               target_map[i] = ckpt_map[i] | cur_map[i];
65398 +       pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, 0);
65400 +       return pos < sbi->blocks_per_seg;
65403  /*
65404   * This function always allocates a used segment(from dirty seglist) by SSR
65405   * manner, so it should recover the existing segment information of valid blocks
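
Aside, for illustration only: f2fs_segment_has_free_slot() above ORs the checkpointed and current validity bitmaps and then scans for a zero bit, so a slot counts as free only when it is unused in both views. The same test on a 16-block toy segment:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BLOCKS 16

/* true if some block is free in BOTH bitmaps (a zero bit in the OR) */
static bool segment_has_free_slot(uint32_t ckpt_map, uint32_t cur_map)
{
        uint32_t target = ckpt_map | cur_map;

        /* any zero bit among the low BLOCKS bits? */
        return target != ((1u << BLOCKS) - 1);
}

int main(void)
{
        /* block 5 is free in both views, so a slot exists */
        printf("%d\n", segment_has_free_slot(0xffdf, 0xffdf));
        /* every block valid in one view or the other: no slot */
        printf("%d\n", segment_has_free_slot(0x00ff, 0xff00));
        return 0;
}
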
65406 @@ -2893,7 +2917,8 @@ void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
65407         up_read(&SM_I(sbi)->curseg_lock);
65410 -static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type)
65411 +static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
65412 +                                                               bool new_sec)
65414         struct curseg_info *curseg = CURSEG_I(sbi, type);
65415         unsigned int old_segno;
65416 @@ -2901,32 +2926,42 @@ static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type)
65417         if (!curseg->inited)
65418                 goto alloc;
65420 -       if (!curseg->next_blkoff &&
65421 -               !get_valid_blocks(sbi, curseg->segno, false) &&
65422 -               !get_ckpt_valid_blocks(sbi, curseg->segno))
65423 -               return;
65424 +       if (curseg->next_blkoff ||
65425 +               get_valid_blocks(sbi, curseg->segno, new_sec))
65426 +               goto alloc;
65428 +       if (!get_ckpt_valid_blocks(sbi, curseg->segno, new_sec))
65429 +               return;
65430  alloc:
65431         old_segno = curseg->segno;
65432         SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
65433         locate_dirty_segment(sbi, old_segno);
65436 -void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type)
65437 +static void __allocate_new_section(struct f2fs_sb_info *sbi, int type)
65439 +       __allocate_new_segment(sbi, type, true);
65442 +void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type)
65444 +       down_read(&SM_I(sbi)->curseg_lock);
65445         down_write(&SIT_I(sbi)->sentry_lock);
65446 -       __allocate_new_segment(sbi, type);
65447 +       __allocate_new_section(sbi, type);
65448         up_write(&SIT_I(sbi)->sentry_lock);
65449 +       up_read(&SM_I(sbi)->curseg_lock);
65452  void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
65454         int i;
65456 +       down_read(&SM_I(sbi)->curseg_lock);
65457         down_write(&SIT_I(sbi)->sentry_lock);
65458         for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
65459 -               __allocate_new_segment(sbi, i);
65460 +               __allocate_new_segment(sbi, i, false);
65461         up_write(&SIT_I(sbi)->sentry_lock);
65462 +       up_read(&SM_I(sbi)->curseg_lock);
65465  static const struct segment_allocation default_salloc_ops = {
65466 @@ -3365,12 +3400,12 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
65467                 f2fs_inode_chksum_set(sbi, page);
65468         }
65470 -       if (F2FS_IO_ALIGNED(sbi))
65471 -               fio->retry = false;
65473         if (fio) {
65474                 struct f2fs_bio_info *io;
65476 +               if (F2FS_IO_ALIGNED(sbi))
65477 +                       fio->retry = false;
65479                 INIT_LIST_HEAD(&fio->list);
65480                 fio->in_list = true;
65481                 io = sbi->write_io[fio->type] + fio->temp;
65482 diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
65483 index e9a7a637d688..afb175739de5 100644
65484 --- a/fs/f2fs/segment.h
65485 +++ b/fs/f2fs/segment.h
65486 @@ -361,8 +361,20 @@ static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
65489  static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
65490 -                               unsigned int segno)
65491 +                               unsigned int segno, bool use_section)
65493 +       if (use_section && __is_large_section(sbi)) {
65494 +               unsigned int start_segno = START_SEGNO(segno);
65495 +               unsigned int blocks = 0;
65496 +               int i;
65498 +               for (i = 0; i < sbi->segs_per_sec; i++, start_segno++) {
65499 +                       struct seg_entry *se = get_seg_entry(sbi, start_segno);
65501 +                       blocks += se->ckpt_valid_blocks;
65502 +               }
65503 +               return blocks;
65504 +       }
65505         return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
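
Aside, for illustration only: with use_section set on a large-section layout, the helper above sums ckpt_valid_blocks over every segment in the section instead of reading a single seg_entry. The equivalent loop in miniature (four segments per section is an assumed example):

#include <stdio.h>

#define SEGS_PER_SEC 4

static unsigned int ckpt_valid[SEGS_PER_SEC] = { 10, 0, 7, 3 };

/* sum checkpointed blocks across all segments of one section */
static unsigned int section_ckpt_blocks(void)
{
        unsigned int blocks = 0;
        int i;

        for (i = 0; i < SEGS_PER_SEC; i++)
                blocks += ckpt_valid[i];
        return blocks;
}

int main(void)
{
        printf("%u\n", section_ckpt_blocks());   /* prints 20 */
        return 0;
}
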
65508 diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
65509 index 82592b19b4e0..3c8426709f34 100644
65510 --- a/fs/f2fs/super.c
65511 +++ b/fs/f2fs/super.c
65512 @@ -525,7 +525,7 @@ static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
65513         if (kstrtouint(str + 1, 10, &level))
65514                 return -EINVAL;
65516 -       if (!level || level > ZSTD_maxCLevel()) {
65517 +       if (!level || level > zstd_max_clevel()) {
65518                 f2fs_info(sbi, "invalid zstd compress level: %d", level);
65519                 return -EINVAL;
65520         }
65521 @@ -1865,7 +1865,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
65523         while (!f2fs_time_over(sbi, DISABLE_TIME)) {
65524                 down_write(&sbi->gc_lock);
65525 -               err = f2fs_gc(sbi, true, false, NULL_SEGNO);
65526 +               err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
65527                 if (err == -ENODATA) {
65528                         err = 0;
65529                         break;
65530 @@ -3929,10 +3929,18 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
65531                  * previous checkpoint was not done by clean system shutdown.
65532                  */
65533                 if (f2fs_hw_is_readonly(sbi)) {
65534 -                       if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))
65535 -                               f2fs_err(sbi, "Need to recover fsync data, but write access unavailable");
65536 -                       else
65537 -                               f2fs_info(sbi, "write access unavailable, skipping recovery");
65538 +                       if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
65539 +                               err = f2fs_recover_fsync_data(sbi, true);
65540 +                               if (err > 0) {
65541 +                                       err = -EROFS;
65542 +                                       f2fs_err(sbi, "Need to recover fsync data, but "
65543 +                                               "write access unavailable, please try "
65544 +                                               "mount w/ disable_roll_forward or norecovery");
65545 +                               }
65546 +                               if (err < 0)
65547 +                                       goto free_meta;
65548 +                       }
65549 +                       f2fs_info(sbi, "write access unavailable, skipping recovery");
65550                         goto reset_checkpoint;
65551                 }
65553 diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c
65554 index 054ec852b5ea..15ba36926fad 100644
65555 --- a/fs/f2fs/verity.c
65556 +++ b/fs/f2fs/verity.c
65557 @@ -152,40 +152,73 @@ static int f2fs_end_enable_verity(struct file *filp, const void *desc,
65558                                   size_t desc_size, u64 merkle_tree_size)
65560         struct inode *inode = file_inode(filp);
65561 +       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
65562         u64 desc_pos = f2fs_verity_metadata_pos(inode) + merkle_tree_size;
65563         struct fsverity_descriptor_location dloc = {
65564                 .version = cpu_to_le32(F2FS_VERIFY_VER),
65565                 .size = cpu_to_le32(desc_size),
65566                 .pos = cpu_to_le64(desc_pos),
65567         };
65568 -       int err = 0;
65569 +       int err = 0, err2 = 0;
65571 -       if (desc != NULL) {
65572 -               /* Succeeded; write the verity descriptor. */
65573 -               err = pagecache_write(inode, desc, desc_size, desc_pos);
65574 +       /*
65575 +        * If an error already occurred (which fs/verity/ signals by passing
65576 +        * desc == NULL), then only clean-up is needed.
65577 +        */
65578 +       if (desc == NULL)
65579 +               goto cleanup;
65581 -               /* Write all pages before clearing FI_VERITY_IN_PROGRESS. */
65582 -               if (!err)
65583 -                       err = filemap_write_and_wait(inode->i_mapping);
65584 -       }
65585 +       /* Append the verity descriptor. */
65586 +       err = pagecache_write(inode, desc, desc_size, desc_pos);
65587 +       if (err)
65588 +               goto cleanup;
65590 +       /*
65591 +        * Write all pages (both data and verity metadata).  Note that this must
65592 +        * happen before clearing FI_VERITY_IN_PROGRESS; otherwise pages beyond
65593 +        * i_size won't be written properly.  For crash consistency, this also
65594 +        * must happen before the verity inode flag gets persisted.
65595 +        */
65596 +       err = filemap_write_and_wait(inode->i_mapping);
65597 +       if (err)
65598 +               goto cleanup;
65600 +       /* Set the verity xattr. */
65601 +       err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_VERITY,
65602 +                           F2FS_XATTR_NAME_VERITY, &dloc, sizeof(dloc),
65603 +                           NULL, XATTR_CREATE);
65604 +       if (err)
65605 +               goto cleanup;
65607 -       /* If we failed, truncate anything we wrote past i_size. */
65608 -       if (desc == NULL || err)
65609 -               f2fs_truncate(inode);
65610 +       /* Finally, set the verity inode flag. */
65611 +       file_set_verity(inode);
65612 +       f2fs_set_inode_flags(inode);
65613 +       f2fs_mark_inode_dirty_sync(inode, true);
65615         clear_inode_flag(inode, FI_VERITY_IN_PROGRESS);
65616 +       return 0;
65618 -       if (desc != NULL && !err) {
65619 -               err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_VERITY,
65620 -                                   F2FS_XATTR_NAME_VERITY, &dloc, sizeof(dloc),
65621 -                                   NULL, XATTR_CREATE);
65622 -               if (!err) {
65623 -                       file_set_verity(inode);
65624 -                       f2fs_set_inode_flags(inode);
65625 -                       f2fs_mark_inode_dirty_sync(inode, true);
65626 -               }
65627 +cleanup:
65628 +       /*
65629 +        * Verity failed to be enabled, so clean up by truncating any verity
65630 +        * metadata that was written beyond i_size (both from cache and from
65631 +        * disk) and clearing FI_VERITY_IN_PROGRESS.
65632 +        *
65633 +        * Taking i_gc_rwsem[WRITE] is needed to stop f2fs garbage collection
65634 +        * from re-instantiating cached pages we are truncating (since unlike
65635 +        * normal file accesses, garbage collection isn't limited by i_size).
65636 +        */
65637 +       down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
65638 +       truncate_inode_pages(inode->i_mapping, inode->i_size);
65639 +       err2 = f2fs_truncate(inode);
65640 +       if (err2) {
65641 +               f2fs_err(sbi, "Truncating verity metadata failed (errno=%d)",
65642 +                        err2);
65643 +               set_sbi_flag(sbi, SBI_NEED_FSCK);
65644         }
65645 -       return err;
65646 +       up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
65647 +       clear_inode_flag(inode, FI_VERITY_IN_PROGRESS);
65648 +       return err ?: err2;
65651  static int f2fs_get_verity_descriptor(struct inode *inode, void *buf,
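
Aside, for illustration only: the rewritten f2fs_end_enable_verity() above is a strict pipeline, write the descriptor, flush all pages, set the xattr, and only then flip the verity flag, with every failure funneled to one cleanup label. A skeleton of that shape (the three step functions are hypothetical stand-ins, not f2fs APIs):

#include <stdio.h>

static int write_descriptor(void) { return 0; }   /* pagecache_write() */
static int flush_pages(void)      { return 0; }   /* filemap_write_and_wait() */
static int set_verity_xattr(void) { return 0; }   /* f2fs_setxattr() */

static int enable_verity(void)
{
        int err;

        err = write_descriptor();        /* append metadata past i_size */
        if (err)
                goto cleanup;
        err = flush_pages();             /* must land before the flag flips */
        if (err)
                goto cleanup;
        err = set_verity_xattr();        /* persist the descriptor location */
        if (err)
                goto cleanup;
        /* only now would the inode be marked as a verity file */
        return 0;
cleanup:
        /* truncate anything written past i_size, then clear in-progress */
        fprintf(stderr, "verity enable failed: %d\n", err);
        return err;
}

int main(void) { return enable_verity(); }
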
65652 diff --git a/fs/file.c b/fs/file.c
65653 index f633348029a5..b56c4dd78a19 100644
65654 --- a/fs/file.c
65655 +++ b/fs/file.c
65656 @@ -771,6 +771,7 @@ int __close_fd_get_file(unsigned int fd, struct file **res)
65657         *res = NULL;
65658         return -ENOENT;
65660 +EXPORT_SYMBOL(close_fd_get_file);
65662  /*
65663   * variant of close_fd that gets a ref on the file for later fput.
65664 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
65665 index 45082269e698..a37528b51798 100644
65666 --- a/fs/fuse/cuse.c
65667 +++ b/fs/fuse/cuse.c
65668 @@ -627,6 +627,8 @@ static int __init cuse_init(void)
65669         cuse_channel_fops.owner         = THIS_MODULE;
65670         cuse_channel_fops.open          = cuse_channel_open;
65671         cuse_channel_fops.release       = cuse_channel_release;
65672 +       /* CUSE is not prepared for FUSE_DEV_IOC_CLONE */
65673 +       cuse_channel_fops.unlocked_ioctl        = NULL;
65675         cuse_class = class_create(THIS_MODULE, "cuse");
65676         if (IS_ERR(cuse_class))
65677 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
65678 index c0fee830a34e..f784c118f00f 100644
65679 --- a/fs/fuse/dev.c
65680 +++ b/fs/fuse/dev.c
65681 @@ -784,7 +784,8 @@ static int fuse_check_page(struct page *page)
65682                1 << PG_lru |
65683                1 << PG_active |
65684                1 << PG_reclaim |
65685 -              1 << PG_waiters))) {
65686 +              1 << PG_waiters |
65687 +              LRU_GEN_MASK | LRU_USAGE_MASK))) {
65688                 dump_page(page, "fuse: trying to steal weird page");
65689                 return 1;
65690         }
65691 @@ -2233,11 +2234,8 @@ static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
65692         int oldfd;
65693         struct fuse_dev *fud = NULL;
65695 -       if (_IOC_TYPE(cmd) != FUSE_DEV_IOC_MAGIC)
65696 -               return -ENOTTY;
65698 -       switch (_IOC_NR(cmd)) {
65699 -       case _IOC_NR(FUSE_DEV_IOC_CLONE):
65700 +       switch (cmd) {
65701 +       case FUSE_DEV_IOC_CLONE:
65702                 res = -EFAULT;
65703                 if (!get_user(oldfd, (__u32 __user *)arg)) {
65704                         struct file *old = fget(oldfd);
65705 diff --git a/fs/fuse/file.c b/fs/fuse/file.c
65706 index 8cccecb55fb8..6e6d1e599869 100644
65707 --- a/fs/fuse/file.c
65708 +++ b/fs/fuse/file.c
65709 @@ -1099,6 +1099,7 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
65710         struct fuse_file *ff = file->private_data;
65711         struct fuse_mount *fm = ff->fm;
65712         unsigned int offset, i;
65713 +       bool short_write;
65714         int err;
65716         for (i = 0; i < ap->num_pages; i++)
65717 @@ -1113,32 +1114,38 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
65718         if (!err && ia->write.out.size > count)
65719                 err = -EIO;
65721 +       short_write = ia->write.out.size < count;
65722         offset = ap->descs[0].offset;
65723         count = ia->write.out.size;
65724         for (i = 0; i < ap->num_pages; i++) {
65725                 struct page *page = ap->pages[i];
65727 -               if (!err && !offset && count >= PAGE_SIZE)
65728 -                       SetPageUptodate(page);
65730 -               if (count > PAGE_SIZE - offset)
65731 -                       count -= PAGE_SIZE - offset;
65732 -               else
65733 -                       count = 0;
65734 -               offset = 0;
65736 -               unlock_page(page);
65737 +               if (err) {
65738 +                       ClearPageUptodate(page);
65739 +               } else {
65740 +                       if (count >= PAGE_SIZE - offset)
65741 +                               count -= PAGE_SIZE - offset;
65742 +                       else {
65743 +                               if (short_write)
65744 +                                       ClearPageUptodate(page);
65745 +                               count = 0;
65746 +                       }
65747 +                       offset = 0;
65748 +               }
65749 +               if (ia->write.page_locked && (i == ap->num_pages - 1))
65750 +                       unlock_page(page);
65751                 put_page(page);
65752         }
65754         return err;
65757 -static ssize_t fuse_fill_write_pages(struct fuse_args_pages *ap,
65758 +static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
65759                                      struct address_space *mapping,
65760                                      struct iov_iter *ii, loff_t pos,
65761                                      unsigned int max_pages)
65763 +       struct fuse_args_pages *ap = &ia->ap;
65764         struct fuse_conn *fc = get_fuse_conn(mapping->host);
65765         unsigned offset = pos & (PAGE_SIZE - 1);
65766         size_t count = 0;
65767 @@ -1191,6 +1198,16 @@ static ssize_t fuse_fill_write_pages(struct fuse_args_pages *ap,
65768                 if (offset == PAGE_SIZE)
65769                         offset = 0;
65771 +               /* If we copied full page, mark it uptodate */
65772 +               if (tmp == PAGE_SIZE)
65773 +                       SetPageUptodate(page);
65775 +               if (PageUptodate(page)) {
65776 +                       unlock_page(page);
65777 +               } else {
65778 +                       ia->write.page_locked = true;
65779 +                       break;
65780 +               }
65781                 if (!fc->big_writes)
65782                         break;
65783         } while (iov_iter_count(ii) && count < fc->max_write &&
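
Aside, for illustration only: after this change fuse_fill_write_pages() marks a page uptodate only when a full PAGE_SIZE was copied, and a partial tail page stays locked (recorded in ia->write.page_locked) until the server's reply says how much was actually written. The copy/uptodate bookkeeping in miniature, with an 8-byte toy page size:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE 8

/* copy @len bytes into PAGE-sized chunks; a chunk is "uptodate" only
 * when completely filled, mirroring the tmp == PAGE_SIZE test above */
static void fill_pages(size_t len)
{
        size_t off = 0;

        while (off < len) {
                size_t tmp = (len - off < PAGE) ? len - off : PAGE;
                bool uptodate = (tmp == PAGE);

                printf("page %zu: copied %zu, uptodate=%d, %s\n",
                       off / PAGE, tmp, uptodate,
                       uptodate ? "unlock now" : "stay locked for the reply");
                off += tmp;
                if (!uptodate)
                        break;  /* the partial page waits for the write reply */
        }
}

int main(void) { fill_pages(20); return 0; }
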
65784 @@ -1234,7 +1251,7 @@ static ssize_t fuse_perform_write(struct kiocb *iocb,
65785                         break;
65786                 }
65788 -               count = fuse_fill_write_pages(ap, mapping, ii, pos, nr_pages);
65789 +               count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages);
65790                 if (count <= 0) {
65791                         err = count;
65792                 } else {
65793 @@ -1759,8 +1776,17 @@ static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
65794                 container_of(args, typeof(*wpa), ia.ap.args);
65795         struct inode *inode = wpa->inode;
65796         struct fuse_inode *fi = get_fuse_inode(inode);
65797 +       struct fuse_conn *fc = get_fuse_conn(inode);
65799         mapping_set_error(inode->i_mapping, error);
65800 +       /*
65801 +        * A writeback finished and this might have updated mtime/ctime on
65802 +        * the server, making local mtime/ctime stale.  Hence invalidate attrs.
65803 +        * Do this only if writeback_cache is not enabled.  If writeback_cache
65804 +        * is enabled, we trust local ctime/mtime.
65805 +        */
65806 +       if (!fc->writeback_cache)
65807 +               fuse_invalidate_attr(inode);
65808         spin_lock(&fi->lock);
65809         rb_erase(&wpa->writepages_entry, &fi->writepages);
65810         while (wpa->next) {
65811 diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
65812 index 63d97a15ffde..74d888c78fa4 100644
65813 --- a/fs/fuse/fuse_i.h
65814 +++ b/fs/fuse/fuse_i.h
65815 @@ -912,6 +912,7 @@ struct fuse_io_args {
65816                 struct {
65817                         struct fuse_write_in in;
65818                         struct fuse_write_out out;
65819 +                       bool page_locked;
65820                 } write;
65821         };
65822         struct fuse_args_pages ap;
65823 diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
65824 index 4ee6f734ba83..005209b1cd50 100644
65825 --- a/fs/fuse/virtio_fs.c
65826 +++ b/fs/fuse/virtio_fs.c
65827 @@ -896,6 +896,7 @@ static int virtio_fs_probe(struct virtio_device *vdev)
65828  out_vqs:
65829         vdev->config->reset(vdev);
65830         virtio_fs_cleanup_vqs(vdev, fs);
65831 +       kfree(fs->vqs);
65833  out:
65834         vdev->priv = NULL;
65835 @@ -1436,8 +1437,7 @@ static int virtio_fs_get_tree(struct fs_context *fsc)
65836         if (!fm)
65837                 goto out_err;
65839 -       fuse_conn_init(fc, fm, get_user_ns(current_user_ns()),
65840 -                      &virtio_fs_fiq_ops, fs);
65841 +       fuse_conn_init(fc, fm, fsc->user_ns, &virtio_fs_fiq_ops, fs);
65842         fc->release = fuse_free_conn;
65843         fc->delete_stale = true;
65844         fc->auto_submounts = true;
65845 diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
65846 index a930ddd15681..7054a542689f 100644
65847 --- a/fs/hfsplus/extents.c
65848 +++ b/fs/hfsplus/extents.c
65849 @@ -598,13 +598,15 @@ void hfsplus_file_truncate(struct inode *inode)
65850                 res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
65851                 if (res)
65852                         break;
65853 -               hfs_brec_remove(&fd);
65855 -               mutex_unlock(&fd.tree->tree_lock);
65856                 start = hip->cached_start;
65857 +               if (blk_cnt <= start)
65858 +                       hfs_brec_remove(&fd);
65859 +               mutex_unlock(&fd.tree->tree_lock);
65860                 hfsplus_free_extents(sb, hip->cached_extents,
65861                                      alloc_cnt - start, alloc_cnt - blk_cnt);
65862                 hfsplus_dump_extent(hip->cached_extents);
65863 +               mutex_lock(&fd.tree->tree_lock);
65864                 if (blk_cnt > start) {
65865                         hip->extent_state |= HFSPLUS_EXT_DIRTY;
65866                         break;
65867 @@ -612,7 +614,6 @@ void hfsplus_file_truncate(struct inode *inode)
65868                 alloc_cnt = start;
65869                 hip->cached_start = hip->cached_blocks = 0;
65870                 hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
65871 -               mutex_lock(&fd.tree->tree_lock);
65872         }
65873         hfs_find_exit(&fd);
65875 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
65876 index 701c82c36138..c63d0a7f7ba4 100644
65877 --- a/fs/hugetlbfs/inode.c
65878 +++ b/fs/hugetlbfs/inode.c
65879 @@ -131,6 +131,7 @@ static void huge_pagevec_release(struct pagevec *pvec)
65880  static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
65882         struct inode *inode = file_inode(file);
65883 +       struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
65884         loff_t len, vma_len;
65885         int ret;
65886         struct hstate *h = hstate_file(file);
65887 @@ -146,6 +147,10 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
65888         vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
65889         vma->vm_ops = &hugetlb_vm_ops;
65891 +       ret = seal_check_future_write(info->seals, vma);
65892 +       if (ret)
65893 +               return ret;
65895         /*
65896          * page based offset in vm_pgoff could be sufficiently large to
65897          * overflow a loff_t when converted to byte offset.  This can
65898 @@ -527,7 +532,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
65899                          * the subpool and global reserve usage count can need
65900                          * to be adjusted.
65901                          */
65902 -                       VM_BUG_ON(PagePrivate(page));
65903 +                       VM_BUG_ON(HPageRestoreReserve(page));
65904                         remove_huge_page(page);
65905                         freed++;
65906                         if (!truncate_op) {
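
Aside, for illustration only: the hugetlbfs hunk above wires mmap() through the same seal_check_future_write() gate shmem already uses, so a hugetlb-backed memfd sealed with F_SEAL_FUTURE_WRITE can no longer be mapped shared-writable. A userspace probe of the seal semantics on an ordinary memfd (Linux 5.1+):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = memfd_create("demo", MFD_ALLOW_SEALING);

        if (fd < 0 || ftruncate(fd, 4096) < 0)
                return 1;
        if (fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE) < 0)
                return 1;

        /* sealed against future writes: this mmap should fail with EPERM */
        if (mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                 MAP_SHARED, fd, 0) == MAP_FAILED)
                perror("mmap (expected to fail)");
        close(fd);
        return 0;
}
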
65907 diff --git a/fs/inode.c b/fs/inode.c
65908 index a047ab306f9a..c5e1dd13fd40 100644
65909 --- a/fs/inode.c
65910 +++ b/fs/inode.c
65911 @@ -139,6 +139,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
65912         inode->i_blkbits = sb->s_blocksize_bits;
65913         inode->i_flags = 0;
65914         atomic64_set(&inode->i_sequence, 0);
65915 +       atomic64_set(&inode->i_sequence2, 0);
65916         atomic_set(&inode->i_count, 1);
65917         inode->i_op = &empty_iops;
65918         inode->i_fop = &no_open_fops;
65919 diff --git a/fs/io_uring.c b/fs/io_uring.c
65920 index dff34975d86b..144056b0cac9 100644
65921 --- a/fs/io_uring.c
65922 +++ b/fs/io_uring.c
65923 @@ -238,7 +238,7 @@ struct fixed_rsrc_data {
65924  struct io_buffer {
65925         struct list_head list;
65926         __u64 addr;
65927 -       __s32 len;
65928 +       __u32 len;
65929         __u16 bid;
65930  };
65932 @@ -614,7 +614,7 @@ struct io_splice {
65933  struct io_provide_buf {
65934         struct file                     *file;
65935         __u64                           addr;
65936 -       __s32                           len;
65937 +       __u32                           len;
65938         __u32                           bgid;
65939         __u16                           nbufs;
65940         __u16                           bid;
65941 @@ -1008,7 +1008,7 @@ static void io_uring_del_task_file(unsigned long index);
65942  static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
65943                                          struct task_struct *task,
65944                                          struct files_struct *files);
65945 -static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx);
65946 +static void io_uring_cancel_sqpoll(struct io_sq_data *sqd);
65947  static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node);
65948  static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
65949                         struct io_ring_ctx *ctx);
65950 @@ -3979,7 +3979,7 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
65951  static int io_provide_buffers_prep(struct io_kiocb *req,
65952                                    const struct io_uring_sqe *sqe)
65954 -       unsigned long size;
65955 +       unsigned long size, tmp_check;
65956         struct io_provide_buf *p = &req->pbuf;
65957         u64 tmp;
65959 @@ -3993,6 +3993,12 @@ static int io_provide_buffers_prep(struct io_kiocb *req,
65960         p->addr = READ_ONCE(sqe->addr);
65961         p->len = READ_ONCE(sqe->len);
65963 +       if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
65964 +                               &size))
65965 +               return -EOVERFLOW;
65966 +       if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
65967 +               return -EOVERFLOW;
65969         size = (unsigned long)p->len * p->nbufs;
65970         if (!access_ok(u64_to_user_ptr(p->addr), size))
65971                 return -EFAULT;
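
Aside, for illustration only: check_mul_overflow() and check_add_overflow() are thin wrappers over the compiler overflow builtins, and the new checks above reject a wrapping len * nbufs or addr + size before access_ok() ever sees the range. The same guards in userspace (GCC/Clang builtins):

#include <stdio.h>

/* reject len * nbufs and addr + size wrapping, like the hunk above */
static int validate(unsigned long addr, unsigned long len, unsigned long nbufs)
{
        unsigned long size, end;

        if (__builtin_mul_overflow(len, nbufs, &size))
                return -1;                      /* kernel: -EOVERFLOW */
        if (__builtin_add_overflow(addr, size, &end))
                return -1;                      /* kernel: -EOVERFLOW */
        return 0;
}

int main(void)
{
        printf("%d\n", validate(0x1000, 0x100, 16));          /* ok: 0 */
        printf("%d\n", validate(~0ul, 0x100, 16));            /* wraps: -1 */
        return 0;
}
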
65972 @@ -4017,7 +4023,7 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
65973                         break;
65975                 buf->addr = addr;
65976 -               buf->len = pbuf->len;
65977 +               buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
65978                 buf->bid = bid;
65979                 addr += pbuf->len;
65980                 bid++;
65981 @@ -6710,6 +6716,10 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
65982                 if (!list_empty(&ctx->iopoll_list))
65983                         io_do_iopoll(ctx, &nr_events, 0);
65985 +               /*
65986 +                * Don't submit if refs are dying, good for io_uring_register(),
65987 +                * but also it is relied upon by io_ring_exit_work()
65988 +                * but it is also relied upon by io_ring_exit_work()
65989                 if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
65990                     !(ctx->flags & IORING_SETUP_R_DISABLED))
65991                         ret = io_submit_sqes(ctx, to_submit);
65992 @@ -6832,15 +6842,14 @@ static int io_sq_thread(void *data)
65993                 timeout = jiffies + sqd->sq_thread_idle;
65994         }
65996 -       list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
65997 -               io_uring_cancel_sqpoll(ctx);
65998 +       io_uring_cancel_sqpoll(sqd);
65999         sqd->thread = NULL;
66000         list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
66001                 io_ring_set_wakeup_flag(ctx);
66002 -       mutex_unlock(&sqd->lock);
66004         io_run_task_work();
66005         io_run_task_work_head(&sqd->park_task_work);
66006 +       mutex_unlock(&sqd->lock);
66008         complete(&sqd->exited);
66009         do_exit(0);
66011 @@ -7200,8 +7209,6 @@ static void io_sq_thread_finish(struct io_ring_ctx *ctx)
66013                 io_put_sq_data(sqd);
66014                 ctx->sq_data = NULL;
66015 -               if (ctx->sq_creds)
66016 -                       put_cred(ctx->sq_creds);
66017         }
66020 @@ -8469,6 +8476,8 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
66021         mutex_unlock(&ctx->uring_lock);
66022         io_eventfd_unregister(ctx);
66023         io_destroy_buffers(ctx);
66024 +       if (ctx->sq_creds)
66025 +               put_cred(ctx->sq_creds);
66027  #if defined(CONFIG_UNIX)
66028         if (ctx->ring_sock) {
66029 @@ -8568,6 +8577,13 @@ static void io_tctx_exit_cb(struct callback_head *cb)
66030         complete(&work->completion);
66033 +static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
66035 +       struct io_kiocb *req = container_of(work, struct io_kiocb, work);
66037 +       return req->ctx == data;
66040  static void io_ring_exit_work(struct work_struct *work)
66042         struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
66043 @@ -8576,14 +8592,6 @@ static void io_ring_exit_work(struct work_struct *work)
66044         struct io_tctx_node *node;
66045         int ret;
66047 -       /* prevent SQPOLL from submitting new requests */
66048 -       if (ctx->sq_data) {
66049 -               io_sq_thread_park(ctx->sq_data);
66050 -               list_del_init(&ctx->sqd_list);
66051 -               io_sqd_update_thread_idle(ctx->sq_data);
66052 -               io_sq_thread_unpark(ctx->sq_data);
66053 -       }
66055         /*
66056          * If we're doing polled IO and end up having requests being
66057          * submitted async (out-of-line), then completions can come in while
66058 @@ -8592,6 +8600,17 @@ static void io_ring_exit_work(struct work_struct *work)
66059          */
66060         do {
66061                 io_uring_try_cancel_requests(ctx, NULL, NULL);
66062 +               if (ctx->sq_data) {
66063 +                       struct io_sq_data *sqd = ctx->sq_data;
66064 +                       struct task_struct *tsk;
66066 +                       io_sq_thread_park(sqd);
66067 +                       tsk = sqd->thread;
66068 +                       if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
66069 +                               io_wq_cancel_cb(tsk->io_uring->io_wq,
66070 +                                               io_cancel_ctx_cb, ctx, true);
66071 +                       io_sq_thread_unpark(sqd);
66072 +               }
66074                 WARN_ON_ONCE(time_after(jiffies, timeout));
66075         } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
66076 @@ -8736,13 +8755,6 @@ static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
66077         return true;
66080 -static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
66082 -       struct io_kiocb *req = container_of(work, struct io_kiocb, work);
66084 -       return req->ctx == data;
66087  static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
66089         struct io_tctx_node *node;
66090 @@ -8935,11 +8947,11 @@ static s64 tctx_inflight(struct io_uring_task *tctx)
66091  static void io_sqpoll_cancel_cb(struct callback_head *cb)
66093         struct io_tctx_exit *work = container_of(cb, struct io_tctx_exit, task_work);
66094 -       struct io_ring_ctx *ctx = work->ctx;
66095 -       struct io_sq_data *sqd = ctx->sq_data;
66096 +       struct io_sq_data *sqd = work->ctx->sq_data;
66098         if (sqd->thread)
66099 -               io_uring_cancel_sqpoll(ctx);
66100 +               io_uring_cancel_sqpoll(sqd);
66101 +       list_del_init(&work->ctx->sqd_list);
66102         complete(&work->completion);
66105 @@ -8950,7 +8962,6 @@ static void io_sqpoll_cancel_sync(struct io_ring_ctx *ctx)
66106         struct task_struct *task;
66108         io_sq_thread_park(sqd);
66109 -       list_del_init(&ctx->sqd_list);
66110         io_sqd_update_thread_idle(sqd);
66111         task = sqd->thread;
66112         if (task) {
66113 @@ -8958,6 +8969,8 @@ static void io_sqpoll_cancel_sync(struct io_ring_ctx *ctx)
66114                 init_task_work(&work.task_work, io_sqpoll_cancel_cb);
66115                 io_task_work_add_head(&sqd->park_task_work, &work.task_work);
66116                 wake_up_process(task);
66117 +       } else {
66118 +               list_del_init(&ctx->sqd_list);
66119         }
66120         io_sq_thread_unpark(sqd);
66122 @@ -8991,14 +9004,16 @@ void __io_uring_files_cancel(struct files_struct *files)
66125  /* should only be called by SQPOLL task */
66126 -static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
66127 +static void io_uring_cancel_sqpoll(struct io_sq_data *sqd)
66129 -       struct io_sq_data *sqd = ctx->sq_data;
66130         struct io_uring_task *tctx = current->io_uring;
66131 +       struct io_ring_ctx *ctx;
66132         s64 inflight;
66133         DEFINE_WAIT(wait);
66135 -       WARN_ON_ONCE(!sqd || ctx->sq_data->thread != current);
66136 +       if (!current->io_uring)
66137 +               return;
66138 +       WARN_ON_ONCE(!sqd || sqd->thread != current);
66140         atomic_inc(&tctx->in_idle);
66141         do {
66142 @@ -9006,7 +9021,8 @@ static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
66143                 inflight = tctx_inflight(tctx);
66144                 if (!inflight)
66145                         break;
66146 -               io_uring_try_cancel_requests(ctx, current, NULL);
66147 +               list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
66148 +                       io_uring_try_cancel_requests(ctx, current, NULL);
66150                 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
66151                 /*
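
The io_ring_exit_work() hunk above cancels io-wq work items that belong to the dying ring by matching them back to their owning context with io_cancel_ctx_cb(), which recovers the enclosing request from the embedded work member. A minimal kernel-style sketch of that container_of() matching pattern, using hypothetical types (struct request, struct work_item, cancel_match are illustrative, not the io_uring names):

    #include <linux/kernel.h>   /* container_of() */
    #include <linux/types.h>    /* bool */

    struct work_item {
            int payload;
    };

    struct request {
            void *owner;            /* hypothetical back-pointer to match on */
            struct work_item work;  /* embedded member handed to the queue */
    };

    /* Given only the embedded member, recover the enclosing request and
     * report whether it belongs to the context being cancelled. */
    static bool cancel_match(struct work_item *work, void *data)
    {
            struct request *req = container_of(work, struct request, work);

            return req->owner == data;
    }

A queue-wide cancel helper can then walk its pending items and call cancel_match(item, ctx) to pick out exactly the requests owned by one context, which is what io_wq_cancel_cb() does with io_cancel_ctx_cb above.
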
66152 diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
66153 index 69f18fe20923..d47a0d96bf30 100644
66154 --- a/fs/jbd2/recovery.c
66155 +++ b/fs/jbd2/recovery.c
66156 @@ -245,15 +245,14 @@ static int fc_do_one_pass(journal_t *journal,
66157                 return 0;
66159         while (next_fc_block <= journal->j_fc_last) {
66160 -               jbd_debug(3, "Fast commit replay: next block %ld",
66161 +               jbd_debug(3, "Fast commit replay: next block %ld\n",
66162                           next_fc_block);
66163                 err = jread(&bh, journal, next_fc_block);
66164                 if (err) {
66165 -                       jbd_debug(3, "Fast commit replay: read error");
66166 +                       jbd_debug(3, "Fast commit replay: read error\n");
66167                         break;
66168                 }
66170 -               jbd_debug(3, "Processing fast commit blk with seq %d");
66171                 err = journal->j_fc_replay_callback(journal, bh, pass,
66172                                         next_fc_block - journal->j_fc_first,
66173                                         expected_commit_id);
66174 diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
66175 index 9396666b7314..e8fc45fd751f 100644
66176 --- a/fs/jbd2/transaction.c
66177 +++ b/fs/jbd2/transaction.c
66178 @@ -349,7 +349,12 @@ static int start_this_handle(journal_t *journal, handle_t *handle,
66179         }
66181  alloc_transaction:
66182 -       if (!journal->j_running_transaction) {
66183 +       /*
66184 +        * This check is racy, but it is just an optimization to allocate a new
66185 +        * transaction early if chances are high that we'll need it. If we
66186 +        * guess wrong, we'll retry or free the unused transaction.
66187 +        */
66188 +       if (!data_race(journal->j_running_transaction)) {
66189                 /*
66190                  * If __GFP_FS is not present, then we may be being called from
66191                  * inside the fs writeback layer, so we MUST NOT fail.
66192 @@ -1474,8 +1479,8 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
66193          * crucial to catch bugs so let's do a reliable check until the
66194          * lockless handling is fully proven.
66195          */
66196 -       if (jh->b_transaction != transaction &&
66197 -           jh->b_next_transaction != transaction) {
66198 +       if (data_race(jh->b_transaction != transaction &&
66199 +           jh->b_next_transaction != transaction)) {
66200                 spin_lock(&jh->b_state_lock);
66201                 J_ASSERT_JH(jh, jh->b_transaction == transaction ||
66202                                 jh->b_next_transaction == transaction);
66203 @@ -1483,8 +1488,8 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
66204         }
66205         if (jh->b_modified == 1) {
66206                 /* If it's in our transaction it must be in BJ_Metadata list. */
66207 -               if (jh->b_transaction == transaction &&
66208 -                   jh->b_jlist != BJ_Metadata) {
66209 +               if (data_race(jh->b_transaction == transaction &&
66210 +                   jh->b_jlist != BJ_Metadata)) {
66211                         spin_lock(&jh->b_state_lock);
66212                         if (jh->b_transaction == transaction &&
66213                             jh->b_jlist != BJ_Metadata)
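
Both jbd2 hunks wrap intentionally lockless reads in data_race(), which tells KCSAN that the race is known and benign; the result is still re-validated under b_state_lock before anything acts on it. A minimal sketch of the peek-then-recheck pattern, with hypothetical names (struct counter, maybe_update):

    #include <linux/compiler.h>   /* data_race() */
    #include <linux/spinlock.h>

    struct counter {
            spinlock_t lock;
            int value;
    };

    static void maybe_update(struct counter *c, int expected)
    {
            /* Opportunistic lockless peek: may observe a stale value,
             * which is fine because we re-check under the lock. */
            if (data_race(c->value != expected)) {
                    spin_lock(&c->lock);
                    if (c->value != expected)       /* authoritative check */
                            c->value = expected;
                    spin_unlock(&c->lock);
            }
    }

The annotation changes no generated code; it only documents that the unlocked read is deliberate so the sanitizer stays quiet.
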
66214 diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
66215 index f8fb89b10227..4fc8cd698d1a 100644
66216 --- a/fs/jffs2/file.c
66217 +++ b/fs/jffs2/file.c
66218 @@ -57,6 +57,7 @@ const struct file_operations jffs2_file_operations =
66219         .mmap =         generic_file_readonly_mmap,
66220         .fsync =        jffs2_fsync,
66221         .splice_read =  generic_file_splice_read,
66222 +       .splice_write = iter_file_splice_write,
66223  };
66225  /* jffs2_file_inode_operations */
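
As of this kernel series the VFS no longer falls back to a default ->splice_write, so a filesystem that wants splice()/sendfile() to target its files must wire up iter_file_splice_write explicitly, which is all the jffs2 hunk does. A sketch of the wiring for a hypothetical filesystem (example_file_operations is illustrative):

    #include <linux/fs.h>

    static const struct file_operations example_file_operations = {
            .read_iter    = generic_file_read_iter,
            .write_iter   = generic_file_write_iter,
            .splice_read  = generic_file_splice_read,
            /* without this, splicing into the file fails outright */
            .splice_write = iter_file_splice_write,
    };
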
66226 diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
66227 index db72a9d2d0af..b676056826be 100644
66228 --- a/fs/jffs2/scan.c
66229 +++ b/fs/jffs2/scan.c
66230 @@ -1079,7 +1079,7 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
66231         memcpy(&fd->name, rd->name, checkedlen);
66232         fd->name[checkedlen] = 0;
66234 -       crc = crc32(0, fd->name, rd->nsize);
66235 +       crc = crc32(0, fd->name, checkedlen);
66236         if (crc != je32_to_cpu(rd->name_crc)) {
66237                 pr_notice("%s(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
66238                           __func__, ofs, je32_to_cpu(rd->name_crc), crc);
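
The scan.c fix computes the name CRC over checkedlen, the length that was actually validated and copied, instead of the on-disk rd->nsize, so a corrupt length field can no longer drive crc32() past the copied buffer. The shape of the check, as a sketch (name_crc_ok is a hypothetical helper):

    #include <linux/crc32.h>
    #include <linux/types.h>

    static bool name_crc_ok(const u8 *name, u32 checkedlen, u32 expected_crc)
    {
            /* Hash only the bytes that were validated and copied;
             * never trust an unvalidated on-disk length. */
            return crc32(0, name, checkedlen) == expected_crc;
    }
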
66239 diff --git a/fs/namespace.c b/fs/namespace.c
66240 index 56bb5a5fdc0d..4d2e827ddb59 100644
66241 --- a/fs/namespace.c
66242 +++ b/fs/namespace.c
66243 @@ -3853,8 +3853,12 @@ static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
66244         if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP))
66245                 return -EINVAL;
66247 +       /* Don't yet support filesystems mountable in user namespaces. */
66248 +       if (m->mnt_sb->s_user_ns != &init_user_ns)
66249 +               return -EINVAL;
66251         /* We're not controlling the superblock. */
66252 -       if (!ns_capable(m->mnt_sb->s_user_ns, CAP_SYS_ADMIN))
66253 +       if (!capable(CAP_SYS_ADMIN))
66254                 return -EPERM;
66256         /* Mount has already been visible in the filesystem hierarchy. */
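
The namespace.c hunk tightens idmapped-mount permission checking: instead of requiring CAP_SYS_ADMIN in the superblock's user namespace (which the mounter may itself own), it requires the capability in the initial namespace, and for now refuses superblocks owned by any other namespace. The difference between the two checks, in a sketch (may_admin is hypothetical):

    #include <linux/capability.h>
    #include <linux/user_namespace.h>

    static int may_admin(struct user_namespace *sb_userns)
    {
            /* Only init_user_ns-owned superblocks are accepted for now. */
            if (sb_userns != &init_user_ns)
                    return -EINVAL;

            /* capable() checks CAP_SYS_ADMIN against init_user_ns, so a
             * user who merely owns some user namespace cannot pass it;
             * ns_capable(sb_userns, CAP_SYS_ADMIN) would have let the
             * namespace owner through. */
            if (!capable(CAP_SYS_ADMIN))
                    return -EPERM;
            return 0;
    }
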
66257 diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
66258 index f7786e00a6a7..ed9d580826f5 100644
66259 --- a/fs/nfs/callback_proc.c
66260 +++ b/fs/nfs/callback_proc.c
66261 @@ -137,12 +137,12 @@ static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
66262                 list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
66263                         if (!pnfs_layout_is_valid(lo))
66264                                 continue;
66265 -                       if (stateid != NULL &&
66266 -                           !nfs4_stateid_match_other(stateid, &lo->plh_stateid))
66267 +                       if (!nfs4_stateid_match_other(stateid, &lo->plh_stateid))
66268                                 continue;
66269 -                       if (!nfs_sb_active(server->super))
66270 -                               continue;
66271 -                       inode = igrab(lo->plh_inode);
66272 +                       if (nfs_sb_active(server->super))
66273 +                               inode = igrab(lo->plh_inode);
66274 +                       else
66275 +                               inode = ERR_PTR(-EAGAIN);
66276                         rcu_read_unlock();
66277                         if (inode)
66278                                 return inode;
66279 @@ -176,9 +176,10 @@ static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
66280                                 continue;
66281                         if (nfsi->layout != lo)
66282                                 continue;
66283 -                       if (!nfs_sb_active(server->super))
66284 -                               continue;
66285 -                       inode = igrab(lo->plh_inode);
66286 +                       if (nfs_sb_active(server->super))
66287 +                               inode = igrab(lo->plh_inode);
66288 +                       else
66289 +                               inode = ERR_PTR(-EAGAIN);
66290                         rcu_read_unlock();
66291                         if (inode)
66292                                 return inode;
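
Previously an inactive superblock made both lookups continue and eventually return NULL, which reads as "no such inode"; the fix returns ERR_PTR(-EAGAIN) so the caller can tell "definitively absent" apart from "present, but retry". A sketch of folding three outcomes into one pointer (find_inode here is hypothetical):

    #include <linux/err.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    struct inode;   /* opaque for the sketch */

    static struct inode *find_inode(bool found, bool usable,
                                    struct inode *candidate)
    {
            if (!found)
                    return NULL;                 /* definitively absent */
            if (!usable)
                    return ERR_PTR(-EAGAIN);     /* present, retry later */
            return candidate;                    /* success */
    }

Callers decode the result with IS_ERR()/PTR_ERR() before dereferencing, so the error case can never be mistaken for a valid inode.
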
66293 diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
66294 index fc4f490f2d78..0cd7c59a6601 100644
66295 --- a/fs/nfs/dir.c
66296 +++ b/fs/nfs/dir.c
66297 @@ -866,6 +866,8 @@ static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc,
66298                         break;
66299                 }
66301 +               verf_arg = verf_res;
66303                 status = nfs_readdir_page_filler(desc, entry, pages, pglen,
66304                                                  arrays, narrays);
66305         } while (!status && nfs_readdir_page_needs_filling(page));
66306 @@ -927,7 +929,12 @@ static int find_and_lock_cache_page(struct nfs_readdir_descriptor *desc)
66307                         }
66308                         return res;
66309                 }
66310 -               memcpy(nfsi->cookieverf, verf, sizeof(nfsi->cookieverf));
66311 +               /*
66312 +                * Set the cookie verifier if the page cache was empty
66313 +                */
66314 +               if (desc->page_index == 0)
66315 +                       memcpy(nfsi->cookieverf, verf,
66316 +                              sizeof(nfsi->cookieverf));
66317         }
66318         res = nfs_readdir_search_array(desc);
66319         if (res == 0) {
66320 @@ -974,10 +981,10 @@ static int readdir_search_pagecache(struct nfs_readdir_descriptor *desc)
66321  /*
66322   * Once we've found the start of the dirent within a page: fill 'er up...
66323   */
66324 -static void nfs_do_filldir(struct nfs_readdir_descriptor *desc)
66325 +static void nfs_do_filldir(struct nfs_readdir_descriptor *desc,
66326 +                          const __be32 *verf)
66328         struct file     *file = desc->file;
66329 -       struct nfs_inode *nfsi = NFS_I(file_inode(file));
66330         struct nfs_cache_array *array;
66331         unsigned int i = 0;
66333 @@ -991,7 +998,7 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc)
66334                         desc->eof = true;
66335                         break;
66336                 }
66337 -               memcpy(desc->verf, nfsi->cookieverf, sizeof(desc->verf));
66338 +               memcpy(desc->verf, verf, sizeof(desc->verf));
66339                 if (i < (array->size-1))
66340                         desc->dir_cookie = array->array[i+1].cookie;
66341                 else
66342 @@ -1048,7 +1055,7 @@ static int uncached_readdir(struct nfs_readdir_descriptor *desc)
66344         for (i = 0; !desc->eof && i < sz && arrays[i]; i++) {
66345                 desc->page = arrays[i];
66346 -               nfs_do_filldir(desc);
66347 +               nfs_do_filldir(desc, verf);
66348         }
66349         desc->page = NULL;
66351 @@ -1069,6 +1076,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
66353         struct dentry   *dentry = file_dentry(file);
66354         struct inode    *inode = d_inode(dentry);
66355 +       struct nfs_inode *nfsi = NFS_I(inode);
66356         struct nfs_open_dir_context *dir_ctx = file->private_data;
66357         struct nfs_readdir_descriptor *desc;
66358         int res;
66359 @@ -1122,7 +1130,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
66360                         break;
66361                 }
66362                 if (res == -ETOOSMALL && desc->plus) {
66363 -                       clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
66364 +                       clear_bit(NFS_INO_ADVISE_RDPLUS, &nfsi->flags);
66365                         nfs_zap_caches(inode);
66366                         desc->page_index = 0;
66367                         desc->plus = false;
66368 @@ -1132,7 +1140,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
66369                 if (res < 0)
66370                         break;
66372 -               nfs_do_filldir(desc);
66373 +               nfs_do_filldir(desc, nfsi->cookieverf);
66374                 nfs_readdir_page_unlock_and_put_cached(desc);
66375         } while (!desc->eof);
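
Two related changes run through the nfs/dir.c hunks: find_and_lock_cache_page() now refreshes the per-inode cookie verifier only when filling page 0 (an empty cache), so a mid-directory refill cannot clobber the verifier the earlier pages were read under, and nfs_do_filldir() receives the verifier from its caller instead of re-reading shared inode state. The page-0 guard reduces to the following sketch (struct dir_cache and maybe_set_verf are hypothetical):

    #include <linux/string.h>
    #include <linux/types.h>

    struct dir_cache {
            __be32 verf[2];   /* assumption: two-word verifier */
    };

    static void maybe_set_verf(struct dir_cache *dc, unsigned long page_index,
                               const __be32 *new_verf)
    {
            /* Only the first page establishes the verifier; later pages
             * must not overwrite what earlier ones were read under. */
            if (page_index == 0)
                    memcpy(dc->verf, new_verf, sizeof(dc->verf));
    }
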
66377 diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
66378 index 872112bffcab..d383de00d486 100644
66379 --- a/fs/nfs/flexfilelayout/flexfilelayout.c
66380 +++ b/fs/nfs/flexfilelayout/flexfilelayout.c
66381 @@ -106,7 +106,7 @@ static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
66382         if (unlikely(!p))
66383                 return -ENOBUFS;
66384         fh->size = be32_to_cpup(p++);
66385 -       if (fh->size > sizeof(struct nfs_fh)) {
66386 +       if (fh->size > NFS_MAXFHSIZE) {
66387                 printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
66388                        fh->size);
66389                 return -EOVERFLOW;
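
sizeof(struct nfs_fh) counts the size field (and any padding) as well as the data array, so the old comparison let a filehandle a few bytes too large slip through; bounding by NFS_MAXFHSIZE, the capacity of the payload array, is the correct check. The general shape, with a hypothetical handle type (struct fhandle, MAX_FH, fh_copy are illustrative):

    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/types.h>

    #define MAX_FH 128                     /* assumed payload capacity */

    struct fhandle {
            unsigned short size;
            unsigned char data[MAX_FH];
    };

    static int fh_copy(struct fhandle *fh, const void *src, size_t len)
    {
            /* Bound by the array capacity, not sizeof(struct fhandle),
             * which also counts the 'size' field itself. */
            if (len > sizeof(fh->data))
                    return -EOVERFLOW;
            fh->size = len;
            memcpy(fh->data, src, len);
            return 0;
    }
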
66390 diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
66391 index 971a9251c1d9..902db1262d2b 100644
66392 --- a/fs/nfs/fs_context.c
66393 +++ b/fs/nfs/fs_context.c
66394 @@ -973,6 +973,15 @@ static int nfs23_parse_monolithic(struct fs_context *fc,
66395                         memset(mntfh->data + mntfh->size, 0,
66396                                sizeof(mntfh->data) - mntfh->size);
66398 +               /*
66399 +                * for proto == XPRT_TRANSPORT_UDP, which is what uses
66400 +                * to_exponential, implying shift: limit the shift value
66401 +                * to BITS_PER_LONG (majortimeo is unsigned long)
66402 +                */
66403 +               if (!(data->flags & NFS_MOUNT_TCP)) /* this will be UDP */
66404 +                       if (data->retrans >= 64) /* shift value is too large */
66405 +                               goto out_invalid_data;
66407                 /*
66408                  * Translate to nfs_fs_context, which nfs_fill_super
66409                  * can deal with.
66410 @@ -1073,6 +1082,9 @@ static int nfs23_parse_monolithic(struct fs_context *fc,
66412  out_invalid_fh:
66413         return nfs_invalf(fc, "NFS: invalid root filehandle");
66415 +out_invalid_data:
66416 +       return nfs_invalf(fc, "NFS: invalid binary mount data");
66419  #if IS_ENABLED(CONFIG_NFS_V4)
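
For UDP mounts the retransmit count ends up as a shift applied to the unsigned long majortimeo, and shifting by BITS_PER_LONG or more is undefined behaviour in C, so the hunk rejects retrans values of 64 and up while parsing the binary mount data. The guard, sketched (check_retrans_shift is a hypothetical helper):

    #include <linux/bits.h>      /* BITS_PER_LONG */
    #include <linux/errno.h>

    static int check_retrans_shift(unsigned int retrans)
    {
            /* 'timeo << retrans' with retrans >= BITS_PER_LONG is UB. */
            if (retrans >= BITS_PER_LONG)
                    return -EINVAL;
            return 0;
    }
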
66420 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
66421 index a7fb076a5f44..ae8bc84e39fb 100644
66422 --- a/fs/nfs/inode.c
66423 +++ b/fs/nfs/inode.c
66424 @@ -219,15 +219,16 @@ void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
66425                                 | NFS_INO_INVALID_SIZE
66426                                 | NFS_INO_REVAL_PAGECACHE
66427                                 | NFS_INO_INVALID_XATTR);
66428 -       }
66429 +       } else if (flags & NFS_INO_REVAL_PAGECACHE)
66430 +               flags |= NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE;
66432         if (!nfs_has_xattr_cache(nfsi))
66433                 flags &= ~NFS_INO_INVALID_XATTR;
66434 +       if (flags & NFS_INO_INVALID_DATA)
66435 +               nfs_fscache_invalidate(inode);
66436         if (inode->i_mapping->nrpages == 0)
66437                 flags &= ~(NFS_INO_INVALID_DATA|NFS_INO_DATA_INVAL_DEFER);
66438         nfsi->cache_validity |= flags;
66439 -       if (flags & NFS_INO_INVALID_DATA)
66440 -               nfs_fscache_invalidate(inode);
66442  EXPORT_SYMBOL_GPL(nfs_set_cache_invalid);
66444 @@ -1662,10 +1663,10 @@ EXPORT_SYMBOL_GPL(_nfs_display_fhandle);
66445   */
66446  static int nfs_inode_attrs_need_update(const struct inode *inode, const struct nfs_fattr *fattr)
66448 -       const struct nfs_inode *nfsi = NFS_I(inode);
66449 +       unsigned long attr_gencount = NFS_I(inode)->attr_gencount;
66451 -       return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 ||
66452 -               ((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0);
66453 +       return (long)(fattr->gencount - attr_gencount) > 0 ||
66454 +              (long)(attr_gencount - nfs_read_attr_generation_counter()) > 0;
66457  static int nfs_refresh_inode_locked(struct inode *inode, struct nfs_fattr *fattr)
66458 @@ -2094,7 +2095,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
66459                         nfsi->attrtimeo_timestamp = now;
66460                 }
66461                 /* Set the barrier to be more recent than this fattr */
66462 -               if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0)
66463 +               if ((long)(fattr->gencount - nfsi->attr_gencount) > 0)
66464                         nfsi->attr_gencount = fattr->gencount;
66465         }
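
Both inode.c hunks switch from "(long)a - (long)b > 0" to "(long)(a - b) > 0". The former converts each counter to signed before subtracting, which overflows (undefined behaviour) once the counters sit on opposite sides of LONG_MAX; the latter subtracts in unsigned arithmetic, where wraparound is well defined, and only then interprets the distance as signed, the same idiom jiffies' time_after() uses. As a sketch (gen_after is a hypothetical name):

    /* Wrap-safe 'a is newer than b' for free-running generation counters. */
    static inline bool gen_after(unsigned long a, unsigned long b)
    {
            /* Unsigned subtraction wraps cleanly; the cast then reads
             * the result as a signed distance between the generations. */
            return (long)(a - b) > 0;
    }
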
66467 diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
66468 index 094024b0aca1..3875120ef3ef 100644
66469 --- a/fs/nfs/nfs42proc.c
66470 +++ b/fs/nfs/nfs42proc.c
66471 @@ -46,11 +46,12 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
66473         struct inode *inode = file_inode(filep);
66474         struct nfs_server *server = NFS_SERVER(inode);
66475 +       u32 bitmask[3];
66476         struct nfs42_falloc_args args = {
66477                 .falloc_fh      = NFS_FH(inode),
66478                 .falloc_offset  = offset,
66479                 .falloc_length  = len,
66480 -               .falloc_bitmask = nfs4_fattr_bitmap,
66481 +               .falloc_bitmask = bitmask,
66482         };
66483         struct nfs42_falloc_res res = {
66484                 .falloc_server  = server,
66485 @@ -68,6 +69,10 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
66486                 return status;
66487         }
66489 +       memcpy(bitmask, server->cache_consistency_bitmask, sizeof(bitmask));
66490 +       if (server->attr_bitmask[1] & FATTR4_WORD1_SPACE_USED)
66491 +               bitmask[1] |= FATTR4_WORD1_SPACE_USED;
66493         res.falloc_fattr = nfs_alloc_fattr();
66494         if (!res.falloc_fattr)
66495                 return -ENOMEM;
66496 @@ -75,7 +80,8 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
66497         status = nfs4_call_sync(server->client, server, msg,
66498                                 &args.seq_args, &res.seq_res, 0);
66499         if (status == 0)
66500 -               status = nfs_post_op_update_inode(inode, res.falloc_fattr);
66501 +               status = nfs_post_op_update_inode_force_wcc(inode,
66502 +                                                           res.falloc_fattr);
66504         kfree(res.falloc_fattr);
66505         return status;
66506 @@ -84,7 +90,8 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
66507  static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
66508                                 loff_t offset, loff_t len)
66510 -       struct nfs_server *server = NFS_SERVER(file_inode(filep));
66511 +       struct inode *inode = file_inode(filep);
66512 +       struct nfs_server *server = NFS_SERVER(inode);
66513         struct nfs4_exception exception = { };
66514         struct nfs_lock_context *lock;
66515         int err;
66516 @@ -93,9 +100,13 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
66517         if (IS_ERR(lock))
66518                 return PTR_ERR(lock);
66520 -       exception.inode = file_inode(filep);
66521 +       exception.inode = inode;
66522         exception.state = lock->open_context->state;
66524 +       err = nfs_sync_inode(inode);
66525 +       if (err)
66526 +               goto out;
66528         do {
66529                 err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
66530                 if (err == -ENOTSUPP) {
66531 @@ -104,7 +115,7 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
66532                 }
66533                 err = nfs4_handle_exception(server, err, &exception);
66534         } while (exception.retry);
66536 +out:
66537         nfs_put_lock_context(lock);
66538         return err;
66540 @@ -142,16 +153,13 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
66541                 return -EOPNOTSUPP;
66543         inode_lock(inode);
66544 -       err = nfs_sync_inode(inode);
66545 -       if (err)
66546 -               goto out_unlock;
66548         err = nfs42_proc_fallocate(&msg, filep, offset, len);
66549         if (err == 0)
66550                 truncate_pagecache_range(inode, offset, (offset + len) -1);
66551         if (err == -EOPNOTSUPP)
66552                 NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;
66553 -out_unlock:
66555         inode_unlock(inode);
66556         return err;
66558 @@ -261,6 +269,33 @@ static int process_copy_commit(struct file *dst, loff_t pos_dst,
66559         return status;
66562 +/**
66563 + * nfs42_copy_dest_done - perform inode cache updates after clone/copy offload
66564 + * @inode: pointer to destination inode
66565 + * @pos: destination offset
66566 + * @len: copy length
66567 + *
66568 + * Punch a hole in the inode page cache, so that the NFS client will
66569 + * know to retrieve new data.
66570 + * Update the file size if necessary, and then mark the inode as having
66571 + * invalid cached values for change attribute, ctime, mtime and space used.
66572 + */
66573 +static void nfs42_copy_dest_done(struct inode *inode, loff_t pos, loff_t len)
66575 +       loff_t newsize = pos + len;
66576 +       loff_t end = newsize - 1;
66578 +       truncate_pagecache_range(inode, pos, end);
66579 +       spin_lock(&inode->i_lock);
66580 +       if (newsize > i_size_read(inode))
66581 +               i_size_write(inode, newsize);
66582 +       nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
66583 +                                            NFS_INO_INVALID_CTIME |
66584 +                                            NFS_INO_INVALID_MTIME |
66585 +                                            NFS_INO_INVALID_BLOCKS);
66586 +       spin_unlock(&inode->i_lock);
66589  static ssize_t _nfs42_proc_copy(struct file *src,
66590                                 struct nfs_lock_context *src_lock,
66591                                 struct file *dst,
66592 @@ -354,14 +389,8 @@ static ssize_t _nfs42_proc_copy(struct file *src,
66593                         goto out;
66594         }
66596 -       truncate_pagecache_range(dst_inode, pos_dst,
66597 -                                pos_dst + res->write_res.count);
66598 -       spin_lock(&dst_inode->i_lock);
66599 -       nfs_set_cache_invalid(
66600 -               dst_inode, NFS_INO_REVAL_PAGECACHE | NFS_INO_REVAL_FORCED |
66601 -                                  NFS_INO_INVALID_SIZE | NFS_INO_INVALID_ATTR |
66602 -                                  NFS_INO_INVALID_DATA);
66603 -       spin_unlock(&dst_inode->i_lock);
66604 +       nfs42_copy_dest_done(dst_inode, pos_dst, res->write_res.count);
66606         spin_lock(&src_inode->i_lock);
66607         nfs_set_cache_invalid(src_inode, NFS_INO_REVAL_PAGECACHE |
66608                                                  NFS_INO_REVAL_FORCED |
66609 @@ -659,7 +688,10 @@ static loff_t _nfs42_proc_llseek(struct file *filep,
66610         if (status)
66611                 return status;
66613 -       return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
66614 +       if (whence == SEEK_DATA && res.sr_eof)
66615 +               return -NFS4ERR_NXIO;
66616 +       else
66617 +               return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
66620  loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
66621 @@ -1044,8 +1076,10 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
66623         status = nfs4_call_sync(server->client, server, msg,
66624                                 &args.seq_args, &res.seq_res, 0);
66625 -       if (status == 0)
66626 +       if (status == 0) {
66627 +               nfs42_copy_dest_done(dst_inode, dst_offset, count);
66628                 status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);
66629 +       }
66631         kfree(res.dst_fattr);
66632         return status;
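
The new nfs42_copy_dest_done() helper punches the copied range out of the page cache and, under i_lock, grows i_size when a server-side copy or clone extended the file. The size-update idiom it relies on, sketched in isolation (grow_isize is hypothetical):

    #include <linux/fs.h>

    static void grow_isize(struct inode *inode, loff_t pos, loff_t len)
    {
            loff_t newsize = pos + len;

            spin_lock(&inode->i_lock);
            /* Never shrink: another writer may already have gone further. */
            if (newsize > i_size_read(inode))
                    i_size_write(inode, newsize);
            spin_unlock(&inode->i_lock);
    }
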
66633 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
66634 index c65c4b41e2c1..820abae88cf0 100644
66635 --- a/fs/nfs/nfs4proc.c
66636 +++ b/fs/nfs/nfs4proc.c
66637 @@ -108,9 +108,10 @@ static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
66638  static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
66639                 const struct cred *, bool);
66640  #endif
66641 -static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode,
66642 -               struct nfs_server *server,
66643 -               struct nfs4_label *label);
66644 +static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ],
66645 +                            const __u32 *src, struct inode *inode,
66646 +                            struct nfs_server *server,
66647 +                            struct nfs4_label *label);
66649  #ifdef CONFIG_NFS_V4_SECURITY_LABEL
66650  static inline struct nfs4_label *
66651 @@ -3591,6 +3592,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
66652         struct nfs4_closedata *calldata = data;
66653         struct nfs4_state *state = calldata->state;
66654         struct inode *inode = calldata->inode;
66655 +       struct nfs_server *server = NFS_SERVER(inode);
66656         struct pnfs_layout_hdr *lo;
66657         bool is_rdonly, is_wronly, is_rdwr;
66658         int call_close = 0;
66659 @@ -3647,8 +3649,10 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
66660         if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) {
66661                 /* Close-to-open cache consistency revalidation */
66662                 if (!nfs4_have_delegation(inode, FMODE_READ)) {
66663 -                       calldata->arg.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
66664 -                       nfs4_bitmask_adjust(calldata->arg.bitmask, inode, NFS_SERVER(inode), NULL);
66665 +                       nfs4_bitmask_set(calldata->arg.bitmask_store,
66666 +                                        server->cache_consistency_bitmask,
66667 +                                        inode, server, NULL);
66668 +                       calldata->arg.bitmask = calldata->arg.bitmask_store;
66669                 } else
66670                         calldata->arg.bitmask = NULL;
66671         }
66672 @@ -5416,19 +5420,17 @@ bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
66673         return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
66676 -static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode,
66677 -                               struct nfs_server *server,
66678 -                               struct nfs4_label *label)
66679 +static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ], const __u32 *src,
66680 +                            struct inode *inode, struct nfs_server *server,
66681 +                            struct nfs4_label *label)
66684         unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
66685 +       unsigned int i;
66687 -       if ((cache_validity & NFS_INO_INVALID_DATA) ||
66688 -               (cache_validity & NFS_INO_REVAL_PAGECACHE) ||
66689 -               (cache_validity & NFS_INO_REVAL_FORCED) ||
66690 -               (cache_validity & NFS_INO_INVALID_OTHER))
66691 -               nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, label), inode);
66692 +       memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ);
66694 +       if (cache_validity & (NFS_INO_INVALID_CHANGE | NFS_INO_REVAL_PAGECACHE))
66695 +               bitmask[0] |= FATTR4_WORD0_CHANGE;
66696         if (cache_validity & NFS_INO_INVALID_ATIME)
66697                 bitmask[1] |= FATTR4_WORD1_TIME_ACCESS;
66698         if (cache_validity & NFS_INO_INVALID_OTHER)
66699 @@ -5437,16 +5439,22 @@ static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode,
66700                                 FATTR4_WORD1_NUMLINKS;
66701         if (label && label->len && cache_validity & NFS_INO_INVALID_LABEL)
66702                 bitmask[2] |= FATTR4_WORD2_SECURITY_LABEL;
66703 -       if (cache_validity & NFS_INO_INVALID_CHANGE)
66704 -               bitmask[0] |= FATTR4_WORD0_CHANGE;
66705         if (cache_validity & NFS_INO_INVALID_CTIME)
66706                 bitmask[1] |= FATTR4_WORD1_TIME_METADATA;
66707         if (cache_validity & NFS_INO_INVALID_MTIME)
66708                 bitmask[1] |= FATTR4_WORD1_TIME_MODIFY;
66709 -       if (cache_validity & NFS_INO_INVALID_SIZE)
66710 -               bitmask[0] |= FATTR4_WORD0_SIZE;
66711         if (cache_validity & NFS_INO_INVALID_BLOCKS)
66712                 bitmask[1] |= FATTR4_WORD1_SPACE_USED;
66714 +       if (nfs4_have_delegation(inode, FMODE_READ) &&
66715 +           !(cache_validity & NFS_INO_REVAL_FORCED))
66716 +               bitmask[0] &= ~FATTR4_WORD0_SIZE;
66717 +       else if (cache_validity &
66718 +                (NFS_INO_INVALID_SIZE | NFS_INO_REVAL_PAGECACHE))
66719 +               bitmask[0] |= FATTR4_WORD0_SIZE;
66721 +       for (i = 0; i < NFS4_BITMASK_SZ; i++)
66722 +               bitmask[i] &= server->attr_bitmask[i];
66725  static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
66726 @@ -5459,8 +5467,10 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
66727                 hdr->args.bitmask = NULL;
66728                 hdr->res.fattr = NULL;
66729         } else {
66730 -               hdr->args.bitmask = server->cache_consistency_bitmask;
66731 -               nfs4_bitmask_adjust(hdr->args.bitmask, hdr->inode, server, NULL);
66732 +               nfs4_bitmask_set(hdr->args.bitmask_store,
66733 +                                server->cache_consistency_bitmask,
66734 +                                hdr->inode, server, NULL);
66735 +               hdr->args.bitmask = hdr->args.bitmask_store;
66736         }
66738         if (!hdr->pgio_done_cb)
66739 @@ -6502,8 +6512,10 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
66741         data->args.fhandle = &data->fh;
66742         data->args.stateid = &data->stateid;
66743 -       data->args.bitmask = server->cache_consistency_bitmask;
66744 -       nfs4_bitmask_adjust(data->args.bitmask, inode, server, NULL);
66745 +       nfs4_bitmask_set(data->args.bitmask_store,
66746 +                        server->cache_consistency_bitmask, inode, server,
66747 +                        NULL);
66748 +       data->args.bitmask = data->args.bitmask_store;
66749         nfs_copy_fh(&data->fh, NFS_FH(inode));
66750         nfs4_stateid_copy(&data->stateid, stateid);
66751         data->res.fattr = &data->fattr;
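
The nfs4proc.c hunks all fix the same class of bug: nfs4_bitmask_adjust() edited the bitmask it was handed in place, and that pointer was the server-wide cache_consistency_bitmask shared by every request. nfs4_bitmask_set() instead copies the template into per-request storage (the new bitmask_store arrays) before adjusting it, then clamps the result to what the server supports. The copy-then-adjust shape, sketched (mask_set and MASK_SZ are illustrative; the diff's own size is NFS4_BITMASK_SZ):

    #include <linux/string.h>
    #include <linux/types.h>

    #define MASK_SZ 3   /* assumption: three bitmask words, as in NFSv4 */

    static void mask_set(u32 dst[MASK_SZ], const u32 *template_mask,
                         u32 extra, const u32 *server_caps)
    {
            unsigned int i;

            /* Work on a private copy; never scribble on the shared template. */
            memcpy(dst, template_mask, sizeof(*dst) * MASK_SZ);
            dst[0] |= extra;

            /* Finally clamp to what the server actually supports. */
            for (i = 0; i < MASK_SZ; i++)
                    dst[i] &= server_caps[i];
    }
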
66752 diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
66753 index 102b66e0bdef..f726f8b12b7e 100644
66754 --- a/fs/nfs/pnfs.c
66755 +++ b/fs/nfs/pnfs.c
66756 @@ -1344,7 +1344,7 @@ _pnfs_return_layout(struct inode *ino)
66757         }
66758         valid_layout = pnfs_layout_is_valid(lo);
66759         pnfs_clear_layoutcommit(ino, &tmp_list);
66760 -       pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 0);
66761 +       pnfs_mark_matching_lsegs_return(lo, &tmp_list, NULL, 0);
66763         if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
66764                 struct pnfs_layout_range range = {
66765 @@ -2468,6 +2468,9 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
66767         assert_spin_locked(&lo->plh_inode->i_lock);
66769 +       if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
66770 +               tmp_list = &lo->plh_return_segs;
66772         list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
66773                 if (pnfs_match_lseg_recall(lseg, return_range, seq)) {
66774                         dprintk("%s: marking lseg %p iomode %d "
66775 @@ -2475,6 +2478,8 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
66776                                 lseg, lseg->pls_range.iomode,
66777                                 lseg->pls_range.offset,
66778                                 lseg->pls_range.length);
66779 +                       if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
66780 +                               tmp_list = &lo->plh_return_segs;
66781                         if (mark_lseg_invalid(lseg, tmp_list))
66782                                 continue;
66783                         remaining++;
66784 diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
66785 index dd9f38d072dd..e13c4c81fb89 100644
66786 --- a/fs/nfsd/nfs4proc.c
66787 +++ b/fs/nfsd/nfs4proc.c
66788 @@ -1538,8 +1538,8 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
66789                 if (!nfs4_init_copy_state(nn, copy))
66790                         goto out_err;
66791                 refcount_set(&async_copy->refcount, 1);
66792 -               memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid,
66793 -                       sizeof(copy->cp_stateid));
66794 +               memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid.stid,
66795 +                       sizeof(copy->cp_res.cb_stateid));
66796                 dup_copy_fields(copy, async_copy);
66797                 async_copy->copy_task = kthread_create(nfsd4_do_async_copy,
66798                                 async_copy, "%s", "copy thread");
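
The nfsd4_copy fix suggests the source field outgrew the destination: cp_stateid is now a larger structure whose .stid member holds the stateid proper, so copying sizeof(copy->cp_stateid) into cb_stateid would overrun it. Sizing the memcpy by the destination, and naming the embedded source member explicitly, stays correct even if the source type grows again. A general sketch of the convention (struct small, struct big, copy_inner are illustrative):

    #include <linux/string.h>

    struct small { int a, b; };
    struct big   { struct small inner; int extra; };  /* source grew */

    static void copy_inner(struct small *dst, const struct big *src)
    {
            /* Size by the destination: immune to the source growing. */
            memcpy(dst, &src->inner, sizeof(*dst));
    }
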
66799 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
66800 index 97447a64bad0..886e50ed07c2 100644
66801 --- a/fs/nfsd/nfs4state.c
66802 +++ b/fs/nfsd/nfs4state.c
66803 @@ -4869,6 +4869,11 @@ static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
66804         if (nf)
66805                 nfsd_file_put(nf);
66807 +       status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode,
66808 +                                                               access));
66809 +       if (status)
66810 +               goto out_put_access;
66812         status = nfsd4_truncate(rqstp, cur_fh, open);
66813         if (status)
66814                 goto out_put_access;
66815 @@ -6849,11 +6854,20 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
66816  static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
66818         struct nfsd_file *nf;
66819 -       __be32 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
66820 -       if (!err) {
66821 -               err = nfserrno(vfs_test_lock(nf->nf_file, lock));
66822 -               nfsd_file_put(nf);
66823 -       }
66824 +       __be32 err;
66826 +       err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
66827 +       if (err)
66828 +               return err;
66829 +       fh_lock(fhp); /* to block new leases till after test_lock: */
66830 +       err = nfserrno(nfsd_open_break_lease(fhp->fh_dentry->d_inode,
66831 +                                                       NFSD_MAY_READ));
66832 +       if (err)
66833 +               goto out;
66834 +       err = nfserrno(vfs_test_lock(nf->nf_file, lock));
66835 +out:
66836 +       fh_unlock(fhp);
66837 +       nfsd_file_put(nf);
66838         return err;
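
The nfsd_test_lock() rewrite takes fh_lock() to fence off new leases, breaks any existing lease, and only then tests the lock, unwinding through a single out: label so the lock and file reference are dropped on every path. That single-exit unwind convention, sketched generically (guarded_op and its function-pointer parameters are hypothetical):

    #include <linux/mutex.h>

    static int guarded_op(struct mutex *m, int (*precheck)(void),
                          int (*op)(void))
    {
            int err;

            mutex_lock(m);
            err = precheck();       /* e.g. break leases before testing */
            if (err)
                    goto out;
            err = op();
    out:
            mutex_unlock(m);        /* released on every exit path */
            return err;
    }
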
66841 diff --git a/fs/ntfs3/Kconfig b/fs/ntfs3/Kconfig
66842 new file mode 100644
66843 index 000000000000..6e4cbc48ab8e
66844 --- /dev/null
66845 +++ b/fs/ntfs3/Kconfig
66846 @@ -0,0 +1,46 @@
66847 +# SPDX-License-Identifier: GPL-2.0-only
66848 +config NTFS3_FS
66849 +       tristate "NTFS Read-Write file system support"
66850 +       select NLS
66851 +       help
66852 +         Support for the native Windows file system (NTFS), up to NTFS version 3.1.
66854 +         Y or M enables the NTFS3 driver with all features (read, write,
66855 +         journal replaying, and sparse/compressed file support).
66856 +         The file system type to use on mount is "ntfs3". The module name
66857 +         (M option) is also "ntfs3".
66859 +         Documentation: <file:Documentation/filesystems/ntfs3.rst>
66861 +config NTFS3_64BIT_CLUSTER
66862 +       bool "64 bits per NTFS clusters"
66863 +       depends on NTFS3_FS && 64BIT
66864 +       help
66865 +         The Windows implementation of ntfs.sys uses 32 bits per cluster.
66866 +         If you activate 64 bits per cluster, you will be able to use 4k
66867 +         clusters for 16T+ volumes. Windows will not be able to mount such volumes.
66869 +         It is recommended to say N here.
66871 +config NTFS3_LZX_XPRESS
66872 +       bool "activate support for external compression (lzx/xpress)"
66873 +       depends on NTFS3_FS
66874 +       help
66875 +         In Windows 10 one can use the "compact" command to compress any file.
66876 +         The four possible compression variants are: xpress4k, xpress8k, xpress16k and lzx.
66877 +         If activated, you will be able to read such files correctly.
66879 +         It is recommended to say Y here.
66881 +config NTFS3_FS_POSIX_ACL
66882 +       bool "NTFS POSIX Access Control Lists"
66883 +       depends on NTFS3_FS
66884 +       select FS_POSIX_ACL
66885 +       help
66886 +         POSIX Access Control Lists (ACLs) support additional access rights
66887 +         for users and groups beyond the standard owner/group/world scheme,
66888 +         and this option selects support for ACLs specifically for ntfs
66889 +         filesystems.
66890 +         NOTE: this is a Linux-only feature. Windows will ignore these ACLs.
66892 +         If you don't know what Access Control Lists are, say N.
66893 diff --git a/fs/ntfs3/Makefile b/fs/ntfs3/Makefile
66894 new file mode 100644
66895 index 000000000000..5adc54ebac5a
66896 --- /dev/null
66897 +++ b/fs/ntfs3/Makefile
66898 @@ -0,0 +1,38 @@
66899 +# SPDX-License-Identifier: GPL-2.0
66901 +# Makefile for the ntfs3 filesystem support.
66904 +# to check robot warnings
66905 +ccflags-y += -Wint-to-pointer-cast
66906 +condflags := \
66907 +       $(call cc-option, -Wunused-but-set-variable) \
66908 +       $(call cc-option, -Wold-style-declaration)
66909 +ccflags-y += $(condflags)
66911 +obj-$(CONFIG_NTFS3_FS) += ntfs3.o
66913 +ntfs3-y :=     attrib.o \
66914 +               attrlist.o \
66915 +               bitfunc.o \
66916 +               bitmap.o \
66917 +               dir.o \
66918 +               fsntfs.o \
66919 +               frecord.o \
66920 +               file.o \
66921 +               fslog.o \
66922 +               inode.o \
66923 +               index.o \
66924 +               lznt.o \
66925 +               namei.o \
66926 +               record.o \
66927 +               run.o \
66928 +               super.o \
66929 +               upcase.o \
66930 +               xattr.o
66932 +ntfs3-$(CONFIG_NTFS3_LZX_XPRESS) += $(addprefix lib/,\
66933 +               decompress_common.o \
66934 +               lzx_decompress.o \
66935 +               xpress_decompress.o \
66936 +               )
66937 diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
66938 new file mode 100644
66939 index 000000000000..bca85e7b6eaf
66940 --- /dev/null
66941 +++ b/fs/ntfs3/attrib.c
66942 @@ -0,0 +1,2082 @@
66943 +// SPDX-License-Identifier: GPL-2.0
66945 + *
66946 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
66947 + *
66948 + * TODO: merge attr_set_size/attr_data_get_block/attr_allocate_frame?
66949 + */
66951 +#include <linux/blkdev.h>
66952 +#include <linux/buffer_head.h>
66953 +#include <linux/fs.h>
66954 +#include <linux/hash.h>
66955 +#include <linux/nls.h>
66956 +#include <linux/ratelimit.h>
66957 +#include <linux/slab.h>
66959 +#include "debug.h"
66960 +#include "ntfs.h"
66961 +#include "ntfs_fs.h"
66964 + * You can define NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP externally to
66965 + * tune the preallocation algorithm
66966 + */
66967 +#ifndef NTFS_MIN_LOG2_OF_CLUMP
66968 +#define NTFS_MIN_LOG2_OF_CLUMP 16
66969 +#endif
66971 +#ifndef NTFS_MAX_LOG2_OF_CLUMP
66972 +#define NTFS_MAX_LOG2_OF_CLUMP 26
66973 +#endif
66975 +// 16M
66976 +#define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
66977 +// 16G
66978 +#define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))
66981 + * get_pre_allocated
66982 + * returns 'size' rounded up to the preallocation clump granularity
66983 + */
66984 +static inline u64 get_pre_allocated(u64 size)
66986 +       u32 clump;
66987 +       u8 align_shift;
66988 +       u64 ret;
66990 +       if (size <= NTFS_CLUMP_MIN) {
66991 +               clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
66992 +               align_shift = NTFS_MIN_LOG2_OF_CLUMP;
66993 +       } else if (size >= NTFS_CLUMP_MAX) {
66994 +               clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
66995 +               align_shift = NTFS_MAX_LOG2_OF_CLUMP;
66996 +       } else {
66997 +               align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
66998 +                             __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
66999 +               clump = 1u << align_shift;
67000 +       }
67002 +       ret = (((size + clump - 1) >> align_shift)) << align_shift;
67004 +       return ret;
67008 + * attr_must_be_resident
67009 + *
67010 + * returns true if attribute must be resident
67011 + */
67012 +static inline bool attr_must_be_resident(struct ntfs_sb_info *sbi,
67013 +                                        enum ATTR_TYPE type)
67015 +       const struct ATTR_DEF_ENTRY *de;
67017 +       switch (type) {
67018 +       case ATTR_STD:
67019 +       case ATTR_NAME:
67020 +       case ATTR_ID:
67021 +       case ATTR_LABEL:
67022 +       case ATTR_VOL_INFO:
67023 +       case ATTR_ROOT:
67024 +       case ATTR_EA_INFO:
67025 +               return true;
67026 +       default:
67027 +               de = ntfs_query_def(sbi, type);
67028 +               if (de && (de->flags & NTFS_ATTR_MUST_BE_RESIDENT))
67029 +                       return true;
67030 +               return false;
67031 +       }
67035 + * attr_load_runs
67036 + *
67037 + * load all runs stored in 'attr'
67038 + */
67039 +int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
67040 +                  struct runs_tree *run, const CLST *vcn)
67042 +       int err;
67043 +       CLST svcn = le64_to_cpu(attr->nres.svcn);
67044 +       CLST evcn = le64_to_cpu(attr->nres.evcn);
67045 +       u32 asize;
67046 +       u16 run_off;
67048 +       if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
67049 +               return 0;
67051 +       if (vcn && (evcn < *vcn || *vcn < svcn))
67052 +               return -EINVAL;
67054 +       asize = le32_to_cpu(attr->size);
67055 +       run_off = le16_to_cpu(attr->nres.run_off);
67056 +       err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
67057 +                           vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
67058 +                           asize - run_off);
67059 +       if (err < 0)
67060 +               return err;
67062 +       return 0;
67066 + * run_deallocate_ex
67067 + *
67068 + * Deallocate clusters
67069 + */
67070 +static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
67071 +                            CLST vcn, CLST len, CLST *done, bool trim)
67073 +       int err = 0;
67074 +       CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
67075 +       size_t idx;
67077 +       if (!len)
67078 +               goto out;
67080 +       if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
67081 +failed:
67082 +               run_truncate(run, vcn0);
67083 +               err = -EINVAL;
67084 +               goto out;
67085 +       }
67087 +       for (;;) {
67088 +               if (clen > len)
67089 +                       clen = len;
67091 +               if (!clen) {
67092 +                       err = -EINVAL;
67093 +                       goto out;
67094 +               }
67096 +               if (lcn != SPARSE_LCN) {
67097 +                       mark_as_free_ex(sbi, lcn, clen, trim);
67098 +                       dn += clen;
67099 +               }
67101 +               len -= clen;
67102 +               if (!len)
67103 +                       break;
67105 +               vcn_next = vcn + clen;
67106 +               if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
67107 +                   vcn != vcn_next) {
67108 +                       // save memory - don't load entire run
67109 +                       goto failed;
67110 +               }
67111 +       }
67113 +out:
67114 +       if (done)
67115 +               *done += dn;
67117 +       return err;
67121 + * attr_allocate_clusters
67122 + *
67123 + * find free space, mark it as used and store in 'run'
67124 + */
67125 +int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
67126 +                          CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
67127 +                          enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
67128 +                          CLST *new_lcn)
67130 +       int err;
67131 +       CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
67132 +       struct wnd_bitmap *wnd = &sbi->used.bitmap;
67133 +       size_t cnt = run->count;
67135 +       for (;;) {
67136 +               err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
67137 +                                              opt);
67139 +               if (err == -ENOSPC && pre) {
67140 +                       pre = 0;
67141 +                       if (*pre_alloc)
67142 +                               *pre_alloc = 0;
67143 +                       continue;
67144 +               }
67146 +               if (err)
67147 +                       goto out;
67149 +               if (new_lcn && vcn == vcn0)
67150 +                       *new_lcn = lcn;
67152 +               /* Add new fragment into run storage */
67153 +               if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
67154 +                       down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
67155 +                       wnd_set_free(wnd, lcn, flen);
67156 +                       up_write(&wnd->rw_lock);
67157 +                       err = -ENOMEM;
67158 +                       goto out;
67159 +               }
67161 +               vcn += flen;
67163 +               if (flen >= len || opt == ALLOCATE_MFT ||
67164 +                   (fr && run->count - cnt >= fr)) {
67165 +                       *alen = vcn - vcn0;
67166 +                       return 0;
67167 +               }
67169 +               len -= flen;
67170 +       }
67172 +out:
67173 +       /* undo */
67174 +       run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
67175 +       run_truncate(run, vcn0);
67177 +       return err;
67181 + * if page is not NULL, it already contains resident data
67182 + * and is locked (called from ni_write_frame)
67183 + */
67184 +int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
67185 +                         struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
67186 +                         u64 new_size, struct runs_tree *run,
67187 +                         struct ATTRIB **ins_attr, struct page *page)
67189 +       struct ntfs_sb_info *sbi;
67190 +       struct ATTRIB *attr_s;
67191 +       struct MFT_REC *rec;
67192 +       u32 used, asize, rsize, aoff, align;
67193 +       bool is_data;
67194 +       CLST len, alen;
67195 +       char *next;
67196 +       int err;
67198 +       if (attr->non_res) {
67199 +               *ins_attr = attr;
67200 +               return 0;
67201 +       }
67203 +       sbi = mi->sbi;
67204 +       rec = mi->mrec;
67205 +       attr_s = NULL;
67206 +       used = le32_to_cpu(rec->used);
67207 +       asize = le32_to_cpu(attr->size);
67208 +       next = Add2Ptr(attr, asize);
67209 +       aoff = PtrOffset(rec, attr);
67210 +       rsize = le32_to_cpu(attr->res.data_size);
67211 +       is_data = attr->type == ATTR_DATA && !attr->name_len;
67213 +       align = sbi->cluster_size;
67214 +       if (is_attr_compressed(attr))
67215 +               align <<= COMPRESSION_UNIT;
67216 +       len = (rsize + align - 1) >> sbi->cluster_bits;
67218 +       run_init(run);
67220 +       /* make a copy of original attribute */
67221 +       attr_s = ntfs_memdup(attr, asize);
67222 +       if (!attr_s) {
67223 +               err = -ENOMEM;
67224 +               goto out;
67225 +       }
67227 +       if (!len) {
67228 +               /* empty resident -> empty nonresident */
67229 +               alen = 0;
67230 +       } else {
67231 +               const char *data = resident_data(attr);
67233 +               err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
67234 +                                            ALLOCATE_DEF, &alen, 0, NULL);
67235 +               if (err)
67236 +                       goto out1;
67238 +               if (!rsize) {
67239 +                       /* empty resident -> non empty nonresident */
67240 +               } else if (!is_data) {
67241 +                       err = ntfs_sb_write_run(sbi, run, 0, data, rsize);
67242 +                       if (err)
67243 +                               goto out2;
67244 +               } else if (!page) {
67245 +                       char *kaddr;
67247 +                       page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
67248 +                       if (!page) {
67249 +                               err = -ENOMEM;
67250 +                               goto out2;
67251 +                       }
67252 +                       kaddr = kmap_atomic(page);
67253 +                       memcpy(kaddr, data, rsize);
67254 +                       memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
67255 +                       kunmap_atomic(kaddr);
67256 +                       flush_dcache_page(page);
67257 +                       SetPageUptodate(page);
67258 +                       set_page_dirty(page);
67259 +                       unlock_page(page);
67260 +                       put_page(page);
67261 +               }
67262 +       }
67264 +       /* remove original attribute */
67265 +       used -= asize;
67266 +       memmove(attr, Add2Ptr(attr, asize), used - aoff);
67267 +       rec->used = cpu_to_le32(used);
67268 +       mi->dirty = true;
67269 +       if (le)
67270 +               al_remove_le(ni, le);
67272 +       err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
67273 +                                   attr_s->name_len, run, 0, alen,
67274 +                                   attr_s->flags, &attr, NULL);
67275 +       if (err)
67276 +               goto out3;
67278 +       ntfs_free(attr_s);
67279 +       attr->nres.data_size = cpu_to_le64(rsize);
67280 +       attr->nres.valid_size = attr->nres.data_size;
67282 +       *ins_attr = attr;
67284 +       if (is_data)
67285 +               ni->ni_flags &= ~NI_FLAG_RESIDENT;
67287 +       /* Resident attribute becomes non resident */
67288 +       return 0;
67290 +out3:
67291 +       attr = Add2Ptr(rec, aoff);
67292 +       memmove(next, attr, used - aoff);
67293 +       memcpy(attr, attr_s, asize);
67294 +       rec->used = cpu_to_le32(used + asize);
67295 +       mi->dirty = true;
67296 +out2:
67297 +       /* undo: do not trim new allocated clusters */
67298 +       run_deallocate(sbi, run, false);
67299 +       run_close(run);
67300 +out1:
67301 +       ntfs_free(attr_s);
67302 +       /*reinsert le*/
67303 +out:
67304 +       return err;
67308 + * attr_set_size_res
67309 + *
67310 + * helper for attr_set_size
67311 + */
67312 +static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
67313 +                            struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
67314 +                            u64 new_size, struct runs_tree *run,
67315 +                            struct ATTRIB **ins_attr)
67317 +       struct ntfs_sb_info *sbi = mi->sbi;
67318 +       struct MFT_REC *rec = mi->mrec;
67319 +       u32 used = le32_to_cpu(rec->used);
67320 +       u32 asize = le32_to_cpu(attr->size);
67321 +       u32 aoff = PtrOffset(rec, attr);
67322 +       u32 rsize = le32_to_cpu(attr->res.data_size);
67323 +       u32 tail = used - aoff - asize;
67324 +       char *next = Add2Ptr(attr, asize);
67325 +       s64 dsize = QuadAlign(new_size) - QuadAlign(rsize);
67327 +       if (dsize < 0) {
67328 +               memmove(next + dsize, next, tail);
67329 +       } else if (dsize > 0) {
67330 +               if (used + dsize > sbi->max_bytes_per_attr)
67331 +                       return attr_make_nonresident(ni, attr, le, mi, new_size,
67332 +                                                    run, ins_attr, NULL);
67334 +               memmove(next + dsize, next, tail);
67335 +               memset(next, 0, dsize);
67336 +       }
67338 +       if (new_size > rsize)
67339 +               memset(Add2Ptr(resident_data(attr), rsize), 0,
67340 +                      new_size - rsize);
67342 +       rec->used = cpu_to_le32(used + dsize);
67343 +       attr->size = cpu_to_le32(asize + dsize);
67344 +       attr->res.data_size = cpu_to_le32(new_size);
67345 +       mi->dirty = true;
67346 +       *ins_attr = attr;
67348 +       return 0;
67352 + * attr_set_size
67353 + *
67354 + * change the size of the attribute
67355 + * Extend:
67356 + *   - sparse/compressed: no allocated clusters
67357 + *   - normal: append allocated and preallocated new clusters
67358 + * Shrink:
67359 + *   - no deallocate if keep_prealloc is set
67360 + */
67361 +int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
67362 +                 const __le16 *name, u8 name_len, struct runs_tree *run,
67363 +                 u64 new_size, const u64 *new_valid, bool keep_prealloc,
67364 +                 struct ATTRIB **ret)
67366 +       int err = 0;
67367 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
67368 +       u8 cluster_bits = sbi->cluster_bits;
67369 +       bool is_mft =
67370 +               ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && !name_len;
67371 +       u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
67372 +       struct ATTRIB *attr = NULL, *attr_b;
67373 +       struct ATTR_LIST_ENTRY *le, *le_b;
67374 +       struct mft_inode *mi, *mi_b;
67375 +       CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
67376 +       CLST next_svcn, pre_alloc = -1, done = 0;
67377 +       bool is_ext;
67378 +       u32 align;
67379 +       struct MFT_REC *rec;
67381 +again:
67382 +       le_b = NULL;
67383 +       attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
67384 +                             &mi_b);
67385 +       if (!attr_b) {
67386 +               err = -ENOENT;
67387 +               goto out;
67388 +       }
67390 +       if (!attr_b->non_res) {
67391 +               err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
67392 +                                       &attr_b);
67393 +               if (err || !attr_b->non_res)
67394 +                       goto out;
67396 +               /* layout of records may be changed, so do a full search */
67397 +               goto again;
67398 +       }
67400 +       is_ext = is_attr_ext(attr_b);
67402 +again_1:
67403 +       align = sbi->cluster_size;
67405 +       if (is_ext) {
67406 +               align <<= attr_b->nres.c_unit;
67407 +               if (is_attr_sparsed(attr_b))
67408 +                       keep_prealloc = false;
67409 +       }
67411 +       old_valid = le64_to_cpu(attr_b->nres.valid_size);
67412 +       old_size = le64_to_cpu(attr_b->nres.data_size);
67413 +       old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
67414 +       old_alen = old_alloc >> cluster_bits;
67416 +       new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
67417 +       new_alen = new_alloc >> cluster_bits;
67419 +       if (keep_prealloc && is_ext)
67420 +               keep_prealloc = false;
67422 +       if (keep_prealloc && new_size < old_size) {
67423 +               attr_b->nres.data_size = cpu_to_le64(new_size);
67424 +               mi_b->dirty = true;
67425 +               goto ok;
67426 +       }
67428 +       vcn = old_alen - 1;
67430 +       svcn = le64_to_cpu(attr_b->nres.svcn);
67431 +       evcn = le64_to_cpu(attr_b->nres.evcn);
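+       /* locate the attribute segment whose [svcn, evcn] range covers the last allocated cluster */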
67433 +       if (svcn <= vcn && vcn <= evcn) {
67434 +               attr = attr_b;
67435 +               le = le_b;
67436 +               mi = mi_b;
67437 +       } else if (!le_b) {
67438 +               err = -EINVAL;
67439 +               goto out;
67440 +       } else {
67441 +               le = le_b;
67442 +               attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
67443 +                                   &mi);
67444 +               if (!attr) {
67445 +                       err = -EINVAL;
67446 +                       goto out;
67447 +               }
67449 +next_le_1:
67450 +               svcn = le64_to_cpu(attr->nres.svcn);
67451 +               evcn = le64_to_cpu(attr->nres.evcn);
67452 +       }
67454 +next_le:
67455 +       rec = mi->mrec;
67457 +       err = attr_load_runs(attr, ni, run, NULL);
67458 +       if (err)
67459 +               goto out;
67461 +       if (new_size > old_size) {
67462 +               CLST to_allocate;
67463 +               size_t free;
67465 +               if (new_alloc <= old_alloc) {
67466 +                       attr_b->nres.data_size = cpu_to_le64(new_size);
67467 +                       mi_b->dirty = true;
67468 +                       goto ok;
67469 +               }
67471 +               to_allocate = new_alen - old_alen;
67472 +add_alloc_in_same_attr_seg:
67473 +               lcn = 0;
67474 +               if (is_mft) {
67475 +                       /* $MFT allocates clusters from the MFT zone */
67476 +                       pre_alloc = 0;
67477 +               } else if (is_ext) {
67478 +                       /* no preallocation for sparse/compressed attributes */
67479 +                       pre_alloc = 0;
67480 +               } else if (pre_alloc == -1) {
67481 +                       pre_alloc = 0;
67482 +                       if (type == ATTR_DATA && !name_len &&
67483 +                           sbi->options.prealloc) {
67484 +                               CLST new_alen2 = bytes_to_cluster(
67485 +                                       sbi, get_pre_allocated(new_size));
67486 +                               pre_alloc = new_alen2 - new_alen;
67487 +                       }
67489 +                       /* Get the last lcn to allocate from */
67490 +                       if (old_alen &&
67491 +                           !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
67492 +                               lcn = SPARSE_LCN;
67493 +                       }
67495 +                       if (lcn == SPARSE_LCN)
67496 +                               lcn = 0;
67497 +                       else if (lcn)
67498 +                               lcn += 1;
67500 +                       free = wnd_zeroes(&sbi->used.bitmap);
67501 +                       if (to_allocate > free) {
67502 +                               err = -ENOSPC;
67503 +                               goto out;
67504 +                       }
67506 +                       if (pre_alloc && to_allocate + pre_alloc > free)
67507 +                               pre_alloc = 0;
67508 +               }
67510 +               vcn = old_alen;
67512 +               if (is_ext) {
67513 +                       if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
67514 +                                          false)) {
67515 +                               err = -ENOMEM;
67516 +                               goto out;
67517 +                       }
67518 +                       alen = to_allocate;
67519 +               } else {
67520 +                       /* ~3 bytes per fragment */
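+                       /*
+                        * i.e. cap the number of fragments so that the packed
+                        * mapping pairs still fit in the free space of this
+                        * MFT record
+                        */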
67521 +                       err = attr_allocate_clusters(
67522 +                               sbi, run, vcn, lcn, to_allocate, &pre_alloc,
67523 +                               is_mft ? ALLOCATE_MFT : 0, &alen,
67524 +                               is_mft ? 0
67525 +                                      : (sbi->record_size -
67526 +                                         le32_to_cpu(rec->used) + 8) /
67527 +                                                        3 +
67528 +                                                1,
67529 +                               NULL);
67530 +                       if (err)
67531 +                               goto out;
67532 +               }
67534 +               done += alen;
67535 +               vcn += alen;
67536 +               if (to_allocate > alen)
67537 +                       to_allocate -= alen;
67538 +               else
67539 +                       to_allocate = 0;
67541 +pack_runs:
67542 +               err = mi_pack_runs(mi, attr, run, vcn - svcn);
67543 +               if (err)
67544 +                       goto out;
67546 +               next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
67547 +               new_alloc_tmp = (u64)next_svcn << cluster_bits;
67548 +               attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
67549 +               mi_b->dirty = true;
67551 +               if (next_svcn >= vcn && !to_allocate) {
67552 +                       /* Normal case: update the attribute and exit */
67553 +                       attr_b->nres.data_size = cpu_to_le64(new_size);
67554 +                       goto ok;
67555 +               }
67557 +               /* allocate at least two MFT records' worth to avoid a recursive loop */
67558 +               if (is_mft && next_svcn == vcn &&
67559 +                   ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
67560 +                       new_size = new_alloc_tmp;
67561 +                       attr_b->nres.data_size = attr_b->nres.alloc_size;
67562 +                       goto ok;
67563 +               }
67565 +               if (le32_to_cpu(rec->used) < sbi->record_size) {
67566 +                       old_alen = next_svcn;
67567 +                       evcn = old_alen - 1;
67568 +                       goto add_alloc_in_same_attr_seg;
67569 +               }
67571 +               attr_b->nres.data_size = attr_b->nres.alloc_size;
67572 +               if (new_alloc_tmp < old_valid)
67573 +                       attr_b->nres.valid_size = attr_b->nres.data_size;
67575 +               if (type == ATTR_LIST) {
67576 +                       err = ni_expand_list(ni);
67577 +                       if (err)
67578 +                               goto out;
67579 +                       if (next_svcn < vcn)
67580 +                               goto pack_runs;
67582 +                       /* the layout of records has changed */
67583 +                       goto again;
67584 +               }
67586 +               if (!ni->attr_list.size) {
67587 +                       err = ni_create_attr_list(ni);
67588 +                       if (err)
67589 +                               goto out;
67590 +                       /* the layout of records has changed */
67591 +               }
67593 +               if (next_svcn >= vcn) {
67594 +                       /* this is mft data, repeat */
67595 +                       goto again;
67596 +               }
67598 +               /* insert new attribute segment */
67599 +               err = ni_insert_nonresident(ni, type, name, name_len, run,
67600 +                                           next_svcn, vcn - next_svcn,
67601 +                                           attr_b->flags, &attr, &mi);
67602 +               if (err)
67603 +                       goto out;
67605 +               if (!is_mft)
67606 +                       run_truncate_head(run, evcn + 1);
67608 +               svcn = le64_to_cpu(attr->nres.svcn);
67609 +               evcn = le64_to_cpu(attr->nres.evcn);
67611 +               le_b = NULL;
67612 +               /* the layout of records may have changed */
67613 +               /* find the base attribute to update */
67614 +               attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
67615 +                                     NULL, &mi_b);
67616 +               if (!attr_b) {
67617 +                       err = -ENOENT;
67618 +                       goto out;
67619 +               }
67621 +               attr_b->nres.alloc_size = cpu_to_le64((u64)vcn << cluster_bits);
67622 +               attr_b->nres.data_size = attr_b->nres.alloc_size;
67623 +               attr_b->nres.valid_size = attr_b->nres.alloc_size;
67624 +               mi_b->dirty = true;
67625 +               goto again_1;
67626 +       }
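+       /*
+        * Shrink path: walk the attribute segments from the last one
+        * backwards, deallocating clusters past the new length; whole
+        * segments beyond the cut are removed, the boundary segment is
+        * repacked.
+        */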
67628 +       if (new_size != old_size ||
67629 +           (new_alloc != old_alloc && !keep_prealloc)) {
67630 +               vcn = max(svcn, new_alen);
67631 +               new_alloc_tmp = (u64)vcn << cluster_bits;
67633 +               alen = 0;
67634 +               err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &alen,
67635 +                                       true);
67636 +               if (err)
67637 +                       goto out;
67639 +               run_truncate(run, vcn);
67641 +               if (vcn > svcn) {
67642 +                       err = mi_pack_runs(mi, attr, run, vcn - svcn);
67643 +                       if (err)
67644 +                               goto out;
67645 +               } else if (le && le->vcn) {
67646 +                       u16 le_sz = le16_to_cpu(le->size);
67648 +                       /*
67649 +                        * NOTE: list entries for one attribute are always
67650 +                        * the same size. We are dealing with the last entry
67651 +                        * (vcn == 0), and it is not the first one in the
67652 +                        * entries array (the list entry for the std attribute
67653 +                        * is always first), so it is safe to step back.
67654 +                        */
67655 +                       mi_remove_attr(mi, attr);
67657 +                       if (!al_remove_le(ni, le)) {
67658 +                               err = -EINVAL;
67659 +                               goto out;
67660 +                       }
67662 +                       le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
67663 +               } else {
67664 +                       attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
67665 +                       mi->dirty = true;
67666 +               }
67668 +               attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
67670 +               if (vcn == new_alen) {
67671 +                       attr_b->nres.data_size = cpu_to_le64(new_size);
67672 +                       if (new_size < old_valid)
67673 +                               attr_b->nres.valid_size =
67674 +                                       attr_b->nres.data_size;
67675 +               } else {
67676 +                       if (new_alloc_tmp <=
67677 +                           le64_to_cpu(attr_b->nres.data_size))
67678 +                               attr_b->nres.data_size =
67679 +                                       attr_b->nres.alloc_size;
67680 +                       if (new_alloc_tmp <
67681 +                           le64_to_cpu(attr_b->nres.valid_size))
67682 +                               attr_b->nres.valid_size =
67683 +                                       attr_b->nres.alloc_size;
67684 +               }
67686 +               if (is_ext)
67687 +                       le64_sub_cpu(&attr_b->nres.total_size,
67688 +                                    ((u64)alen << cluster_bits));
67690 +               mi_b->dirty = true;
67692 +               if (new_alloc_tmp <= new_alloc)
67693 +                       goto ok;
67695 +               old_size = new_alloc_tmp;
67696 +               vcn = svcn - 1;
67698 +               if (le == le_b) {
67699 +                       attr = attr_b;
67700 +                       mi = mi_b;
67701 +                       evcn = svcn - 1;
67702 +                       svcn = 0;
67703 +                       goto next_le;
67704 +               }
67706 +               if (le->type != type || le->name_len != name_len ||
67707 +                   memcmp(le_name(le), name, name_len * sizeof(short))) {
67708 +                       err = -EINVAL;
67709 +                       goto out;
67710 +               }
67712 +               err = ni_load_mi(ni, le, &mi);
67713 +               if (err)
67714 +                       goto out;
67716 +               attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
67717 +               if (!attr) {
67718 +                       err = -EINVAL;
67719 +                       goto out;
67720 +               }
67721 +               goto next_le_1;
67722 +       }
67724 +ok:
67725 +       if (new_valid) {
67726 +               __le64 valid = cpu_to_le64(min(*new_valid, new_size));
67728 +               if (attr_b->nres.valid_size != valid) {
67729 +                       attr_b->nres.valid_size = valid;
67730 +                       mi_b->dirty = true;
67731 +               }
67732 +       }
67734 +out:
67735 +       if (!err && attr_b && ret)
67736 +               *ret = attr_b;
67738 +       /* update inode_set_bytes */
67739 +       if (!err && ((type == ATTR_DATA && !name_len) ||
67740 +                    (type == ATTR_ALLOC && name == I30_NAME))) {
67741 +               bool dirty = false;
67743 +               if (ni->vfs_inode.i_size != new_size) {
67744 +                       ni->vfs_inode.i_size = new_size;
67745 +                       dirty = true;
67746 +               }
67748 +               if (attr_b && attr_b->non_res) {
67749 +                       new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
67750 +                       if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
67751 +                               inode_set_bytes(&ni->vfs_inode, new_alloc);
67752 +                               dirty = true;
67753 +                       }
67754 +               }
67756 +               if (dirty) {
67757 +                       ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
67758 +                       mark_inode_dirty(&ni->vfs_inode);
67759 +               }
67760 +       }
67762 +       return err;
67763 +}
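+/*
+ * attr_data_get_block
+ *
+ * Map vcn to lcn inside the unnamed $DATA attribute; when 'new' is given,
+ * sparse/unmapped regions are backed by freshly allocated clusters (the
+ * request is rounded to whole compression frames) and *new reports whether
+ * an allocation happened.
+ */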
67765 +int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
67766 +                       CLST *len, bool *new)
67767 +{
67768 +       int err = 0;
67769 +       struct runs_tree *run = &ni->file.run;
67770 +       struct ntfs_sb_info *sbi;
67771 +       u8 cluster_bits;
67772 +       struct ATTRIB *attr = NULL, *attr_b;
67773 +       struct ATTR_LIST_ENTRY *le, *le_b;
67774 +       struct mft_inode *mi, *mi_b;
67775 +       CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end;
67776 +       u64 total_size;
67777 +       u32 clst_per_frame;
67778 +       bool ok;
67780 +       if (new)
67781 +               *new = false;
67783 +       down_read(&ni->file.run_lock);
67784 +       ok = run_lookup_entry(run, vcn, lcn, len, NULL);
67785 +       up_read(&ni->file.run_lock);
67787 +       if (ok && (*lcn != SPARSE_LCN || !new)) {
67788 +               /* normal way */
67789 +               return 0;
67790 +       }
67792 +       if (!clen)
67793 +               clen = 1;
67795 +       if (ok && clen > *len)
67796 +               clen = *len;
67798 +       sbi = ni->mi.sbi;
67799 +       cluster_bits = sbi->cluster_bits;
67801 +       ni_lock(ni);
67802 +       down_write(&ni->file.run_lock);
67804 +       le_b = NULL;
67805 +       attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
67806 +       if (!attr_b) {
67807 +               err = -ENOENT;
67808 +               goto out;
67809 +       }
67811 +       if (!attr_b->non_res) {
67812 +               *lcn = RESIDENT_LCN;
67813 +               *len = 1;
67814 +               goto out;
67815 +       }
67817 +       asize = le64_to_cpu(attr_b->nres.alloc_size) >> sbi->cluster_bits;
67818 +       if (vcn >= asize) {
67819 +               err = -EINVAL;
67820 +               goto out;
67821 +       }
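+       /*
+        * round the request up to whole compression frames; for ordinary
+        * attributes c_unit is 0, so the granularity is a single cluster
+        */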
67823 +       clst_per_frame = 1u << attr_b->nres.c_unit;
67824 +       to_alloc = (clen + clst_per_frame - 1) & ~(clst_per_frame - 1);
67826 +       if (vcn + to_alloc > asize)
67827 +               to_alloc = asize - vcn;
67829 +       svcn = le64_to_cpu(attr_b->nres.svcn);
67830 +       evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
67832 +       attr = attr_b;
67833 +       le = le_b;
67834 +       mi = mi_b;
67836 +       if (le_b && (vcn < svcn || evcn1 <= vcn)) {
67837 +               attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
67838 +                                   &mi);
67839 +               if (!attr) {
67840 +                       err = -EINVAL;
67841 +                       goto out;
67842 +               }
67843 +               svcn = le64_to_cpu(attr->nres.svcn);
67844 +               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
67845 +       }
67847 +       err = attr_load_runs(attr, ni, run, NULL);
67848 +       if (err)
67849 +               goto out;
67851 +       if (!ok) {
67852 +               ok = run_lookup_entry(run, vcn, lcn, len, NULL);
67853 +               if (ok && (*lcn != SPARSE_LCN || !new)) {
67854 +                       /* normal way */
67855 +                       err = 0;
67856 +                       goto ok;
67857 +               }
67859 +               if (!ok && !new) {
67860 +                       *len = 0;
67861 +                       err = 0;
67862 +                       goto ok;
67863 +               }
67865 +               if (ok && clen > *len) {
67866 +                       clen = *len;
67867 +                       to_alloc = (clen + clst_per_frame - 1) &
67868 +                                  ~(clst_per_frame - 1);
67869 +               }
67870 +       }
67872 +       if (!is_attr_ext(attr_b)) {
67873 +               err = -EINVAL;
67874 +               goto out;
67875 +       }
67877 +       /* Get the last lcn to allocate from */
67878 +       hint = 0;
67880 +       if (vcn > evcn1) {
67881 +               if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
67882 +                                  false)) {
67883 +                       err = -ENOMEM;
67884 +                       goto out;
67885 +               }
67886 +       } else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
67887 +               hint = -1;
67888 +       }
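+       /*
+        * hint == -1 means no neighbouring lcn was found; hint + 1 == 0 is
+        * then passed below, presumably letting the allocator pick a start
+        */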
67890 +       err = attr_allocate_clusters(
67891 +               sbi, run, vcn, hint + 1, to_alloc, NULL, 0, len,
67892 +               (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1,
67893 +               lcn);
67894 +       if (err)
67895 +               goto out;
67896 +       *new = true;
67898 +       end = vcn + *len;
67900 +       total_size = le64_to_cpu(attr_b->nres.total_size) +
67901 +                    ((u64)*len << cluster_bits);
67903 +repack:
67904 +       err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
67905 +       if (err)
67906 +               goto out;
67908 +       attr_b->nres.total_size = cpu_to_le64(total_size);
67909 +       inode_set_bytes(&ni->vfs_inode, total_size);
67910 +       ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
67912 +       mi_b->dirty = true;
67913 +       mark_inode_dirty(&ni->vfs_inode);
67915 +       /* stored [vcn : next_svcn) from [vcn : end) */
67916 +       next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
67918 +       if (end <= evcn1) {
67919 +               if (next_svcn == evcn1) {
67920 +                       /* Normal case: update the attribute and exit */
67921 +                       goto ok;
67922 +               }
67923 +               /* add a new segment: start next_svcn, length evcn1 - next_svcn */
67924 +               if (!ni->attr_list.size) {
67925 +                       err = ni_create_attr_list(ni);
67926 +                       if (err)
67927 +                               goto out;
67928 +                       /* the layout of records has changed */
67929 +                       le_b = NULL;
67930 +                       attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
67931 +                                             0, NULL, &mi_b);
67932 +                       if (!attr_b) {
67933 +                               err = -ENOENT;
67934 +                               goto out;
67935 +                       }
67937 +                       attr = attr_b;
67938 +                       le = le_b;
67939 +                       mi = mi_b;
67940 +                       goto repack;
67941 +               }
67942 +       }
67944 +       svcn = evcn1;
67946 +       /* Estimate next attribute */
67947 +       attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
67949 +       if (attr) {
67950 +               CLST alloc = bytes_to_cluster(
67951 +                       sbi, le64_to_cpu(attr_b->nres.alloc_size));
67952 +               CLST evcn = le64_to_cpu(attr->nres.evcn);
67954 +               if (end < next_svcn)
67955 +                       end = next_svcn;
67956 +               while (end > evcn) {
67957 +                       /* remove segment [svcn : evcn) */
67958 +                       mi_remove_attr(mi, attr);
67960 +                       if (!al_remove_le(ni, le)) {
67961 +                               err = -EINVAL;
67962 +                               goto out;
67963 +                       }
67965 +                       if (evcn + 1 >= alloc) {
67966 +                               /* last attribute segment */
67967 +                               evcn1 = evcn + 1;
67968 +                               goto ins_ext;
67969 +                       }
67971 +                       if (ni_load_mi(ni, le, &mi)) {
67972 +                               attr = NULL;
67973 +                               goto out;
67974 +                       }
67976 +                       attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
67977 +                                           &le->id);
67978 +                       if (!attr) {
67979 +                               err = -EINVAL;
67980 +                               goto out;
67981 +                       }
67982 +                       svcn = le64_to_cpu(attr->nres.svcn);
67983 +                       evcn = le64_to_cpu(attr->nres.evcn);
67984 +               }
67986 +               if (end < svcn)
67987 +                       end = svcn;
67989 +               err = attr_load_runs(attr, ni, run, &end);
67990 +               if (err)
67991 +                       goto out;
67993 +               evcn1 = evcn + 1;
67994 +               attr->nres.svcn = cpu_to_le64(next_svcn);
67995 +               err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
67996 +               if (err)
67997 +                       goto out;
67999 +               le->vcn = cpu_to_le64(next_svcn);
68000 +               ni->attr_list.dirty = true;
68001 +               mi->dirty = true;
68003 +               next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
68004 +       }
68005 +ins_ext:
68006 +       if (evcn1 > next_svcn) {
68007 +               err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
68008 +                                           next_svcn, evcn1 - next_svcn,
68009 +                                           attr_b->flags, &attr, &mi);
68010 +               if (err)
68011 +                       goto out;
68012 +       }
68013 +ok:
68014 +       run_truncate_around(run, vcn);
68015 +out:
68016 +       up_write(&ni->file.run_lock);
68017 +       ni_unlock(ni);
68019 +       return err;
68020 +}
68022 +int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
68023 +{
68024 +       u64 vbo;
68025 +       struct ATTRIB *attr;
68026 +       u32 data_size;
68028 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
68029 +       if (!attr)
68030 +               return -EINVAL;
68032 +       if (attr->non_res)
68033 +               return E_NTFS_NONRESIDENT;
68035 +       vbo = page->index << PAGE_SHIFT;
68036 +       data_size = le32_to_cpu(attr->res.data_size);
68037 +       if (vbo < data_size) {
68038 +               const char *data = resident_data(attr);
68039 +               char *kaddr = kmap_atomic(page);
68040 +               u32 use = data_size - vbo;
68042 +               if (use > PAGE_SIZE)
68043 +                       use = PAGE_SIZE;
68045 +               memcpy(kaddr, data + vbo, use);
68046 +               memset(kaddr + use, 0, PAGE_SIZE - use);
68047 +               kunmap_atomic(kaddr);
68048 +               flush_dcache_page(page);
68049 +               SetPageUptodate(page);
68050 +       } else if (!PageUptodate(page)) {
68051 +               zero_user_segment(page, 0, PAGE_SIZE);
68052 +               SetPageUptodate(page);
68053 +       }
68055 +       return 0;
68056 +}
68058 +int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
68059 +{
68060 +       u64 vbo;
68061 +       struct mft_inode *mi;
68062 +       struct ATTRIB *attr;
68063 +       u32 data_size;
68065 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
68066 +       if (!attr)
68067 +               return -EINVAL;
68069 +       if (attr->non_res) {
68070 +               /* return a special error code so the caller can detect this case */
68071 +               return E_NTFS_NONRESIDENT;
68072 +       }
68074 +       vbo = page->index << PAGE_SHIFT;
68075 +       data_size = le32_to_cpu(attr->res.data_size);
68076 +       if (vbo < data_size) {
68077 +               char *data = resident_data(attr);
68078 +               char *kaddr = kmap_atomic(page);
68079 +               u32 use = data_size - vbo;
68081 +               if (use > PAGE_SIZE)
68082 +                       use = PAGE_SIZE;
68083 +               memcpy(data + vbo, kaddr, use);
68084 +               kunmap_atomic(kaddr);
68085 +               mi->dirty = true;
68086 +       }
68087 +       ni->i_valid = data_size;
68089 +       return 0;
68090 +}
68092 +/*
68093 + * attr_load_runs_vcn
68094 + *
68095 + * load the run fragments of the segment containing the given vcn
68096 + */
68097 +int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
68098 +                      const __le16 *name, u8 name_len, struct runs_tree *run,
68099 +                      CLST vcn)
68100 +{
68101 +       struct ATTRIB *attr;
68102 +       int err;
68103 +       CLST svcn, evcn;
68104 +       u16 ro;
68106 +       attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
68107 +       if (!attr)
68108 +               return -ENOENT;
68110 +       svcn = le64_to_cpu(attr->nres.svcn);
68111 +       evcn = le64_to_cpu(attr->nres.evcn);
68113 +       if (evcn < vcn || vcn < svcn)
68114 +               return -EINVAL;
68116 +       ro = le16_to_cpu(attr->nres.run_off);
68117 +       err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
68118 +                           Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
68119 +       if (err < 0)
68120 +               return err;
68121 +       return 0;
68122 +}
68124 +/*
68125 + * load runs for the given byte range [from, to)
68126 + */
68127 +int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
68128 +                        const __le16 *name, u8 name_len, struct runs_tree *run,
68129 +                        u64 from, u64 to)
68130 +{
68131 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
68132 +       u8 cluster_bits = sbi->cluster_bits;
68133 +       CLST vcn = from >> cluster_bits;
68134 +       CLST vcn_last = (to - 1) >> cluster_bits;
68135 +       CLST lcn, clen;
68136 +       int err;
68138 +       for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
68139 +               if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
68140 +                       err = attr_load_runs_vcn(ni, type, name, name_len, run,
68141 +                                                vcn);
68142 +                       if (err)
68143 +                               return err;
68144 +                       clen = 0; /* the next run_lookup_entry(vcn) must succeed */
68145 +               }
68146 +       }
68148 +       return 0;
68149 +}
68151 +#ifdef CONFIG_NTFS3_LZX_XPRESS
68152 +/*
68153 + * attr_wof_frame_info
68154 + *
68155 + * read the header of an xpress/lzx (WOF) compressed file to get frame info
68156 + */
68157 +int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
68158 +                       struct runs_tree *run, u64 frame, u64 frames,
68159 +                       u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
68160 +{
68161 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
68162 +       u64 vbo[2], off[2], wof_size;
68163 +       u32 voff;
68164 +       u8 bytes_per_off;
68165 +       char *addr;
68166 +       struct page *page;
68167 +       int i, err;
68168 +       __le32 *off32;
68169 +       __le64 *off64;
68171 +       if (ni->vfs_inode.i_size < 0x100000000ull) {
68172 +               /* the file starts with an array of 32-bit offsets */
68173 +               bytes_per_off = sizeof(__le32);
68174 +               vbo[1] = frame << 2;
68175 +               *vbo_data = frames << 2;
68176 +       } else {
68177 +               /* the file starts with an array of 64-bit offsets */
68178 +               bytes_per_off = sizeof(__le64);
68179 +               vbo[1] = frame << 3;
68180 +               *vbo_data = frames << 3;
68181 +       }
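+       /*
+        * Layout sketch: the stream begins with one end-offset entry per
+        * frame, followed by the compressed frames themselves; frame 'i'
+        * occupies [entry[i-1], entry[i]) of that data, so two adjacent
+        * entries are read to locate it.
+        */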
68183 +       /*
68184 +        * read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts
68185 +        * read 4/8 bytes at [vbo] == offset where compressed frame ends
68186 +        */
68187 +       if (!attr->non_res) {
68188 +               if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
68189 +                       ntfs_inode_err(&ni->vfs_inode, "is corrupted");
68190 +                       return -EINVAL;
68191 +               }
68192 +               addr = resident_data(attr);
68194 +               if (bytes_per_off == sizeof(__le32)) {
68195 +                       off32 = Add2Ptr(addr, vbo[1]);
68196 +                       off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
68197 +                       off[1] = le32_to_cpu(off32[0]);
68198 +               } else {
68199 +                       off64 = Add2Ptr(addr, vbo[1]);
68200 +                       off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
68201 +                       off[1] = le64_to_cpu(off64[0]);
68202 +               }
68204 +               *vbo_data += off[0];
68205 +               *ondisk_size = off[1] - off[0];
68206 +               return 0;
68207 +       }
68209 +       wof_size = le64_to_cpu(attr->nres.data_size);
68210 +       down_write(&ni->file.run_lock);
68211 +       page = ni->file.offs_page;
68212 +       if (!page) {
68213 +               page = alloc_page(GFP_KERNEL);
68214 +               if (!page) {
68215 +                       err = -ENOMEM;
68216 +                       goto out;
68217 +               }
68218 +               page->index = -1;
68219 +               ni->file.offs_page = page;
68220 +       }
68221 +       lock_page(page);
68222 +       addr = page_address(page);
68224 +       if (vbo[1]) {
68225 +               voff = vbo[1] & (PAGE_SIZE - 1);
68226 +               vbo[0] = vbo[1] - bytes_per_off;
68227 +               i = 0;
68228 +       } else {
68229 +               voff = 0;
68230 +               vbo[0] = 0;
68231 +               off[0] = 0;
68232 +               i = 1;
68233 +       }
68235 +       do {
68236 +               pgoff_t index = vbo[i] >> PAGE_SHIFT;
68238 +               if (index != page->index) {
68239 +                       u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
68240 +                       u64 to = min(from + PAGE_SIZE, wof_size);
68242 +                       err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
68243 +                                                  ARRAY_SIZE(WOF_NAME), run,
68244 +                                                  from, to);
68245 +                       if (err)
68246 +                               goto out1;
68248 +                       err = ntfs_bio_pages(sbi, run, &page, 1, from,
68249 +                                            to - from, REQ_OP_READ);
68250 +                       if (err) {
68251 +                               page->index = -1;
68252 +                               goto out1;
68253 +                       }
68254 +                       page->index = index;
68255 +               }
68257 +               if (i) {
68258 +                       if (bytes_per_off == sizeof(__le32)) {
68259 +                               off32 = Add2Ptr(addr, voff);
68260 +                               off[1] = le32_to_cpu(*off32);
68261 +                       } else {
68262 +                               off64 = Add2Ptr(addr, voff);
68263 +                               off[1] = le64_to_cpu(*off64);
68264 +                       }
68265 +               } else if (!voff) {
68266 +                       if (bytes_per_off == sizeof(__le32)) {
68267 +                               off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
68268 +                               off[0] = le32_to_cpu(*off32);
68269 +                       } else {
68270 +                               off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
68271 +                               off[0] = le64_to_cpu(*off64);
68272 +                       }
68273 +               } else {
68274 +                       /* two values in one page */
68275 +                       if (bytes_per_off == sizeof(__le32)) {
68276 +                               off32 = Add2Ptr(addr, voff);
68277 +                               off[0] = le32_to_cpu(off32[-1]);
68278 +                               off[1] = le32_to_cpu(off32[0]);
68279 +                       } else {
68280 +                               off64 = Add2Ptr(addr, voff);
68281 +                               off[0] = le64_to_cpu(off64[-1]);
68282 +                               off[1] = le64_to_cpu(off64[0]);
68283 +                       }
68284 +                       break;
68285 +               }
68286 +       } while (++i < 2);
68288 +       *vbo_data += off[0];
68289 +       *ondisk_size = off[1] - off[0];
68291 +out1:
68292 +       unlock_page(page);
68293 +out:
68294 +       up_write(&ni->file.run_lock);
68295 +       return err;
68296 +}
68297 +#endif
68299 +/*
68300 + * attr_is_frame_compressed
68301 + *
68302 + * detect whether the given frame is actually stored compressed
68303 + */
68304 +int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
68305 +                            CLST frame, CLST *clst_data)
68306 +{
68307 +       int err;
68308 +       u32 clst_frame;
68309 +       CLST clen, lcn, vcn, alen, slen, vcn_next;
68310 +       size_t idx;
68311 +       struct runs_tree *run;
68313 +       *clst_data = 0;
68315 +       if (!is_attr_compressed(attr))
68316 +               return 0;
68318 +       if (!attr->non_res)
68319 +               return 0;
68321 +       clst_frame = 1u << attr->nres.c_unit;
68322 +       vcn = frame * clst_frame;
68323 +       run = &ni->file.run;
68325 +       if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
68326 +               err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
68327 +                                        attr->name_len, run, vcn);
68328 +               if (err)
68329 +                       return err;
68331 +               if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
68332 +                       return -EINVAL;
68333 +       }
68335 +       if (lcn == SPARSE_LCN) {
68336 +               /* sparse frame */
68337 +               return 0;
68338 +       }
68340 +       if (clen >= clst_frame) {
68341 +               /*
68342 +                * The frame is not compressed because it does not
68343 +                * contain any sparse clusters
68344 +                */
68345 +               *clst_data = clst_frame;
68346 +               return 0;
68347 +       }
68349 +       alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
68350 +       slen = 0;
68351 +       *clst_data = clen;
68353 +       /*
68354 +        * The frame is compressed when *clst_data + slen >= clst_frame
68355 +        * and slen != 0 (sparse padding); check the following fragments
68356 +        */
68357 +       while ((vcn += clen) < alen) {
68358 +               vcn_next = vcn;
68360 +               if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
68361 +                   vcn_next != vcn) {
68362 +                       err = attr_load_runs_vcn(ni, attr->type,
68363 +                                                attr_name(attr),
68364 +                                                attr->name_len, run, vcn_next);
68365 +                       if (err)
68366 +                               return err;
68367 +                       vcn = vcn_next;
68369 +                       if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
68370 +                               return -EINVAL;
68371 +               }
68373 +               if (lcn == SPARSE_LCN) {
68374 +                       slen += clen;
68375 +               } else {
68376 +                       if (slen) {
68377 +                               /*
68378 +                                * a data fragment follows sparse clusters, yet
68379 +                                * data + sparse still do not fill the frame
68380 +                                */
68381 +                               return -EINVAL;
68382 +                       }
68383 +                       *clst_data += clen;
68384 +               }
68386 +               if (*clst_data + slen >= clst_frame) {
68387 +                       if (!slen) {
68388 +                               /*
68389 +                                * There are no sparse clusters in this frame,
68390 +                                * so it is not compressed
68391 +                                */
68392 +                               *clst_data = clst_frame;
68393 +                       } else {
68394 +                               /* the frame is compressed */
68395 +                       }
68396 +                       break;
68397 +               }
68398 +       }
68400 +       return 0;
68401 +}
68403 +/*
68404 + * attr_allocate_frame
68405 + *
68406 + * allocate/free clusters for 'frame'
68407 + * assumed: down_write(&ni->file.run_lock);
68408 + */
68409 +int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
68410 +                       u64 new_valid)
68411 +{
68412 +       int err = 0;
68413 +       struct runs_tree *run = &ni->file.run;
68414 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
68415 +       struct ATTRIB *attr = NULL, *attr_b;
68416 +       struct ATTR_LIST_ENTRY *le, *le_b;
68417 +       struct mft_inode *mi, *mi_b;
68418 +       CLST svcn, evcn1, next_svcn, lcn, len;
68419 +       CLST vcn, end, clst_data;
68420 +       u64 total_size, valid_size, data_size;
68422 +       le_b = NULL;
68423 +       attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
68424 +       if (!attr_b)
68425 +               return -ENOENT;
68427 +       if (!is_attr_ext(attr_b))
68428 +               return -EINVAL;
68430 +       vcn = frame << NTFS_LZNT_CUNIT;
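+       /*
+        * NTFS_LZNT_CUNIT is the log2 of clusters per compression frame
+        * (16 clusters for standard NTFS compression)
+        */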
68431 +       total_size = le64_to_cpu(attr_b->nres.total_size);
68433 +       svcn = le64_to_cpu(attr_b->nres.svcn);
68434 +       evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
68435 +       data_size = le64_to_cpu(attr_b->nres.data_size);
68437 +       if (svcn <= vcn && vcn < evcn1) {
68438 +               attr = attr_b;
68439 +               le = le_b;
68440 +               mi = mi_b;
68441 +       } else if (!le_b) {
68442 +               err = -EINVAL;
68443 +               goto out;
68444 +       } else {
68445 +               le = le_b;
68446 +               attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
68447 +                                   &mi);
68448 +               if (!attr) {
68449 +                       err = -EINVAL;
68450 +                       goto out;
68451 +               }
68452 +               svcn = le64_to_cpu(attr->nres.svcn);
68453 +               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
68454 +       }
68456 +       err = attr_load_runs(attr, ni, run, NULL);
68457 +       if (err)
68458 +               goto out;
68460 +       err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
68461 +       if (err)
68462 +               goto out;
68464 +       total_size -= (u64)clst_data << sbi->cluster_bits;
68466 +       len = bytes_to_cluster(sbi, compr_size);
68468 +       if (len == clst_data)
68469 +               goto out;
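+       /*
+        * The frame will occupy 'len' clusters on disk but currently owns
+        * 'clst_data': either free the tail and replace it with sparse
+        * clusters, or allocate the difference.
+        */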
68471 +       if (len < clst_data) {
68472 +               err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
68473 +                                       NULL, true);
68474 +               if (err)
68475 +                       goto out;
68477 +               if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
68478 +                                  false)) {
68479 +                       err = -ENOMEM;
68480 +                       goto out;
68481 +               }
68482 +               end = vcn + clst_data;
68483 +               /* run contains updated range [vcn + len : end) */
68484 +       } else {
68485 +               CLST alen, hint = 0;
68486 +               /* Get the last lcn to allocate from */
68487 +               if (vcn + clst_data &&
68488 +                   !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
68489 +                                     NULL)) {
68490 +                       hint = -1;
68491 +               }
68493 +               err = attr_allocate_clusters(sbi, run, vcn + clst_data,
68494 +                                            hint + 1, len - clst_data, NULL, 0,
68495 +                                            &alen, 0, &lcn);
68496 +               if (err)
68497 +                       goto out;
68499 +               end = vcn + len;
68500 +               /* run contains updated range [vcn + clst_data : end) */
68501 +       }
68503 +       total_size += (u64)len << sbi->cluster_bits;
68505 +repack:
68506 +       err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
68507 +       if (err)
68508 +               goto out;
68510 +       attr_b->nres.total_size = cpu_to_le64(total_size);
68511 +       inode_set_bytes(&ni->vfs_inode, total_size);
68513 +       mi_b->dirty = true;
68514 +       mark_inode_dirty(&ni->vfs_inode);
68516 +       /* stored [vcn : next_svcn) from [vcn : end) */
68517 +       next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
68519 +       if (end <= evcn1) {
68520 +               if (next_svcn == evcn1) {
68521 +                       /* Normal case: update the attribute and exit */
68522 +                       goto ok;
68523 +               }
68524 +               /* add a new segment: start next_svcn, length evcn1 - next_svcn */
68525 +               if (!ni->attr_list.size) {
68526 +                       err = ni_create_attr_list(ni);
68527 +                       if (err)
68528 +                               goto out;
68529 +                       /* the layout of records has changed */
68530 +                       le_b = NULL;
68531 +                       attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
68532 +                                             0, NULL, &mi_b);
68533 +                       if (!attr_b) {
68534 +                               err = -ENOENT;
68535 +                               goto out;
68536 +                       }
68538 +                       attr = attr_b;
68539 +                       le = le_b;
68540 +                       mi = mi_b;
68541 +                       goto repack;
68542 +               }
68543 +       }
68545 +       svcn = evcn1;
68547 +       /* Estimate next attribute */
68548 +       attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
68550 +       if (attr) {
68551 +               CLST alloc = bytes_to_cluster(
68552 +                       sbi, le64_to_cpu(attr_b->nres.alloc_size));
68553 +               CLST evcn = le64_to_cpu(attr->nres.evcn);
68555 +               if (end < next_svcn)
68556 +                       end = next_svcn;
68557 +               while (end > evcn) {
68558 +                       /* remove segment [svcn : evcn) */
68559 +                       mi_remove_attr(mi, attr);
68561 +                       if (!al_remove_le(ni, le)) {
68562 +                               err = -EINVAL;
68563 +                               goto out;
68564 +                       }
68566 +                       if (evcn + 1 >= alloc) {
68567 +                               /* last attribute segment */
68568 +                               evcn1 = evcn + 1;
68569 +                               goto ins_ext;
68570 +                       }
68572 +                       if (ni_load_mi(ni, le, &mi)) {
68573 +                               attr = NULL;
68574 +                               goto out;
68575 +                       }
68577 +                       attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
68578 +                                           &le->id);
68579 +                       if (!attr) {
68580 +                               err = -EINVAL;
68581 +                               goto out;
68582 +                       }
68583 +                       svcn = le64_to_cpu(attr->nres.svcn);
68584 +                       evcn = le64_to_cpu(attr->nres.evcn);
68585 +               }
68587 +               if (end < svcn)
68588 +                       end = svcn;
68590 +               err = attr_load_runs(attr, ni, run, &end);
68591 +               if (err)
68592 +                       goto out;
68594 +               evcn1 = evcn + 1;
68595 +               attr->nres.svcn = cpu_to_le64(next_svcn);
68596 +               err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
68597 +               if (err)
68598 +                       goto out;
68600 +               le->vcn = cpu_to_le64(next_svcn);
68601 +               ni->attr_list.dirty = true;
68602 +               mi->dirty = true;
68604 +               next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
68605 +       }
68606 +ins_ext:
68607 +       if (evcn1 > next_svcn) {
68608 +               err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
68609 +                                           next_svcn, evcn1 - next_svcn,
68610 +                                           attr_b->flags, &attr, &mi);
68611 +               if (err)
68612 +                       goto out;
68613 +       }
68614 +ok:
68615 +       run_truncate_around(run, vcn);
68616 +out:
68617 +       if (new_valid > data_size)
68618 +               new_valid = data_size;
68620 +       valid_size = le64_to_cpu(attr_b->nres.valid_size);
68621 +       if (new_valid != valid_size) {
68622 +               attr_b->nres.valid_size = cpu_to_le64(new_valid);
68623 +               mi_b->dirty = true;
68624 +       }
68626 +       return err;
68627 +}
68629 +/* Collapse range in file */
68630 +int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
68631 +{
68632 +       int err = 0;
68633 +       struct runs_tree *run = &ni->file.run;
68634 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
68635 +       struct ATTRIB *attr = NULL, *attr_b;
68636 +       struct ATTR_LIST_ENTRY *le, *le_b;
68637 +       struct mft_inode *mi, *mi_b;
68638 +       CLST svcn, evcn1, len, dealloc, alen;
68639 +       CLST vcn, end;
68640 +       u64 valid_size, data_size, alloc_size, total_size;
68641 +       u32 mask;
68642 +       __le16 a_flags;
68644 +       if (!bytes)
68645 +               return 0;
68647 +       le_b = NULL;
68648 +       attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
68649 +       if (!attr_b)
68650 +               return -ENOENT;
68652 +       if (!attr_b->non_res) {
68653 +               /* Attribute is resident. Nothing to do? */
68654 +               return 0;
68655 +       }
68657 +       data_size = le64_to_cpu(attr_b->nres.data_size);
68658 +       alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
68659 +       a_flags = attr_b->flags;
68661 +       if (is_attr_ext(attr_b)) {
68662 +               total_size = le64_to_cpu(attr_b->nres.total_size);
68663 +               mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
68664 +       } else {
68665 +               total_size = alloc_size;
68666 +               mask = sbi->cluster_mask;
68667 +       }
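+       /*
+        * collapse granularity: a whole compression frame for
+        * sparse/compressed attributes, otherwise a single cluster
+        */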
68669 +       if ((vbo & mask) || (bytes & mask)) {
68670 +               /* only cluster-aligned ranges may be collapsed */
68671 +               return -EINVAL;
68672 +       }
68674 +       if (vbo > data_size)
68675 +               return -EINVAL;
68677 +       down_write(&ni->file.run_lock);
68679 +       if (vbo + bytes >= data_size) {
68680 +               u64 new_valid = min(ni->i_valid, vbo);
68682 +               /* Simple truncate file at 'vbo' */
68683 +               truncate_setsize(&ni->vfs_inode, vbo);
68684 +               err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
68685 +                                   &new_valid, true, NULL);
68687 +               if (!err && new_valid < ni->i_valid)
68688 +                       ni->i_valid = new_valid;
68690 +               goto out;
68691 +       }
68693 +       /*
68694 +        * Enumerate all attribute segments and collapse
68695 +        */
68696 +       alen = alloc_size >> sbi->cluster_bits;
68697 +       vcn = vbo >> sbi->cluster_bits;
68698 +       len = bytes >> sbi->cluster_bits;
68699 +       end = vcn + len;
68700 +       dealloc = 0;
68702 +       svcn = le64_to_cpu(attr_b->nres.svcn);
68703 +       evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
68705 +       if (svcn <= vcn && vcn < evcn1) {
68706 +               attr = attr_b;
68707 +               le = le_b;
68708 +               mi = mi_b;
68709 +       } else if (!le_b) {
68710 +               err = -EINVAL;
68711 +               goto out;
68712 +       } else {
68713 +               le = le_b;
68714 +               attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
68715 +                                   &mi);
68716 +               if (!attr) {
68717 +                       err = -EINVAL;
68718 +                       goto out;
68719 +               }
68721 +               svcn = le64_to_cpu(attr->nres.svcn);
68722 +               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
68723 +       }
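+       /*
+        * For each segment: if it lies entirely past the collapsed range,
+        * only its vcns are shifted back by 'len'; if it overlaps the range,
+        * the overlap is deallocated and collapsed; if it is swallowed
+        * whole, the segment and its list entry are removed.
+        */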
68725 +       for (;;) {
68726 +               if (svcn >= end) {
68727 +                       /* shift vcn */
68728 +                       attr->nres.svcn = cpu_to_le64(svcn - len);
68729 +                       attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
68730 +                       if (le) {
68731 +                               le->vcn = attr->nres.svcn;
68732 +                               ni->attr_list.dirty = true;
68733 +                       }
68734 +                       mi->dirty = true;
68735 +               } else if (svcn < vcn || end < evcn1) {
68736 +                       CLST vcn1, eat, next_svcn;
68738 +                       /* collapse a part of this attribute segment */
68739 +                       err = attr_load_runs(attr, ni, run, &svcn);
68740 +                       if (err)
68741 +                               goto out;
68742 +                       vcn1 = max(vcn, svcn);
68743 +                       eat = min(end, evcn1) - vcn1;
68745 +                       err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
68746 +                                               true);
68747 +                       if (err)
68748 +                               goto out;
68750 +                       if (!run_collapse_range(run, vcn1, eat)) {
68751 +                               err = -ENOMEM;
68752 +                               goto out;
68753 +                       }
68755 +                       if (svcn >= vcn) {
68756 +                               /* shift vcn */
68757 +                               attr->nres.svcn = cpu_to_le64(vcn);
68758 +                               if (le) {
68759 +                                       le->vcn = attr->nres.svcn;
68760 +                                       ni->attr_list.dirty = true;
68761 +                               }
68762 +                       }
68764 +                       err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
68765 +                       if (err)
68766 +                               goto out;
68768 +                       next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
68769 +                       if (next_svcn + eat < evcn1) {
68770 +                               err = ni_insert_nonresident(
68771 +                                       ni, ATTR_DATA, NULL, 0, run, next_svcn,
68772 +                                       evcn1 - eat - next_svcn, a_flags, &attr,
68773 +                                       &mi);
68774 +                               if (err)
68775 +                                       goto out;
68777 +                               /* the layout of records may have changed */
68778 +                               attr_b = NULL;
68779 +                               le = al_find_ex(ni, NULL, ATTR_DATA, NULL, 0,
68780 +                                               &next_svcn);
68781 +                               if (!le) {
68782 +                                       err = -EINVAL;
68783 +                                       goto out;
68784 +                               }
68785 +                       }
68787 +                       /* free all allocated memory */
68788 +                       run_truncate(run, 0);
68789 +               } else {
68790 +                       u16 le_sz;
68791 +                       u16 roff = le16_to_cpu(attr->nres.run_off);
68793 +                       /* run == 1 (RUN_DEALLOCATE) means: unpack and deallocate the clusters */
68794 +                       run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
68795 +                                     evcn1 - 1, svcn, Add2Ptr(attr, roff),
68796 +                                     le32_to_cpu(attr->size) - roff);
68798 +                       /* delete this attribute segment */
68799 +                       mi_remove_attr(mi, attr);
68800 +                       if (!le)
68801 +                               break;
68803 +                       le_sz = le16_to_cpu(le->size);
68804 +                       if (!al_remove_le(ni, le)) {
68805 +                               err = -EINVAL;
68806 +                               goto out;
68807 +                       }
68809 +                       if (evcn1 >= alen)
68810 +                               break;
68812 +                       if (!svcn) {
68813 +                               /* Load next record that contains this attribute */
68814 +                               if (ni_load_mi(ni, le, &mi)) {
68815 +                                       err = -EINVAL;
68816 +                                       goto out;
68817 +                               }
68819 +                               /* Look for required attribute */
68820 +                               attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
68821 +                                                   0, &le->id);
68822 +                               if (!attr) {
68823 +                                       err = -EINVAL;
68824 +                                       goto out;
68825 +                               }
68826 +                               goto next_attr;
68827 +                       }
68828 +                       le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
68829 +               }
68831 +               if (evcn1 >= alen)
68832 +                       break;
68834 +               attr = ni_enum_attr_ex(ni, attr, &le, &mi);
68835 +               if (!attr) {
68836 +                       err = -EINVAL;
68837 +                       goto out;
68838 +               }
68840 +next_attr:
68841 +               svcn = le64_to_cpu(attr->nres.svcn);
68842 +               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
68843 +       }
68845 +       if (!attr_b) {
68846 +               le_b = NULL;
68847 +               attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
68848 +                                     &mi_b);
68849 +               if (!attr_b) {
68850 +                       err = -ENOENT;
68851 +                       goto out;
68852 +               }
68853 +       }
68855 +       data_size -= bytes;
68856 +       valid_size = ni->i_valid;
68857 +       if (vbo + bytes <= valid_size)
68858 +               valid_size -= bytes;
68859 +       else if (vbo < valid_size)
68860 +               valid_size = vbo;
68862 +       attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
68863 +       attr_b->nres.data_size = cpu_to_le64(data_size);
68864 +       attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
68865 +       total_size -= (u64)dealloc << sbi->cluster_bits;
68866 +       if (is_attr_ext(attr_b))
68867 +               attr_b->nres.total_size = cpu_to_le64(total_size);
68868 +       mi_b->dirty = true;
68870 +       /* Update the inode size. */
68871 +       ni->i_valid = valid_size;
68872 +       ni->vfs_inode.i_size = data_size;
68873 +       inode_set_bytes(&ni->vfs_inode, total_size);
68874 +       ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
68875 +       mark_inode_dirty(&ni->vfs_inode);
68877 +out:
68878 +       up_write(&ni->file.run_lock);
68879 +       if (err)
68880 +               make_bad_inode(&ni->vfs_inode);
68882 +       return err;
68885 +/* Only for sparse/compressed attributes; not for normal files. */
68886 +int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes)
68888 +       int err = 0;
68889 +       struct runs_tree *run = &ni->file.run;
68890 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
68891 +       struct ATTRIB *attr = NULL, *attr_b;
68892 +       struct ATTR_LIST_ENTRY *le, *le_b;
68893 +       struct mft_inode *mi, *mi_b;
68894 +       CLST svcn, evcn1, vcn, len, end, alen, dealloc;
68895 +       u64 total_size, alloc_size;
68897 +       if (!bytes)
68898 +               return 0;
68900 +       le_b = NULL;
68901 +       attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
68902 +       if (!attr_b)
68903 +               return -ENOENT;
68905 +       if (!attr_b->non_res) {
68906 +               u32 data_size = le32_to_cpu(attr_b->res.data_size);
68907 +               u32 from, to;
68909 +               if (vbo > data_size)
68910 +                       return 0;
68912 +               from = vbo;
68913 +               to = (vbo + bytes) < data_size ? (vbo + bytes) : data_size;
68914 +               memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
68915 +               return 0;
68916 +       }
68918 +       /* TODO: add support for normal files too */
68919 +       if (!is_attr_ext(attr_b))
68920 +               return -EOPNOTSUPP;
68922 +       alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
68923 +       total_size = le64_to_cpu(attr_b->nres.total_size);
68925 +       if (vbo >= alloc_size) {
68926 +               // NOTE: punching a hole beyond the allocated size is allowed
68927 +               return 0;
68928 +       }
68930 +       if (vbo + bytes > alloc_size)
68931 +               bytes = alloc_size - vbo;
68933 +       down_write(&ni->file.run_lock);
68934 +       /*
68935 +        * Enumerate all attribute segments and punch hole where necessary
68936 +        */
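+       /*
+        * Convert the byte range to cluster units: 'vcn' is the first
+        * cluster of the hole, 'end' is one past the last, and 'alen'
+        * is the attribute's allocated length in clusters.
+        */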
68937 +       alen = alloc_size >> sbi->cluster_bits;
68938 +       vcn = vbo >> sbi->cluster_bits;
68939 +       len = bytes >> sbi->cluster_bits;
68940 +       end = vcn + len;
68941 +       dealloc = 0;
68943 +       svcn = le64_to_cpu(attr_b->nres.svcn);
68944 +       evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
68946 +       if (svcn <= vcn && vcn < evcn1) {
68947 +               attr = attr_b;
68948 +               le = le_b;
68949 +               mi = mi_b;
68950 +       } else if (!le_b) {
68951 +               err = -EINVAL;
68952 +               goto out;
68953 +       } else {
68954 +               le = le_b;
68955 +               attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
68956 +                                   &mi);
68957 +               if (!attr) {
68958 +                       err = -EINVAL;
68959 +                       goto out;
68960 +               }
68962 +               svcn = le64_to_cpu(attr->nres.svcn);
68963 +               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
68964 +       }
68966 +       while (svcn < end) {
68967 +               CLST vcn1, zero, dealloc2;
68969 +               err = attr_load_runs(attr, ni, run, &svcn);
68970 +               if (err)
68971 +                       goto out;
68972 +               vcn1 = max(vcn, svcn);
68973 +               zero = min(end, evcn1) - vcn1;
68975 +               dealloc2 = dealloc;
68976 +               err = run_deallocate_ex(sbi, run, vcn1, zero, &dealloc, true);
68977 +               if (err)
68978 +                       goto out;
68980 +               if (dealloc2 == dealloc) {
68981 +                       /* Looks like the required range is already sparse. */
68982 +               } else {
68983 +                       if (!run_add_entry(run, vcn1, SPARSE_LCN, zero,
68984 +                                          false)) {
68985 +                               err = -ENOMEM;
68986 +                               goto out;
68987 +                       }
68989 +                       err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
68990 +                       if (err)
68991 +                               goto out;
68992 +               }
68993 +               /* free all allocated memory */
68994 +               run_truncate(run, 0);
68996 +               if (evcn1 >= alen)
68997 +                       break;
68999 +               attr = ni_enum_attr_ex(ni, attr, &le, &mi);
69000 +               if (!attr) {
69001 +                       err = -EINVAL;
69002 +                       goto out;
69003 +               }
69005 +               svcn = le64_to_cpu(attr->nres.svcn);
69006 +               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
69007 +       }
69009 +       total_size -= (u64)dealloc << sbi->cluster_bits;
69010 +       attr_b->nres.total_size = cpu_to_le64(total_size);
69011 +       mi_b->dirty = true;
69013 +       /* Update the inode size. */
69014 +       inode_set_bytes(&ni->vfs_inode, total_size);
69015 +       ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
69016 +       mark_inode_dirty(&ni->vfs_inode);
69018 +out:
69019 +       up_write(&ni->file.run_lock);
69020 +       if (err)
69021 +               make_bad_inode(&ni->vfs_inode);
69023 +       return err;
69025 diff --git a/fs/ntfs3/attrlist.c b/fs/ntfs3/attrlist.c
69026 new file mode 100644
69027 index 000000000000..ea561361b576
69028 --- /dev/null
69029 +++ b/fs/ntfs3/attrlist.c
69030 @@ -0,0 +1,456 @@
69031 +// SPDX-License-Identifier: GPL-2.0
69033 + *
69034 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
69035 + *
69036 + */
69038 +#include <linux/blkdev.h>
69039 +#include <linux/buffer_head.h>
69040 +#include <linux/fs.h>
69041 +#include <linux/nls.h>
69043 +#include "debug.h"
69044 +#include "ntfs.h"
69045 +#include "ntfs_fs.h"
69047 +/* Returns true if le is valid */
69048 +static inline bool al_is_valid_le(const struct ntfs_inode *ni,
69049 +                                 struct ATTR_LIST_ENTRY *le)
69051 +       if (!le || !ni->attr_list.le || !ni->attr_list.size)
69052 +               return false;
69054 +       return PtrOffset(ni->attr_list.le, le) + le16_to_cpu(le->size) <=
69055 +              ni->attr_list.size;
69058 +void al_destroy(struct ntfs_inode *ni)
69060 +       run_close(&ni->attr_list.run);
69061 +       ntfs_free(ni->attr_list.le);
69062 +       ni->attr_list.le = NULL;
69063 +       ni->attr_list.size = 0;
69064 +       ni->attr_list.dirty = false;
69068 + * ntfs_load_attr_list
69069 + *
69070 + * This method makes sure that the ATTRIB list, if present,
69071 + * has been properly set up.
69072 + */
69073 +int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
69075 +       int err;
69076 +       size_t lsize;
69077 +       void *le = NULL;
69079 +       if (ni->attr_list.size)
69080 +               return 0;
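+       /*
+        * A resident attribute list is copied directly out of the MFT
+        * record; a non-resident one is read in through its run list.
+        */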
69082 +       if (!attr->non_res) {
69083 +               lsize = le32_to_cpu(attr->res.data_size);
69084 +               le = ntfs_malloc(al_aligned(lsize));
69085 +               if (!le) {
69086 +                       err = -ENOMEM;
69087 +                       goto out;
69088 +               }
69089 +               memcpy(le, resident_data(attr), lsize);
69090 +       } else if (attr->nres.svcn) {
69091 +               err = -EINVAL;
69092 +               goto out;
69093 +       } else {
69094 +               u16 run_off = le16_to_cpu(attr->nres.run_off);
69096 +               lsize = le64_to_cpu(attr->nres.data_size);
69098 +               run_init(&ni->attr_list.run);
69100 +               err = run_unpack_ex(&ni->attr_list.run, ni->mi.sbi, ni->mi.rno,
69101 +                                   0, le64_to_cpu(attr->nres.evcn), 0,
69102 +                                   Add2Ptr(attr, run_off),
69103 +                                   le32_to_cpu(attr->size) - run_off);
69104 +               if (err < 0)
69105 +                       goto out;
69107 +               le = ntfs_malloc(al_aligned(lsize));
69108 +               if (!le) {
69109 +                       err = -ENOMEM;
69110 +                       goto out;
69111 +               }
69113 +               err = ntfs_read_run_nb(ni->mi.sbi, &ni->attr_list.run, 0, le,
69114 +                                      lsize, NULL);
69115 +               if (err)
69116 +                       goto out;
69117 +       }
69119 +       ni->attr_list.size = lsize;
69120 +       ni->attr_list.le = le;
69122 +       return 0;
69124 +out:
69125 +       ni->attr_list.le = le;
69126 +       al_destroy(ni);
69128 +       return err;
69132 + * al_enumerate
69133 + *
69134 + * Returns the next list 'le'
69135 + * if 'le' is NULL then returns the first 'le'
69136 + */
69137 +struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
69138 +                                    struct ATTR_LIST_ENTRY *le)
69140 +       size_t off;
69141 +       u16 sz;
69143 +       if (!le) {
69144 +               le = ni->attr_list.le;
69145 +       } else {
69146 +               sz = le16_to_cpu(le->size);
69147 +               if (sz < sizeof(struct ATTR_LIST_ENTRY)) {
69148 +                       /* Impossible, because we never return such an entry. */
69149 +                       return NULL;
69150 +               }
69151 +               le = Add2Ptr(le, sz);
69152 +       }
69154 +       /* Check boundary */
69155 +       off = PtrOffset(ni->attr_list.le, le);
69156 +       if (off + sizeof(struct ATTR_LIST_ENTRY) > ni->attr_list.size) {
69157 +               // The regular end of the list.
69158 +               return NULL;
69159 +       }
69161 +       sz = le16_to_cpu(le->size);
69163 +       /* Check 'le' for errors */
69164 +       if (sz < sizeof(struct ATTR_LIST_ENTRY) ||
69165 +           off + sz > ni->attr_list.size ||
69166 +           sz < le->name_off + le->name_len * sizeof(short)) {
69167 +               return NULL;
69168 +       }
69170 +       return le;
69174 + * al_find_le
69175 + *
69176 + * finds the first 'le' in the list which matches type, name and vcn
69177 + * Returns NULL if not found
69178 + */
69179 +struct ATTR_LIST_ENTRY *al_find_le(struct ntfs_inode *ni,
69180 +                                  struct ATTR_LIST_ENTRY *le,
69181 +                                  const struct ATTRIB *attr)
69183 +       CLST svcn = attr_svcn(attr);
69185 +       return al_find_ex(ni, le, attr->type, attr_name(attr), attr->name_len,
69186 +                         &svcn);
69190 + * al_find_ex
69191 + *
69192 + * finds the first 'le' in the list which matches type, name and vcn
69193 + * Returns NULL if not found
69194 + */
69195 +struct ATTR_LIST_ENTRY *al_find_ex(struct ntfs_inode *ni,
69196 +                                  struct ATTR_LIST_ENTRY *le,
69197 +                                  enum ATTR_TYPE type, const __le16 *name,
69198 +                                  u8 name_len, const CLST *vcn)
69200 +       struct ATTR_LIST_ENTRY *ret = NULL;
69201 +       u32 type_in = le32_to_cpu(type);
69203 +       while ((le = al_enumerate(ni, le))) {
69204 +               u64 le_vcn;
69205 +               int diff = le32_to_cpu(le->type) - type_in;
69207 +               /* List entries are sorted by type, name and vcn */
69208 +               if (diff < 0)
69209 +                       continue;
69211 +               if (diff > 0)
69212 +                       return ret;
69214 +               if (le->name_len != name_len)
69215 +                       continue;
69217 +               le_vcn = le64_to_cpu(le->vcn);
69218 +               if (!le_vcn) {
69219 +                       /*
69220 +                        * compare entry names only for entry with vcn == 0
69221 +                        */
69222 +                       diff = ntfs_cmp_names(le_name(le), name_len, name,
69223 +                                             name_len, ni->mi.sbi->upcase,
69224 +                                             true);
69225 +                       if (diff < 0)
69226 +                               continue;
69228 +                       if (diff > 0)
69229 +                               return ret;
69230 +               }
69232 +               if (!vcn)
69233 +                       return le;
69235 +               if (*vcn == le_vcn)
69236 +                       return le;
69238 +               if (*vcn < le_vcn)
69239 +                       return ret;
69241 +               ret = le;
69242 +       }
69244 +       return ret;
69248 + * al_find_le_to_insert
69249 + *
69250 + * finds the first list entry which matches type, name and vcn
69251 + */
69252 +static struct ATTR_LIST_ENTRY *al_find_le_to_insert(struct ntfs_inode *ni,
69253 +                                                   enum ATTR_TYPE type,
69254 +                                                   const __le16 *name,
69255 +                                                   u8 name_len, CLST vcn)
69257 +       struct ATTR_LIST_ENTRY *le = NULL, *prev;
69258 +       u32 type_in = le32_to_cpu(type);
69260 +       /* List entries are sorted by type, name, vcn */
69261 +       while ((le = al_enumerate(ni, prev = le))) {
69262 +               int diff = le32_to_cpu(le->type) - type_in;
69264 +               if (diff < 0)
69265 +                       continue;
69267 +               if (diff > 0)
69268 +                       return le;
69270 +               if (!le->vcn) {
69271 +                       /*
69272 +                        * compare entry names only for entry with vcn == 0
69273 +                        */
69274 +                       diff = ntfs_cmp_names(le_name(le), le->name_len, name,
69275 +                                             name_len, ni->mi.sbi->upcase,
69276 +                                             true);
69277 +                       if (diff < 0)
69278 +                               continue;
69280 +                       if (diff > 0)
69281 +                               return le;
69282 +               }
69284 +               if (le64_to_cpu(le->vcn) >= vcn)
69285 +                       return le;
69286 +       }
69288 +       return prev ? Add2Ptr(prev, le16_to_cpu(prev->size)) : ni->attr_list.le;
69292 + * al_add_le
69293 + *
69294 + * adds an "attribute list entry" to the list.
69295 + */
69296 +int al_add_le(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name,
69297 +             u8 name_len, CLST svcn, __le16 id, const struct MFT_REF *ref,
69298 +             struct ATTR_LIST_ENTRY **new_le)
69300 +       int err;
69301 +       struct ATTRIB *attr;
69302 +       struct ATTR_LIST_ENTRY *le;
69303 +       size_t off;
69304 +       u16 sz;
69305 +       size_t asize, new_asize;
69306 +       u64 new_size;
69307 +       typeof(ni->attr_list) *al = &ni->attr_list;
69309 +       /*
69310 +        * Compute the size of the new 'le'
69311 +        */
69312 +       sz = le_size(name_len);
69313 +       new_size = al->size + sz;
69314 +       asize = al_aligned(al->size);
69315 +       new_asize = al_aligned(new_size);
69317 +       /* Scan forward to the point at which the new 'le' should be inserted. */
69318 +       le = al_find_le_to_insert(ni, type, name, name_len, svcn);
69319 +       off = PtrOffset(al->le, le);
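+       /*
+        * If the list no longer fits its aligned allocation, allocate a
+        * bigger buffer and copy the two halves around the insertion
+        * point; otherwise just shift the tail to make room.
+        */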
69321 +       if (new_size > asize) {
69322 +               void *ptr = ntfs_malloc(new_asize);
69324 +               if (!ptr)
69325 +                       return -ENOMEM;
69327 +               memcpy(ptr, al->le, off);
69328 +               memcpy(Add2Ptr(ptr, off + sz), le, al->size - off);
69329 +               le = Add2Ptr(ptr, off);
69330 +               ntfs_free(al->le);
69331 +               al->le = ptr;
69332 +       } else {
69333 +               memmove(Add2Ptr(le, sz), le, al->size - off);
69334 +       }
69336 +       al->size = new_size;
69338 +       le->type = type;
69339 +       le->size = cpu_to_le16(sz);
69340 +       le->name_len = name_len;
69341 +       le->name_off = offsetof(struct ATTR_LIST_ENTRY, name);
69342 +       le->vcn = cpu_to_le64(svcn);
69343 +       le->ref = *ref;
69344 +       le->id = id;
69345 +       memcpy(le->name, name, sizeof(short) * name_len);
69347 +       al->dirty = true;
69349 +       err = attr_set_size(ni, ATTR_LIST, NULL, 0, &al->run, new_size,
69350 +                           &new_size, true, &attr);
69351 +       if (err)
69352 +               return err;
69354 +       if (attr && attr->non_res) {
69355 +               err = ntfs_sb_write_run(ni->mi.sbi, &al->run, 0, al->le,
69356 +                                       al->size);
69357 +               if (err)
69358 +                       return err;
69359 +       }
69361 +       al->dirty = false;
69362 +       *new_le = le;
69364 +       return 0;
69368 + * al_remove_le
69369 + *
69370 + * removes 'le' from attribute list
69371 + */
69372 +bool al_remove_le(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le)
69374 +       u16 size;
69375 +       size_t off;
69376 +       typeof(ni->attr_list) *al = &ni->attr_list;
69378 +       if (!al_is_valid_le(ni, le))
69379 +               return false;
69381 +       /* Save on stack the size of 'le' */
69382 +       size = le16_to_cpu(le->size);
69383 +       off = PtrOffset(al->le, le);
69385 +       memmove(le, Add2Ptr(le, size), al->size - (off + size));
69387 +       al->size -= size;
69388 +       al->dirty = true;
69390 +       return true;
69394 + * al_delete_le
69395 + *
69396 + * deletes from the list the first 'le' which matches its parameters.
69397 + */
69398 +bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,
69399 +                 const __le16 *name, size_t name_len,
69400 +                 const struct MFT_REF *ref)
69402 +       u16 size;
69403 +       struct ATTR_LIST_ENTRY *le;
69404 +       size_t off;
69405 +       typeof(ni->attr_list) *al = &ni->attr_list;
69407 +       /* Scan forward to the first 'le' that matches the input */
69408 +       le = al_find_ex(ni, NULL, type, name, name_len, &vcn);
69409 +       if (!le)
69410 +               return false;
69412 +       off = PtrOffset(al->le, le);
69414 +next:
69415 +       if (off >= al->size)
69416 +               return false;
69417 +       if (le->type != type)
69418 +               return false;
69419 +       if (le->name_len != name_len)
69420 +               return false;
69421 +       if (name_len && ntfs_cmp_names(le_name(le), name_len, name, name_len,
69422 +                                      ni->mi.sbi->upcase, true))
69423 +               return false;
69424 +       if (le64_to_cpu(le->vcn) != vcn)
69425 +               return false;
69427 +       /*
69428 +        * The caller specified a segment reference, so we have to
69429 +        * scan through the matching entries until we find that segment
69430 +        * reference or we run out of matching entries.
69431 +        */
69432 +       if (ref && memcmp(ref, &le->ref, sizeof(*ref))) {
69433 +               off += le16_to_cpu(le->size);
69434 +               le = Add2Ptr(al->le, off);
69435 +               goto next;
69436 +       }
69438 +       /* Save on stack the size of 'le' */
69439 +       size = le16_to_cpu(le->size);
69440 +       /* Delete 'le'. */
69441 +       memmove(le, Add2Ptr(le, size), al->size - (off + size));
69443 +       al->size -= size;
69444 +       al->dirty = true;
69446 +       return true;
69450 + * al_update
69451 + */
69452 +int al_update(struct ntfs_inode *ni)
69454 +       int err;
69455 +       struct ATTRIB *attr;
69456 +       typeof(ni->attr_list) *al = &ni->attr_list;
69458 +       if (!al->dirty || !al->size)
69459 +               return 0;
69461 +       /*
69462 +        * The attribute list is grown on demand in al_add_le;
69463 +        * it is shrunk here.
69464 +        */
69465 +       err = attr_set_size(ni, ATTR_LIST, NULL, 0, &al->run, al->size, NULL,
69466 +                           false, &attr);
69467 +       if (err)
69468 +               goto out;
69470 +       if (!attr->non_res) {
69471 +               memcpy(resident_data(attr), al->le, al->size);
69472 +       } else {
69473 +               err = ntfs_sb_write_run(ni->mi.sbi, &al->run, 0, al->le,
69474 +                                       al->size);
69475 +               if (err)
69476 +                       goto out;
69478 +               attr->nres.valid_size = attr->nres.data_size;
69479 +       }
69481 +       ni->mi.dirty = true;
69482 +       al->dirty = false;
69484 +out:
69485 +       return err;
69487 diff --git a/fs/ntfs3/bitfunc.c b/fs/ntfs3/bitfunc.c
69488 new file mode 100644
69489 index 000000000000..2de5faef2721
69490 --- /dev/null
69491 +++ b/fs/ntfs3/bitfunc.c
69492 @@ -0,0 +1,135 @@
69493 +// SPDX-License-Identifier: GPL-2.0
69495 + *
69496 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
69497 + *
69498 + */
69499 +#include <linux/blkdev.h>
69500 +#include <linux/buffer_head.h>
69501 +#include <linux/fs.h>
69502 +#include <linux/nls.h>
69504 +#include "debug.h"
69505 +#include "ntfs.h"
69506 +#include "ntfs_fs.h"
69508 +#define BITS_IN_SIZE_T (sizeof(size_t) * 8)
69511 + * fill_mask[i] - first i bits are '1', i = 0,1,2,3,4,5,6,7,8
69512 + * fill_mask[i] = 0xFF >> (8-i)
69513 + */
69514 +static const u8 fill_mask[] = { 0x00, 0x01, 0x03, 0x07, 0x0F,
69515 +                               0x1F, 0x3F, 0x7F, 0xFF };
69518 + * zero_mask[i] - first i bits are '0', i = 0,1,2,3,4,5,6,7,8
69519 + * zero_mask[i] = 0xFF << i
69520 + */
69521 +static const u8 zero_mask[] = { 0xFF, 0xFE, 0xFC, 0xF8, 0xF0,
69522 +                               0xE0, 0xC0, 0x80, 0x00 };
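+/*
+ * Example: to test bits [2, 5) within one byte, combine the masks:
+ * fill_mask[2 + 3] & zero_mask[2] == 0x1F & 0xFC == 0x1C,
+ * i.e. only bits 2..4 are selected.
+ */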
69525 + * are_bits_clear
69526 + *
69527 + * Returns true if all bits [bit, bit+nbits) are zeros ('0').
69528 + */
69529 +bool are_bits_clear(const ulong *lmap, size_t bit, size_t nbits)
69531 +       size_t pos = bit & 7;
69532 +       const u8 *map = (u8 *)lmap + (bit >> 3);
69534 +       if (pos) {
69535 +               if (8 - pos >= nbits)
69536 +                       return !nbits || !(*map & fill_mask[pos + nbits] &
69537 +                                          zero_mask[pos]);
69539 +               if (*map++ & zero_mask[pos])
69540 +                       return false;
69541 +               nbits -= 8 - pos;
69542 +       }
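+       /*
+        * 'map' is now byte-aligned: advance byte by byte up to a
+        * size_t boundary, then test whole words, then the remaining
+        * bytes and, last, the final partial byte.
+        */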
69544 +       pos = ((size_t)map) & (sizeof(size_t) - 1);
69545 +       if (pos) {
69546 +               pos = sizeof(size_t) - pos;
69547 +               if (nbits >= pos * 8) {
69548 +                       for (nbits -= pos * 8; pos; pos--, map++) {
69549 +                               if (*map)
69550 +                                       return false;
69551 +                       }
69552 +               }
69553 +       }
69555 +       for (pos = nbits / BITS_IN_SIZE_T; pos; pos--, map += sizeof(size_t)) {
69556 +               if (*((size_t *)map))
69557 +                       return false;
69558 +       }
69560 +       for (pos = (nbits % BITS_IN_SIZE_T) >> 3; pos; pos--, map++) {
69561 +               if (*map)
69562 +                       return false;
69563 +       }
69565 +       pos = nbits & 7;
69566 +       if (pos && (*map & fill_mask[pos]))
69567 +               return false;
69569 +       // All bits are zero
69570 +       return true;
69574 + * are_bits_set
69575 + *
69576 + * Returns true if all bits [bit, bit+nbits) are ones ('1').
69577 + */
69578 +bool are_bits_set(const ulong *lmap, size_t bit, size_t nbits)
69580 +       u8 mask;
69581 +       size_t pos = bit & 7;
69582 +       const u8 *map = (u8 *)lmap + (bit >> 3);
69584 +       if (pos) {
69585 +               if (8 - pos >= nbits) {
69586 +                       mask = fill_mask[pos + nbits] & zero_mask[pos];
69587 +                       return !nbits || (*map & mask) == mask;
69588 +               }
69590 +               mask = zero_mask[pos];
69591 +               if ((*map++ & mask) != mask)
69592 +                       return false;
69593 +               nbits -= 8 - pos;
69594 +       }
69596 +       pos = ((size_t)map) & (sizeof(size_t) - 1);
69597 +       if (pos) {
69598 +               pos = sizeof(size_t) - pos;
69599 +               if (nbits >= pos * 8) {
69600 +                       for (nbits -= pos * 8; pos; pos--, map++) {
69601 +                               if (*map != 0xFF)
69602 +                                       return false;
69603 +                       }
69604 +               }
69605 +       }
69607 +       for (pos = nbits / BITS_IN_SIZE_T; pos; pos--, map += sizeof(size_t)) {
69608 +               if (*((size_t *)map) != MINUS_ONE_T)
69609 +                       return false;
69610 +       }
69612 +       for (pos = (nbits % BITS_IN_SIZE_T) >> 3; pos; pos--, map++) {
69613 +               if (*map != 0xFF)
69614 +                       return false;
69615 +       }
69617 +       pos = nbits & 7;
69618 +       if (pos) {
69619 +               u8 mask = fill_mask[pos];
69621 +               if ((*map & mask) != mask)
69622 +                       return false;
69623 +       }
69625 +       // All bits are ones
69626 +       return true;
69628 diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
69629 new file mode 100644
69630 index 000000000000..32aab0031221
69631 --- /dev/null
69632 +++ b/fs/ntfs3/bitmap.c
69633 @@ -0,0 +1,1519 @@
69634 +// SPDX-License-Identifier: GPL-2.0
69636 + *
69637 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
69638 + *
69639 + * This code builds two trees of free cluster extents.
69640 + * The trees are sorted by extent start and by extent length.
69641 + * NTFS_MAX_WND_EXTENTS defines the maximum number of elements in the trees.
69642 + * In the extreme case, the code reads the on-disk bitmap to find free clusters.
69643 + *
69644 + */
69646 +#include <linux/blkdev.h>
69647 +#include <linux/buffer_head.h>
69648 +#include <linux/fs.h>
69649 +#include <linux/nls.h>
69651 +#include "debug.h"
69652 +#include "ntfs.h"
69653 +#include "ntfs_fs.h"
69656 + * Maximum number of extents in tree.
69657 + */
69658 +#define NTFS_MAX_WND_EXTENTS (32u * 1024u)
69660 +struct rb_node_key {
69661 +       struct rb_node node;
69662 +       size_t key;
69666 + * Tree is sorted by start (key)
69667 + */
69668 +struct e_node {
69669 +       struct rb_node_key start; /* Tree sorted by start */
69670 +       struct rb_node_key count; /* Tree sorted by len */
69673 +static int wnd_rescan(struct wnd_bitmap *wnd);
69674 +static struct buffer_head *wnd_map(struct wnd_bitmap *wnd, size_t iw);
69675 +static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits);
69677 +static struct kmem_cache *ntfs_enode_cachep;
69679 +int __init ntfs3_init_bitmap(void)
69681 +       ntfs_enode_cachep =
69682 +               kmem_cache_create("ntfs3_enode_cache", sizeof(struct e_node), 0,
69683 +                                 SLAB_RECLAIM_ACCOUNT, NULL);
69684 +       return ntfs_enode_cachep ? 0 : -ENOMEM;
69687 +void ntfs3_exit_bitmap(void)
69689 +       kmem_cache_destroy(ntfs_enode_cachep);
69692 +static inline u32 wnd_bits(const struct wnd_bitmap *wnd, size_t i)
69694 +       return i + 1 == wnd->nwnd ? wnd->bits_last : wnd->sb->s_blocksize * 8;
69698 + * Scans the window 'buf' in the bit range [wpos, wend), tracking the
69699 + * biggest free fragment seen so far in b_pos/b_len.
69700 + * Returns -1 if a run of 'to_alloc' free bits is not found.
69701 + */
69702 +static size_t wnd_scan(const ulong *buf, size_t wbit, u32 wpos, u32 wend,
69703 +                      size_t to_alloc, size_t *prev_tail, size_t *b_pos,
69704 +                      size_t *b_len)
69706 +       while (wpos < wend) {
69707 +               size_t free_len;
69708 +               u32 free_bits, end;
69709 +               u32 used = find_next_zero_bit(buf, wend, wpos);
69711 +               if (used >= wend) {
69712 +                       if (*b_len < *prev_tail) {
69713 +                               *b_pos = wbit - *prev_tail;
69714 +                               *b_len = *prev_tail;
69715 +                       }
69717 +                       *prev_tail = 0;
69718 +                       return -1;
69719 +               }
69721 +               if (used > wpos) {
69722 +                       wpos = used;
69723 +                       if (*b_len < *prev_tail) {
69724 +                               *b_pos = wbit - *prev_tail;
69725 +                               *b_len = *prev_tail;
69726 +                       }
69728 +                       *prev_tail = 0;
69729 +               }
69731 +               /*
69732 +                * Now we have a fragment [wpos, wend) starting with 0
69733 +                */
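+               /*
+                * 'prev_tail' free bits already trail the previous
+                * window, so only 'to_alloc - prev_tail' more bits are
+                * needed here; look for the first used bit in that span.
+                */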
69734 +               end = wpos + to_alloc - *prev_tail;
69735 +               free_bits = find_next_bit(buf, min(end, wend), wpos);
69737 +               free_len = *prev_tail + free_bits - wpos;
69739 +               if (*b_len < free_len) {
69740 +                       *b_pos = wbit + wpos - *prev_tail;
69741 +                       *b_len = free_len;
69742 +               }
69744 +               if (free_len >= to_alloc)
69745 +                       return wbit + wpos - *prev_tail;
69747 +               if (free_bits >= wend) {
69748 +                       *prev_tail += free_bits - wpos;
69749 +                       return -1;
69750 +               }
69752 +               wpos = free_bits + 1;
69754 +               *prev_tail = 0;
69755 +       }
69757 +       return -1;
69761 + * wnd_close
69762 + *
69763 + * Frees all resources
69764 + */
69765 +void wnd_close(struct wnd_bitmap *wnd)
69767 +       struct rb_node *node, *next;
69769 +       ntfs_free(wnd->free_bits);
69770 +       run_close(&wnd->run);
69772 +       node = rb_first(&wnd->start_tree);
69774 +       while (node) {
69775 +               next = rb_next(node);
69776 +               rb_erase(node, &wnd->start_tree);
69777 +               kmem_cache_free(ntfs_enode_cachep,
69778 +                               rb_entry(node, struct e_node, start.node));
69779 +               node = next;
69780 +       }
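+/*
+ * rb_lookup
+ *
+ * Returns the node with the greatest key not exceeding 'v',
+ * or NULL if every key in the tree is greater than 'v'.
+ */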
69783 +static struct rb_node *rb_lookup(struct rb_root *root, size_t v)
69785 +       struct rb_node **p = &root->rb_node;
69786 +       struct rb_node *r = NULL;
69788 +       while (*p) {
69789 +               struct rb_node_key *k;
69791 +               k = rb_entry(*p, struct rb_node_key, node);
69792 +               if (v < k->key) {
69793 +                       p = &(*p)->rb_left;
69794 +               } else if (v > k->key) {
69795 +                       r = &k->node;
69796 +                       p = &(*p)->rb_right;
69797 +               } else {
69798 +                       return &k->node;
69799 +               }
69800 +       }
69802 +       return r;
69806 + * rb_insert_count
69807 + *
69808 + * Helper function to insert into the special 'count' tree
69809 + */
69810 +static inline bool rb_insert_count(struct rb_root *root, struct e_node *e)
69812 +       struct rb_node **p = &root->rb_node;
69813 +       struct rb_node *parent = NULL;
69814 +       size_t e_ckey = e->count.key;
69815 +       size_t e_skey = e->start.key;
69817 +       while (*p) {
69818 +               struct e_node *k =
69819 +                       rb_entry(parent = *p, struct e_node, count.node);
69821 +               if (e_ckey > k->count.key) {
69822 +                       p = &(*p)->rb_left;
69823 +               } else if (e_ckey < k->count.key) {
69824 +                       p = &(*p)->rb_right;
69825 +               } else if (e_skey < k->start.key) {
69826 +                       p = &(*p)->rb_left;
69827 +               } else if (e_skey > k->start.key) {
69828 +                       p = &(*p)->rb_right;
69829 +               } else {
69830 +                       WARN_ON(1);
69831 +                       return false;
69832 +               }
69833 +       }
69835 +       rb_link_node(&e->count.node, parent, p);
69836 +       rb_insert_color(&e->count.node, root);
69837 +       return true;
69841 + * rb_insert_start
69842 + *
69843 + * Helper function to insert into the special 'start' tree
69844 + */
69845 +static inline bool rb_insert_start(struct rb_root *root, struct e_node *e)
69847 +       struct rb_node **p = &root->rb_node;
69848 +       struct rb_node *parent = NULL;
69849 +       size_t e_skey = e->start.key;
69851 +       while (*p) {
69852 +               struct e_node *k;
69854 +               parent = *p;
69856 +               k = rb_entry(parent, struct e_node, start.node);
69857 +               if (e_skey < k->start.key) {
69858 +                       p = &(*p)->rb_left;
69859 +               } else if (e_skey > k->start.key) {
69860 +                       p = &(*p)->rb_right;
69861 +               } else {
69862 +                       WARN_ON(1);
69863 +                       return false;
69864 +               }
69865 +       }
69867 +       rb_link_node(&e->start.node, parent, p);
69868 +       rb_insert_color(&e->start.node, root);
69869 +       return true;
69873 + * wnd_add_free_ext
69874 + *
69875 + * Adds a new extent of free space.
69876 + * 'build' is true while the tree is being built during wnd_rescan.
69877 + */
69878 +static void wnd_add_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len,
69879 +                            bool build)
69881 +       struct e_node *e, *e0 = NULL;
69882 +       size_t ib, end_in = bit + len;
69883 +       struct rb_node *n;
69885 +       if (build) {
69886 +               /* Use extent_min to filter too short extents */
69887 +               if (wnd->count >= NTFS_MAX_WND_EXTENTS &&
69888 +                   len <= wnd->extent_min) {
69889 +                       wnd->uptodated = -1;
69890 +                       return;
69891 +               }
69892 +       } else {
69893 +               /* Try to find extent before 'bit' */
69894 +               n = rb_lookup(&wnd->start_tree, bit);
69896 +               if (!n) {
69897 +                       n = rb_first(&wnd->start_tree);
69898 +               } else {
69899 +                       e = rb_entry(n, struct e_node, start.node);
69900 +                       n = rb_next(n);
69901 +                       if (e->start.key + e->count.key == bit) {
69902 +                               /* Remove left */
69903 +                               bit = e->start.key;
69904 +                               len += e->count.key;
69905 +                               rb_erase(&e->start.node, &wnd->start_tree);
69906 +                               rb_erase(&e->count.node, &wnd->count_tree);
69907 +                               wnd->count -= 1;
69908 +                               e0 = e;
69909 +                       }
69910 +               }
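+               /*
+                * Absorb cached extents that start inside the new range:
+                * each one is removed from both trees and the new extent
+                * is stretched to cover it.
+                */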
69912 +               while (n) {
69913 +                       size_t next_end;
69915 +                       e = rb_entry(n, struct e_node, start.node);
69916 +                       next_end = e->start.key + e->count.key;
69917 +                       if (e->start.key > end_in)
69918 +                               break;
69920 +                       /* Remove right */
69921 +                       n = rb_next(n);
69922 +                       len += next_end - end_in;
69923 +                       end_in = next_end;
69924 +                       rb_erase(&e->start.node, &wnd->start_tree);
69925 +                       rb_erase(&e->count.node, &wnd->count_tree);
69926 +                       wnd->count -= 1;
69928 +                       if (!e0)
69929 +                               e0 = e;
69930 +                       else
69931 +                               kmem_cache_free(ntfs_enode_cachep, e);
69932 +               }
69934 +               if (wnd->uptodated != 1) {
69935 +                       /* Check bits before 'bit' */
69936 +                       ib = wnd->zone_bit == wnd->zone_end ||
69937 +                                            bit < wnd->zone_end
69938 +                                    ? 0
69939 +                                    : wnd->zone_end;
69941 +                       while (bit > ib && wnd_is_free_hlp(wnd, bit - 1, 1)) {
69942 +                               bit -= 1;
69943 +                               len += 1;
69944 +                       }
69946 +                       /* Check bits after 'end_in' */
69947 +                       ib = wnd->zone_bit == wnd->zone_end ||
69948 +                                            end_in > wnd->zone_bit
69949 +                                    ? wnd->nbits
69950 +                                    : wnd->zone_bit;
69952 +                       while (end_in < ib && wnd_is_free_hlp(wnd, end_in, 1)) {
69953 +                               end_in += 1;
69954 +                               len += 1;
69955 +                       }
69956 +               }
69957 +       }
69958 +       /* Insert new fragment */
69959 +       if (wnd->count >= NTFS_MAX_WND_EXTENTS) {
69960 +               if (e0)
69961 +                       kmem_cache_free(ntfs_enode_cachep, e0);
69963 +               wnd->uptodated = -1;
69965 +               /* Compare with smallest fragment */
69966 +               n = rb_last(&wnd->count_tree);
69967 +               e = rb_entry(n, struct e_node, count.node);
69968 +               if (len <= e->count.key)
69969 +                       goto out; /* Do not insert small fragments */
69971 +               if (build) {
69972 +                       struct e_node *e2;
69974 +                       n = rb_prev(n);
69975 +                       e2 = rb_entry(n, struct e_node, count.node);
69976 +                       /* smallest fragment will be 'e2->count.key' */
69977 +                       wnd->extent_min = e2->count.key;
69978 +               }
69980 +               /* Replace smallest fragment by new one */
69981 +               rb_erase(&e->start.node, &wnd->start_tree);
69982 +               rb_erase(&e->count.node, &wnd->count_tree);
69983 +               wnd->count -= 1;
69984 +       } else {
69985 +               e = e0 ? e0 : kmem_cache_alloc(ntfs_enode_cachep, GFP_ATOMIC);
69986 +               if (!e) {
69987 +                       wnd->uptodated = -1;
69988 +                       goto out;
69989 +               }
69991 +               if (build && len <= wnd->extent_min)
69992 +                       wnd->extent_min = len;
69993 +       }
69994 +       e->start.key = bit;
69995 +       e->count.key = len;
69996 +       if (len > wnd->extent_max)
69997 +               wnd->extent_max = len;
69999 +       rb_insert_start(&wnd->start_tree, e);
70000 +       rb_insert_count(&wnd->count_tree, e);
70001 +       wnd->count += 1;
70003 +out:;
70007 + * wnd_remove_free_ext
70008 + *
70009 + * removes a run from the cached free space
70010 + */
70011 +static void wnd_remove_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len)
70013 +       struct rb_node *n, *n3;
70014 +       struct e_node *e, *e3;
70015 +       size_t end_in = bit + len;
70016 +       size_t end3, end, new_key, new_len, max_new_len;
70018 +       /* Try to find extent before 'bit' */
70019 +       n = rb_lookup(&wnd->start_tree, bit);
70021 +       if (!n)
70022 +               return;
70024 +       e = rb_entry(n, struct e_node, start.node);
70025 +       end = e->start.key + e->count.key;
70027 +       new_key = new_len = 0;
70028 +       len = e->count.key;
70030 +       /* Range [bit,end_in) must be inside 'e' or outside 'e' and 'n' */
70031 +       if (e->start.key > bit)
70032 +               ;
70033 +       else if (end_in <= end) {
70034 +               /* Range [bit,end_in) inside 'e' */
70035 +               new_key = end_in;
70036 +               new_len = end - end_in;
70037 +               len = bit - e->start.key;
70038 +       } else if (bit > end) {
70039 +               bool bmax = false;
70041 +               n3 = rb_next(n);
70043 +               while (n3) {
70044 +                       e3 = rb_entry(n3, struct e_node, start.node);
70045 +                       if (e3->start.key >= end_in)
70046 +                               break;
70048 +                       if (e3->count.key == wnd->extent_max)
70049 +                               bmax = true;
70051 +                       end3 = e3->start.key + e3->count.key;
70052 +                       if (end3 > end_in) {
70053 +                               e3->start.key = end_in;
70054 +                               rb_erase(&e3->count.node, &wnd->count_tree);
70055 +                               e3->count.key = end3 - end_in;
70056 +                               rb_insert_count(&wnd->count_tree, e3);
70057 +                               break;
70058 +                       }
70060 +                       n3 = rb_next(n3);
70061 +                       rb_erase(&e3->start.node, &wnd->start_tree);
70062 +                       rb_erase(&e3->count.node, &wnd->count_tree);
70063 +                       wnd->count -= 1;
70064 +                       kmem_cache_free(ntfs_enode_cachep, e3);
70065 +               }
70066 +               if (!bmax)
70067 +                       return;
70068 +               n3 = rb_first(&wnd->count_tree);
70069 +               wnd->extent_max =
70070 +                       n3 ? rb_entry(n3, struct e_node, count.node)->count.key
70071 +                          : 0;
70072 +               return;
70073 +       }
70075 +       if (e->count.key != wnd->extent_max) {
70076 +               ;
70077 +       } else if (rb_prev(&e->count.node)) {
70078 +               ;
70079 +       } else {
70080 +               n3 = rb_next(&e->count.node);
70081 +               max_new_len = len > new_len ? len : new_len;
70082 +               if (!n3) {
70083 +                       wnd->extent_max = max_new_len;
70084 +               } else {
70085 +                       e3 = rb_entry(n3, struct e_node, count.node);
70086 +                       wnd->extent_max = max(e3->count.key, max_new_len);
70087 +               }
70088 +       }
70090 +       if (!len) {
70091 +               if (new_len) {
70092 +                       e->start.key = new_key;
70093 +                       rb_erase(&e->count.node, &wnd->count_tree);
70094 +                       e->count.key = new_len;
70095 +                       rb_insert_count(&wnd->count_tree, e);
70096 +               } else {
70097 +                       rb_erase(&e->start.node, &wnd->start_tree);
70098 +                       rb_erase(&e->count.node, &wnd->count_tree);
70099 +                       wnd->count -= 1;
70100 +                       kmem_cache_free(ntfs_enode_cachep, e);
70101 +               }
70102 +               goto out;
70103 +       }
70104 +       rb_erase(&e->count.node, &wnd->count_tree);
70105 +       e->count.key = len;
70106 +       rb_insert_count(&wnd->count_tree, e);
70108 +       if (!new_len)
70109 +               goto out;
70111 +       if (wnd->count >= NTFS_MAX_WND_EXTENTS) {
70112 +               wnd->uptodated = -1;
70114 +               /* Get minimal extent */
70115 +               e = rb_entry(rb_last(&wnd->count_tree), struct e_node,
70116 +                            count.node);
70117 +               if (e->count.key > new_len)
70118 +                       goto out;
70120 +               /* Replace minimum */
70121 +               rb_erase(&e->start.node, &wnd->start_tree);
70122 +               rb_erase(&e->count.node, &wnd->count_tree);
70123 +               wnd->count -= 1;
70124 +       } else {
70125 +               e = kmem_cache_alloc(ntfs_enode_cachep, GFP_ATOMIC);
70126 +               if (!e)
70127 +                       wnd->uptodated = -1;
70128 +       }
70130 +       if (e) {
70131 +               e->start.key = new_key;
70132 +               e->count.key = new_len;
70133 +               rb_insert_start(&wnd->start_tree, e);
70134 +               rb_insert_count(&wnd->count_tree, e);
70135 +               wnd->count += 1;
70136 +       }
70138 +out:
70139 +       if (!wnd->count && wnd->uptodated != 1)
70140 +               wnd_rescan(wnd);
70144 + * wnd_rescan
70145 + *
70146 + * Scans the whole bitmap; used during initialization.
70147 + */
70148 +static int wnd_rescan(struct wnd_bitmap *wnd)
70150 +       int err = 0;
70151 +       size_t prev_tail = 0;
70152 +       struct super_block *sb = wnd->sb;
70153 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
70154 +       u64 lbo, len = 0;
70155 +       u32 blocksize = sb->s_blocksize;
70156 +       u8 cluster_bits = sbi->cluster_bits;
70157 +       u32 wbits = 8 * sb->s_blocksize;
70158 +       u32 used, frb;
70159 +       const ulong *buf;
70160 +       size_t wpos, wbit, iw, vbo;
70161 +       struct buffer_head *bh = NULL;
70162 +       CLST lcn, clen;
70164 +       wnd->uptodated = 0;
70165 +       wnd->extent_max = 0;
70166 +       wnd->extent_min = MINUS_ONE_T;
70167 +       wnd->total_zeroes = 0;
70169 +       vbo = 0;
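+       /*
+        * Walk the bitmap one blocksize window at a time, counting the
+        * free (zero) bits per window and feeding maximal runs of
+        * zeros into the extent trees via wnd_add_free_ext().
+        */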
70171 +       for (iw = 0; iw < wnd->nwnd; iw++) {
70172 +               if (iw + 1 == wnd->nwnd)
70173 +                       wbits = wnd->bits_last;
70175 +               if (wnd->inited) {
70176 +                       if (!wnd->free_bits[iw]) {
70177 +                               /* all ones */
70178 +                               if (prev_tail) {
70179 +                                       wnd_add_free_ext(wnd,
70180 +                                                        vbo * 8 - prev_tail,
70181 +                                                        prev_tail, true);
70182 +                                       prev_tail = 0;
70183 +                               }
70184 +                               goto next_wnd;
70185 +                       }
70186 +                       if (wbits == wnd->free_bits[iw]) {
70187 +                               /* all zeroes */
70188 +                               prev_tail += wbits;
70189 +                               wnd->total_zeroes += wbits;
70190 +                               goto next_wnd;
70191 +                       }
70192 +               }
70194 +               if (!len) {
70195 +                       u32 off = vbo & sbi->cluster_mask;
70197 +                       if (!run_lookup_entry(&wnd->run, vbo >> cluster_bits,
70198 +                                             &lcn, &clen, NULL)) {
70199 +                               err = -ENOENT;
70200 +                               goto out;
70201 +                       }
70203 +                       lbo = ((u64)lcn << cluster_bits) + off;
70204 +                       len = ((u64)clen << cluster_bits) - off;
70205 +               }
70207 +               bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
70208 +               if (!bh) {
70209 +                       err = -EIO;
70210 +                       goto out;
70211 +               }
70213 +               buf = (ulong *)bh->b_data;
70215 +               used = __bitmap_weight(buf, wbits);
70216 +               if (used < wbits) {
70217 +                       frb = wbits - used;
70218 +                       wnd->free_bits[iw] = frb;
70219 +                       wnd->total_zeroes += frb;
70220 +               }
70222 +               wpos = 0;
70223 +               wbit = vbo * 8;
70225 +               if (wbit + wbits > wnd->nbits)
70226 +                       wbits = wnd->nbits - wbit;
70228 +               do {
70229 +                       used = find_next_zero_bit(buf, wbits, wpos);
70231 +                       if (used > wpos && prev_tail) {
70232 +                               wnd_add_free_ext(wnd, wbit + wpos - prev_tail,
70233 +                                                prev_tail, true);
70234 +                               prev_tail = 0;
70235 +                       }
70237 +                       wpos = used;
70239 +                       if (wpos >= wbits) {
70240 +                               /* No free blocks */
70241 +                               prev_tail = 0;
70242 +                               break;
70243 +                       }
70245 +                       frb = find_next_bit(buf, wbits, wpos);
70246 +                       if (frb >= wbits) {
70247 +                               /* keep last free block */
70248 +                               prev_tail += frb - wpos;
70249 +                               break;
70250 +                       }
70252 +                       wnd_add_free_ext(wnd, wbit + wpos - prev_tail,
70253 +                                        frb + prev_tail - wpos, true);
70255 +                       /* Skip free block and first '1' */
70256 +                       wpos = frb + 1;
70257 +                       /* Reset previous tail */
70258 +                       prev_tail = 0;
70259 +               } while (wpos < wbits);
70261 +next_wnd:
70263 +               if (bh)
70264 +                       put_bh(bh);
70265 +               bh = NULL;
70267 +               vbo += blocksize;
70268 +               if (len) {
70269 +                       len -= blocksize;
70270 +                       lbo += blocksize;
70271 +               }
70272 +       }
70274 +       /* Add last block */
70275 +       if (prev_tail)
70276 +               wnd_add_free_ext(wnd, wnd->nbits - prev_tail, prev_tail, true);
70278 +       /*
70279 +        * Before the init cycle wnd->uptodated was 0.
70280 +        * If any errors or limits occurred during initialization,
70281 +        * wnd->uptodated will be -1.
70282 +        * If 'uptodated' is still 0, the tree is fully up to date.
70283 +        */
70284 +       if (!wnd->uptodated)
70285 +               wnd->uptodated = 1;
70287 +       if (wnd->zone_bit != wnd->zone_end) {
70288 +               size_t zlen = wnd->zone_end - wnd->zone_bit;
70290 +               wnd->zone_end = wnd->zone_bit;
70291 +               wnd_zone_set(wnd, wnd->zone_bit, zlen);
70292 +       }
70294 +out:
70295 +       return err;
70299 + * wnd_init
70300 + */
70301 +int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
70303 +       int err;
70304 +       u32 blocksize = sb->s_blocksize;
70305 +       u32 wbits = blocksize * 8;
70307 +       init_rwsem(&wnd->rw_lock);
70309 +       wnd->sb = sb;
70310 +       wnd->nbits = nbits;
70311 +       wnd->total_zeroes = nbits;
70312 +       wnd->extent_max = MINUS_ONE_T;
70313 +       wnd->zone_bit = wnd->zone_end = 0;
70314 +       wnd->nwnd = bytes_to_block(sb, bitmap_size(nbits));
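+       /*
+        * 'bits_last' is the number of valid bits in the final window;
+        * it equals a full window when nbits is a multiple of wbits.
+        */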
70315 +       wnd->bits_last = nbits & (wbits - 1);
70316 +       if (!wnd->bits_last)
70317 +               wnd->bits_last = wbits;
70319 +       wnd->free_bits = ntfs_zalloc(wnd->nwnd * sizeof(u16));
70320 +       if (!wnd->free_bits)
70321 +               return -ENOMEM;
70323 +       err = wnd_rescan(wnd);
70324 +       if (err)
70325 +               return err;
70327 +       wnd->inited = true;
70329 +       return 0;
70333 + * wnd_map
70334 + *
70335 + * Reads the buffer head that backs the requested bitmap window.
70336 + */
70337 +static struct buffer_head *wnd_map(struct wnd_bitmap *wnd, size_t iw)
70339 +       size_t vbo;
70340 +       CLST lcn, clen;
70341 +       struct super_block *sb = wnd->sb;
70342 +       struct ntfs_sb_info *sbi;
70343 +       struct buffer_head *bh;
70344 +       u64 lbo;
70346 +       sbi = sb->s_fs_info;
70347 +       vbo = (u64)iw << sb->s_blocksize_bits;
70349 +       if (!run_lookup_entry(&wnd->run, vbo >> sbi->cluster_bits, &lcn, &clen,
70350 +                             NULL)) {
70351 +               return ERR_PTR(-ENOENT);
70352 +       }
70354 +       lbo = ((u64)lcn << sbi->cluster_bits) + (vbo & sbi->cluster_mask);
70356 +       bh = ntfs_bread(wnd->sb, lbo >> sb->s_blocksize_bits);
70357 +       if (!bh)
70358 +               return ERR_PTR(-EIO);
70360 +       return bh;
70364 + * wnd_set_free
70365 + *
70366 + * Marks the bit range [bit, bit + bits) as free
70367 + */
70368 +int wnd_set_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
70370 +       int err = 0;
70371 +       struct super_block *sb = wnd->sb;
70372 +       size_t bits0 = bits;
70373 +       u32 wbits = 8 * sb->s_blocksize;
70374 +       size_t iw = bit >> (sb->s_blocksize_bits + 3);
70375 +       u32 wbit = bit & (wbits - 1);
70376 +       struct buffer_head *bh;
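+       /*
+        * Clear the range window by window; 'op' is clamped to the
+        * bits remaining in the current window on each pass.
+        */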
70378 +       while (iw < wnd->nwnd && bits) {
70379 +               u32 tail, op;
70380 +               ulong *buf;
70382 +               if (iw + 1 == wnd->nwnd)
70383 +                       wbits = wnd->bits_last;
70385 +               tail = wbits - wbit;
70386 +               op = tail < bits ? tail : bits;
70388 +               bh = wnd_map(wnd, iw);
70389 +               if (IS_ERR(bh)) {
70390 +                       err = PTR_ERR(bh);
70391 +                       break;
70392 +               }
70394 +               buf = (ulong *)bh->b_data;
70396 +               lock_buffer(bh);
70398 +               __bitmap_clear(buf, wbit, op);
70400 +               wnd->free_bits[iw] += op;
70402 +               set_buffer_uptodate(bh);
70403 +               mark_buffer_dirty(bh);
70404 +               unlock_buffer(bh);
70405 +               put_bh(bh);
70407 +               wnd->total_zeroes += op;
70408 +               bits -= op;
70409 +               wbit = 0;
70410 +               iw += 1;
70411 +       }
70413 +       wnd_add_free_ext(wnd, bit, bits0, false);
70415 +       return err;
70419 + * wnd_set_used
70420 + *
70421 + * Marks the bit range [bit, bit + bits) as used
70422 + */
70423 +int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
70425 +       int err = 0;
70426 +       struct super_block *sb = wnd->sb;
70427 +       size_t bits0 = bits;
70428 +       size_t iw = bit >> (sb->s_blocksize_bits + 3);
70429 +       u32 wbits = 8 * sb->s_blocksize;
70430 +       u32 wbit = bit & (wbits - 1);
70431 +       struct buffer_head *bh;
70433 +       while (iw < wnd->nwnd && bits) {
70434 +               u32 tail, op;
70435 +               ulong *buf;
70437 +               if (unlikely(iw + 1 == wnd->nwnd))
70438 +                       wbits = wnd->bits_last;
70440 +               tail = wbits - wbit;
70441 +               op = tail < bits ? tail : bits;
70443 +               bh = wnd_map(wnd, iw);
70444 +               if (IS_ERR(bh)) {
70445 +                       err = PTR_ERR(bh);
70446 +                       break;
70447 +               }
70448 +               buf = (ulong *)bh->b_data;
70450 +               lock_buffer(bh);
70452 +               __bitmap_set(buf, wbit, op);
70453 +               wnd->free_bits[iw] -= op;
70455 +               set_buffer_uptodate(bh);
70456 +               mark_buffer_dirty(bh);
70457 +               unlock_buffer(bh);
70458 +               put_bh(bh);
70460 +               wnd->total_zeroes -= op;
70461 +               bits -= op;
70462 +               wbit = 0;
70463 +               iw += 1;
70464 +       }
70466 +       if (!RB_EMPTY_ROOT(&wnd->start_tree))
70467 +               wnd_remove_free_ext(wnd, bit, bits0);
70469 +       return err;
70473 + * wnd_is_free_hlp
70474 + *
70475 + * Returns true if all clusters [bit, bit+bits) are free (bitmap only)
70476 + */
70477 +static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits)
70479 +       struct super_block *sb = wnd->sb;
70480 +       size_t iw = bit >> (sb->s_blocksize_bits + 3);
70481 +       u32 wbits = 8 * sb->s_blocksize;
70482 +       u32 wbit = bit & (wbits - 1);
70484 +       while (iw < wnd->nwnd && bits) {
70485 +               u32 tail, op;
70487 +               if (unlikely(iw + 1 == wnd->nwnd))
70488 +                       wbits = wnd->bits_last;
70490 +               tail = wbits - wbit;
70491 +               op = tail < bits ? tail : bits;
70493 +               if (wbits != wnd->free_bits[iw]) {
70494 +                       bool ret;
70495 +                       struct buffer_head *bh = wnd_map(wnd, iw);
70497 +                       if (IS_ERR(bh))
70498 +                               return false;
70500 +                       ret = are_bits_clear((ulong *)bh->b_data, wbit, op);
70502 +                       put_bh(bh);
70503 +                       if (!ret)
70504 +                               return false;
70505 +               }
70507 +               bits -= op;
70508 +               wbit = 0;
70509 +               iw += 1;
70510 +       }
70512 +       return true;
70516 + * wnd_is_free
70517 + *
70518 + * Returns true if all clusters [bit, bit+bits) are free
70519 + */
70520 +bool wnd_is_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
70522 +       bool ret;
70523 +       struct rb_node *n;
70524 +       size_t end;
70525 +       struct e_node *e;
70527 +       if (RB_EMPTY_ROOT(&wnd->start_tree))
70528 +               goto use_wnd;
70530 +       n = rb_lookup(&wnd->start_tree, bit);
70531 +       if (!n)
70532 +               goto use_wnd;
70534 +       e = rb_entry(n, struct e_node, start.node);
70536 +       end = e->start.key + e->count.key;
70538 +       if (bit < end && bit + bits <= end)
70539 +               return true;
70541 +use_wnd:
70542 +       ret = wnd_is_free_hlp(wnd, bit, bits);
70544 +       return ret;
70545 +}
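The extent-tree shortcut in wnd_is_free only confirms the free case: assuming rb_lookup returns the cached free extent with the greatest start key not above 'bit', the range is certainly free when it sits entirely inside that extent; otherwise the bitmap is consulted. A small standalone check of the containment test (numbers are illustrative):

#include <assert.h>

int main(void)
{
	/* A cached free extent [start, start + count) = [100, 150). */
	unsigned long start = 100, count = 50, end = start + count;
	unsigned long bit, bits;

	/* Query [110, 140) lies inside the extent: free for sure. */
	bit = 110; bits = 30;
	assert(bit < end && bit + bits <= end);

	/* Query [140, 160) pokes past the extent: fall back to the bitmap. */
	bit = 140; bits = 20;
	assert(!(bit < end && bit + bits <= end));
	return 0;
}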
70547 +/*
70548 + * wnd_is_used
70549 + *
70550 + * Returns true if all clusters [bit, bit + bits) are used.
70551 + */
70552 +bool wnd_is_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
70553 +{
70554 +       bool ret = false;
70555 +       struct super_block *sb = wnd->sb;
70556 +       size_t iw = bit >> (sb->s_blocksize_bits + 3);
70557 +       u32 wbits = 8 * sb->s_blocksize;
70558 +       u32 wbit = bit & (wbits - 1);
70559 +       size_t end;
70560 +       struct rb_node *n;
70561 +       struct e_node *e;
70563 +       if (RB_EMPTY_ROOT(&wnd->start_tree))
70564 +               goto use_wnd;
70566 +       end = bit + bits;
70567 +       n = rb_lookup(&wnd->start_tree, end - 1);
70568 +       if (!n)
70569 +               goto use_wnd;
70571 +       e = rb_entry(n, struct e_node, start.node);
70572 +       if (e->start.key + e->count.key > bit)
70573 +               return false;
70575 +use_wnd:
70576 +       while (iw < wnd->nwnd && bits) {
70577 +               u32 tail, op;
70579 +               if (unlikely(iw + 1 == wnd->nwnd))
70580 +                       wbits = wnd->bits_last;
70582 +               tail = wbits - wbit;
70583 +               op = tail < bits ? tail : bits;
70585 +               if (wnd->free_bits[iw]) {
70586 +                       bool ret;
70587 +                       struct buffer_head *bh = wnd_map(wnd, iw);
70589 +                       if (IS_ERR(bh))
70590 +                               goto out;
70592 +                       ret = are_bits_set((ulong *)bh->b_data, wbit, op);
70593 +                       put_bh(bh);
70594 +                       if (!ret)
70595 +                               goto out;
70596 +               }
70598 +               bits -= op;
70599 +               wbit = 0;
70600 +               iw += 1;
70601 +       }
70602 +       ret = true;
70604 +out:
70605 +       return ret;
70606 +}
70608 +/*
70609 + * wnd_find
70610 + * - flags - BITMAP_FIND_XXX flags
70611 + *
70612 + * Looks for free space.
70613 + * Returns 0 if not found.
70614 + */
70615 +size_t wnd_find(struct wnd_bitmap *wnd, size_t to_alloc, size_t hint,
70616 +               size_t flags, size_t *allocated)
70617 +{
70618 +       struct super_block *sb;
70619 +       u32 wbits, wpos, wzbit, wzend;
70620 +       size_t fnd, max_alloc, b_len, b_pos;
70621 +       size_t iw, prev_tail, nwnd, wbit, ebit, zbit, zend;
70622 +       size_t to_alloc0 = to_alloc;
70623 +       const ulong *buf;
70624 +       const struct e_node *e;
70625 +       const struct rb_node *pr, *cr;
70626 +       u8 log2_bits;
70627 +       bool fbits_valid;
70628 +       struct buffer_head *bh;
70630 +       /* Fast check for available free space */
70631 +       if (flags & BITMAP_FIND_FULL) {
70632 +               size_t zeroes = wnd_zeroes(wnd);
70634 +               zeroes -= wnd->zone_end - wnd->zone_bit;
70635 +               if (zeroes < to_alloc0)
70636 +                       goto no_space;
70638 +               if (to_alloc0 > wnd->extent_max)
70639 +                       goto no_space;
70640 +       } else {
70641 +               if (to_alloc > wnd->extent_max)
70642 +                       to_alloc = wnd->extent_max;
70643 +       }
70645 +       if (wnd->zone_bit <= hint && hint < wnd->zone_end)
70646 +               hint = wnd->zone_end;
70648 +       max_alloc = wnd->nbits;
70649 +       b_len = b_pos = 0;
70651 +       if (hint >= max_alloc)
70652 +               hint = 0;
70654 +       if (RB_EMPTY_ROOT(&wnd->start_tree)) {
70655 +               if (wnd->uptodated == 1) {
70656 +                       /* Extents tree is up to date -> no free space */
70657 +                       goto no_space;
70658 +               }
70659 +               goto scan_bitmap;
70660 +       }
70662 +       e = NULL;
70663 +       if (!hint)
70664 +               goto allocate_biggest;
70666 +       /* Use hint: enumerate extents by start >= hint */
70667 +       pr = NULL;
70668 +       cr = wnd->start_tree.rb_node;
70670 +       for (;;) {
70671 +               e = rb_entry(cr, struct e_node, start.node);
70673 +               if (e->start.key == hint)
70674 +                       break;
70676 +               if (e->start.key < hint) {
70677 +                       pr = cr;
70678 +                       cr = cr->rb_right;
70679 +                       if (!cr)
70680 +                               break;
70681 +                       continue;
70682 +               }
70684 +               cr = cr->rb_left;
70685 +               if (!cr) {
70686 +                       e = pr ? rb_entry(pr, struct e_node, start.node) : NULL;
70687 +                       break;
70688 +               }
70689 +       }
70691 +       if (!e)
70692 +               goto allocate_biggest;
70694 +       if (e->start.key + e->count.key > hint) {
70695 +               /* We have found an extent with 'hint' inside */
70696 +               size_t len = e->start.key + e->count.key - hint;
70698 +               if (len >= to_alloc && hint + to_alloc <= max_alloc) {
70699 +                       fnd = hint;
70700 +                       goto found;
70701 +               }
70703 +               if (!(flags & BITMAP_FIND_FULL)) {
70704 +                       if (len > to_alloc)
70705 +                               len = to_alloc;
70707 +                       if (hint + len <= max_alloc) {
70708 +                               fnd = hint;
70709 +                               to_alloc = len;
70710 +                               goto found;
70711 +                       }
70712 +               }
70713 +       }
70715 +allocate_biggest:
70716 +       /* Allocate from biggest free extent */
70717 +       e = rb_entry(rb_first(&wnd->count_tree), struct e_node, count.node);
70718 +       if (e->count.key != wnd->extent_max)
70719 +               wnd->extent_max = e->count.key;
70721 +       if (e->count.key < max_alloc) {
70722 +               if (e->count.key >= to_alloc) {
70723 +                       ;
70724 +               } else if (flags & BITMAP_FIND_FULL) {
70725 +                       if (e->count.key < to_alloc0) {
70726 +                               /* Biggest free block is less than requested */
70727 +                               goto no_space;
70728 +                       }
70729 +                       to_alloc = e->count.key;
70730 +               } else if (-1 != wnd->uptodated) {
70731 +                       to_alloc = e->count.key;
70732 +               } else {
70733 +                       /* Check if we can use more bits */
70734 +                       size_t op, max_check;
70735 +                       struct rb_root start_tree;
70737 +                       memcpy(&start_tree, &wnd->start_tree,
70738 +                              sizeof(struct rb_root));
70739 +                       memset(&wnd->start_tree, 0, sizeof(struct rb_root));
70741 +                       max_check = e->start.key + to_alloc;
70742 +                       if (max_check > max_alloc)
70743 +                               max_check = max_alloc;
70744 +                       for (op = e->start.key + e->count.key; op < max_check;
70745 +                            op++) {
70746 +                               if (!wnd_is_free(wnd, op, 1))
70747 +                                       break;
70748 +                       }
70749 +                       memcpy(&wnd->start_tree, &start_tree,
70750 +                              sizeof(struct rb_root));
70751 +                       to_alloc = op - e->start.key;
70752 +               }
70754 +               /* Prepare to return */
70755 +               fnd = e->start.key;
70756 +               if (e->start.key + to_alloc > max_alloc)
70757 +                       to_alloc = max_alloc - e->start.key;
70758 +               goto found;
70759 +       }
70761 +       if (wnd->uptodated == 1) {
70762 +               /* Extents tree is up to date -> no free space */
70763 +               goto no_space;
70764 +       }
70766 +       b_len = e->count.key;
70767 +       b_pos = e->start.key;
70769 +scan_bitmap:
70770 +       sb = wnd->sb;
70771 +       log2_bits = sb->s_blocksize_bits + 3;
70773 +       /* At most two ranges [hint, max_alloc) + [0, hint) */
70774 +Again:
70776 +       /* TODO: optimize request for case nbits > wbits */
70777 +       iw = hint >> log2_bits;
70778 +       wbits = sb->s_blocksize * 8;
70779 +       wpos = hint & (wbits - 1);
70780 +       prev_tail = 0;
70781 +       fbits_valid = true;
70783 +       if (max_alloc == wnd->nbits) {
70784 +               nwnd = wnd->nwnd;
70785 +       } else {
70786 +               size_t t = max_alloc + wbits - 1;
70788 +               nwnd = likely(t > max_alloc) ? (t >> log2_bits) : wnd->nwnd;
70789 +       }
70791 +       /* Enumerate all windows */
70792 +       for (; iw < nwnd; iw++) {
70793 +               wbit = iw << log2_bits;
70795 +               if (!wnd->free_bits[iw]) {
70796 +                       if (prev_tail > b_len) {
70797 +                               b_pos = wbit - prev_tail;
70798 +                               b_len = prev_tail;
70799 +                       }
70801 +                       /* Skip fully used window */
70802 +                       prev_tail = 0;
70803 +                       wpos = 0;
70804 +                       continue;
70805 +               }
70807 +               if (unlikely(iw + 1 == nwnd)) {
70808 +                       if (max_alloc == wnd->nbits) {
70809 +                               wbits = wnd->bits_last;
70810 +                       } else {
70811 +                               size_t t = max_alloc & (wbits - 1);
70813 +                               if (t) {
70814 +                                       wbits = t;
70815 +                                       fbits_valid = false;
70816 +                               }
70817 +                       }
70818 +               }
70820 +               if (wnd->zone_end > wnd->zone_bit) {
70821 +                       ebit = wbit + wbits;
70822 +                       zbit = max(wnd->zone_bit, wbit);
70823 +                       zend = min(wnd->zone_end, ebit);
70825 +                       /* Here we have a window [wbit, ebit) and zone [zbit, zend) */
70826 +                       if (zend <= zbit) {
70827 +                               /* Zone does not overlap window */
70828 +                       } else {
70829 +                               wzbit = zbit - wbit;
70830 +                               wzend = zend - wbit;
70832 +                               /* Zone overlaps window */
70833 +                               if (wnd->free_bits[iw] == wzend - wzbit) {
70834 +                                       prev_tail = 0;
70835 +                                       wpos = 0;
70836 +                                       continue;
70837 +                               }
70839 +                               /* Scan two ranges window: [wbit, zbit) and [zend, ebit) */
70840 +                               bh = wnd_map(wnd, iw);
70842 +                               if (IS_ERR(bh)) {
70843 +                                       /* TODO: error */
70844 +                                       prev_tail = 0;
70845 +                                       wpos = 0;
70846 +                                       continue;
70847 +                               }
70849 +                               buf = (ulong *)bh->b_data;
70851 +                               /* Scan range [wbit, zbit) */
70852 +                               if (wpos < wzbit) {
70853 +                                       /* Scan range [wpos, zbit) */
70854 +                                       fnd = wnd_scan(buf, wbit, wpos, wzbit,
70855 +                                                      to_alloc, &prev_tail,
70856 +                                                      &b_pos, &b_len);
70857 +                                       if (fnd != MINUS_ONE_T) {
70858 +                                               put_bh(bh);
70859 +                                               goto found;
70860 +                                       }
70861 +                               }
70863 +                               prev_tail = 0;
70865 +                               /* Scan range [zend, ebit) */
70866 +                               if (wzend < wbits) {
70867 +                                       fnd = wnd_scan(buf, wbit,
70868 +                                                      max(wzend, wpos), wbits,
70869 +                                                      to_alloc, &prev_tail,
70870 +                                                      &b_pos, &b_len);
70871 +                                       if (fnd != MINUS_ONE_T) {
70872 +                                               put_bh(bh);
70873 +                                               goto found;
70874 +                                       }
70875 +                               }
70877 +                               wpos = 0;
70878 +                               put_bh(bh);
70879 +                               continue;
70880 +                       }
70881 +               }
70883 +               /* Current window does not overlap zone */
70884 +               if (!wpos && fbits_valid && wnd->free_bits[iw] == wbits) {
70885 +                       /* window is empty */
70886 +                       if (prev_tail + wbits >= to_alloc) {
70887 +                               fnd = wbit + wpos - prev_tail;
70888 +                               goto found;
70889 +                       }
70891 +                       /* Increase 'prev_tail' and process next window */
70892 +                       prev_tail += wbits;
70893 +                       wpos = 0;
70894 +                       continue;
70895 +               }
70897 +               /* read window */
70898 +               bh = wnd_map(wnd, iw);
70899 +               if (IS_ERR(bh)) {
70900 +                       /* TODO: error */
70901 +                       prev_tail = 0;
70902 +                       wpos = 0;
70903 +                       continue;
70904 +               }
70906 +               buf = (ulong *)bh->b_data;
70908 +               /* Scan range [wpos, wbits) */
70909 +               fnd = wnd_scan(buf, wbit, wpos, wbits, to_alloc, &prev_tail,
70910 +                              &b_pos, &b_len);
70911 +               put_bh(bh);
70912 +               if (fnd != MINUS_ONE_T)
70913 +                       goto found;
70914 +       }
70916 +       if (b_len < prev_tail) {
70917 +               /* The last fragment */
70918 +               b_len = prev_tail;
70919 +               b_pos = max_alloc - prev_tail;
70920 +       }
70922 +       if (hint) {
70923 +               /*
70924 +                * We have scanned range [hint, max_alloc).
70925 +                * Prepare to scan range [0, hint + to_alloc).
70926 +                */
70927 +               size_t nextmax = hint + to_alloc;
70929 +               if (likely(nextmax >= hint) && nextmax < max_alloc)
70930 +                       max_alloc = nextmax;
70931 +               hint = 0;
70932 +               goto Again;
70933 +       }
70935 +       if (!b_len)
70936 +               goto no_space;
70938 +       wnd->extent_max = b_len;
70940 +       if (flags & BITMAP_FIND_FULL)
70941 +               goto no_space;
70943 +       fnd = b_pos;
70944 +       to_alloc = b_len;
70946 +found:
70947 +       if (flags & BITMAP_FIND_MARK_AS_USED) {
70948 +               /* TODO: optimize extent removal (pass 'e'?) */
70949 +               if (wnd_set_used(wnd, fnd, to_alloc))
70950 +                       goto no_space;
70951 +       } else if (wnd->extent_max != MINUS_ONE_T &&
70952 +                  to_alloc > wnd->extent_max) {
70953 +               wnd->extent_max = to_alloc;
70954 +       }
70956 +       *allocated = fnd;
70957 +       return to_alloc;
70959 +no_space:
70960 +       return 0;
70961 +}
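A hedged sketch of how a caller might use wnd_find; the initialized 'wnd' and the numeric arguments are assumptions. BITMAP_FIND_FULL insists on the full requested run (otherwise a shorter run may be returned), and BITMAP_FIND_MARK_AS_USED additionally marks the winning run used before returning:

/* Illustrative caller, assuming an initialized struct wnd_bitmap *wnd. */
size_t lcn;
size_t got = wnd_find(wnd, 16 /* clusters wanted */, 1024 /* search hint */,
		      BITMAP_FIND_MARK_AS_USED, &lcn);
if (!got) {
	/* No suitable free run was found. */
} else {
	/* 'got' clusters starting at 'lcn' are now marked used. */
}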
70963 +/*
70964 + * wnd_extend
70965 + *
70966 + * Extend the bitmap ($MFT bitmap).
70967 + */
70968 +int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
70969 +{
70970 +       int err;
70971 +       struct super_block *sb = wnd->sb;
70972 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
70973 +       u32 blocksize = sb->s_blocksize;
70974 +       u32 wbits = blocksize * 8;
70975 +       u32 b0, new_last;
70976 +       size_t bits, iw, new_wnd;
70977 +       size_t old_bits = wnd->nbits;
70978 +       u16 *new_free;
70980 +       if (new_bits <= old_bits)
70981 +               return -EINVAL;
70983 +       /* Align to an 8-byte boundary */
70984 +       new_wnd = bytes_to_block(sb, bitmap_size(new_bits));
70985 +       new_last = new_bits & (wbits - 1);
70986 +       if (!new_last)
70987 +               new_last = wbits;
70989 +       if (new_wnd != wnd->nwnd) {
70990 +               new_free = ntfs_malloc(new_wnd * sizeof(u16));
70991 +               if (!new_free)
70992 +                       return -ENOMEM;
70994 +               if (new_free != wnd->free_bits)
70995 +                       memcpy(new_free, wnd->free_bits,
70996 +                              wnd->nwnd * sizeof(short));
70997 +               memset(new_free + wnd->nwnd, 0,
70998 +                      (new_wnd - wnd->nwnd) * sizeof(short));
70999 +               ntfs_free(wnd->free_bits);
71000 +               wnd->free_bits = new_free;
71001 +       }
71003 +       /* Zero bits [old_bits, new_bits) */
71004 +       bits = new_bits - old_bits;
71005 +       b0 = old_bits & (wbits - 1);
71007 +       for (iw = old_bits >> (sb->s_blocksize_bits + 3); bits; iw += 1) {
71008 +               u32 op;
71009 +               size_t frb;
71010 +               u64 vbo, lbo, bytes;
71011 +               struct buffer_head *bh;
71012 +               ulong *buf;
71014 +               if (iw + 1 == new_wnd)
71015 +                       wbits = new_last;
71017 +               op = b0 + bits > wbits ? wbits - b0 : bits;
71018 +               vbo = (u64)iw * blocksize;
71020 +               err = ntfs_vbo_to_lbo(sbi, &wnd->run, vbo, &lbo, &bytes);
71021 +               if (err)
71022 +                       break;
71024 +               bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
71025 +               if (!bh)
71026 +                       return -EIO;
71028 +               lock_buffer(bh);
71029 +               buf = (ulong *)bh->b_data;
71031 +               __bitmap_clear(buf, b0, blocksize * 8 - b0);
71032 +               frb = wbits - __bitmap_weight(buf, wbits);
71033 +               wnd->total_zeroes += frb - wnd->free_bits[iw];
71034 +               wnd->free_bits[iw] = frb;
71036 +               set_buffer_uptodate(bh);
71037 +               mark_buffer_dirty(bh);
71038 +               unlock_buffer(bh);
71039 +               /*err = sync_dirty_buffer(bh);*/
71041 +               b0 = 0;
71042 +               bits -= op;
71043 +       }
71045 +       wnd->nbits = new_bits;
71046 +       wnd->nwnd = new_wnd;
71047 +       wnd->bits_last = new_last;
71049 +       wnd_add_free_ext(wnd, old_bits, new_bits - old_bits, false);
71051 +       return 0;
71052 +}
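The new_last computation in wnd_extend is the usual "remainder, except a zero remainder means a full window" pattern. A standalone check with illustrative values:

#include <assert.h>
#include <stddef.h>

int main(void)
{
	unsigned int wbits = 32768;          /* bits per 4096-byte window */
	size_t new_bits = 3 * 32768;         /* ends exactly on a window boundary */
	unsigned int new_last = new_bits & (wbits - 1);

	if (!new_last)
		new_last = wbits;            /* a zero remainder means a full window */
	assert(new_last == 32768);
	return 0;
}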
71054 +/*
71055 + * wnd_zone_set
71056 + */
71057 +void wnd_zone_set(struct wnd_bitmap *wnd, size_t lcn, size_t len)
71058 +{
71059 +       size_t zlen;
71061 +       zlen = wnd->zone_end - wnd->zone_bit;
71062 +       if (zlen)
71063 +               wnd_add_free_ext(wnd, wnd->zone_bit, zlen, false);
71065 +       if (!RB_EMPTY_ROOT(&wnd->start_tree) && len)
71066 +               wnd_remove_free_ext(wnd, lcn, len);
71068 +       wnd->zone_bit = lcn;
71069 +       wnd->zone_end = lcn + len;
71070 +}
71072 +int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range)
71073 +{
71074 +       int err = 0;
71075 +       struct super_block *sb = sbi->sb;
71076 +       struct wnd_bitmap *wnd = &sbi->used.bitmap;
71077 +       u32 wbits = 8 * sb->s_blocksize;
71078 +       CLST len = 0, lcn = 0, done = 0;
71079 +       CLST minlen = bytes_to_cluster(sbi, range->minlen);
71080 +       CLST lcn_from = bytes_to_cluster(sbi, range->start);
71081 +       size_t iw = lcn_from >> (sb->s_blocksize_bits + 3);
71082 +       u32 wbit = lcn_from & (wbits - 1);
71083 +       const ulong *buf;
71084 +       CLST lcn_to;
71086 +       if (!minlen)
71087 +               minlen = 1;
71089 +       if (range->len == (u64)-1)
71090 +               lcn_to = wnd->nbits;
71091 +       else
71092 +               lcn_to = bytes_to_cluster(sbi, range->start + range->len);
71094 +       down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
71096 +       for (; iw < wnd->nwnd; iw++, wbit = 0) {
71097 +               CLST lcn_wnd = iw * wbits;
71098 +               struct buffer_head *bh;
71100 +               if (lcn_wnd > lcn_to)
71101 +                       break;
71103 +               if (!wnd->free_bits[iw])
71104 +                       continue;
71106 +               if (iw + 1 == wnd->nwnd)
71107 +                       wbits = wnd->bits_last;
71109 +               if (lcn_wnd + wbits > lcn_to)
71110 +                       wbits = lcn_to - lcn_wnd;
71112 +               bh = wnd_map(wnd, iw);
71113 +               if (IS_ERR(bh)) {
71114 +                       err = PTR_ERR(bh);
71115 +                       break;
71116 +               }
71118 +               buf = (ulong *)bh->b_data;
71120 +               for (; wbit < wbits; wbit++) {
71121 +                       if (!test_bit(wbit, buf)) {
71122 +                               if (!len)
71123 +                                       lcn = lcn_wnd + wbit;
71124 +                               len += 1;
71125 +                               continue;
71126 +                       }
71127 +                       if (len >= minlen) {
71128 +                               err = ntfs_discard(sbi, lcn, len);
71129 +                               if (err)
71130 +                                       goto out;
71131 +                               done += len;
71132 +                       }
71133 +                       len = 0;
71134 +               }
71135 +               put_bh(bh);
71136 +       }
71138 +       /* Process the last fragment */
71139 +       if (len >= minlen) {
71140 +               err = ntfs_discard(sbi, lcn, len);
71141 +               if (err)
71142 +                       goto out;
71143 +               done += len;
71144 +       }
71146 +out:
71147 +       range->len = (u64)done << sbi->cluster_bits;
71149 +       up_read(&wnd->rw_lock);
71151 +       return err;
71152 +}
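ntfs_trim_fs is reached from userspace through the generic FITRIM ioctl, wired up in ntfs_ioctl_fitrim later in this patch. A minimal userspace sketch (the mount point is an assumed example):

#include <fcntl.h>
#include <linux/fs.h>   /* FITRIM, struct fstrim_range */
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct fstrim_range range = {
		.start = 0,
		.len = (__u64)-1, /* whole filesystem, as handled above */
		.minlen = 0,      /* the kernel raises this to the discard granularity */
	};
	int fd = open("/mnt/ntfs", O_RDONLY); /* assumed mount point */

	if (fd < 0 || ioctl(fd, FITRIM, &range) < 0) {
		perror("FITRIM");
		return 1;
	}
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}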
71153 diff --git a/fs/ntfs3/debug.h b/fs/ntfs3/debug.h
71154 new file mode 100644
71155 index 000000000000..dfaa4c79dc6d
71156 --- /dev/null
71157 +++ b/fs/ntfs3/debug.h
71158 @@ -0,0 +1,64 @@
71159 +/* SPDX-License-Identifier: GPL-2.0 */
71160 +/*
71161 + *
71162 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
71163 + *
71164 + * Useful functions for debugging.
71165 + */
71167 +// clang-format off
71168 +#ifndef Add2Ptr
71169 +#define Add2Ptr(P, I)          ((void *)((u8 *)(P) + (I)))
71170 +#define PtrOffset(B, O)                ((size_t)((size_t)(O) - (size_t)(B)))
71171 +#endif
71173 +#define QuadAlign(n)           (((n) + 7u) & (~7u))
71174 +#define IsQuadAligned(n)       (!((size_t)(n)&7u))
71175 +#define Quad2Align(n)          (((n) + 15u) & (~15u))
71176 +#define IsQuad2Aligned(n)      (!((size_t)(n)&15u))
71177 +#define Quad4Align(n)          (((n) + 31u) & (~31u))
71178 +#define IsSizeTAligned(n)      (!((size_t)(n) & (sizeof(size_t) - 1)))
71179 +#define DwordAlign(n)          (((n) + 3u) & (~3u))
71180 +#define IsDwordAligned(n)      (!((size_t)(n)&3u))
71181 +#define WordAlign(n)           (((n) + 1u) & (~1u))
71182 +#define IsWordAligned(n)       (!((size_t)(n)&1u))
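These helpers round up to a power-of-two boundary with the classic add-then-mask idiom. A standalone illustration of the same idiom (local copies of the macros, so the sketch compiles on its own):

#include <assert.h>

#define QUAD_ALIGN(n)  (((n) + 7u) & (~7u))   /* mirrors QuadAlign */
#define DWORD_ALIGN(n) (((n) + 3u) & (~3u))   /* mirrors DwordAlign */

int main(void)
{
	assert(QUAD_ALIGN(13) == 16);  /* rounded up to the next multiple of 8 */
	assert(QUAD_ALIGN(16) == 16);  /* already-aligned values are unchanged */
	assert(DWORD_ALIGN(5) == 8);   /* the same idiom with a 4-byte boundary */
	return 0;
}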
71184 +#ifdef CONFIG_PRINTK
71185 +__printf(2, 3)
71186 +void ntfs_printk(const struct super_block *sb, const char *fmt, ...);
71187 +__printf(2, 3)
71188 +void ntfs_inode_printk(struct inode *inode, const char *fmt, ...);
71189 +#else
71190 +static inline __printf(2, 3)
71191 +void ntfs_printk(const struct super_block *sb, const char *fmt, ...)
71192 +{
71193 +}
71195 +static inline __printf(2, 3)
71196 +void ntfs_inode_printk(struct inode *inode, const char *fmt, ...)
71197 +{
71198 +}
71199 +#endif
71201 +/*
71202 + * Logging macros (thanks to Joe Perches <joe@perches.com> for the implementation)
71203 + */
71205 +#define ntfs_err(sb, fmt, ...)  ntfs_printk(sb, KERN_ERR fmt, ##__VA_ARGS__)
71206 +#define ntfs_warn(sb, fmt, ...) ntfs_printk(sb, KERN_WARNING fmt, ##__VA_ARGS__)
71207 +#define ntfs_info(sb, fmt, ...) ntfs_printk(sb, KERN_INFO fmt, ##__VA_ARGS__)
71208 +#define ntfs_notice(sb, fmt, ...)                                              \
71209 +       ntfs_printk(sb, KERN_NOTICE fmt, ##__VA_ARGS__)
71211 +#define ntfs_inode_err(inode, fmt, ...)                                        \
71212 +       ntfs_inode_printk(inode, KERN_ERR fmt, ##__VA_ARGS__)
71213 +#define ntfs_inode_warn(inode, fmt, ...)                                       \
71214 +       ntfs_inode_printk(inode, KERN_WARNING fmt, ##__VA_ARGS__)
71216 +#define ntfs_malloc(s)         kmalloc(s, GFP_NOFS)
71217 +#define ntfs_zalloc(s)         kzalloc(s, GFP_NOFS)
71218 +#define ntfs_vmalloc(s)                kvmalloc(s, GFP_KERNEL)
71219 +#define ntfs_free(p)           kfree(p)
71220 +#define ntfs_vfree(p)          kvfree(p)
71221 +#define ntfs_memdup(src, len)  kmemdup(src, len, GFP_NOFS)
71222 +// clang-format on
71223 diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
71224 new file mode 100644
71225 index 000000000000..9ec6012c405b
71226 --- /dev/null
71227 +++ b/fs/ntfs3/dir.c
71228 @@ -0,0 +1,594 @@
71229 +// SPDX-License-Identifier: GPL-2.0
71230 +/*
71231 + *
71232 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
71233 + *
71234 + *  Directory handling functions for NTFS-based filesystems.
71235 + *
71236 + */
71237 +#include <linux/blkdev.h>
71238 +#include <linux/buffer_head.h>
71239 +#include <linux/fs.h>
71240 +#include <linux/iversion.h>
71241 +#include <linux/nls.h>
71243 +#include "debug.h"
71244 +#include "ntfs.h"
71245 +#include "ntfs_fs.h"
71247 +/*
71248 + * Convert a little-endian utf16 string to an nls string.
71249 + */
71250 +int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const struct le_str *uni,
71251 +                     u8 *buf, int buf_len)
71252 +{
71253 +       int ret, uni_len, warn;
71254 +       const __le16 *ip;
71255 +       u8 *op;
71256 +       struct nls_table *nls = sbi->options.nls;
71258 +       static_assert(sizeof(wchar_t) == sizeof(__le16));
71260 +       if (!nls) {
71261 +               /* utf16 -> utf8 */
71262 +               ret = utf16s_to_utf8s((wchar_t *)uni->name, uni->len,
71263 +                                     UTF16_LITTLE_ENDIAN, buf, buf_len);
71264 +               buf[ret] = '\0';
71265 +               return ret;
71266 +       }
71268 +       ip = uni->name;
71269 +       op = buf;
71270 +       uni_len = uni->len;
71271 +       warn = 0;
71273 +       while (uni_len--) {
71274 +               u16 ec;
71275 +               int charlen;
71276 +               char dump[5];
71278 +               if (buf_len < NLS_MAX_CHARSET_SIZE) {
71279 +                       ntfs_warn(sbi->sb,
71280 +                                 "filename was truncated while converting.");
71281 +                       break;
71282 +               }
71284 +               ec = le16_to_cpu(*ip++);
71285 +               charlen = nls->uni2char(ec, op, buf_len);
71287 +               if (charlen > 0) {
71288 +                       op += charlen;
71289 +                       buf_len -= charlen;
71290 +                       continue;
71291 +               }
71293 +               *op++ = '_';
71294 +               buf_len -= 1;
71295 +               if (warn)
71296 +                       continue;
71298 +               warn = 1;
71299 +               hex_byte_pack(&dump[0], ec >> 8);
71300 +               hex_byte_pack(&dump[2], ec);
71301 +               dump[4] = 0;
71303 +               ntfs_err(sbi->sb, "failed to convert \"%s\" to %s", dump,
71304 +                        nls->charset);
71305 +       }
71307 +       *op = '\0';
71308 +       return op - buf;
71309 +}
71311 +// clang-format off
71312 +#define PLANE_SIZE     0x00010000
71314 +#define SURROGATE_PAIR 0x0000d800
71315 +#define SURROGATE_LOW  0x00000400
71316 +#define SURROGATE_BITS 0x000003ff
71317 +// clang-format on
71319 +/*
71320 + * A modified version of put_utf16 from fs/nls/nls_base.c
71321 + * that is free of sparse warnings.
71322 + */
71323 +static inline void put_utf16(wchar_t *s, unsigned int c,
71324 +                            enum utf16_endian endian)
71325 +{
71326 +       static_assert(sizeof(wchar_t) == sizeof(__le16));
71327 +       static_assert(sizeof(wchar_t) == sizeof(__be16));
71329 +       switch (endian) {
71330 +       default:
71331 +               *s = (wchar_t)c;
71332 +               break;
71333 +       case UTF16_LITTLE_ENDIAN:
71334 +               *(__le16 *)s = __cpu_to_le16(c);
71335 +               break;
71336 +       case UTF16_BIG_ENDIAN:
71337 +               *(__be16 *)s = __cpu_to_be16(c);
71338 +               break;
71339 +       }
71340 +}
71342 +/*
71343 + * A modified version of 'utf8s_to_utf16s' that can detect
71344 + * -ENAMETOOLONG without writing past the expected maximum.
71345 + */
71346 +static int _utf8s_to_utf16s(const u8 *s, int inlen, enum utf16_endian endian,
71347 +                           wchar_t *pwcs, int maxout)
71348 +{
71349 +       u16 *op;
71350 +       int size;
71351 +       unicode_t u;
71353 +       op = pwcs;
71354 +       while (inlen > 0 && *s) {
71355 +               if (*s & 0x80) {
71356 +                       size = utf8_to_utf32(s, inlen, &u);
71357 +                       if (size < 0)
71358 +                               return -EINVAL;
71359 +                       s += size;
71360 +                       inlen -= size;
71362 +                       if (u >= PLANE_SIZE) {
71363 +                               if (maxout < 2)
71364 +                                       return -ENAMETOOLONG;
71366 +                               u -= PLANE_SIZE;
71367 +                               put_utf16(op++,
71368 +                                         SURROGATE_PAIR |
71369 +                                                 ((u >> 10) & SURROGATE_BITS),
71370 +                                         endian);
71371 +                               put_utf16(op++,
71372 +                                         SURROGATE_PAIR | SURROGATE_LOW |
71373 +                                                 (u & SURROGATE_BITS),
71374 +                                         endian);
71375 +                               maxout -= 2;
71376 +                       } else {
71377 +                               if (maxout < 1)
71378 +                                       return -ENAMETOOLONG;
71380 +                               put_utf16(op++, u, endian);
71381 +                               maxout--;
71382 +                       }
71383 +               } else {
71384 +                       if (maxout < 1)
71385 +                               return -ENAMETOOLONG;
71387 +                       put_utf16(op++, *s++, endian);
71388 +                       inlen--;
71389 +                       maxout--;
71390 +               }
71391 +       }
71392 +       return op - pwcs;
71393 +}
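The surrogate branch above is standard UTF-16 pairing: code points at or above PLANE_SIZE (U+10000) are rebased to 20 bits and split across a high and a low surrogate. A standalone check of that arithmetic (the sample code point is illustrative):

#include <assert.h>

int main(void)
{
	unsigned int u = 0x1F600;           /* a sample code point above the BMP */
	unsigned int hi, lo;

	u -= 0x10000;                       /* PLANE_SIZE: rebase into 20 bits */
	hi = 0xd800 | ((u >> 10) & 0x3ff);  /* SURROGATE_PAIR | high ten bits */
	lo = 0xd800 | 0x400 | (u & 0x3ff);  /* ... | SURROGATE_LOW | low ten bits */

	assert(hi == 0xd83d && lo == 0xde00); /* the well-known pair for U+1F600 */
	return 0;
}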
71395 +/*
71396 + * Convert an input string to utf16
71397 + *
71398 + * name, name_len - input name
71399 + * uni, max_ulen - destination memory
71400 + * endian - endianness of the target utf16 string
71401 + *
71402 + * This function is called:
71403 + * - to create an ntfs name
71404 + * - to create a symlink
71405 + *
71406 + * Returns the utf16 string length on success, or a negative error code.
71407 + */
71408 +int ntfs_nls_to_utf16(struct ntfs_sb_info *sbi, const u8 *name, u32 name_len,
71409 +                     struct cpu_str *uni, u32 max_ulen,
71410 +                     enum utf16_endian endian)
71411 +{
71412 +       int ret, slen;
71413 +       const u8 *end;
71414 +       struct nls_table *nls = sbi->options.nls;
71415 +       u16 *uname = uni->name;
71417 +       static_assert(sizeof(wchar_t) == sizeof(u16));
71419 +       if (!nls) {
71420 +               /* utf8 -> utf16 */
71421 +               ret = _utf8s_to_utf16s(name, name_len, endian, uname, max_ulen);
71422 +               uni->len = ret;
71423 +               return ret;
71424 +       }
71426 +       for (ret = 0, end = name + name_len; name < end; ret++, name += slen) {
71427 +               if (ret >= max_ulen)
71428 +                       return -ENAMETOOLONG;
71430 +               slen = nls->char2uni(name, end - name, uname + ret);
71431 +               if (!slen)
71432 +                       return -EINVAL;
71433 +               if (slen < 0)
71434 +                       return slen;
71435 +       }
71437 +#ifdef __BIG_ENDIAN
71438 +       if (endian == UTF16_LITTLE_ENDIAN) {
71439 +               int i = ret;
71441 +               while (i--) {
71442 +                       __cpu_to_le16s(uname);
71443 +                       uname++;
71444 +               }
71445 +       }
71446 +#else
71447 +       if (endian == UTF16_BIG_ENDIAN) {
71448 +               int i = ret;
71450 +               while (i--) {
71451 +                       __cpu_to_be16s(uname);
71452 +                       uname++;
71453 +               }
71454 +       }
71455 +#endif
71457 +       uni->len = ret;
71458 +       return ret;
71459 +}
71461 +/* Helper function */
71462 +struct inode *dir_search_u(struct inode *dir, const struct cpu_str *uni,
71463 +                          struct ntfs_fnd *fnd)
71464 +{
71465 +       int err = 0;
71466 +       struct super_block *sb = dir->i_sb;
71467 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
71468 +       struct ntfs_inode *ni = ntfs_i(dir);
71469 +       struct NTFS_DE *e;
71470 +       int diff;
71471 +       struct inode *inode = NULL;
71472 +       struct ntfs_fnd *fnd_a = NULL;
71474 +       if (!fnd) {
71475 +               fnd_a = fnd_get();
71476 +               if (!fnd_a) {
71477 +                       err = -ENOMEM;
71478 +                       goto out;
71479 +               }
71480 +               fnd = fnd_a;
71481 +       }
71483 +       err = indx_find(&ni->dir, ni, NULL, uni, 0, sbi, &diff, &e, fnd);
71485 +       if (err)
71486 +               goto out;
71488 +       if (diff) {
71489 +               err = -ENOENT;
71490 +               goto out;
71491 +       }
71493 +       inode = ntfs_iget5(sb, &e->ref, uni);
71494 +       if (!IS_ERR(inode) && is_bad_inode(inode)) {
71495 +               iput(inode);
71496 +               err = -EINVAL;
71497 +       }
71498 +out:
71499 +       fnd_put(fnd_a);
71501 +       return err == -ENOENT ? NULL : err ? ERR_PTR(err) : inode;
71502 +}
71504 +static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
71505 +                              const struct NTFS_DE *e, u8 *name,
71506 +                              struct dir_context *ctx)
71507 +{
71508 +       const struct ATTR_FILE_NAME *fname;
71509 +       unsigned long ino;
71510 +       int name_len;
71511 +       u32 dt_type;
71513 +       fname = Add2Ptr(e, sizeof(struct NTFS_DE));
71515 +       if (fname->type == FILE_NAME_DOS)
71516 +               return 0;
71518 +       if (!mi_is_ref(&ni->mi, &fname->home))
71519 +               return 0;
71521 +       ino = ino_get(&e->ref);
71523 +       if (ino == MFT_REC_ROOT)
71524 +               return 0;
71526 +       /* Skip meta files (unless the option to show meta files is set) */
71527 +       if (!sbi->options.showmeta && ntfs_is_meta_file(sbi, ino))
71528 +               return 0;
71530 +       if (sbi->options.nohidden && (fname->dup.fa & FILE_ATTRIBUTE_HIDDEN))
71531 +               return 0;
71533 +       name_len = ntfs_utf16_to_nls(sbi, (struct le_str *)&fname->name_len,
71534 +                                    name, PATH_MAX);
71535 +       if (name_len <= 0) {
71536 +               ntfs_warn(sbi->sb, "failed to convert name for inode %lx.",
71537 +                         ino);
71538 +               return 0;
71539 +       }
71541 +       dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
71543 +       return !dir_emit(ctx, (s8 *)name, name_len, ino, dt_type);
71544 +}
71546 +/*
71547 + * ntfs_read_hdr
71548 + *
71549 + * Helper function for 'ntfs_readdir'.
71550 + */
71551 +static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
71552 +                        const struct INDEX_HDR *hdr, u64 vbo, u64 pos,
71553 +                        u8 *name, struct dir_context *ctx)
71554 +{
71555 +       int err;
71556 +       const struct NTFS_DE *e;
71557 +       u32 e_size;
71558 +       u32 end = le32_to_cpu(hdr->used);
71559 +       u32 off = le32_to_cpu(hdr->de_off);
71561 +       for (;; off += e_size) {
71562 +               if (off + sizeof(struct NTFS_DE) > end)
71563 +                       return -1;
71565 +               e = Add2Ptr(hdr, off);
71566 +               e_size = le16_to_cpu(e->size);
71567 +               if (e_size < sizeof(struct NTFS_DE) || off + e_size > end)
71568 +                       return -1;
71570 +               if (de_is_last(e))
71571 +                       return 0;
71573 +               /* Skip entries that were already enumerated */
71574 +               if (vbo + off < pos)
71575 +                       continue;
71577 +               if (le16_to_cpu(e->key_size) < SIZEOF_ATTRIBUTE_FILENAME)
71578 +                       return -1;
71580 +               ctx->pos = vbo + off;
71582 +               /* Submit the name to the filldir callback. */
71583 +               err = ntfs_filldir(sbi, ni, e, name, ctx);
71584 +               if (err)
71585 +                       return err;
71586 +       }
71587 +}
71589 +/*
71590 + * file_operations::iterate_shared
71591 + *
71592 + * Use non-sorted enumeration.
71593 + * We have an example of a broken volume where sorted enumeration
71594 + * counts each name twice.
71595 + */
71596 +static int ntfs_readdir(struct file *file, struct dir_context *ctx)
71597 +{
71598 +       const struct INDEX_ROOT *root;
71599 +       u64 vbo;
71600 +       size_t bit;
71601 +       loff_t eod;
71602 +       int err = 0;
71603 +       struct inode *dir = file_inode(file);
71604 +       struct ntfs_inode *ni = ntfs_i(dir);
71605 +       struct super_block *sb = dir->i_sb;
71606 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
71607 +       loff_t i_size = i_size_read(dir);
71608 +       u32 pos = ctx->pos;
71609 +       u8 *name = NULL;
71610 +       struct indx_node *node = NULL;
71611 +       u8 index_bits = ni->dir.index_bits;
71613 +       /* name is a buffer of PATH_MAX length */
71614 +       static_assert(NTFS_NAME_LEN * 4 < PATH_MAX);
71616 +       eod = i_size + sbi->record_size;
71618 +       if (pos >= eod)
71619 +               return 0;
71621 +       if (!dir_emit_dots(file, ctx))
71622 +               return 0;
71624 +       /* allocate PATH_MAX bytes */
71625 +       name = __getname();
71626 +       if (!name)
71627 +               return -ENOMEM;
71629 +       if (!ni->mi_loaded && ni->attr_list.size) {
71630 +               /*
71631 +                * The directory inode is locked for read;
71632 +                * load all subrecords to avoid 'write' access to 'ni'
71633 +                * during directory reading.
71634 +                */
71635 +               ni_lock(ni);
71636 +               if (!ni->mi_loaded && ni->attr_list.size) {
71637 +                       err = ni_load_all_mi(ni);
71638 +                       if (!err)
71639 +                               ni->mi_loaded = true;
71640 +               }
71641 +               ni_unlock(ni);
71642 +               if (err)
71643 +                       goto out;
71644 +       }
71646 +       root = indx_get_root(&ni->dir, ni, NULL, NULL);
71647 +       if (!root) {
71648 +               err = -EINVAL;
71649 +               goto out;
71650 +       }
71652 +       if (pos >= sbi->record_size) {
71653 +               bit = (pos - sbi->record_size) >> index_bits;
71654 +       } else {
71655 +               err = ntfs_read_hdr(sbi, ni, &root->ihdr, 0, pos, name, ctx);
71656 +               if (err)
71657 +                       goto out;
71658 +               bit = 0;
71659 +       }
71661 +       if (!i_size) {
71662 +               ctx->pos = eod;
71663 +               goto out;
71664 +       }
71666 +       for (;;) {
71667 +               vbo = (u64)bit << index_bits;
71668 +               if (vbo >= i_size) {
71669 +                       ctx->pos = eod;
71670 +                       goto out;
71671 +               }
71673 +               err = indx_used_bit(&ni->dir, ni, &bit);
71674 +               if (err)
71675 +                       goto out;
71677 +               if (bit == MINUS_ONE_T) {
71678 +                       ctx->pos = eod;
71679 +                       goto out;
71680 +               }
71682 +               vbo = (u64)bit << index_bits;
71683 +               if (vbo >= i_size) {
71684 +                       ntfs_inode_err(dir, "Looks like your dir is corrupt");
71685 +                       err = -EINVAL;
71686 +                       goto out;
71687 +               }
71689 +               err = indx_read(&ni->dir, ni, bit << ni->dir.idx2vbn_bits,
71690 +                               &node);
71691 +               if (err)
71692 +                       goto out;
71694 +               err = ntfs_read_hdr(sbi, ni, &node->index->ihdr,
71695 +                                   vbo + sbi->record_size, pos, name, ctx);
71696 +               if (err)
71697 +                       goto out;
71699 +               bit += 1;
71700 +       }
71702 +out:
71704 +       __putname(name);
71705 +       put_indx_node(node);
71707 +       if (err == -ENOENT) {
71708 +               err = 0;
71709 +               ctx->pos = pos;
71710 +       }
71712 +       return err;
71713 +}
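A short userspace sketch of what exercises ntfs_readdir: every readdir(3) call ends up in the iterate_shared hook via getdents64, and dir_emit hands entries back. The path is an assumed example:

#include <dirent.h>
#include <stdio.h>

int main(void)
{
	DIR *d = opendir("/mnt/ntfs"); /* assumed NTFS mount point */
	struct dirent *de;

	if (!d)
		return 1;
	while ((de = readdir(d)))      /* reaches ntfs_readdir via getdents64 */
		printf("%s\n", de->d_name);
	closedir(d);
	return 0;
}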
71715 +static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
71716 +                         size_t *files)
71717 +{
71718 +       int err = 0;
71719 +       struct ntfs_inode *ni = ntfs_i(dir);
71720 +       struct NTFS_DE *e = NULL;
71721 +       struct INDEX_ROOT *root;
71722 +       struct INDEX_HDR *hdr;
71723 +       const struct ATTR_FILE_NAME *fname;
71724 +       u32 e_size, off, end;
71725 +       u64 vbo = 0;
71726 +       size_t drs = 0, fles = 0, bit = 0;
71727 +       loff_t i_size = ni->vfs_inode.i_size;
71728 +       struct indx_node *node = NULL;
71729 +       u8 index_bits = ni->dir.index_bits;
71731 +       if (is_empty)
71732 +               *is_empty = true;
71734 +       root = indx_get_root(&ni->dir, ni, NULL, NULL);
71735 +       if (!root)
71736 +               return -EINVAL;
71738 +       hdr = &root->ihdr;
71740 +       for (;;) {
71741 +               end = le32_to_cpu(hdr->used);
71742 +               off = le32_to_cpu(hdr->de_off);
71744 +               for (; off + sizeof(struct NTFS_DE) <= end; off += e_size) {
71745 +                       e = Add2Ptr(hdr, off);
71746 +                       e_size = le16_to_cpu(e->size);
71747 +                       if (e_size < sizeof(struct NTFS_DE) ||
71748 +                           off + e_size > end)
71749 +                               break;
71751 +                       if (de_is_last(e))
71752 +                               break;
71754 +                       fname = de_get_fname(e);
71755 +                       if (!fname)
71756 +                               continue;
71758 +                       if (fname->type == FILE_NAME_DOS)
71759 +                               continue;
71761 +                       if (is_empty) {
71762 +                               *is_empty = false;
71763 +                               if (!dirs && !files)
71764 +                                       goto out;
71765 +                       }
71767 +                       if (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY)
71768 +                               drs += 1;
71769 +                       else
71770 +                               fles += 1;
71771 +               }
71773 +               if (vbo >= i_size)
71774 +                       goto out;
71776 +               err = indx_used_bit(&ni->dir, ni, &bit);
71777 +               if (err)
71778 +                       goto out;
71780 +               if (bit == MINUS_ONE_T)
71781 +                       goto out;
71783 +               vbo = (u64)bit << index_bits;
71784 +               if (vbo >= i_size)
71785 +                       goto out;
71787 +               err = indx_read(&ni->dir, ni, bit << ni->dir.idx2vbn_bits,
71788 +                               &node);
71789 +               if (err)
71790 +                       goto out;
71792 +               hdr = &node->index->ihdr;
71793 +               bit += 1;
71794 +               vbo = (u64)bit << ni->dir.idx2vbn_bits;
71795 +       }
71797 +out:
71798 +       put_indx_node(node);
71799 +       if (dirs)
71800 +               *dirs = drs;
71801 +       if (files)
71802 +               *files = fles;
71804 +       return err;
71805 +}
71807 +bool dir_is_empty(struct inode *dir)
71808 +{
71809 +       bool is_empty = false;
71811 +       ntfs_dir_count(dir, &is_empty, NULL, NULL);
71813 +       return is_empty;
71814 +}
71816 +const struct file_operations ntfs_dir_operations = {
71817 +       .llseek = generic_file_llseek,
71818 +       .read = generic_read_dir,
71819 +       .iterate_shared = ntfs_readdir,
71820 +       .fsync = generic_file_fsync,
71821 +       .open = ntfs_file_open,
71822 +};
71823 diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
71824 new file mode 100644
71825 index 000000000000..347baf674008
71826 --- /dev/null
71827 +++ b/fs/ntfs3/file.c
71828 @@ -0,0 +1,1130 @@
71829 +// SPDX-License-Identifier: GPL-2.0
71830 +/*
71831 + *
71832 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
71833 + *
71834 + *  Regular file handling primitives for NTFS-based filesystems.
71835 + */
71836 +#include <linux/backing-dev.h>
71837 +#include <linux/buffer_head.h>
71838 +#include <linux/compat.h>
71839 +#include <linux/falloc.h>
71840 +#include <linux/fiemap.h>
71841 +#include <linux/msdos_fs.h> /* FAT_IOCTL_XXX */
71842 +#include <linux/nls.h>
71844 +#include "debug.h"
71845 +#include "ntfs.h"
71846 +#include "ntfs_fs.h"
71848 +static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
71849 +{
71850 +       struct fstrim_range __user *user_range;
71851 +       struct fstrim_range range;
71852 +       struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
71853 +       int err;
71855 +       if (!capable(CAP_SYS_ADMIN))
71856 +               return -EPERM;
71858 +       if (!blk_queue_discard(q))
71859 +               return -EOPNOTSUPP;
71861 +       user_range = (struct fstrim_range __user *)arg;
71862 +       if (copy_from_user(&range, user_range, sizeof(range)))
71863 +               return -EFAULT;
71865 +       range.minlen = max_t(u32, range.minlen, q->limits.discard_granularity);
71867 +       err = ntfs_trim_fs(sbi, &range);
71868 +       if (err < 0)
71869 +               return err;
71871 +       if (copy_to_user(user_range, &range, sizeof(range)))
71872 +               return -EFAULT;
71874 +       return 0;
71875 +}
71877 +static long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
71878 +{
71879 +       struct inode *inode = file_inode(filp);
71880 +       struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
71881 +       u32 __user *user_attr = (u32 __user *)arg;
71883 +       switch (cmd) {
71884 +       case FAT_IOCTL_GET_ATTRIBUTES:
71885 +               return put_user(le32_to_cpu(ntfs_i(inode)->std_fa), user_attr);
71887 +       case FAT_IOCTL_GET_VOLUME_ID:
71888 +               return put_user(sbi->volume.ser_num, user_attr);
71890 +       case FITRIM:
71891 +               return ntfs_ioctl_fitrim(sbi, arg);
71892 +       }
71893 +       return -ENOTTY; /* Inappropriate ioctl for device */
71894 +}
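Because the driver reuses the FAT ioctl numbers, existing FAT-aware tools work unchanged. A minimal userspace sketch querying both values handled above (the file path is an assumed example):

#include <fcntl.h>
#include <linux/msdos_fs.h> /* FAT_IOCTL_GET_ATTRIBUTES, FAT_IOCTL_GET_VOLUME_ID */
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	__u32 attrs = 0, vol_id = 0;
	int fd = open("/mnt/ntfs/file", O_RDONLY); /* assumed path */

	if (fd < 0)
		return 1;
	if (!ioctl(fd, FAT_IOCTL_GET_ATTRIBUTES, &attrs))
		printf("attributes: 0x%x\n", attrs);
	if (!ioctl(fd, FAT_IOCTL_GET_VOLUME_ID, &vol_id))
		printf("volume id: 0x%x\n", vol_id);
	close(fd);
	return 0;
}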
71896 +#ifdef CONFIG_COMPAT
71897 +static long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
71899 +{
71900 +       return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
71901 +}
71902 +#endif
71904 +/*
71905 + * inode_operations::getattr
71906 + */
71907 +int ntfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
71908 +                struct kstat *stat, u32 request_mask, u32 flags)
71909 +{
71910 +       struct inode *inode = d_inode(path->dentry);
71911 +       struct ntfs_inode *ni = ntfs_i(inode);
71913 +       if (is_compressed(ni))
71914 +               stat->attributes |= STATX_ATTR_COMPRESSED;
71916 +       if (is_encrypted(ni))
71917 +               stat->attributes |= STATX_ATTR_ENCRYPTED;
71919 +       stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED;
71921 +       generic_fillattr(mnt_userns, inode, stat);
71923 +       stat->result_mask |= STATX_BTIME;
71924 +       stat->btime = ni->i_crtime;
71926 +       return 0;
71927 +}
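Since ntfs_getattr reports stx_btime, userspace can read the NTFS creation time through statx(2). A minimal sketch (the path is an assumed example; STATX_BTIME needs a reasonably recent kernel and glibc):

#include <fcntl.h>   /* AT_FDCWD */
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct statx stx;

	if (statx(AT_FDCWD, "/mnt/ntfs/file", 0, STATX_BTIME, &stx))
		return 1;
	if (stx.stx_mask & STATX_BTIME) /* filled in by ntfs_getattr above */
		printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
	return 0;
}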
71929 +static int ntfs_extend_initialized_size(struct file *file,
71930 +                                       struct ntfs_inode *ni,
71931 +                                       const loff_t valid,
71932 +                                       const loff_t new_valid)
71933 +{
71934 +       struct inode *inode = &ni->vfs_inode;
71935 +       struct address_space *mapping = inode->i_mapping;
71936 +       struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
71937 +       loff_t pos = valid;
71938 +       int err;
71940 +       if (is_resident(ni)) {
71941 +               ni->i_valid = new_valid;
71942 +               return 0;
71943 +       }
71945 +       WARN_ON(is_compressed(ni));
71946 +       WARN_ON(valid >= new_valid);
71948 +       for (;;) {
71949 +               u32 zerofrom, len;
71950 +               struct page *page;
71951 +               void *fsdata;
71952 +               u8 bits;
71953 +               CLST vcn, lcn, clen;
71955 +               if (is_sparsed(ni)) {
71956 +                       bits = sbi->cluster_bits;
71957 +                       vcn = pos >> bits;
71959 +                       err = attr_data_get_block(ni, vcn, 0, &lcn, &clen,
71960 +                                                 NULL);
71961 +                       if (err)
71962 +                               goto out;
71964 +                       if (lcn == SPARSE_LCN) {
71965 +                               loff_t vbo = (loff_t)vcn << bits;
71966 +                               loff_t to = vbo + ((loff_t)clen << bits);
71968 +                               if (to <= new_valid) {
71969 +                                       ni->i_valid = to;
71970 +                                       pos = to;
71971 +                                       goto next;
71972 +                               }
71974 +                               if (vbo < pos) {
71975 +                                       pos = vbo;
71976 +                               } else {
71977 +                                       to = (new_valid >> bits) << bits;
71978 +                                       if (pos < to) {
71979 +                                               ni->i_valid = to;
71980 +                                               pos = to;
71981 +                                               goto next;
71982 +                                       }
71983 +                               }
71984 +                       }
71985 +               }
71987 +               zerofrom = pos & (PAGE_SIZE - 1);
71988 +               len = PAGE_SIZE - zerofrom;
71990 +               if (pos + len > new_valid)
71991 +                       len = new_valid - pos;
71993 +               err = pagecache_write_begin(file, mapping, pos, len, 0, &page,
71994 +                                           &fsdata);
71995 +               if (err)
71996 +                       goto out;
71998 +               zero_user_segment(page, zerofrom, PAGE_SIZE);
72000 +               /* This function puts the page in any case */
72001 +               err = pagecache_write_end(file, mapping, pos, len, len, page,
72002 +                                         fsdata);
72003 +               if (err < 0)
72004 +                       goto out;
72005 +               pos += len;
72007 +next:
72008 +               if (pos >= new_valid)
72009 +                       break;
72011 +               balance_dirty_pages_ratelimited(mapping);
72012 +               cond_resched();
72013 +       }
72015 +       mark_inode_dirty(inode);
72017 +       return 0;
72019 +out:
72020 +       ni->i_valid = valid;
72021 +       ntfs_inode_warn(inode, "failed to extend initialized size to %llx.",
72022 +                       new_valid);
72023 +       return err;
72024 +}
72026 +/*
72027 + * ntfs_sparse_cluster
72028 + *
72029 + * Helper function to zero newly allocated clusters.
72030 + */
72031 +void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn,
72032 +                        CLST len)
72033 +{
72034 +       struct address_space *mapping = inode->i_mapping;
72035 +       struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
72036 +       u64 vbo = (u64)vcn << sbi->cluster_bits;
72037 +       u64 bytes = (u64)len << sbi->cluster_bits;
72038 +       u32 blocksize = 1 << inode->i_blkbits;
72039 +       pgoff_t idx0 = page0 ? page0->index : -1;
72040 +       loff_t vbo_clst = vbo & sbi->cluster_mask_inv;
72041 +       loff_t end = ntfs_up_cluster(sbi, vbo + bytes);
72042 +       pgoff_t idx = vbo_clst >> PAGE_SHIFT;
72043 +       u32 from = vbo_clst & (PAGE_SIZE - 1);
72044 +       pgoff_t idx_end = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
72045 +       loff_t page_off;
72046 +       u32 to;
72047 +       bool partial;
72048 +       struct page *page;
72050 +       for (; idx < idx_end; idx += 1, from = 0) {
72051 +               page = idx == idx0 ? page0 : grab_cache_page(mapping, idx);
72053 +               if (!page)
72054 +                       continue;
72056 +               page_off = (loff_t)idx << PAGE_SHIFT;
72057 +               to = (page_off + PAGE_SIZE) > end ? (end - page_off)
72058 +                                                 : PAGE_SIZE;
72059 +               partial = false;
72061 +               if ((from || PAGE_SIZE != to) &&
72062 +                   likely(!page_has_buffers(page))) {
72063 +                       create_empty_buffers(page, blocksize, 0);
72064 +                       if (!page_has_buffers(page)) {
72065 +                               ntfs_inode_err(
72066 +                                       inode,
72067 +                                       "failed to allocate page buffers.");
72068 +                               /*err = -ENOMEM;*/
72069 +                               goto unlock_page;
72070 +                       }
72071 +               }
72073 +               if (page_has_buffers(page)) {
72074 +                       struct buffer_head *head, *bh;
72075 +                       u32 bh_off = 0;
72077 +                       bh = head = page_buffers(page);
72078 +                       do {
72079 +                               u32 bh_next = bh_off + blocksize;
72081 +                               if (from <= bh_off && bh_next <= to) {
72082 +                                       set_buffer_uptodate(bh);
72083 +                                       mark_buffer_dirty(bh);
72084 +                               } else if (!buffer_uptodate(bh)) {
72085 +                                       partial = true;
72086 +                               }
72087 +                               bh_off = bh_next;
72088 +                       } while (head != (bh = bh->b_this_page));
72089 +               }
72091 +               zero_user_segment(page, from, to);
72093 +               if (!partial) {
72094 +                       if (!PageUptodate(page))
72095 +                               SetPageUptodate(page);
72096 +                       set_page_dirty(page);
72097 +               }
72099 +unlock_page:
72100 +               if (idx != idx0) {
72101 +                       unlock_page(page);
72102 +                       put_page(page);
72103 +               }
72104 +               cond_resched();
72105 +       }
72106 +       mark_inode_dirty(inode);
72107 +}
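The loop above converts a cluster run into page indexes plus intra-page byte ranges before zeroing. A standalone sketch of that conversion, assuming 4K clusters and 4K pages (all values illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int cluster_bits = 12, page_shift = 12;  /* assumed 4K/4K */
	unsigned long long vcn = 5, len = 3;              /* sample cluster run */

	unsigned long long vbo = vcn << cluster_bits;         /* first byte of run */
	unsigned long long end = (vcn + len) << cluster_bits; /* one past last byte */
	unsigned long idx = vbo >> page_shift;                /* first page index */
	unsigned long idx_end = (end + 4095) >> page_shift;   /* one past last page */

	printf("pages [%lu, %lu) cover bytes [%llu, %llu)\n", idx, idx_end, vbo, end);
	return 0;
}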
72109 +/*
72110 + * file_operations::mmap
72111 + */
72112 +static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
72113 +{
72114 +       struct address_space *mapping = file->f_mapping;
72115 +       struct inode *inode = mapping->host;
72116 +       struct ntfs_inode *ni = ntfs_i(inode);
72117 +       u64 to, from = ((u64)vma->vm_pgoff << PAGE_SHIFT);
72118 +       bool rw = vma->vm_flags & VM_WRITE;
72119 +       int err;
72121 +       if (is_encrypted(ni)) {
72122 +               ntfs_inode_warn(inode,
72123 +                               "mmap is not supported for encrypted files");
72124 +               err = -EOPNOTSUPP;
72125 +               goto out;
72126 +       }
72128 +       if (!rw)
72129 +               goto do_map;
72131 +       if (is_compressed(ni)) {
72132 +               ntfs_inode_warn(
72133 +                       inode,
72134 +                       "mmap(write) is not supported for compressed files");
72135 +               err = -EOPNOTSUPP;
72136 +               goto out;
72137 +       }
72139 +       to = min_t(loff_t, i_size_read(inode),
72140 +                  from + vma->vm_end - vma->vm_start);
72142 +       if (is_sparsed(ni)) {
72143 +               /* allocate clusters for rw map */
72144 +               struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
72145 +               CLST vcn, lcn, len;
72146 +               CLST end = bytes_to_cluster(sbi, to);
72147 +               bool new;
72149 +               for (vcn = from >> sbi->cluster_bits; vcn < end; vcn += len) {
72150 +                       err = attr_data_get_block(ni, vcn, 1, &lcn, &len, &new);
72151 +                       if (err)
72152 +                               goto out;
72153 +                       if (!new)
72154 +                               continue;
72155 +                       ntfs_sparse_cluster(inode, NULL, vcn, 1);
72156 +               }
72157 +       }
72159 +       if (ni->i_valid < to) {
72160 +               inode_lock(inode);
72161 +               err = ntfs_extend_initialized_size(file, ni, ni->i_valid, to);
72162 +               inode_unlock(inode);
72163 +               if (err)
72164 +                       goto out;
72165 +       }
72167 +do_map:
72168 +       err = generic_file_mmap(file, vma);
72169 +out:
72170 +       return err;
72171 +}
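For context, a minimal userspace sketch of the rw-mmap case handled above (the mount point and file are hypothetical; assume an existing sparse file of at least 64 KiB). A MAP_SHARED writable mapping is what drives the cluster-preallocation loop in ntfs_file_mmap:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/mnt/ntfs/sparse.bin", O_RDWR);  /* hypothetical path */
            if (fd < 0)
                    return 1;
            /* rw map: ntfs_file_mmap() preallocates clusters up to i_size */
            char *p = mmap(NULL, 65536, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (p == MAP_FAILED)
                    return 1;
            memcpy(p, "hello", 5);  /* the store faults into already-allocated clusters */
            munmap(p, 65536);
            close(fd);
            return 0;
    }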
72173 +static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
72174 +                      struct file *file)
72175 +{
72176 +       struct ntfs_inode *ni = ntfs_i(inode);
72177 +       struct address_space *mapping = inode->i_mapping;
72178 +       loff_t end = pos + count;
72179 +       bool extend_init = file && pos > ni->i_valid;
72180 +       int err;
72182 +       if (end <= inode->i_size && !extend_init)
72183 +               return 0;
72185 +       /* Mark rw ntfs as dirty. It will be cleared at umount. */
72186 +       ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);
72188 +       if (end > inode->i_size) {
72189 +               err = ntfs_set_size(inode, end);
72190 +               if (err)
72191 +                       goto out;
72192 +               inode->i_size = end;
72193 +       }
72195 +       if (extend_init && !is_compressed(ni)) {
72196 +               err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos);
72197 +               if (err)
72198 +                       goto out;
72199 +       } else {
72200 +               err = 0;
72201 +       }
72203 +       inode->i_ctime = inode->i_mtime = current_time(inode);
72204 +       mark_inode_dirty(inode);
72206 +       if (IS_SYNC(inode)) {
72207 +               int err2;
72209 +               err = filemap_fdatawrite_range(mapping, pos, end - 1);
72210 +               err2 = sync_mapping_buffers(mapping);
72211 +               if (!err)
72212 +                       err = err2;
72213 +               err2 = write_inode_now(inode, 1);
72214 +               if (!err)
72215 +                       err = err2;
72216 +               if (!err)
72217 +                       err = filemap_fdatawait_range(mapping, pos, end - 1);
72218 +       }
72220 +out:
72221 +       return err;
72222 +}
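The i_valid handling above tracks NTFS's "valid data length": bytes between i_valid and i_size must read back as zeros, which is why an extending write first extends the initialized size. A small sketch of the observable behaviour (hypothetical path, not part of the patch):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* hypothetical file on an ntfs3 mount */
            int fd = open("/mnt/ntfs/f.bin", O_RDWR | O_CREAT | O_TRUNC, 0644);
            char b = 1;

            pwrite(fd, "data", 4, 0);      /* valid size is now ~4 bytes        */
            ftruncate(fd, 1 << 20);        /* i_size = 1 MiB, i_valid unchanged */
            pread(fd, &b, 1, 512 * 1024);  /* past i_valid: must read as zero   */
            printf("%d\n", b);             /* prints 0                          */
            close(fd);
            return 0;
    }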
72224 +static int ntfs_truncate(struct inode *inode, loff_t new_size)
72225 +{
72226 +       struct super_block *sb = inode->i_sb;
72227 +       struct ntfs_inode *ni = ntfs_i(inode);
72228 +       int err, dirty = 0;
72229 +       u64 new_valid;
72231 +       if (!S_ISREG(inode->i_mode))
72232 +               return 0;
72234 +       if (is_compressed(ni)) {
72235 +               if (ni->i_valid > new_size)
72236 +                       ni->i_valid = new_size;
72237 +       } else {
72238 +               err = block_truncate_page(inode->i_mapping, new_size,
72239 +                                         ntfs_get_block);
72240 +               if (err)
72241 +                       return err;
72242 +       }
72244 +       new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size));
72246 +       ni_lock(ni);
72248 +       truncate_setsize(inode, new_size);
72250 +       down_write(&ni->file.run_lock);
72251 +       err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
72252 +                           &new_valid, true, NULL);
72253 +       up_write(&ni->file.run_lock);
72255 +       if (new_valid < ni->i_valid)
72256 +               ni->i_valid = new_valid;
72258 +       ni_unlock(ni);
72260 +       ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
72261 +       inode->i_ctime = inode->i_mtime = current_time(inode);
72262 +       if (!IS_DIRSYNC(inode)) {
72263 +               dirty = 1;
72264 +       } else {
72265 +               err = ntfs_sync_inode(inode);
72266 +               if (err)
72267 +                       return err;
72268 +       }
72270 +       if (dirty)
72271 +               mark_inode_dirty(inode);
72273 +       /*ntfs_flush_inodes(inode->i_sb, inode, NULL);*/
72275 +       return 0;
72276 +}
72278 +/*
72279 + * Preallocate space for a file. This implements ntfs's fallocate file
72280 + * operation, which gets called from the sys_fallocate system call. User
72281 + * space requests 'len' bytes at 'vbo'. If FALLOC_FL_KEEP_SIZE is set
72282 + * we just allocate clusters without zeroing them out. Otherwise we
72283 + * allocate and zero out clusters via an expanding truncate.
72284 + */
72285 +static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
72286 +{
72287 +       struct inode *inode = file->f_mapping->host;
72288 +       struct super_block *sb = inode->i_sb;
72289 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
72290 +       struct ntfs_inode *ni = ntfs_i(inode);
72291 +       loff_t end = vbo + len;
72292 +       loff_t vbo_down = round_down(vbo, PAGE_SIZE);
72293 +       loff_t i_size;
72294 +       int err;
72296 +       /* No support for dir */
72297 +       if (!S_ISREG(inode->i_mode))
72298 +               return -EOPNOTSUPP;
72300 +       /* Return error if mode is not supported */
72301 +       if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
72302 +                    FALLOC_FL_COLLAPSE_RANGE))
72303 +               return -EOPNOTSUPP;
72305 +       ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
72307 +       inode_lock(inode);
72308 +       i_size = inode->i_size;
72310 +       if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
72311 +               /* Should never be here, see ntfs_file_open. */
72312 +               err = -EOPNOTSUPP;
72313 +               goto out;
72314 +       }
72316 +       if (mode & FALLOC_FL_PUNCH_HOLE) {
72317 +               if (!(mode & FALLOC_FL_KEEP_SIZE)) {
72318 +                       err = -EINVAL;
72319 +                       goto out;
72320 +               }
72322 +               if (!is_sparsed(ni) && !is_compressed(ni)) {
72323 +                       ntfs_inode_warn(
72324 +                               inode,
72325 +                               "punch_hole only for sparsed/compressed files");
72326 +                       err = -EOPNOTSUPP;
72327 +                       goto out;
72328 +               }
72330 +               err = filemap_write_and_wait_range(inode->i_mapping, vbo,
72331 +                                                  end - 1);
72332 +               if (err)
72333 +                       goto out;
72335 +               err = filemap_write_and_wait_range(inode->i_mapping, end,
72336 +                                                  LLONG_MAX);
72337 +               if (err)
72338 +                       goto out;
72340 +               truncate_pagecache(inode, vbo_down);
72342 +               ni_lock(ni);
72343 +               err = attr_punch_hole(ni, vbo, len);
72344 +               ni_unlock(ni);
72345 +       } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
72346 +               if (mode & ~FALLOC_FL_COLLAPSE_RANGE) {
72347 +                       err = -EINVAL;
72348 +                       goto out;
72349 +               }
72351 +               /*
72352 +                * Write tail of the last page before removed range since
72353 +                * it will get removed from the page cache below.
72354 +                */
72355 +               err = filemap_write_and_wait_range(inode->i_mapping, vbo_down,
72356 +                                                  vbo);
72357 +               if (err)
72358 +                       goto out;
72360 +               /*
72361 +                * Write data that will be shifted, to preserve it
72362 +                * when discarding the page cache below.
72363 +                */
72364 +               err = filemap_write_and_wait_range(inode->i_mapping, end,
72365 +                                                  LLONG_MAX);
72366 +               if (err)
72367 +                       goto out;
72369 +               truncate_pagecache(inode, vbo_down);
72371 +               ni_lock(ni);
72372 +               err = attr_collapse_range(ni, vbo, len);
72373 +               ni_unlock(ni);
72374 +       } else {
72375 +               /*
72376 +                * normal file: allocate clusters, do not change 'valid' size
72377 +                */
72378 +               err = ntfs_set_size(inode, max(end, i_size));
72379 +               if (err)
72380 +                       goto out;
72382 +               if (is_sparsed(ni) || is_compressed(ni)) {
72383 +                       CLST vcn_v = ni->i_valid >> sbi->cluster_bits;
72384 +                       CLST vcn = vbo >> sbi->cluster_bits;
72385 +                       CLST cend = bytes_to_cluster(sbi, end);
72386 +                       CLST lcn, clen;
72387 +                       bool new;
72389 +                       /*
72390 +                        * Allocate but do not zero new clusters (see comments below).
72391 +                        * This breaks security: one can read unused on-disk areas.
72392 +                        * Zeroing these clusters may take too long.
72393 +                        * Maybe we should check here for root rights?
72394 +                        */
72395 +                       for (; vcn < cend; vcn += clen) {
72396 +                               err = attr_data_get_block(ni, vcn, cend - vcn,
72397 +                                                         &lcn, &clen, &new);
72398 +                               if (err)
72399 +                                       goto out;
72400 +                               if (!new || vcn >= vcn_v)
72401 +                                       continue;
72403 +                               /*
72404 +                                * Unwritten area
72405 +                                * NTFS is not able to store several unwritten areas
72406 +                                * Activate 'ntfs_sparse_cluster' to zero newly allocated clusters
72407 +                                *
72408 +                                * Dangerous in case:
72409 +                                * 1G of sparsed clusters + 1 cluster of data =>
72410 +                                * valid_size == 1G + 1 cluster
72411 +                                * fallocate(1G) will zero 1G and this can take very long
72412 +                                * xfstest 016/086 will fail without 'ntfs_sparse_cluster'
72413 +                                */
72414 +                               /*ntfs_sparse_cluster(inode, NULL, vcn,
72415 +                                *                  min(vcn_v - vcn, clen));
72416 +                                */
72417 +                       }
72418 +               }
72420 +               if (mode & FALLOC_FL_KEEP_SIZE) {
72421 +                       ni_lock(ni);
72422 +                       /* true - keep preallocated */
72423 +                       err = attr_set_size(ni, ATTR_DATA, NULL, 0,
72424 +                                           &ni->file.run, i_size, &ni->i_valid,
72425 +                                           true, NULL);
72426 +                       ni_unlock(ni);
72427 +               }
72428 +       }
72430 +       if (!err) {
72431 +               inode->i_ctime = inode->i_mtime = current_time(inode);
72432 +               mark_inode_dirty(inode);
72433 +       }
72434 +out:
72435 +       if (err == -EFBIG)
72436 +               err = -ENOSPC;
72438 +       inode_unlock(inode);
72439 +       return err;
72440 +}
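As a usage illustration of the mode handling above (sketch only; the path is hypothetical): plain preallocation grows i_size, FALLOC_FL_KEEP_SIZE only reserves clusters, and FALLOC_FL_PUNCH_HOLE is accepted only together with FALLOC_FL_KEEP_SIZE (and only on sparsed/compressed files, per the checks above):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <linux/falloc.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/mnt/ntfs/big.bin", O_RDWR | O_CREAT, 0644); /* hypothetical */

            fallocate(fd, 0, 0, 1 << 20);                 /* allocate, i_size -> 1 MiB */
            fallocate(fd, FALLOC_FL_KEEP_SIZE,
                      1 << 20, 1 << 20);                  /* reserve, i_size unchanged */
            fallocate(fd, FALLOC_FL_PUNCH_HOLE |
                          FALLOC_FL_KEEP_SIZE, 0, 4096);  /* hole; KEEP_SIZE mandatory */
            close(fd);
            return 0;
    }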
72442 +/*
72443 + * inode_operations::setattr
72444 + */
72445 +int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
72446 +                 struct iattr *attr)
72447 +{
72448 +       struct super_block *sb = dentry->d_sb;
72449 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
72450 +       struct inode *inode = d_inode(dentry);
72451 +       struct ntfs_inode *ni = ntfs_i(inode);
72452 +       u32 ia_valid = attr->ia_valid;
72453 +       umode_t mode = inode->i_mode;
72454 +       int err;
72456 +       if (sbi->options.no_acs_rules) {
72457 +               /* "no access rules" - force any changes of time etc. */
72458 +               attr->ia_valid |= ATTR_FORCE;
72459 +               /* and disable editing of some attributes */
72460 +               attr->ia_valid &= ~(ATTR_UID | ATTR_GID | ATTR_MODE);
72461 +               ia_valid = attr->ia_valid;
72462 +       }
72464 +       err = setattr_prepare(mnt_userns, dentry, attr);
72465 +       if (err)
72466 +               goto out;
72468 +       if (ia_valid & ATTR_SIZE) {
72469 +               loff_t oldsize = inode->i_size;
72471 +               if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
72472 +                       /* Should never be here, see ntfs_file_open. */
72473 +                       err = -EOPNOTSUPP;
72474 +                       goto out;
72475 +               }
72476 +               inode_dio_wait(inode);
72478 +               if (attr->ia_size < oldsize)
72479 +                       err = ntfs_truncate(inode, attr->ia_size);
72480 +               else if (attr->ia_size > oldsize)
72481 +                       err = ntfs_extend(inode, attr->ia_size, 0, NULL);
72483 +               if (err)
72484 +                       goto out;
72486 +               ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
72487 +       }
72489 +       setattr_copy(mnt_userns, inode, attr);
72491 +       if (mode != inode->i_mode) {
72492 +               err = ntfs_acl_chmod(mnt_userns, inode);
72493 +               if (err)
72494 +                       goto out;
72496 +               /* linux 'w' -> windows 'ro' */
72497 +               if (0222 & inode->i_mode)
72498 +                       ni->std_fa &= ~FILE_ATTRIBUTE_READONLY;
72499 +               else
72500 +                       ni->std_fa |= FILE_ATTRIBUTE_READONLY;
72501 +       }
72503 +       mark_inode_dirty(inode);
72504 +out:
72505 +       return err;
72506 +}
72508 +static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
72509 +{
72510 +       ssize_t err;
72511 +       size_t count = iov_iter_count(iter);
72512 +       struct file *file = iocb->ki_filp;
72513 +       struct inode *inode = file->f_mapping->host;
72514 +       struct ntfs_inode *ni = ntfs_i(inode);
72516 +       if (is_encrypted(ni)) {
72517 +               ntfs_inode_warn(inode, "encrypted i/o not supported");
72518 +               return -EOPNOTSUPP;
72519 +       }
72521 +       if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
72522 +               ntfs_inode_warn(inode, "direct i/o + compressed not supported");
72523 +               return -EOPNOTSUPP;
72524 +       }
72526 +#ifndef CONFIG_NTFS3_LZX_XPRESS
72527 +       if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
72528 +               ntfs_inode_warn(
72529 +                       inode,
72530 +                       "activate CONFIG_NTFS3_LZX_XPRESS to read external compressed files");
72531 +               return -EOPNOTSUPP;
72532 +       }
72533 +#endif
72535 +       if (is_dedup(ni)) {
72536 +               ntfs_inode_warn(inode, "read deduplicated not supported");
72537 +               return -EOPNOTSUPP;
72538 +       }
72540 +       err = count ? generic_file_read_iter(iocb, iter) : 0;
72542 +       return err;
72543 +}
72545 +/* Returns an array of locked pages. */
72546 +static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index,
72547 +                               struct page **pages, u32 pages_per_frame,
72548 +                               bool *frame_uptodate)
72549 +{
72550 +       gfp_t gfp_mask = mapping_gfp_mask(mapping);
72551 +       u32 npages;
72553 +       *frame_uptodate = true;
72555 +       for (npages = 0; npages < pages_per_frame; npages++, index++) {
72556 +               struct page *page;
72558 +               page = find_or_create_page(mapping, index, gfp_mask);
72559 +               if (!page) {
72560 +                       while (npages--) {
72561 +                               page = pages[npages];
72562 +                               unlock_page(page);
72563 +                               put_page(page);
72564 +                       }
72566 +                       return -ENOMEM;
72567 +               }
72569 +               if (!PageUptodate(page))
72570 +                       *frame_uptodate = false;
72572 +               pages[npages] = page;
72573 +       }
72575 +       return 0;
72576 +}
72578 +/* Helper for ntfs_file_write_iter (compressed files). */
72579 +static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
72580 +{
72581 +       int err;
72582 +       struct file *file = iocb->ki_filp;
72583 +       size_t count = iov_iter_count(from);
72584 +       loff_t pos = iocb->ki_pos;
72585 +       struct inode *inode = file_inode(file);
72586 +       loff_t i_size = inode->i_size;
72587 +       struct address_space *mapping = inode->i_mapping;
72588 +       struct ntfs_inode *ni = ntfs_i(inode);
72589 +       u64 valid = ni->i_valid;
72590 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
72591 +       struct page *page, **pages = NULL;
72592 +       size_t written = 0;
72593 +       u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
72594 +       u32 frame_size = 1u << frame_bits;
72595 +       u32 pages_per_frame = frame_size >> PAGE_SHIFT;
72596 +       u32 ip, off;
72597 +       CLST frame;
72598 +       u64 frame_vbo;
72599 +       pgoff_t index;
72600 +       bool frame_uptodate;
72602 +       if (frame_size < PAGE_SIZE) {
72603 +               /*
72604 +                * frame_size == 8K if cluster 512
72605 +                * frame_size == 64K if cluster 4096
72606 +                */
72607 +               ntfs_inode_warn(inode, "page size is bigger than frame size");
72608 +               return -EOPNOTSUPP;
72609 +       }
72611 +       pages = ntfs_malloc(pages_per_frame * sizeof(struct page *));
72612 +       if (!pages)
72613 +               return -ENOMEM;
72615 +       current->backing_dev_info = inode_to_bdi(inode);
72616 +       err = file_remove_privs(file);
72617 +       if (err)
72618 +               goto out;
72620 +       err = file_update_time(file);
72621 +       if (err)
72622 +               goto out;
72624 +       /* zero range [valid : pos) */
72625 +       while (valid < pos) {
72626 +               CLST lcn, clen;
72628 +               frame = valid >> frame_bits;
72629 +               frame_vbo = valid & ~(frame_size - 1);
72630 +               off = valid & (frame_size - 1);
72632 +               err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 0, &lcn,
72633 +                                         &clen, NULL);
72634 +               if (err)
72635 +                       goto out;
72637 +               if (lcn == SPARSE_LCN) {
72638 +                       ni->i_valid = valid =
72639 +                               frame_vbo + ((u64)clen << sbi->cluster_bits);
72640 +                       continue;
72641 +               }
72643 +               /* Load full frame */
72644 +               err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT,
72645 +                                          pages, pages_per_frame,
72646 +                                          &frame_uptodate);
72647 +               if (err)
72648 +                       goto out;
72650 +               if (!frame_uptodate && off) {
72651 +                       err = ni_read_frame(ni, frame_vbo, pages,
72652 +                                           pages_per_frame);
72653 +                       if (err) {
72654 +                               for (ip = 0; ip < pages_per_frame; ip++) {
72655 +                                       page = pages[ip];
72656 +                                       unlock_page(page);
72657 +                                       put_page(page);
72658 +                               }
72659 +                               goto out;
72660 +                       }
72661 +               }
72663 +               ip = off >> PAGE_SHIFT;
72664 +               off = offset_in_page(valid);
72665 +               for (; ip < pages_per_frame; ip++, off = 0) {
72666 +                       page = pages[ip];
72667 +                       zero_user_segment(page, off, PAGE_SIZE);
72668 +                       flush_dcache_page(page);
72669 +                       SetPageUptodate(page);
72670 +               }
72672 +               ni_lock(ni);
72673 +               err = ni_write_frame(ni, pages, pages_per_frame);
72674 +               ni_unlock(ni);
72676 +               for (ip = 0; ip < pages_per_frame; ip++) {
72677 +                       page = pages[ip];
72678 +                       SetPageUptodate(page);
72679 +                       unlock_page(page);
72680 +                       put_page(page);
72681 +               }
72683 +               if (err)
72684 +                       goto out;
72686 +               ni->i_valid = valid = frame_vbo + frame_size;
72687 +       }
72689 +       /* copy user data [pos : pos + count) */
72690 +       while (count) {
72691 +               size_t copied, bytes;
72693 +               off = pos & (frame_size - 1);
72694 +               bytes = frame_size - off;
72695 +               if (bytes > count)
72696 +                       bytes = count;
72698 +               frame = pos >> frame_bits;
72699 +               frame_vbo = pos & ~(frame_size - 1);
72700 +               index = frame_vbo >> PAGE_SHIFT;
72702 +               if (unlikely(iov_iter_fault_in_readable(from, bytes))) {
72703 +                       err = -EFAULT;
72704 +                       goto out;
72705 +               }
72707 +               /* Load full frame */
72708 +               err = ntfs_get_frame_pages(mapping, index, pages,
72709 +                                          pages_per_frame, &frame_uptodate);
72710 +               if (err)
72711 +                       goto out;
72713 +               if (!frame_uptodate) {
72714 +                       loff_t to = pos + bytes;
72716 +                       if (off || (to < i_size && (to & (frame_size - 1)))) {
72717 +                               err = ni_read_frame(ni, frame_vbo, pages,
72718 +                                                   pages_per_frame);
72719 +                               if (err) {
72720 +                                       for (ip = 0; ip < pages_per_frame;
72721 +                                            ip++) {
72722 +                                               page = pages[ip];
72723 +                                               unlock_page(page);
72724 +                                               put_page(page);
72725 +                                       }
72726 +                                       goto out;
72727 +                               }
72728 +                       }
72729 +               }
72731 +               WARN_ON(!bytes);
72732 +               copied = 0;
72733 +               ip = off >> PAGE_SHIFT;
72734 +               off = offset_in_page(pos);
72736 +               /* copy user data to pages */
72737 +               for (;;) {
72738 +                       size_t cp, tail = PAGE_SIZE - off;
72740 +                       page = pages[ip];
72741 +                       cp = iov_iter_copy_from_user_atomic(page, from, off,
72742 +                                                           min(tail, bytes));
72743 +                       flush_dcache_page(page);
72744 +                       iov_iter_advance(from, cp);
72745 +                       copied += cp;
72746 +                       bytes -= cp;
72747 +                       if (!bytes || !cp)
72748 +                               break;
72750 +                       if (cp < tail) {
72751 +                               off += cp;
72752 +                       } else {
72753 +                               ip++;
72754 +                               off = 0;
72755 +                       }
72756 +               }
72758 +               ni_lock(ni);
72759 +               err = ni_write_frame(ni, pages, pages_per_frame);
72760 +               ni_unlock(ni);
72762 +               for (ip = 0; ip < pages_per_frame; ip++) {
72763 +                       page = pages[ip];
72764 +                       ClearPageDirty(page);
72765 +                       SetPageUptodate(page);
72766 +                       unlock_page(page);
72767 +                       put_page(page);
72768 +               }
72770 +               if (err)
72771 +                       goto out;
72773 +               /*
72774 +                * We can loop for a long time in here. Be nice and allow
72775 +                * us to schedule out to avoid softlocking if preempt
72776 +                * is disabled.
72777 +                */
72778 +               cond_resched();
72780 +               pos += copied;
72781 +               written += copied;
72783 +               count = iov_iter_count(from);
72784 +       }
72786 +out:
72787 +       ntfs_free(pages);
72789 +       current->backing_dev_info = NULL;
72791 +       if (err < 0)
72792 +               return err;
72794 +       iocb->ki_pos += written;
72795 +       if (iocb->ki_pos > ni->i_valid)
72796 +               ni->i_valid = iocb->ki_pos;
72798 +       return written;
72799 +}
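To make the frame arithmetic above concrete: with 4 KiB clusters, frame_bits = NTFS_LZNT_CUNIT + cluster_bits = 4 + 12 = 16, so a compression frame spans 64 KiB, and the mask operations split a byte offset into a frame base and an in-frame offset. A standalone sketch with an assumed offset (values illustrative, not from the patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned frame_bits = 16;                /* 4 KiB clusters, 16 per frame */
            unsigned frame_size = 1u << frame_bits;  /* 64 KiB                       */
            unsigned long long pos = 0x12345;

            unsigned long long frame     = pos >> frame_bits;          /* frame 1 */
            unsigned long long frame_vbo = pos & ~(unsigned long long)(frame_size - 1);
            unsigned off = pos & (frame_size - 1);                     /* 0x2345  */

            printf("frame=%llu vbo=0x%llx off=0x%x\n", frame, frame_vbo, off);
            return 0;
    }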
72801 +/*
72802 + * file_operations::write_iter
72803 + */
72804 +static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
72805 +{
72806 +       struct file *file = iocb->ki_filp;
72807 +       struct address_space *mapping = file->f_mapping;
72808 +       struct inode *inode = mapping->host;
72809 +       ssize_t ret;
72810 +       struct ntfs_inode *ni = ntfs_i(inode);
72812 +       if (is_encrypted(ni)) {
72813 +               ntfs_inode_warn(inode, "encrypted i/o not supported");
72814 +               return -EOPNOTSUPP;
72815 +       }
72817 +       if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
72818 +               ntfs_inode_warn(inode, "direct i/o + compressed not supported");
72819 +               return -EOPNOTSUPP;
72820 +       }
72822 +       if (is_dedup(ni)) {
72823 +               ntfs_inode_warn(inode, "write into deduplicated not supported");
72824 +               return -EOPNOTSUPP;
72825 +       }
72827 +       if (!inode_trylock(inode)) {
72828 +               if (iocb->ki_flags & IOCB_NOWAIT)
72829 +                       return -EAGAIN;
72830 +               inode_lock(inode);
72831 +       }
72833 +       ret = generic_write_checks(iocb, from);
72834 +       if (ret <= 0)
72835 +               goto out;
72837 +       if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
72838 +               /* Should never be here, see ntfs_file_open. */
72839 +               ret = -EOPNOTSUPP;
72840 +               goto out;
72841 +       }
72843 +       ret = ntfs_extend(inode, iocb->ki_pos, ret, file);
72844 +       if (ret)
72845 +               goto out;
72847 +       ret = is_compressed(ni) ? ntfs_compress_write(iocb, from)
72848 +                               : __generic_file_write_iter(iocb, from);
72850 +out:
72851 +       inode_unlock(inode);
72853 +       if (ret > 0)
72854 +               ret = generic_write_sync(iocb, ret);
72856 +       return ret;
72857 +}
72859 +/*
72860 + * file_operations::open
72861 + */
72862 +int ntfs_file_open(struct inode *inode, struct file *file)
72863 +{
72864 +       struct ntfs_inode *ni = ntfs_i(inode);
72866 +       if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
72867 +                    (file->f_flags & O_DIRECT))) {
72868 +               return -EOPNOTSUPP;
72869 +       }
72871 +       /* Decompress "external compressed" file if opened for rw */
72872 +       if ((ni->ni_flags & NI_FLAG_COMPRESSED_MASK) &&
72873 +           (file->f_flags & (O_WRONLY | O_RDWR | O_TRUNC))) {
72874 +#ifdef CONFIG_NTFS3_LZX_XPRESS
72875 +               int err = ni_decompress_file(ni);
72877 +               if (err)
72878 +                       return err;
72879 +#else
72880 +               ntfs_inode_warn(
72881 +                       inode,
72882 +                       "activate CONFIG_NTFS3_LZX_XPRESS to write external compressed files");
72883 +               return -EOPNOTSUPP;
72884 +#endif
72885 +       }
72887 +       return generic_file_open(inode, file);
72888 +}
72890 +/*
72891 + * file_operations::release
72892 + */
72893 +static int ntfs_file_release(struct inode *inode, struct file *file)
72894 +{
72895 +       struct ntfs_inode *ni = ntfs_i(inode);
72896 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
72897 +       int err = 0;
72899 +       /* if we are the last writer on the inode, drop the block reservation */
72900 +       if (sbi->options.prealloc && ((file->f_mode & FMODE_WRITE) &&
72901 +                                     atomic_read(&inode->i_writecount) == 1)) {
72902 +               ni_lock(ni);
72903 +               down_write(&ni->file.run_lock);
72905 +               err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
72906 +                                   inode->i_size, &ni->i_valid, false, NULL);
72908 +               up_write(&ni->file.run_lock);
72909 +               ni_unlock(ni);
72910 +       }
72911 +       return err;
72912 +}
72914 +/* file_operations::fiemap */
72915 +int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
72916 +               __u64 start, __u64 len)
72917 +{
72918 +       int err;
72919 +       struct ntfs_inode *ni = ntfs_i(inode);
72921 +       if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR)
72922 +               return -EOPNOTSUPP;
72924 +       ni_lock(ni);
72926 +       err = ni_fiemap(ni, fieinfo, start, len);
72928 +       ni_unlock(ni);
72930 +       return err;
72931 +}
72933 +const struct inode_operations ntfs_file_inode_operations = {
72934 +       .getattr = ntfs_getattr,
72935 +       .setattr = ntfs3_setattr,
72936 +       .listxattr = ntfs_listxattr,
72937 +       .permission = ntfs_permission,
72938 +       .get_acl = ntfs_get_acl,
72939 +       .set_acl = ntfs_set_acl,
72940 +       .fiemap = ntfs_fiemap,
72941 +};
72943 +const struct file_operations ntfs_file_operations = {
72944 +       .llseek = generic_file_llseek,
72945 +       .read_iter = ntfs_file_read_iter,
72946 +       .write_iter = ntfs_file_write_iter,
72947 +       .unlocked_ioctl = ntfs_ioctl,
72948 +#ifdef CONFIG_COMPAT
72949 +       .compat_ioctl = ntfs_compat_ioctl,
72950 +#endif
72951 +       .splice_read = generic_file_splice_read,
72952 +       .mmap = ntfs_file_mmap,
72953 +       .open = ntfs_file_open,
72954 +       .fsync = generic_file_fsync,
72955 +       .splice_write = iter_file_splice_write,
72956 +       .fallocate = ntfs_fallocate,
72957 +       .release = ntfs_file_release,
72958 +};
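For orientation: the VFS reaches the two tables above through pointers set when an inode is instantiated. A hedged sketch of that wiring (the helper name is hypothetical; in this driver the assignments live in fs/ntfs3/inode.c, where ntfs_aops is also defined):

    static void ntfs_init_file_inode(struct inode *inode)  /* hypothetical helper */
    {
            inode->i_op  = &ntfs_file_inode_operations;    /* setattr, fiemap, ... */
            inode->i_fop = &ntfs_file_operations;          /* read/write/mmap, ... */
            inode->i_mapping->a_ops = &ntfs_aops;          /* defined in inode.c   */
    }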
72959 diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
72960 new file mode 100644
72961 index 000000000000..c3121bf9c62f
72962 --- /dev/null
72963 +++ b/fs/ntfs3/frecord.c
72964 @@ -0,0 +1,3071 @@
72965 +// SPDX-License-Identifier: GPL-2.0
72966 +/*
72967 + *
72968 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
72969 + *
72970 + */
72972 +#include <linux/blkdev.h>
72973 +#include <linux/buffer_head.h>
72974 +#include <linux/fiemap.h>
72975 +#include <linux/fs.h>
72976 +#include <linux/nls.h>
72977 +#include <linux/vmalloc.h>
72979 +#include "debug.h"
72980 +#include "ntfs.h"
72981 +#include "ntfs_fs.h"
72982 +#ifdef CONFIG_NTFS3_LZX_XPRESS
72983 +#include "lib/lib.h"
72984 +#endif
72986 +static struct mft_inode *ni_ins_mi(struct ntfs_inode *ni, struct rb_root *tree,
72987 +                                  CLST ino, struct rb_node *ins)
72988 +{
72989 +       struct rb_node **p = &tree->rb_node;
72990 +       struct rb_node *pr = NULL;
72992 +       while (*p) {
72993 +               struct mft_inode *mi;
72995 +               pr = *p;
72996 +               mi = rb_entry(pr, struct mft_inode, node);
72997 +               if (mi->rno > ino)
72998 +                       p = &pr->rb_left;
72999 +               else if (mi->rno < ino)
73000 +                       p = &pr->rb_right;
73001 +               else
73002 +                       return mi;
73003 +       }
73005 +       if (!ins)
73006 +               return NULL;
73008 +       rb_link_node(ins, pr, p);
73009 +       rb_insert_color(ins, tree);
73010 +       return rb_entry(ins, struct mft_inode, node);
73011 +}
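ni_ins_mi is the standard <linux/rbtree.h> insert-or-lookup idiom: descend to a NULL child link while remembering the parent, then splice the new node in and rebalance. Condensed for illustration only ('root', 'key' and 'new' stand for the tree, the record number and the node being inserted):

    struct rb_node **p = &root->rb_node, *parent = NULL;

    while (*p) {
            parent = *p;
            p = key < rb_entry(parent, struct mft_inode, node)->rno
                    ? &parent->rb_left : &parent->rb_right;
    }
    rb_link_node(new, parent, p);   /* attach at the empty slot     */
    rb_insert_color(new, root);     /* repaint/rotate to rebalance  */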
73013 +/*
73014 + * ni_find_mi
73015 + *
73016 + * finds mft_inode by record number
73017 + */
73018 +static struct mft_inode *ni_find_mi(struct ntfs_inode *ni, CLST rno)
73019 +{
73020 +       return ni_ins_mi(ni, &ni->mi_tree, rno, NULL);
73021 +}
73023 +/*
73024 + * ni_add_mi
73025 + *
73026 + * adds new mft_inode into ntfs_inode
73027 + */
73028 +static void ni_add_mi(struct ntfs_inode *ni, struct mft_inode *mi)
73029 +{
73030 +       ni_ins_mi(ni, &ni->mi_tree, mi->rno, &mi->node);
73031 +}
73033 +/*
73034 + * ni_remove_mi
73035 + *
73036 + * removes mft_inode from ntfs_inode
73037 + */
73038 +void ni_remove_mi(struct ntfs_inode *ni, struct mft_inode *mi)
73039 +{
73040 +       rb_erase(&mi->node, &ni->mi_tree);
73041 +}
73043 +/*
73044 + * ni_std
73045 + *
73046 + * returns pointer into std_info from primary record
73047 + */
73048 +struct ATTR_STD_INFO *ni_std(struct ntfs_inode *ni)
73049 +{
73050 +       const struct ATTRIB *attr;
73052 +       attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
73053 +       return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO))
73054 +                   : NULL;
73055 +}
73057 +/*
73058 + * ni_std5
73059 + *
73060 + * returns pointer into std_info from primary record
73061 + */
73062 +struct ATTR_STD_INFO5 *ni_std5(struct ntfs_inode *ni)
73063 +{
73064 +       const struct ATTRIB *attr;
73066 +       attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
73068 +       return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO5))
73069 +                   : NULL;
73070 +}
73072 +/*
73073 + * ni_clear
73074 + *
73075 + * clears resources allocated by ntfs_inode
73076 + */
73077 +void ni_clear(struct ntfs_inode *ni)
73078 +{
73079 +       struct rb_node *node;
73081 +       if (!ni->vfs_inode.i_nlink && is_rec_inuse(ni->mi.mrec))
73082 +               ni_delete_all(ni);
73084 +       al_destroy(ni);
73086 +       for (node = rb_first(&ni->mi_tree); node;) {
73087 +               struct rb_node *next = rb_next(node);
73088 +               struct mft_inode *mi = rb_entry(node, struct mft_inode, node);
73090 +               rb_erase(node, &ni->mi_tree);
73091 +               mi_put(mi);
73092 +               node = next;
73093 +       }
73095 +       /* bad inode always has mode == S_IFREG */
73096 +       if (ni->ni_flags & NI_FLAG_DIR)
73097 +               indx_clear(&ni->dir);
73098 +       else {
73099 +               run_close(&ni->file.run);
73100 +#ifdef CONFIG_NTFS3_LZX_XPRESS
73101 +               if (ni->file.offs_page) {
73102 +                       /* on-demand allocated page for offsets */
73103 +                       put_page(ni->file.offs_page);
73104 +                       ni->file.offs_page = NULL;
73105 +               }
73106 +#endif
73107 +       }
73109 +       mi_clear(&ni->mi);
73110 +}
73112 +/*
73113 + * ni_load_mi_ex
73114 + *
73115 + * finds mft_inode by record number.
73116 + */
73117 +int ni_load_mi_ex(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi)
73118 +{
73119 +       int err;
73120 +       struct mft_inode *r;
73122 +       r = ni_find_mi(ni, rno);
73123 +       if (r)
73124 +               goto out;
73126 +       err = mi_get(ni->mi.sbi, rno, &r);
73127 +       if (err)
73128 +               return err;
73130 +       ni_add_mi(ni, r);
73132 +out:
73133 +       if (mi)
73134 +               *mi = r;
73135 +       return 0;
73136 +}
73138 +/*
73139 + * ni_load_mi
73140 + *
73141 + * loads the mft_inode that corresponds to the given list_entry
73142 + */
73143 +int ni_load_mi(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
73144 +              struct mft_inode **mi)
73145 +{
73146 +       CLST rno;
73148 +       if (!le) {
73149 +               *mi = &ni->mi;
73150 +               return 0;
73151 +       }
73153 +       rno = ino_get(&le->ref);
73154 +       if (rno == ni->mi.rno) {
73155 +               *mi = &ni->mi;
73156 +               return 0;
73157 +       }
73158 +       return ni_load_mi_ex(ni, rno, mi);
73159 +}
73161 +/*
73162 + * ni_find_attr
73163 + *
73164 + * returns attribute and record this attribute belongs to
73165 + */
73166 +struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr,
73167 +                           struct ATTR_LIST_ENTRY **le_o, enum ATTR_TYPE type,
73168 +                           const __le16 *name, u8 name_len, const CLST *vcn,
73169 +                           struct mft_inode **mi)
73170 +{
73171 +       struct ATTR_LIST_ENTRY *le;
73172 +       struct mft_inode *m;
73174 +       if (!ni->attr_list.size ||
73175 +           (!name_len && (type == ATTR_LIST || type == ATTR_STD))) {
73176 +               if (le_o)
73177 +                       *le_o = NULL;
73178 +               if (mi)
73179 +                       *mi = &ni->mi;
73181 +               /* Look for required attribute in primary record */
73182 +               return mi_find_attr(&ni->mi, attr, type, name, name_len, NULL);
73183 +       }
73185 +       /* first look for list entry of required type */
73186 +       le = al_find_ex(ni, le_o ? *le_o : NULL, type, name, name_len, vcn);
73187 +       if (!le)
73188 +               return NULL;
73190 +       if (le_o)
73191 +               *le_o = le;
73193 +       /* Load record that contains this attribute */
73194 +       if (ni_load_mi(ni, le, &m))
73195 +               return NULL;
73197 +       /* Look for required attribute */
73198 +       attr = mi_find_attr(m, NULL, type, name, name_len, &le->id);
73200 +       if (!attr)
73201 +               goto out;
73203 +       if (!attr->non_res) {
73204 +               if (vcn && *vcn)
73205 +                       goto out;
73206 +       } else if (!vcn) {
73207 +               if (attr->nres.svcn)
73208 +                       goto out;
73209 +       } else if (le64_to_cpu(attr->nres.svcn) > *vcn ||
73210 +                  *vcn > le64_to_cpu(attr->nres.evcn)) {
73211 +               goto out;
73212 +       }
73214 +       if (mi)
73215 +               *mi = m;
73216 +       return attr;
73218 +out:
73219 +       ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
73220 +       return NULL;
73221 +}
73223 +/*
73224 + * ni_enum_attr_ex
73225 + *
73226 + * enumerates attributes in ntfs_inode
73227 + */
73228 +struct ATTRIB *ni_enum_attr_ex(struct ntfs_inode *ni, struct ATTRIB *attr,
73229 +                              struct ATTR_LIST_ENTRY **le,
73230 +                              struct mft_inode **mi)
73231 +{
73232 +       struct mft_inode *mi2;
73233 +       struct ATTR_LIST_ENTRY *le2;
73235 +       /* Do we have an attribute list? */
73236 +       if (!ni->attr_list.size) {
73237 +               *le = NULL;
73238 +               if (mi)
73239 +                       *mi = &ni->mi;
73240 +               /* Enum attributes in primary record */
73241 +               return mi_enum_attr(&ni->mi, attr);
73242 +       }
73244 +       /* get next list entry */
73245 +       le2 = *le = al_enumerate(ni, attr ? *le : NULL);
73246 +       if (!le2)
73247 +               return NULL;
73249 +       /* Load record that contains the required attribute */
73250 +       if (ni_load_mi(ni, le2, &mi2))
73251 +               return NULL;
73253 +       if (mi)
73254 +               *mi = mi2;
73256 +       /* Find attribute in loaded record */
73257 +       return rec_find_attr_le(mi2, le2);
73258 +}
73260 +/*
73261 + * ni_load_attr
73262 + *
73263 + * loads attribute that contains given vcn
73264 + */
73265 +struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
73266 +                           const __le16 *name, u8 name_len, CLST vcn,
73267 +                           struct mft_inode **pmi)
73268 +{
73269 +       struct ATTR_LIST_ENTRY *le;
73270 +       struct ATTRIB *attr;
73271 +       struct mft_inode *mi;
73272 +       struct ATTR_LIST_ENTRY *next;
73274 +       if (!ni->attr_list.size) {
73275 +               if (pmi)
73276 +                       *pmi = &ni->mi;
73277 +               return mi_find_attr(&ni->mi, NULL, type, name, name_len, NULL);
73278 +       }
73280 +       le = al_find_ex(ni, NULL, type, name, name_len, NULL);
73281 +       if (!le)
73282 +               return NULL;
73284 +       /*
73285 +        * Unfortunately ATTR_LIST_ENTRY contains only the start vcn,
73286 +        * so to find the ATTRIB segment that contains 'vcn' we have to
73287 +        * enumerate some entries.
73288 +        */
73289 +       if (vcn) {
73290 +               for (;; le = next) {
73291 +                       next = al_find_ex(ni, le, type, name, name_len, NULL);
73292 +                       if (!next || le64_to_cpu(next->vcn) > vcn)
73293 +                               break;
73294 +               }
73295 +       }
73297 +       if (ni_load_mi(ni, le, &mi))
73298 +               return NULL;
73300 +       if (pmi)
73301 +               *pmi = mi;
73303 +       attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
73304 +       if (!attr)
73305 +               return NULL;
73307 +       if (!attr->non_res)
73308 +               return attr;
73310 +       if (le64_to_cpu(attr->nres.svcn) <= vcn &&
73311 +           vcn <= le64_to_cpu(attr->nres.evcn))
73312 +               return attr;
73314 +       return NULL;
73315 +}
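A worked example of the vcn walk above (numbers invented for illustration): suppose the data attribute is split into three segments whose list entries carry start vcns 0, 16 and 32. For vcn = 20 the loop advances while the next entry's start vcn is still <= 20, so it stops on the entry with vcn 16; the final svcn <= vcn <= evcn check then confirms that this segment really covers vcn 20, and NULL is returned only if it does not.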
73317 +/*
73318 + * ni_load_all_mi
73319 + *
73320 + * loads all subrecords
73321 + */
73322 +int ni_load_all_mi(struct ntfs_inode *ni)
73323 +{
73324 +       int err;
73325 +       struct ATTR_LIST_ENTRY *le;
73327 +       if (!ni->attr_list.size)
73328 +               return 0;
73330 +       le = NULL;
73332 +       while ((le = al_enumerate(ni, le))) {
73333 +               CLST rno = ino_get(&le->ref);
73335 +               if (rno == ni->mi.rno)
73336 +                       continue;
73338 +               err = ni_load_mi_ex(ni, rno, NULL);
73339 +               if (err)
73340 +                       return err;
73341 +       }
73343 +       return 0;
73344 +}
73346 +/*
73347 + * ni_add_subrecord
73348 + *
73349 + * allocate + format + attach a new subrecord
73350 + */
73351 +bool ni_add_subrecord(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi)
73352 +{
73353 +       struct mft_inode *m;
73355 +       m = ntfs_zalloc(sizeof(struct mft_inode));
73356 +       if (!m)
73357 +               return false;
73359 +       if (mi_format_new(m, ni->mi.sbi, rno, 0, ni->mi.rno == MFT_REC_MFT)) {
73360 +               mi_put(m);
73361 +               return false;
73362 +       }
73364 +       mi_get_ref(&ni->mi, &m->mrec->parent_ref);
73366 +       ni_add_mi(ni, m);
73367 +       *mi = m;
73368 +       return true;
73369 +}
73371 +/*
73372 + * ni_remove_attr
73373 + *
73374 + * removes all attributes for the given type/name/id
73375 + */
73376 +int ni_remove_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
73377 +                  const __le16 *name, size_t name_len, bool base_only,
73378 +                  const __le16 *id)
73379 +{
73380 +       int err;
73381 +       struct ATTRIB *attr;
73382 +       struct ATTR_LIST_ENTRY *le;
73383 +       struct mft_inode *mi;
73384 +       u32 type_in;
73385 +       int diff;
73387 +       if (base_only || type == ATTR_LIST || !ni->attr_list.size) {
73388 +               attr = mi_find_attr(&ni->mi, NULL, type, name, name_len, id);
73389 +               if (!attr)
73390 +                       return -ENOENT;
73392 +               mi_remove_attr(&ni->mi, attr);
73393 +               return 0;
73394 +       }
73396 +       type_in = le32_to_cpu(type);
73397 +       le = NULL;
73399 +       for (;;) {
73400 +               le = al_enumerate(ni, le);
73401 +               if (!le)
73402 +                       return 0;
73404 +next_le2:
73405 +               diff = le32_to_cpu(le->type) - type_in;
73406 +               if (diff < 0)
73407 +                       continue;
73409 +               if (diff > 0)
73410 +                       return 0;
73412 +               if (le->name_len != name_len)
73413 +                       continue;
73415 +               if (name_len &&
73416 +                   memcmp(le_name(le), name, name_len * sizeof(short)))
73417 +                       continue;
73419 +               if (id && le->id != *id)
73420 +                       continue;
73421 +               err = ni_load_mi(ni, le, &mi);
73422 +               if (err)
73423 +                       return err;
73425 +               al_remove_le(ni, le);
73427 +               attr = mi_find_attr(mi, NULL, type, name, name_len, id);
73428 +               if (!attr)
73429 +                       return -ENOENT;
73431 +               mi_remove_attr(mi, attr);
73433 +               if (PtrOffset(ni->attr_list.le, le) >= ni->attr_list.size)
73434 +                       return 0;
73435 +               goto next_le2;
73436 +       }
73437 +}
73439 +/*
73440 + * ni_ins_new_attr
73441 + *
73442 + * inserts the attribute into the record
73443 + * Returns a not fully constructed attribute, or NULL if it was not possible to create it
73444 + */
73445 +static struct ATTRIB *ni_ins_new_attr(struct ntfs_inode *ni,
73446 +                                     struct mft_inode *mi,
73447 +                                     struct ATTR_LIST_ENTRY *le,
73448 +                                     enum ATTR_TYPE type, const __le16 *name,
73449 +                                     u8 name_len, u32 asize, u16 name_off,
73450 +                                     CLST svcn)
73451 +{
73452 +       int err;
73453 +       struct ATTRIB *attr;
73454 +       bool le_added = false;
73455 +       struct MFT_REF ref;
73457 +       mi_get_ref(mi, &ref);
73459 +       if (type != ATTR_LIST && !le && ni->attr_list.size) {
73460 +               err = al_add_le(ni, type, name, name_len, svcn, cpu_to_le16(-1),
73461 +                               &ref, &le);
73462 +               if (err) {
73463 +                       /* no memory or no space */
73464 +                       return NULL;
73465 +               }
73466 +               le_added = true;
73468 +               /*
73469 +                * al_add_le -> attr_set_size (list) -> ni_expand_list
73470 +                * which moves some attributes out of the primary record.
73471 +                * This means that 'name' may point into moved memory;
73472 +                * reinit 'name' from le.
73473 +                */
73474 +               name = le->name;
73475 +       }
73477 +       attr = mi_insert_attr(mi, type, name, name_len, asize, name_off);
73478 +       if (!attr) {
73479 +               if (le_added)
73480 +                       al_remove_le(ni, le);
73481 +               return NULL;
73482 +       }
73484 +       if (type == ATTR_LIST) {
73485 +               /* attr list is not in the list entry array */
73486 +               goto out;
73487 +       }
73489 +       if (!le)
73490 +               goto out;
73492 +       /* Update ATTRIB Id and record reference */
73493 +       le->id = attr->id;
73494 +       ni->attr_list.dirty = true;
73495 +       le->ref = ref;
73497 +out:
73498 +       return attr;
73499 +}
73501 +/*
73502 + * Random write access to a sparsed or compressed file may result in
73503 + * poorly packed runs.
73504 + * Here is the place to optimize them.
73505 + */
73506 +static int ni_repack(struct ntfs_inode *ni)
73507 +{
73508 +       int err = 0;
73509 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
73510 +       struct mft_inode *mi, *mi_p = NULL;
73511 +       struct ATTRIB *attr = NULL, *attr_p;
73512 +       struct ATTR_LIST_ENTRY *le = NULL, *le_p;
73513 +       CLST alloc = 0;
73514 +       u8 cluster_bits = sbi->cluster_bits;
73515 +       CLST svcn, evcn = 0, svcn_p, evcn_p, next_svcn;
73516 +       u32 roff, rs = sbi->record_size;
73517 +       struct runs_tree run;
73519 +       run_init(&run);
73521 +       while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi))) {
73522 +               if (!attr->non_res)
73523 +                       continue;
73525 +               svcn = le64_to_cpu(attr->nres.svcn);
73526 +               if (svcn != le64_to_cpu(le->vcn)) {
73527 +                       err = -EINVAL;
73528 +                       break;
73529 +               }
73531 +               if (!svcn) {
73532 +                       alloc = le64_to_cpu(attr->nres.alloc_size) >>
73533 +                               cluster_bits;
73534 +                       mi_p = NULL;
73535 +               } else if (svcn != evcn + 1) {
73536 +                       err = -EINVAL;
73537 +                       break;
73538 +               }
73540 +               evcn = le64_to_cpu(attr->nres.evcn);
73542 +               if (svcn > evcn + 1) {
73543 +                       err = -EINVAL;
73544 +                       break;
73545 +               }
73547 +               if (!mi_p) {
73548 +                       /* do not try if too little free space */
73549 +                       if (le32_to_cpu(mi->mrec->used) + 8 >= rs)
73550 +                               continue;
73552 +                       /* do not try if last attribute segment */
73553 +                       if (evcn + 1 == alloc)
73554 +                               continue;
73555 +                       run_close(&run);
73556 +               }
73558 +               roff = le16_to_cpu(attr->nres.run_off);
73559 +               err = run_unpack(&run, sbi, ni->mi.rno, svcn, evcn, svcn,
73560 +                                Add2Ptr(attr, roff),
73561 +                                le32_to_cpu(attr->size) - roff);
73562 +               if (err < 0)
73563 +                       break;
73565 +               if (!mi_p) {
73566 +                       mi_p = mi;
73567 +                       attr_p = attr;
73568 +                       svcn_p = svcn;
73569 +                       evcn_p = evcn;
73570 +                       le_p = le;
73571 +                       err = 0;
73572 +                       continue;
73573 +               }
73575 +               /*
73576 +                * run contains data from two records: mi_p and mi
73577 +                * try to pack in one
73578 +                */
73579 +               err = mi_pack_runs(mi_p, attr_p, &run, evcn + 1 - svcn_p);
73580 +               if (err)
73581 +                       break;
73583 +               next_svcn = le64_to_cpu(attr_p->nres.evcn) + 1;
73585 +               if (next_svcn >= evcn + 1) {
73586 +                       /* we can remove this attribute segment */
73587 +                       al_remove_le(ni, le);
73588 +                       mi_remove_attr(mi, attr);
73589 +                       le = le_p;
73590 +                       continue;
73591 +               }
73593 +               attr->nres.svcn = le->vcn = cpu_to_le64(next_svcn);
73594 +               mi->dirty = true;
73595 +               ni->attr_list.dirty = true;
73597 +               if (evcn + 1 == alloc) {
73598 +                       err = mi_pack_runs(mi, attr, &run,
73599 +                                          evcn + 1 - next_svcn);
73600 +                       if (err)
73601 +                               break;
73602 +                       mi_p = NULL;
73603 +               } else {
73604 +                       mi_p = mi;
73605 +                       attr_p = attr;
73606 +                       svcn_p = next_svcn;
73607 +                       evcn_p = evcn;
73608 +                       le_p = le;
73609 +                       run_truncate_head(&run, next_svcn);
73610 +               }
73611 +       }
73613 +       if (err) {
73614 +               ntfs_inode_warn(&ni->vfs_inode, "repack problem");
73615 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
73617 +               /* Pack loaded but not packed runs */
73618 +               if (mi_p)
73619 +                       mi_pack_runs(mi_p, attr_p, &run, evcn_p + 1 - svcn_p);
73620 +       }
73622 +       run_close(&run);
73623 +       return err;
73624 +}
73626 +/*
73627 + * ni_try_remove_attr_list
73628 + *
73629 + * Can we remove the attribute list?
73630 + * Check the case when the primary record contains enough space for all attributes.
73631 + */
73632 +static int ni_try_remove_attr_list(struct ntfs_inode *ni)
73633 +{
73634 +       int err = 0;
73635 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
73636 +       struct ATTRIB *attr, *attr_list, *attr_ins;
73637 +       struct ATTR_LIST_ENTRY *le;
73638 +       struct mft_inode *mi;
73639 +       u32 asize, free;
73640 +       struct MFT_REF ref;
73641 +       __le16 id;
73643 +       if (!ni->attr_list.dirty)
73644 +               return 0;
73646 +       err = ni_repack(ni);
73647 +       if (err)
73648 +               return err;
73650 +       attr_list = mi_find_attr(&ni->mi, NULL, ATTR_LIST, NULL, 0, NULL);
73651 +       if (!attr_list)
73652 +               return 0;
73654 +       asize = le32_to_cpu(attr_list->size);
73656 +       /* free space in primary record without attribute list */
73657 +       free = sbi->record_size - le32_to_cpu(ni->mi.mrec->used) + asize;
73658 +       mi_get_ref(&ni->mi, &ref);
73660 +       le = NULL;
73661 +       while ((le = al_enumerate(ni, le))) {
73662 +               if (!memcmp(&le->ref, &ref, sizeof(ref)))
73663 +                       continue;
73665 +               if (le->vcn)
73666 +                       return 0;
73668 +               mi = ni_find_mi(ni, ino_get(&le->ref));
73669 +               if (!mi)
73670 +                       return 0;
73672 +               attr = mi_find_attr(mi, NULL, le->type, le_name(le),
73673 +                                   le->name_len, &le->id);
73674 +               if (!attr)
73675 +                       return 0;
73677 +               asize = le32_to_cpu(attr->size);
73678 +               if (asize > free)
73679 +                       return 0;
73681 +               free -= asize;
73682 +       }
73684 +       /* It seems the attribute list can be removed from the primary record. */
73685 +       mi_remove_attr(&ni->mi, attr_list);
73687 +       /*
73688 +        * Repeat the cycle above and move all attributes to the primary record.
73689 +        * It should succeed!
73690 +        */
73691 +       le = NULL;
73692 +       while ((le = al_enumerate(ni, le))) {
73693 +               if (!memcmp(&le->ref, &ref, sizeof(ref)))
73694 +                       continue;
73696 +               mi = ni_find_mi(ni, ino_get(&le->ref));
73698 +               attr = mi_find_attr(mi, NULL, le->type, le_name(le),
73699 +                                   le->name_len, &le->id);
73700 +               asize = le32_to_cpu(attr->size);
73702 +               /* insert into primary record */
73703 +               attr_ins = mi_insert_attr(&ni->mi, le->type, le_name(le),
73704 +                                         le->name_len, asize,
73705 +                                         le16_to_cpu(attr->name_off));
73706 +               id = attr_ins->id;
73708 +               /* copy all except id */
73709 +               memcpy(attr_ins, attr, asize);
73710 +               attr_ins->id = id;
73712 +               /* remove from original record */
73713 +               mi_remove_attr(mi, attr);
73714 +       }
73716 +       run_deallocate(sbi, &ni->attr_list.run, true);
73717 +       run_close(&ni->attr_list.run);
73718 +       ni->attr_list.size = 0;
73719 +       ntfs_free(ni->attr_list.le);
73720 +       ni->attr_list.le = NULL;
73721 +       ni->attr_list.dirty = false;
73723 +       return 0;
73724 +}
73726 +/*
73727 + * ni_create_attr_list
73728 + *
73729 + * generates an attribute list for this primary record
73730 + */
73731 +int ni_create_attr_list(struct ntfs_inode *ni)
73732 +{
73733 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
73734 +       int err;
73735 +       u32 lsize;
73736 +       struct ATTRIB *attr;
73737 +       struct ATTRIB *arr_move[7];
73738 +       struct ATTR_LIST_ENTRY *le, *le_b[7];
73739 +       struct MFT_REC *rec;
73740 +       bool is_mft;
73741 +       CLST rno = 0;
73742 +       struct mft_inode *mi;
73743 +       u32 free_b, nb, to_free, rs;
73744 +       u16 sz;
73746 +       is_mft = ni->mi.rno == MFT_REC_MFT;
73747 +       rec = ni->mi.mrec;
73748 +       rs = sbi->record_size;
73750 +       /*
73751 +        * Skip estimating exact memory requirement
73752 +        * Looks like one record_size is always enough
73753 +        */
73754 +       le = ntfs_malloc(al_aligned(rs));
73755 +       if (!le) {
73756 +               err = -ENOMEM;
73757 +               goto out;
73758 +       }
73760 +       mi_get_ref(&ni->mi, &le->ref);
73761 +       ni->attr_list.le = le;
73763 +       attr = NULL;
73764 +       nb = 0;
73765 +       free_b = 0;
73768 +       for (; (attr = mi_enum_attr(&ni->mi, attr)); le = Add2Ptr(le, sz)) {
73769 +               sz = le_size(attr->name_len);
73770 +               le->type = attr->type;
73771 +               le->size = cpu_to_le16(sz);
73772 +               le->name_len = attr->name_len;
73773 +               le->name_off = offsetof(struct ATTR_LIST_ENTRY, name);
73774 +               le->vcn = 0;
73775 +               if (le != ni->attr_list.le)
73776 +                       le->ref = ni->attr_list.le->ref;
73777 +               le->id = attr->id;
73779 +               if (attr->name_len)
73780 +                       memcpy(le->name, attr_name(attr),
73781 +                              sizeof(short) * attr->name_len);
73782 +               else if (attr->type == ATTR_STD)
73783 +                       continue;
73784 +               else if (attr->type == ATTR_LIST)
73785 +                       continue;
73786 +               else if (is_mft && attr->type == ATTR_DATA)
73787 +                       continue;
73789 +               if (!nb || nb < ARRAY_SIZE(arr_move)) {
73790 +                       le_b[nb] = le;
73791 +                       arr_move[nb++] = attr;
73792 +                       free_b += le32_to_cpu(attr->size);
73793 +               }
73794 +       }
73796 +       lsize = PtrOffset(ni->attr_list.le, le);
73797 +       ni->attr_list.size = lsize;
73799 +       to_free = le32_to_cpu(rec->used) + lsize + SIZEOF_RESIDENT;
73800 +       if (to_free <= rs) {
73801 +               to_free = 0;
73802 +       } else {
73803 +               to_free -= rs;
73805 +               if (to_free > free_b) {
73806 +                       err = -EINVAL;
73807 +                       goto out1;
73808 +               }
73809 +       }
73811 +       /* Allocate child mft. */
73812 +       err = ntfs_look_free_mft(sbi, &rno, is_mft, ni, &mi);
73813 +       if (err)
73814 +               goto out1;
73816 +       /* Call 'mi_remove_attr' in reverse order to keep pointers 'arr_move' valid */
73817 +       while (to_free > 0) {
73818 +               struct ATTRIB *b = arr_move[--nb];
73819 +               u32 asize = le32_to_cpu(b->size);
73820 +               u16 name_off = le16_to_cpu(b->name_off);
73822 +               attr = mi_insert_attr(mi, b->type, Add2Ptr(b, name_off),
73823 +                                     b->name_len, asize, name_off);
73824 +               WARN_ON(!attr);
73826 +               mi_get_ref(mi, &le_b[nb]->ref);
73827 +               le_b[nb]->id = attr->id;
73829 +               /* copy all except id */
73830 +               memcpy(attr, b, asize);
73831 +               attr->id = le_b[nb]->id;
73833 +               WARN_ON(!mi_remove_attr(&ni->mi, b));
73835 +               if (to_free <= asize)
73836 +                       break;
73837 +               to_free -= asize;
73838 +               WARN_ON(!nb);
73839 +       }
73841 +       attr = mi_insert_attr(&ni->mi, ATTR_LIST, NULL, 0,
73842 +                             lsize + SIZEOF_RESIDENT, SIZEOF_RESIDENT);
73843 +       WARN_ON(!attr);
73845 +       attr->non_res = 0;
73846 +       attr->flags = 0;
73847 +       attr->res.data_size = cpu_to_le32(lsize);
73848 +       attr->res.data_off = SIZEOF_RESIDENT_LE;
73849 +       attr->res.flags = 0;
73850 +       attr->res.res = 0;
73852 +       memcpy(resident_data_ex(attr, lsize), ni->attr_list.le, lsize);
73854 +       ni->attr_list.dirty = false;
73856 +       mark_inode_dirty(&ni->vfs_inode);
73857 +       goto out;
73859 +out1:
73860 +       ntfs_free(ni->attr_list.le);
73861 +       ni->attr_list.le = NULL;
73862 +       ni->attr_list.size = 0;
73864 +out:
73865 +       return err;
73866 +}
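/*
 * A standalone sketch of the entry-size math used above: each list entry
 * is the fixed head up to 'name' plus the UTF-16 name, rounded up to an
 * 8-byte boundary. 'le_model' is a local stand-in for ATTR_LIST_ENTRY
 * (field layout assumed from how le_size()/le_name() are used here).
 */
#include <stddef.h>
#include <stdio.h>

struct le_model {                      /* assumed ATTR_LIST_ENTRY layout */
	unsigned int       type;       /* attribute type */
	unsigned short     size;       /* size of this entry */
	unsigned char      name_len;   /* name length, in wide chars */
	unsigned char      name_off;   /* offset of the name */
	unsigned long long vcn;        /* starting VCN */
	unsigned long long ref;        /* MFT reference of the holder */
	unsigned short     id;         /* attribute id */
	unsigned short     name[];     /* UTF-16 name follows */
};

static unsigned int quad_align(unsigned int n)
{
	return (n + 7) & ~7u;          /* round up to 8 bytes */
}

int main(void)
{
	for (unsigned int len = 0; len <= 8; len += 4)
		printf("name_len=%u -> le_size=%u\n", len,
		       quad_align(offsetof(struct le_model, name) +
				  len * (unsigned int)sizeof(short)));
	return 0;                      /* prints 32, 40, 48 */
}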
73868 +/*
73869 + * ni_ins_attr_ext
73870 + *
73871 + * This method adds an external attribute to the ntfs_inode.
73872 + */
73873 +static int ni_ins_attr_ext(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
73874 +                          enum ATTR_TYPE type, const __le16 *name, u8 name_len,
73875 +                          u32 asize, CLST svcn, u16 name_off, bool force_ext,
73876 +                          struct ATTRIB **ins_attr, struct mft_inode **ins_mi)
73877 +{
73878 +       struct ATTRIB *attr;
73879 +       struct mft_inode *mi;
73880 +       CLST rno;
73881 +       u64 vbo;
73882 +       struct rb_node *node;
73883 +       int err;
73884 +       bool is_mft, is_mft_data;
73885 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
73887 +       is_mft = ni->mi.rno == MFT_REC_MFT;
73888 +       is_mft_data = is_mft && type == ATTR_DATA && !name_len;
73890 +       if (asize > sbi->max_bytes_per_attr) {
73891 +               err = -EINVAL;
73892 +               goto out;
73893 +       }
73895 +       /*
73896 +        * Standard information and attr_list cannot be made external.
73897 +        * The $LogFile cannot have any external attributes.
73898 +        */
73899 +       if (type == ATTR_STD || type == ATTR_LIST ||
73900 +           ni->mi.rno == MFT_REC_LOG) {
73901 +               err = -EINVAL;
73902 +               goto out;
73903 +       }
73905 +       /* Create the attribute list if it does not already exist */
73906 +       if (!ni->attr_list.size) {
73907 +               err = ni_create_attr_list(ni);
73908 +               if (err)
73909 +                       goto out;
73910 +       }
73912 +       vbo = is_mft_data ? ((u64)svcn << sbi->cluster_bits) : 0;
73914 +       if (force_ext)
73915 +               goto insert_ext;
73917 +       /* Load all subrecords into memory. */
73918 +       err = ni_load_all_mi(ni);
73919 +       if (err)
73920 +               goto out;
73922 +       /* Check each loaded subrecord */
73923 +       for (node = rb_first(&ni->mi_tree); node; node = rb_next(node)) {
73924 +               mi = rb_entry(node, struct mft_inode, node);
73926 +               if (is_mft_data &&
73927 +                   (mi_enum_attr(mi, NULL) ||
73928 +                    vbo <= ((u64)mi->rno << sbi->record_bits))) {
73929 +                       /* We can't accept this record because of MFT bootstrapping */
73930 +                       continue;
73931 +               }
73932 +               if (is_mft &&
73933 +                   mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, NULL)) {
73934 +                       /*
73935 +                        * This child record already has an ATTR_DATA.
73936 +                        * So it can't accept any other records.
73937 +                        */
73938 +                       continue;
73939 +               }
73941 +               if ((type != ATTR_NAME || name_len) &&
73942 +                   mi_find_attr(mi, NULL, type, name, name_len, NULL)) {
73943 +                       /* Only indexed attributes can share the same record */
73944 +                       continue;
73945 +               }
73947 +               /* Try to insert attribute into this subrecord */
73948 +               attr = ni_ins_new_attr(ni, mi, le, type, name, name_len, asize,
73949 +                                      name_off, svcn);
73950 +               if (!attr)
73951 +                       continue;
73953 +               if (ins_attr)
73954 +                       *ins_attr = attr;
73955 +               return 0;
73956 +       }
73958 +insert_ext:
73959 +       /* We have to allocate a new child subrecord */
73960 +       err = ntfs_look_free_mft(sbi, &rno, is_mft_data, ni, &mi);
73961 +       if (err)
73962 +               goto out;
73964 +       if (is_mft_data && vbo <= ((u64)rno << sbi->record_bits)) {
73965 +               err = -EINVAL;
73966 +               goto out1;
73967 +       }
73969 +       attr = ni_ins_new_attr(ni, mi, le, type, name, name_len, asize,
73970 +                              name_off, svcn);
73971 +       if (!attr)
73972 +               goto out2;
73974 +       if (ins_attr)
73975 +               *ins_attr = attr;
73976 +       if (ins_mi)
73977 +               *ins_mi = mi;
73979 +       return 0;
73981 +out2:
73982 +       ni_remove_mi(ni, mi);
73983 +       mi_put(mi);
73984 +       err = -EINVAL;
73986 +out1:
73987 +       ntfs_mark_rec_free(sbi, rno);
73989 +out:
73990 +       return err;
73991 +}
73992 +
73993 +/*
73994 + * ni_insert_attr
73995 + *
73996 + * inserts an attribute into the file.
73997 + *
73998 + * If the primary record has room, it will just insert the attribute.
73999 + * If not, it may make the attribute external.
74000 + * For $MFT::Data it may make room for the attribute by
74001 + * making other attributes external.
74002 + *
74003 + * NOTE:
74004 + * The ATTR_LIST and ATTR_STD cannot be made external.
74005 + * This function does not fill the new attribute completely;
74006 + * it only fills the 'size'/'type'/'id'/'name_len' fields.
74007 + */
74008 +static int ni_insert_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
74009 +                         const __le16 *name, u8 name_len, u32 asize,
74010 +                         u16 name_off, CLST svcn, struct ATTRIB **ins_attr,
74011 +                         struct mft_inode **ins_mi)
74012 +{
74013 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
74014 +       int err;
74015 +       struct ATTRIB *attr, *eattr;
74016 +       struct MFT_REC *rec;
74017 +       bool is_mft;
74018 +       struct ATTR_LIST_ENTRY *le;
74019 +       u32 list_reserve, max_free, free, used, t32;
74020 +       __le16 id;
74021 +       u16 t16;
74023 +       is_mft = ni->mi.rno == MFT_REC_MFT;
74024 +       rec = ni->mi.mrec;
74026 +       list_reserve = SIZEOF_NONRESIDENT + 3 * (1 + 2 * sizeof(u32));
74027 +       used = le32_to_cpu(rec->used);
74028 +       free = sbi->record_size - used;
74030 +       if (is_mft && type != ATTR_LIST) {
74031 +               /* Reserve space for the ATTRIB List. */
74032 +               if (free < list_reserve)
74033 +                       free = 0;
74034 +               else
74035 +                       free -= list_reserve;
74036 +       }
74038 +       if (asize <= free) {
74039 +               attr = ni_ins_new_attr(ni, &ni->mi, NULL, type, name, name_len,
74040 +                                      asize, name_off, svcn);
74041 +               if (attr) {
74042 +                       if (ins_attr)
74043 +                               *ins_attr = attr;
74044 +                       if (ins_mi)
74045 +                               *ins_mi = &ni->mi;
74046 +                       err = 0;
74047 +                       goto out;
74048 +               }
74049 +       }
74051 +       if (!is_mft || type != ATTR_DATA || svcn) {
74052 +               /* This ATTRIB will be external. */
74053 +               err = ni_ins_attr_ext(ni, NULL, type, name, name_len, asize,
74054 +                                     svcn, name_off, false, ins_attr, ins_mi);
74055 +               goto out;
74056 +       }
74058 +       /*
74059 +        * Here we have: "is_mft && type == ATTR_DATA && !svcn".
74060 +        *
74061 +        * The first chunk of the $MFT::Data ATTRIB must be the base record.
74062 +        * Evict as many other attributes as possible.
74063 +        */
74064 +       max_free = free;
74066 +       /* Estimate the result of moving all possible attributes away. */
74067 +       attr = NULL;
74069 +       while ((attr = mi_enum_attr(&ni->mi, attr))) {
74070 +               if (attr->type == ATTR_STD)
74071 +                       continue;
74072 +               if (attr->type == ATTR_LIST)
74073 +                       continue;
74074 +               max_free += le32_to_cpu(attr->size);
74075 +       }
74077 +       if (max_free < asize + list_reserve) {
74078 +               /* Impossible to insert this attribute into primary record */
74079 +               err = -EINVAL;
74080 +               goto out;
74081 +       }
74083 +       /* Start real attribute moving */
74084 +       attr = NULL;
74086 +       do {
74087 +               attr = mi_enum_attr(&ni->mi, attr);
74088 +               if (!attr) {
74089 +                       /* We should never get here because we have already checked this case */
74090 +                       err = -EINVAL;
74091 +                       goto out;
74092 +               }
74095 +               /* Skip attributes that MUST stay in the primary record */
74095 +               if (attr->type == ATTR_STD || attr->type == ATTR_LIST)
74096 +                       continue;
74098 +               le = NULL;
74099 +               if (ni->attr_list.size) {
74100 +                       le = al_find_le(ni, NULL, attr);
74101 +                       if (!le) {
74102 +                               /* Really this is a serious bug */
74103 +                               err = -EINVAL;
74104 +                               goto out;
74105 +                       }
74106 +               }
74108 +               t32 = le32_to_cpu(attr->size);
74109 +               t16 = le16_to_cpu(attr->name_off);
74110 +               err = ni_ins_attr_ext(ni, le, attr->type, Add2Ptr(attr, t16),
74111 +                                     attr->name_len, t32, attr_svcn(attr), t16,
74112 +                                     false, &eattr, NULL);
74113 +               if (err)
74114 +                       return err;
74116 +               id = eattr->id;
74117 +               memcpy(eattr, attr, t32);
74118 +               eattr->id = id;
74120 +               /* remove attrib from primary record */
74121 +               mi_remove_attr(&ni->mi, attr);
74123 +               /* attr now points to next attribute */
74124 +               if (attr->type == ATTR_END)
74125 +                       goto out;
74126 +       } while (asize + list_reserve >
74127 +                sbi->record_size - le32_to_cpu(rec->used));
74130 +       attr = ni_ins_new_attr(ni, &ni->mi, NULL, type, name, name_len, asize,
74131 +                              name_off, svcn);
74132 +       if (!attr) {
74133 +               err = -EINVAL;
74134 +               goto out;
74135 +       }
74137 +       if (ins_attr)
74138 +               *ins_attr = attr;
74139 +       if (ins_mi)
74140 +               *ins_mi = &ni->mi;
74142 +out:
74143 +       return err;
74144 +}
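/*
 * A standalone sketch of the reserve math in ni_insert_attr above,
 * assuming SIZEOF_NONRESIDENT == 0x40 (the usual non-resident header
 * size) and a 1 KiB MFT record; only the numbers are illustrated, not
 * the driver API.
 */
#include <stdio.h>

int main(void)
{
	unsigned int sizeof_nonresident = 0x40;  /* assumption */
	unsigned int list_reserve = sizeof_nonresident +
				    3 * (1 + 2 * (unsigned int)sizeof(unsigned int));
	unsigned int record_size = 1024, used = 900;

	/* Room left for a new attribute once the list space is reserved. */
	printf("list_reserve=%u free=%u\n", list_reserve,
	       record_size - used - list_reserve);  /* 91 and 33 */
	return 0;
}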
74146 +/*
74147 + * ni_expand_mft_list
74148 + *
74149 + * This method splits ATTR_DATA of $MFT
74150 + */
74151 +static int ni_expand_mft_list(struct ntfs_inode *ni)
74152 +{
74153 +       int err = 0;
74154 +       struct runs_tree *run = &ni->file.run;
74155 +       u32 asize, run_size, done = 0;
74156 +       struct ATTRIB *attr;
74157 +       struct rb_node *node;
74158 +       CLST mft_min, mft_new, svcn, evcn, plen;
74159 +       struct mft_inode *mi, *mi_min, *mi_new;
74160 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
74162 +       /* Find the nearest MFT record */
74163 +       mft_min = 0;
74164 +       mft_new = 0;
74165 +       mi_min = NULL;
74167 +       for (node = rb_first(&ni->mi_tree); node; node = rb_next(node)) {
74168 +               mi = rb_entry(node, struct mft_inode, node);
74170 +               attr = mi_enum_attr(mi, NULL);
74172 +               if (!attr) {
74173 +                       mft_min = mi->rno;
74174 +                       mi_min = mi;
74175 +                       break;
74176 +               }
74177 +       }
74179 +       if (ntfs_look_free_mft(sbi, &mft_new, true, ni, &mi_new)) {
74180 +               mft_new = 0;
74181 +               // really this is not critical
74182 +       } else if (mft_min > mft_new) {
74183 +               mft_min = mft_new;
74184 +               mi_min = mi_new;
74185 +       } else {
74186 +               ntfs_mark_rec_free(sbi, mft_new);
74187 +               mft_new = 0;
74188 +               ni_remove_mi(ni, mi_new);
74189 +       }
74191 +       attr = mi_find_attr(&ni->mi, NULL, ATTR_DATA, NULL, 0, NULL);
74192 +       if (!attr) {
74193 +               err = -EINVAL;
74194 +               goto out;
74195 +       }
74197 +       asize = le32_to_cpu(attr->size);
74199 +       evcn = le64_to_cpu(attr->nres.evcn);
74200 +       svcn = bytes_to_cluster(sbi, (u64)(mft_min + 1) << sbi->record_bits);
74201 +       if (evcn + 1 >= svcn) {
74202 +               err = -EINVAL;
74203 +               goto out;
74204 +       }
74206 +       /*
74207 +        * Split the primary attribute [0, evcn] in two parts: [0, svcn) + [svcn, evcn].
74208 +        *
74209 +        * Update the first part of ATTR_DATA in the primary MFT record.
74210 +        */
74211 +       err = run_pack(run, 0, svcn, Add2Ptr(attr, SIZEOF_NONRESIDENT),
74212 +                      asize - SIZEOF_NONRESIDENT, &plen);
74213 +       if (err < 0)
74214 +               goto out;
74216 +       run_size = QuadAlign(err);
74217 +       err = 0;
74219 +       if (plen < svcn) {
74220 +               err = -EINVAL;
74221 +               goto out;
74222 +       }
74224 +       attr->nres.evcn = cpu_to_le64(svcn - 1);
74225 +       attr->size = cpu_to_le32(run_size + SIZEOF_NONRESIDENT);
74226 +       /* 'done' - how many bytes of the primary MFT become free */
74227 +       done = asize - run_size - SIZEOF_NONRESIDENT;
74228 +       le32_sub_cpu(&ni->mi.mrec->used, done);
74230 +       /* Estimate the size of second part: run_buf=NULL */
74231 +       err = run_pack(run, svcn, evcn + 1 - svcn, NULL, sbi->record_size,
74232 +                      &plen);
74233 +       if (err < 0)
74234 +               goto out;
74236 +       run_size = QuadAlign(err);
74237 +       err = 0;
74239 +       if (plen < evcn + 1 - svcn) {
74240 +               err = -EINVAL;
74241 +               goto out;
74242 +       }
74244 +       /*
74245 +        * This call may implicitly expand the attribute list.
74246 +        * Insert the second part of ATTR_DATA in 'mi_min'.
74247 +        */
74248 +       attr = ni_ins_new_attr(ni, mi_min, NULL, ATTR_DATA, NULL, 0,
74249 +                              SIZEOF_NONRESIDENT + run_size,
74250 +                              SIZEOF_NONRESIDENT, svcn);
74251 +       if (!attr) {
74252 +               err = -EINVAL;
74253 +               goto out;
74254 +       }
74256 +       attr->non_res = 1;
74257 +       attr->name_off = SIZEOF_NONRESIDENT_LE;
74258 +       attr->flags = 0;
74260 +       run_pack(run, svcn, evcn + 1 - svcn, Add2Ptr(attr, SIZEOF_NONRESIDENT),
74261 +                run_size, &plen);
74263 +       attr->nres.svcn = cpu_to_le64(svcn);
74264 +       attr->nres.evcn = cpu_to_le64(evcn);
74265 +       attr->nres.run_off = cpu_to_le16(SIZEOF_NONRESIDENT);
74267 +out:
74268 +       if (mft_new) {
74269 +               ntfs_mark_rec_free(sbi, mft_new);
74270 +               ni_remove_mi(ni, mi_new);
74271 +       }
74273 +       return !err && !done ? -EOPNOTSUPP : err;
74274 +}
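/*
 * A worked example of the split point computed above, assuming 1 KiB MFT
 * records (record_bits == 10) and 4 KiB clusters (cluster_bits == 12):
 * the tail of $MFT::Data starts at the first cluster fully past record
 * 'mft_min'.
 */
#include <stdio.h>

int main(void)
{
	unsigned int record_bits = 10, cluster_bits = 12;  /* assumptions */
	unsigned long long mft_min = 63;                   /* sample record */
	unsigned long long bytes = (mft_min + 1) << record_bits;
	unsigned long long cluster = 1ull << cluster_bits;
	unsigned long long svcn = (bytes + cluster - 1) / cluster;

	printf("svcn=%llu\n", svcn);  /* 65536 / 4096 = 16 clusters */
	return 0;
}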
74276 +/*
74277 + * ni_expand_list
74278 + *
74279 + * This method moves all possible attributes out of the primary record.
74280 + */
74281 +int ni_expand_list(struct ntfs_inode *ni)
74282 +{
74283 +       int err = 0;
74284 +       u32 asize, done = 0;
74285 +       struct ATTRIB *attr, *ins_attr;
74286 +       struct ATTR_LIST_ENTRY *le;
74287 +       bool is_mft = ni->mi.rno == MFT_REC_MFT;
74288 +       struct MFT_REF ref;
74290 +       mi_get_ref(&ni->mi, &ref);
74291 +       le = NULL;
74293 +       while ((le = al_enumerate(ni, le))) {
74294 +               if (le->type == ATTR_STD)
74295 +                       continue;
74297 +               if (memcmp(&ref, &le->ref, sizeof(struct MFT_REF)))
74298 +                       continue;
74300 +               if (is_mft && le->type == ATTR_DATA)
74301 +                       continue;
74303 +               /* Find attribute in primary record */
74304 +               attr = rec_find_attr_le(&ni->mi, le);
74305 +               if (!attr) {
74306 +                       err = -EINVAL;
74307 +                       goto out;
74308 +               }
74310 +               asize = le32_to_cpu(attr->size);
74312 +               /* Always insert into a new record to avoid collisions (deep recursion) */
74313 +               err = ni_ins_attr_ext(ni, le, attr->type, attr_name(attr),
74314 +                                     attr->name_len, asize, attr_svcn(attr),
74315 +                                     le16_to_cpu(attr->name_off), true,
74316 +                                     &ins_attr, NULL);
74318 +               if (err)
74319 +                       goto out;
74321 +               memcpy(ins_attr, attr, asize);
74322 +               ins_attr->id = le->id;
74323 +               mi_remove_attr(&ni->mi, attr);
74325 +               done += asize;
74326 +               goto out;
74327 +       }
74329 +       if (!is_mft) {
74330 +               err = -EFBIG; /* attr list is too big(?) */
74331 +               goto out;
74332 +       }
74334 +       /* split mft data as much as possible */
74335 +       err = ni_expand_mft_list(ni);
74336 +       if (err)
74337 +               goto out;
74339 +out:
74340 +       return !err && !done ? -EOPNOTSUPP : err;
74341 +}
74342 +
74343 +/*
74344 + * ni_insert_nonresident
74345 + *
74346 + * inserts new nonresident attribute
74347 + */
74348 +int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
74349 +                         const __le16 *name, u8 name_len,
74350 +                         const struct runs_tree *run, CLST svcn, CLST len,
74351 +                         __le16 flags, struct ATTRIB **new_attr,
74352 +                         struct mft_inode **mi)
74353 +{
74354 +       int err;
74355 +       CLST plen;
74356 +       struct ATTRIB *attr;
74357 +       bool is_ext =
74358 +               (flags & (ATTR_FLAG_SPARSED | ATTR_FLAG_COMPRESSED)) && !svcn;
74359 +       u32 name_size = QuadAlign(name_len * sizeof(short));
74360 +       u32 name_off = is_ext ? SIZEOF_NONRESIDENT_EX : SIZEOF_NONRESIDENT;
74361 +       u32 run_off = name_off + name_size;
74362 +       u32 run_size, asize;
74363 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
74365 +       err = run_pack(run, svcn, len, NULL, sbi->max_bytes_per_attr - run_off,
74366 +                      &plen);
74367 +       if (err < 0)
74368 +               goto out;
74370 +       run_size = QuadAlign(err);
74372 +       if (plen < len) {
74373 +               err = -EINVAL;
74374 +               goto out;
74375 +       }
74377 +       asize = run_off + run_size;
74379 +       if (asize > sbi->max_bytes_per_attr) {
74380 +               err = -EINVAL;
74381 +               goto out;
74382 +       }
74384 +       err = ni_insert_attr(ni, type, name, name_len, asize, name_off, svcn,
74385 +                            &attr, mi);
74387 +       if (err)
74388 +               goto out;
74390 +       attr->non_res = 1;
74391 +       attr->name_off = cpu_to_le16(name_off);
74392 +       attr->flags = flags;
74394 +       run_pack(run, svcn, len, Add2Ptr(attr, run_off), run_size, &plen);
74396 +       attr->nres.svcn = cpu_to_le64(svcn);
74397 +       attr->nres.evcn = cpu_to_le64((u64)svcn + len - 1);
74399 +       err = 0;
74400 +       if (new_attr)
74401 +               *new_attr = attr;
74403 +       *(__le64 *)&attr->nres.run_off = cpu_to_le64(run_off);
74405 +       attr->nres.alloc_size =
74406 +               svcn ? 0 : cpu_to_le64((u64)len << ni->mi.sbi->cluster_bits);
74407 +       attr->nres.data_size = attr->nres.alloc_size;
74408 +       attr->nres.valid_size = attr->nres.alloc_size;
74410 +       if (is_ext) {
74411 +               if (flags & ATTR_FLAG_COMPRESSED)
74412 +                       attr->nres.c_unit = COMPRESSION_UNIT;
74413 +               attr->nres.total_size = attr->nres.alloc_size;
74414 +       }
74416 +out:
74417 +       return err;
74418 +}
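/*
 * A standalone sketch of the header layout computed above, assuming
 * SIZEOF_NONRESIDENT == 0x40 and SIZEOF_NONRESIDENT_EX == 0x48 (the
 * usual NTFS non-resident header sizes); the packed run list starts
 * right after the QuadAlign'ed name.
 */
#include <stdio.h>

static unsigned int quad_align(unsigned int n)
{
	return (n + 7) & ~7u;
}

int main(void)
{
	unsigned int name_len = 17;      /* e.g. "WofCompressedData" */
	unsigned int name_size = quad_align(name_len * (unsigned int)sizeof(short));
	unsigned int name_off = 0x40;    /* plain (not sparse/compressed) */
	unsigned int run_off = name_off + name_size;

	printf("name_size=%u run_off=0x%x\n", name_size, run_off); /* 40, 0x68 */
	return 0;
}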
74420 +/*
74421 + * ni_insert_resident
74422 + *
74423 + * inserts new resident attribute
74424 + */
74425 +int ni_insert_resident(struct ntfs_inode *ni, u32 data_size,
74426 +                      enum ATTR_TYPE type, const __le16 *name, u8 name_len,
74427 +                      struct ATTRIB **new_attr, struct mft_inode **mi)
74428 +{
74429 +       int err;
74430 +       u32 name_size = QuadAlign(name_len * sizeof(short));
74431 +       u32 asize = SIZEOF_RESIDENT + name_size + QuadAlign(data_size);
74432 +       struct ATTRIB *attr;
74434 +       err = ni_insert_attr(ni, type, name, name_len, asize, SIZEOF_RESIDENT,
74435 +                            0, &attr, mi);
74436 +       if (err)
74437 +               return err;
74439 +       attr->non_res = 0;
74440 +       attr->flags = 0;
74442 +       attr->res.data_size = cpu_to_le32(data_size);
74443 +       attr->res.data_off = cpu_to_le16(SIZEOF_RESIDENT + name_size);
74444 +       if (type == ATTR_NAME)
74445 +               attr->res.flags = RESIDENT_FLAG_INDEXED;
74446 +       attr->res.res = 0;
74448 +       if (new_attr)
74449 +               *new_attr = attr;
74451 +       return 0;
74452 +}
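/*
 * The matching size math for the resident case, assuming
 * SIZEOF_RESIDENT == 0x18 (the usual resident header size).
 */
#include <stdio.h>

int main(void)
{
	unsigned int data_size = 48;  /* sample payload */
	unsigned int asize = 0x18 + 0 /* unnamed */ + ((data_size + 7) & ~7u);

	printf("asize=%u\n", asize);  /* 24 + 48 = 72 */
	return 0;
}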
74454 +/*
74455 + * ni_remove_attr_le
74456 + *
74457 + * removes attribute from record
74458 + */
74459 +int ni_remove_attr_le(struct ntfs_inode *ni, struct ATTRIB *attr,
74460 +                     struct ATTR_LIST_ENTRY *le)
74461 +{
74462 +       int err;
74463 +       struct mft_inode *mi;
74465 +       err = ni_load_mi(ni, le, &mi);
74466 +       if (err)
74467 +               return err;
74469 +       mi_remove_attr(mi, attr);
74471 +       if (le)
74472 +               al_remove_le(ni, le);
74474 +       return 0;
74475 +}
74476 +
74477 +/*
74478 + * ni_delete_all
74479 + *
74480 + * removes all attributes and frees allocated space
74481 + * ntfs_evict_inode->ntfs_clear_inode->ni_delete_all (if no links)
74482 + */
74483 +int ni_delete_all(struct ntfs_inode *ni)
74484 +{
74485 +       int err;
74486 +       struct ATTR_LIST_ENTRY *le = NULL;
74487 +       struct ATTRIB *attr = NULL;
74488 +       struct rb_node *node;
74489 +       u16 roff;
74490 +       u32 asize;
74491 +       CLST svcn, evcn;
74492 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
74493 +       bool nt3 = is_ntfs3(sbi);
74494 +       struct MFT_REF ref;
74496 +       while ((attr = ni_enum_attr_ex(ni, attr, &le, NULL))) {
74497 +               if (!nt3 || attr->name_len) {
74498 +                       ;
74499 +               } else if (attr->type == ATTR_REPARSE) {
74500 +                       mi_get_ref(&ni->mi, &ref);
74501 +                       ntfs_remove_reparse(sbi, 0, &ref);
74502 +               } else if (attr->type == ATTR_ID && !attr->non_res &&
74503 +                          le32_to_cpu(attr->res.data_size) >=
74504 +                                  sizeof(struct GUID)) {
74505 +                       ntfs_objid_remove(sbi, resident_data(attr));
74506 +               }
74508 +               if (!attr->non_res)
74509 +                       continue;
74511 +               svcn = le64_to_cpu(attr->nres.svcn);
74512 +               evcn = le64_to_cpu(attr->nres.evcn);
74514 +               if (evcn + 1 <= svcn)
74515 +                       continue;
74517 +               asize = le32_to_cpu(attr->size);
74518 +               roff = le16_to_cpu(attr->nres.run_off);
74520 +               /* run == 1 means unpack and deallocate */
74521 +               run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn,
74522 +                             Add2Ptr(attr, roff), asize - roff);
74523 +       }
74525 +       if (ni->attr_list.size) {
74526 +               run_deallocate(ni->mi.sbi, &ni->attr_list.run, true);
74527 +               al_destroy(ni);
74528 +       }
74530 +       /* Free all subrecords */
74531 +       for (node = rb_first(&ni->mi_tree); node;) {
74532 +               struct rb_node *next = rb_next(node);
74533 +               struct mft_inode *mi = rb_entry(node, struct mft_inode, node);
74535 +               clear_rec_inuse(mi->mrec);
74536 +               mi->dirty = true;
74537 +               mi_write(mi, 0);
74539 +               ntfs_mark_rec_free(sbi, mi->rno);
74540 +               ni_remove_mi(ni, mi);
74541 +               mi_put(mi);
74542 +               node = next;
74543 +       }
74545 +       // Free base record
74546 +       clear_rec_inuse(ni->mi.mrec);
74547 +       ni->mi.dirty = true;
74548 +       err = mi_write(&ni->mi, 0);
74550 +       ntfs_mark_rec_free(sbi, ni->mi.rno);
74552 +       return err;
74553 +}
74554 +
74555 +/*
74556 + * ni_fname_name
74557 + *
74558 + * returns file name attribute by its value
74559 + */
74560 +struct ATTR_FILE_NAME *ni_fname_name(struct ntfs_inode *ni,
74561 +                                    const struct cpu_str *uni,
74562 +                                    const struct MFT_REF *home_dir,
74563 +                                    struct ATTR_LIST_ENTRY **le)
74564 +{
74565 +       struct ATTRIB *attr = NULL;
74566 +       struct ATTR_FILE_NAME *fname;
74568 +       *le = NULL;
74570 +       /* Enumerate all names */
74571 +next:
74572 +       attr = ni_find_attr(ni, attr, le, ATTR_NAME, NULL, 0, NULL, NULL);
74573 +       if (!attr)
74574 +               return NULL;
74576 +       fname = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
74577 +       if (!fname)
74578 +               goto next;
74580 +       if (home_dir && memcmp(home_dir, &fname->home, sizeof(*home_dir)))
74581 +               goto next;
74583 +       if (!uni)
74584 +               goto next;
74586 +       if (uni->len != fname->name_len)
74587 +               goto next;
74589 +       if (ntfs_cmp_names_cpu(uni, (struct le_str *)&fname->name_len, NULL,
74590 +                              false))
74591 +               goto next;
74593 +       return fname;
74594 +}
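/*
 * A hypothetical caller sketch (not part of this patch): home_dir == NULL
 * matches the name in any parent directory, and 'uni' is the exact name
 * to look for.
 */
static bool ni_has_name_sketch(struct ntfs_inode *ni, const struct cpu_str *uni)
{
	struct ATTR_LIST_ENTRY *le;

	return ni_fname_name(ni, uni, NULL, &le) != NULL;
}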
74596 +/*
74597 + * ni_fname_type
74598 + *
74599 + * returns file name attribute with given type
74600 + */
74601 +struct ATTR_FILE_NAME *ni_fname_type(struct ntfs_inode *ni, u8 name_type,
74602 +                                    struct ATTR_LIST_ENTRY **le)
74603 +{
74604 +       struct ATTRIB *attr = NULL;
74605 +       struct ATTR_FILE_NAME *fname;
74607 +       *le = NULL;
74609 +       /* Enumerate all names */
74610 +       for (;;) {
74611 +               attr = ni_find_attr(ni, attr, le, ATTR_NAME, NULL, 0, NULL,
74612 +                                   NULL);
74613 +               if (!attr)
74614 +                       return NULL;
74616 +               fname = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
74617 +               if (fname && name_type == fname->type)
74618 +                       return fname;
74619 +       }
74620 +}
74621 +
74622 +/*
74623 + * Process compressed/sparsed in special way
74624 + * NOTE: you need to set ni->std_fa = new_fa
74625 + * after this function to keep internal structures consistent.
74626 + */
74627 +int ni_new_attr_flags(struct ntfs_inode *ni, enum FILE_ATTRIBUTE new_fa)
74628 +{
74629 +       struct ATTRIB *attr;
74630 +       struct mft_inode *mi;
74631 +       __le16 new_aflags;
74632 +       u32 new_asize;
74634 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
74635 +       if (!attr)
74636 +               return -EINVAL;
74638 +       new_aflags = attr->flags;
74640 +       if (new_fa & FILE_ATTRIBUTE_SPARSE_FILE)
74641 +               new_aflags |= ATTR_FLAG_SPARSED;
74642 +       else
74643 +               new_aflags &= ~ATTR_FLAG_SPARSED;
74645 +       if (new_fa & FILE_ATTRIBUTE_COMPRESSED)
74646 +               new_aflags |= ATTR_FLAG_COMPRESSED;
74647 +       else
74648 +               new_aflags &= ~ATTR_FLAG_COMPRESSED;
74650 +       if (new_aflags == attr->flags)
74651 +               return 0;
74653 +       if ((new_aflags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED)) ==
74654 +           (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED)) {
74655 +               ntfs_inode_warn(&ni->vfs_inode,
74656 +                               "file can't be sparsed and compressed");
74657 +               return -EOPNOTSUPP;
74658 +       }
74660 +       if (!attr->non_res)
74661 +               goto out;
74663 +       if (attr->nres.data_size) {
74664 +               ntfs_inode_warn(
74665 +                       &ni->vfs_inode,
74666 +                       "one can change sparsed/compressed only for empty files");
74667 +               return -EOPNOTSUPP;
74668 +       }
74670 +       /* Resize the empty non-resident attribute in place only */
74671 +       new_asize = (new_aflags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED))
74672 +                           ? (SIZEOF_NONRESIDENT_EX + 8)
74673 +                           : (SIZEOF_NONRESIDENT + 8);
74675 +       if (!mi_resize_attr(mi, attr, new_asize - le32_to_cpu(attr->size)))
74676 +               return -EOPNOTSUPP;
74678 +       if (new_aflags & ATTR_FLAG_SPARSED) {
74679 +               attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
74680 +               /* Windows uses 16 clusters per frame but also supports one cluster per frame */
74681 +               attr->nres.c_unit = 0;
74682 +               ni->vfs_inode.i_mapping->a_ops = &ntfs_aops;
74683 +       } else if (new_aflags & ATTR_FLAG_COMPRESSED) {
74684 +               attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
74685 +               /* the only allowed: 16 clusters per frame */
74686 +               attr->nres.c_unit = NTFS_LZNT_CUNIT;
74687 +               ni->vfs_inode.i_mapping->a_ops = &ntfs_aops_cmpr;
74688 +       } else {
74689 +               attr->name_off = SIZEOF_NONRESIDENT_LE;
74690 +               /* normal files */
74691 +               attr->nres.c_unit = 0;
74692 +               ni->vfs_inode.i_mapping->a_ops = &ntfs_aops;
74693 +       }
74694 +       attr->nres.run_off = attr->name_off;
74695 +out:
74696 +       attr->flags = new_aflags;
74697 +       mi->dirty = true;
74699 +       return 0;
74700 +}
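/*
 * A hypothetical wrapper (not part of this patch) showing the caller
 * contract from the NOTE above: ni->std_fa must be updated by the caller
 * once the on-disk flags have been switched.
 */
static int ni_apply_attr_flags_sketch(struct ntfs_inode *ni,
				      enum FILE_ATTRIBUTE new_fa)
{
	int err = ni_new_attr_flags(ni, new_fa);

	if (!err)
		ni->std_fa = new_fa;  /* keep the cached flags consistent */
	return err;
}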
74702 +/*
74703 + * ni_parse_reparse
74704 + *
74705 + * buffer is at least 24 bytes
74706 + */
74707 +enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr,
74708 +                                  void *buffer)
74709 +{
74710 +       const struct REPARSE_DATA_BUFFER *rp = NULL;
74711 +       u8 bits;
74712 +       u16 len;
74713 +       typeof(rp->CompressReparseBuffer) *cmpr;
74715 +       static_assert(sizeof(struct REPARSE_DATA_BUFFER) <= 24);
74717 +       /* Try to estimate reparse point */
74718 +       if (!attr->non_res) {
74719 +               rp = resident_data_ex(attr, sizeof(struct REPARSE_DATA_BUFFER));
74720 +       } else if (le64_to_cpu(attr->nres.data_size) >=
74721 +                  sizeof(struct REPARSE_DATA_BUFFER)) {
74722 +               struct runs_tree run;
74724 +               run_init(&run);
74726 +               if (!attr_load_runs_vcn(ni, ATTR_REPARSE, NULL, 0, &run, 0) &&
74727 +                   !ntfs_read_run_nb(ni->mi.sbi, &run, 0, buffer,
74728 +                                     sizeof(struct REPARSE_DATA_BUFFER),
74729 +                                     NULL)) {
74730 +                       rp = buffer;
74731 +               }
74733 +               run_close(&run);
74734 +       }
74736 +       if (!rp)
74737 +               return REPARSE_NONE;
74739 +       len = le16_to_cpu(rp->ReparseDataLength);
74740 +       switch (rp->ReparseTag) {
74741 +       case (IO_REPARSE_TAG_MICROSOFT | IO_REPARSE_TAG_SYMBOLIC_LINK):
74742 +               break; /* Symbolic link */
74743 +       case IO_REPARSE_TAG_MOUNT_POINT:
74744 +               break; /* Mount points and junctions */
74745 +       case IO_REPARSE_TAG_SYMLINK:
74746 +               break;
74747 +       case IO_REPARSE_TAG_COMPRESS:
74748 +               /*
74749 +                * WOF - Windows Overlay Filter - used to compress files with lzx/xpress
74750 +                * Unlike native NTFS file compression, the Windows Overlay Filter supports
74751 +                * only read operations. This means that it doesn't need to sector-align each
74752 +                * compressed chunk, so the compressed data can be packed more tightly together.
74753 +                * If you open the file for writing, the Windows Overlay Filter just decompresses
74754 +                * the entire file, turning it back into a plain file.
74755 +                *
74756 +                * The ntfs3 driver decompresses the entire file only on write or size-change requests.
74757 +                */
74759 +               cmpr = &rp->CompressReparseBuffer;
74760 +               if (len < sizeof(*cmpr) ||
74761 +                   cmpr->WofVersion != WOF_CURRENT_VERSION ||
74762 +                   cmpr->WofProvider != WOF_PROVIDER_SYSTEM ||
74763 +                   cmpr->ProviderVer != WOF_PROVIDER_CURRENT_VERSION) {
74764 +                       return REPARSE_NONE;
74765 +               }
74767 +               switch (cmpr->CompressionFormat) {
74768 +               case WOF_COMPRESSION_XPRESS4K:
74769 +                       bits = 0xc; // 4k
74770 +                       break;
74771 +               case WOF_COMPRESSION_XPRESS8K:
74772 +                       bits = 0xd; // 8k
74773 +                       break;
74774 +               case WOF_COMPRESSION_XPRESS16K:
74775 +                       bits = 0xe; // 16k
74776 +                       break;
74777 +               case WOF_COMPRESSION_LZX32K:
74778 +                       bits = 0xf; // 32k
74779 +                       break;
74780 +               default:
74781 +                       bits = 0x10; // 64k
74782 +                       break;
74783 +               }
74784 +               ni_set_ext_compress_bits(ni, bits);
74785 +               return REPARSE_COMPRESSED;
74787 +       case IO_REPARSE_TAG_DEDUP:
74788 +               ni->ni_flags |= NI_FLAG_DEDUPLICATED;
74789 +               return REPARSE_DEDUPLICATED;
74791 +       default:
74792 +               if (rp->ReparseTag & IO_REPARSE_TAG_NAME_SURROGATE)
74793 +                       break;
74795 +               return REPARSE_NONE;
74796 +       }
74798 +       /* Looks like normal symlink */
74799 +       return REPARSE_LINK;
74800 +}
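/*
 * A hypothetical caller sketch (not part of this patch): per the comment
 * above, the scratch buffer must hold at least 24 bytes, which the
 * static_assert() on struct REPARSE_DATA_BUFFER guarantees is enough.
 */
static enum REPARSE_SIGN ni_probe_reparse_sketch(struct ntfs_inode *ni,
						 struct ATTRIB *attr)
{
	char buf[24];  /* minimum size documented for ni_parse_reparse() */

	return ni_parse_reparse(ni, attr, buf);
}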
74802 +/*
74803 + * helper for file_fiemap
74804 + * assumes ni_lock is held
74805 + * TODO: less aggressive locks
74806 + */
74807 +int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
74808 +             __u64 vbo, __u64 len)
74809 +{
74810 +       int err = 0;
74811 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
74812 +       u8 cluster_bits = sbi->cluster_bits;
74813 +       struct runs_tree *run;
74814 +       struct rw_semaphore *run_lock;
74815 +       struct ATTRIB *attr;
74816 +       CLST vcn = vbo >> cluster_bits;
74817 +       CLST lcn, clen;
74818 +       u64 valid = ni->i_valid;
74819 +       u64 lbo, bytes;
74820 +       u64 end, alloc_size;
74821 +       size_t idx = -1;
74822 +       u32 flags;
74823 +       bool ok;
74825 +       if (S_ISDIR(ni->vfs_inode.i_mode)) {
74826 +               run = &ni->dir.alloc_run;
74827 +               attr = ni_find_attr(ni, NULL, NULL, ATTR_ALLOC, I30_NAME,
74828 +                                   ARRAY_SIZE(I30_NAME), NULL, NULL);
74829 +               run_lock = &ni->dir.run_lock;
74830 +       } else {
74831 +               run = &ni->file.run;
74832 +               attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
74833 +                                   NULL);
74834 +               if (!attr) {
74835 +                       err = -EINVAL;
74836 +                       goto out;
74837 +               }
74838 +               if (is_attr_compressed(attr)) {
74839 +                       /* Unfortunately, cp -r handles compressed clusters incorrectly */
74840 +                       err = -EOPNOTSUPP;
74841 +                       ntfs_inode_warn(
74842 +                               &ni->vfs_inode,
74843 +                               "fiemap is not supported for compressed file (cp -r)");
74844 +                       goto out;
74845 +               }
74846 +               run_lock = &ni->file.run_lock;
74847 +       }
74849 +       if (!attr || !attr->non_res) {
74850 +               err = fiemap_fill_next_extent(
74851 +                       fieinfo, 0, 0,
74852 +                       attr ? le32_to_cpu(attr->res.data_size) : 0,
74853 +                       FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_LAST |
74854 +                               FIEMAP_EXTENT_MERGED);
74855 +               goto out;
74856 +       }
74858 +       end = vbo + len;
74859 +       alloc_size = le64_to_cpu(attr->nres.alloc_size);
74860 +       if (end > alloc_size)
74861 +               end = alloc_size;
74863 +       down_read(run_lock);
74865 +       while (vbo < end) {
74866 +               if (idx == -1) {
74867 +                       ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx);
74868 +               } else {
74869 +                       CLST vcn_next = vcn;
74871 +                       ok = run_get_entry(run, ++idx, &vcn, &lcn, &clen) &&
74872 +                            vcn == vcn_next;
74873 +                       if (!ok)
74874 +                               vcn = vcn_next;
74875 +               }
74877 +               if (!ok) {
74878 +                       up_read(run_lock);
74879 +                       down_write(run_lock);
74881 +                       err = attr_load_runs_vcn(ni, attr->type,
74882 +                                                attr_name(attr),
74883 +                                                attr->name_len, run, vcn);
74885 +                       up_write(run_lock);
74886 +                       down_read(run_lock);
74888 +                       if (err)
74889 +                               break;
74891 +                       ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx);
74893 +                       if (!ok) {
74894 +                               err = -EINVAL;
74895 +                               break;
74896 +                       }
74897 +               }
74899 +               if (!clen) {
74900 +                       err = -EINVAL; // ?
74901 +                       break;
74902 +               }
74904 +               if (lcn == SPARSE_LCN) {
74905 +                       vcn += clen;
74906 +                       vbo = (u64)vcn << cluster_bits;
74907 +                       continue;
74908 +               }
74910 +               flags = FIEMAP_EXTENT_MERGED;
74911 +               if (S_ISDIR(ni->vfs_inode.i_mode)) {
74912 +                       ;
74913 +               } else if (is_attr_compressed(attr)) {
74914 +                       CLST clst_data;
74916 +                       err = attr_is_frame_compressed(
74917 +                               ni, attr, vcn >> attr->nres.c_unit, &clst_data);
74918 +                       if (err)
74919 +                               break;
74920 +                       if (clst_data < NTFS_LZNT_CLUSTERS)
74921 +                               flags |= FIEMAP_EXTENT_ENCODED;
74922 +               } else if (is_attr_encrypted(attr)) {
74923 +                       flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
74924 +               }
74926 +               vbo = (u64)vcn << cluster_bits;
74927 +               bytes = (u64)clen << cluster_bits;
74928 +               lbo = (u64)lcn << cluster_bits;
74930 +               vcn += clen;
74932 +               if (vbo + bytes >= end) {
74933 +                       bytes = end - vbo;
74934 +                       flags |= FIEMAP_EXTENT_LAST;
74935 +               }
74937 +               if (vbo + bytes <= valid) {
74938 +                       ;
74939 +               } else if (vbo >= valid) {
74940 +                       flags |= FIEMAP_EXTENT_UNWRITTEN;
74941 +               } else {
74942 +                       /* vbo < valid && valid < vbo + bytes */
74943 +                       u64 dlen = valid - vbo;
74945 +                       err = fiemap_fill_next_extent(fieinfo, vbo, lbo, dlen,
74946 +                                                     flags);
74947 +                       if (err < 0)
74948 +                               break;
74949 +                       if (err == 1) {
74950 +                               err = 0;
74951 +                               break;
74952 +                       }
74954 +                       vbo = valid;
74955 +                       bytes -= dlen;
74956 +                       if (!bytes)
74957 +                               continue;
74959 +                       lbo += dlen;
74960 +                       flags |= FIEMAP_EXTENT_UNWRITTEN;
74961 +               }
74963 +               err = fiemap_fill_next_extent(fieinfo, vbo, lbo, bytes, flags);
74964 +               if (err < 0)
74965 +                       break;
74966 +               if (err == 1) {
74967 +                       err = 0;
74968 +                       break;
74969 +               }
74971 +               vbo += bytes;
74972 +       }
74974 +       up_read(run_lock);
74976 +out:
74977 +       return err;
74978 +}
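/*
 * A hypothetical caller sketch (not part of this patch) for the locking
 * contract noted above: take the inode lock around the mapping walk.
 * ni_lock()/ni_unlock() are assumed to be this driver's inode lock
 * helpers.
 */
static int file_fiemap_sketch(struct ntfs_inode *ni,
			      struct fiemap_extent_info *fieinfo,
			      __u64 vbo, __u64 len)
{
	int err;

	ni_lock(ni);
	err = ni_fiemap(ni, fieinfo, vbo, len);
	ni_unlock(ni);

	return err;
}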
74980 +/*
74981 + * When decompressing, we typically obtain more than one page per reference.
74982 + * We inject the additional pages into the page cache.
74983 + */
74984 +int ni_readpage_cmpr(struct ntfs_inode *ni, struct page *page)
74985 +{
74986 +       int err;
74987 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
74988 +       struct address_space *mapping = page->mapping;
74989 +       pgoff_t index = page->index;
74990 +       u64 frame_vbo, vbo = (u64)index << PAGE_SHIFT;
74991 +       struct page **pages = NULL; /* array of at most 16 pages; could this live on the stack? */
74992 +       u8 frame_bits;
74993 +       CLST frame;
74994 +       u32 i, idx, frame_size, pages_per_frame;
74995 +       gfp_t gfp_mask;
74996 +       struct page *pg;
74998 +       if (vbo >= ni->vfs_inode.i_size) {
74999 +               SetPageUptodate(page);
75000 +               err = 0;
75001 +               goto out;
75002 +       }
75004 +       if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
75005 +               /* xpress or lzx */
75006 +               frame_bits = ni_ext_compress_bits(ni);
75007 +       } else {
75008 +               /* LZNT compression */
75009 +               frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
75010 +       }
75011 +       frame_size = 1u << frame_bits;
75012 +       frame = vbo >> frame_bits;
75013 +       frame_vbo = (u64)frame << frame_bits;
75014 +       idx = (vbo - frame_vbo) >> PAGE_SHIFT;
75016 +       pages_per_frame = frame_size >> PAGE_SHIFT;
75017 +       pages = ntfs_zalloc(pages_per_frame * sizeof(struct page *));
75018 +       if (!pages) {
75019 +               err = -ENOMEM;
75020 +               goto out;
75021 +       }
75023 +       pages[idx] = page;
75024 +       index = frame_vbo >> PAGE_SHIFT;
75025 +       gfp_mask = mapping_gfp_mask(mapping);
75027 +       for (i = 0; i < pages_per_frame; i++, index++) {
75028 +               if (i == idx)
75029 +                       continue;
75031 +               pg = find_or_create_page(mapping, index, gfp_mask);
75032 +               if (!pg) {
75033 +                       err = -ENOMEM;
75034 +                       goto out1;
75035 +               }
75036 +               pages[i] = pg;
75037 +       }
75039 +       err = ni_read_frame(ni, frame_vbo, pages, pages_per_frame);
75041 +out1:
75042 +       if (err)
75043 +               SetPageError(page);
75045 +       for (i = 0; i < pages_per_frame; i++) {
75046 +               pg = pages[i];
75047 +               if (i == idx)
75048 +                       continue;
75049 +               unlock_page(pg);
75050 +               put_page(pg);
75051 +       }
75053 +out:
75054 +       /* At this point, err contains 0 or -EIO depending on the "critical" page */
75055 +       ntfs_free(pages);
75056 +       unlock_page(page);
75058 +       return err;
75059 +}
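/*
 * A standalone sketch of the frame/page math in ni_readpage_cmpr above,
 * assuming 4 KiB pages (PAGE_SHIFT == 12) and a 32 KiB LZX frame
 * (frame_bits == 0xf): the faulting page sits at index 'idx' within its
 * frame.
 */
#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 12, frame_bits = 0xf;  /* assumptions */
	unsigned long long vbo = 0xA000;                 /* faulting offset */
	unsigned long long frame_vbo = (vbo >> frame_bits) << frame_bits;
	unsigned int idx = (unsigned int)((vbo - frame_vbo) >> page_shift);
	unsigned int pages_per_frame = 1u << (frame_bits - page_shift);

	printf("frame_vbo=0x%llx idx=%u pages=%u\n",
	       frame_vbo, idx, pages_per_frame);        /* 0x8000, 2, 8 */
	return 0;
}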
75061 +#ifdef CONFIG_NTFS3_LZX_XPRESS
75062 +/*
75063 + * decompress lzx/xpress compressed file
75064 + * remove ATTR_DATA::WofCompressedData
75065 + * remove ATTR_REPARSE
75066 + */
75067 +int ni_decompress_file(struct ntfs_inode *ni)
75068 +{
75069 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
75070 +       struct inode *inode = &ni->vfs_inode;
75071 +       loff_t i_size = inode->i_size;
75072 +       struct address_space *mapping = inode->i_mapping;
75073 +       gfp_t gfp_mask = mapping_gfp_mask(mapping);
75074 +       struct page **pages = NULL;
75075 +       struct ATTR_LIST_ENTRY *le;
75076 +       struct ATTRIB *attr;
75077 +       CLST vcn, cend, lcn, clen, end;
75078 +       pgoff_t index;
75079 +       u64 vbo;
75080 +       u8 frame_bits;
75081 +       u32 i, frame_size, pages_per_frame, bytes;
75082 +       struct mft_inode *mi;
75083 +       int err;
75085 +       /* clusters for decompressed data */
75086 +       cend = bytes_to_cluster(sbi, i_size);
75088 +       if (!i_size)
75089 +               goto remove_wof;
75091 +       /* check in advance */
75092 +       if (cend > wnd_zeroes(&sbi->used.bitmap)) {
75093 +               err = -ENOSPC;
75094 +               goto out;
75095 +       }
75097 +       frame_bits = ni_ext_compress_bits(ni);
75098 +       frame_size = 1u << frame_bits;
75099 +       pages_per_frame = frame_size >> PAGE_SHIFT;
75100 +       pages = ntfs_zalloc(pages_per_frame * sizeof(struct page *));
75101 +       if (!pages) {
75102 +               err = -ENOMEM;
75103 +               goto out;
75104 +       }
75106 +       /*
75107 +        * Step 1: decompress data and copy to new allocated clusters
75108 +        */
75109 +       index = 0;
75110 +       for (vbo = 0; vbo < i_size; vbo += bytes) {
75111 +               u32 nr_pages;
75112 +               bool new;
75114 +               if (vbo + frame_size > i_size) {
75115 +                       bytes = i_size - vbo;
75116 +                       nr_pages = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
75117 +               } else {
75118 +                       nr_pages = pages_per_frame;
75119 +                       bytes = frame_size;
75120 +               }
75122 +               end = bytes_to_cluster(sbi, vbo + bytes);
75124 +               for (vcn = vbo >> sbi->cluster_bits; vcn < end; vcn += clen) {
75125 +                       err = attr_data_get_block(ni, vcn, cend - vcn, &lcn,
75126 +                                                 &clen, &new);
75127 +                       if (err)
75128 +                               goto out;
75129 +               }
75131 +               for (i = 0; i < pages_per_frame; i++, index++) {
75132 +                       struct page *pg;
75134 +                       pg = find_or_create_page(mapping, index, gfp_mask);
75135 +                       if (!pg) {
75136 +                               while (i--) {
75137 +                                       unlock_page(pages[i]);
75138 +                                       put_page(pages[i]);
75139 +                               }
75140 +                               err = -ENOMEM;
75141 +                               goto out;
75142 +                       }
75143 +                       pages[i] = pg;
75144 +               }
75146 +               err = ni_read_frame(ni, vbo, pages, pages_per_frame);
75148 +               if (!err) {
75149 +                       down_read(&ni->file.run_lock);
75150 +                       err = ntfs_bio_pages(sbi, &ni->file.run, pages,
75151 +                                            nr_pages, vbo, bytes,
75152 +                                            REQ_OP_WRITE);
75153 +                       up_read(&ni->file.run_lock);
75154 +               }
75156 +               for (i = 0; i < pages_per_frame; i++) {
75157 +                       unlock_page(pages[i]);
75158 +                       put_page(pages[i]);
75159 +               }
75161 +               if (err)
75162 +                       goto out;
75164 +               cond_resched();
75165 +       }
75167 +remove_wof:
75168 +       /*
75169 +        * Step 2: deallocate attributes ATTR_DATA::WofCompressedData and ATTR_REPARSE
75170 +        */
75171 +       attr = NULL;
75172 +       le = NULL;
75173 +       while ((attr = ni_enum_attr_ex(ni, attr, &le, NULL))) {
75174 +               CLST svcn, evcn;
75175 +               u32 asize, roff;
75177 +               if (attr->type == ATTR_REPARSE) {
75178 +                       struct MFT_REF ref;
75180 +                       mi_get_ref(&ni->mi, &ref);
75181 +                       ntfs_remove_reparse(sbi, 0, &ref);
75182 +               }
75184 +               if (!attr->non_res)
75185 +                       continue;
75187 +               if (attr->type != ATTR_REPARSE &&
75188 +                   (attr->type != ATTR_DATA ||
75189 +                    attr->name_len != ARRAY_SIZE(WOF_NAME) ||
75190 +                    memcmp(attr_name(attr), WOF_NAME, sizeof(WOF_NAME))))
75191 +                       continue;
75193 +               svcn = le64_to_cpu(attr->nres.svcn);
75194 +               evcn = le64_to_cpu(attr->nres.evcn);
75196 +               if (evcn + 1 <= svcn)
75197 +                       continue;
75199 +               asize = le32_to_cpu(attr->size);
75200 +               roff = le16_to_cpu(attr->nres.run_off);
75202 +               /* run == 1 means unpack and deallocate */
75203 +               run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn,
75204 +                             Add2Ptr(attr, roff), asize - roff);
75205 +       }
75207 +       /*
75208 +        * Step 3: remove attribute ATTR_DATA::WofCompressedData
75209 +        */
75210 +       err = ni_remove_attr(ni, ATTR_DATA, WOF_NAME, ARRAY_SIZE(WOF_NAME),
75211 +                            false, NULL);
75212 +       if (err)
75213 +               goto out;
75215 +       /*
75216 +        * Step 4: remove ATTR_REPARSE
75217 +        */
75218 +       err = ni_remove_attr(ni, ATTR_REPARSE, NULL, 0, false, NULL);
75219 +       if (err)
75220 +               goto out;
75222 +       /*
75223 +        * Step 5: remove sparse flag from data attribute
75224 +        */
75225 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
75226 +       if (!attr) {
75227 +               err = -EINVAL;
75228 +               goto out;
75229 +       }
75231 +       if (attr->non_res && is_attr_sparsed(attr)) {
75232 +               /* the sparse attribute header is 8 bytes bigger than normal */
75233 +               struct MFT_REC *rec = mi->mrec;
75234 +               u32 used = le32_to_cpu(rec->used);
75235 +               u32 asize = le32_to_cpu(attr->size);
75236 +               u16 roff = le16_to_cpu(attr->nres.run_off);
75237 +               char *rbuf = Add2Ptr(attr, roff);
75239 +               memmove(rbuf - 8, rbuf, used - PtrOffset(rec, rbuf));
75240 +               attr->size = cpu_to_le32(asize - 8);
75241 +               attr->flags &= ~ATTR_FLAG_SPARSED;
75242 +               attr->nres.run_off = cpu_to_le16(roff - 8);
75243 +               attr->nres.c_unit = 0;
75244 +               rec->used = cpu_to_le32(used - 8);
75245 +               mi->dirty = true;
75246 +               ni->std_fa &= ~(FILE_ATTRIBUTE_SPARSE_FILE |
75247 +                               FILE_ATTRIBUTE_REPARSE_POINT);
75249 +               mark_inode_dirty(inode);
75250 +       }
75252 +       /* clear cached flag */
75253 +       ni->ni_flags &= ~NI_FLAG_COMPRESSED_MASK;
75254 +       if (ni->file.offs_page) {
75255 +               put_page(ni->file.offs_page);
75256 +               ni->file.offs_page = NULL;
75257 +       }
75258 +       mapping->a_ops = &ntfs_aops;
75260 +out:
75261 +       ntfs_free(pages);
75262 +       if (err) {
75263 +               make_bad_inode(inode);
75264 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
75265 +       }
75267 +       return err;
75268 +}
75269 +
75270 +/* external compression lzx/xpress */
75271 +static int decompress_lzx_xpress(struct ntfs_sb_info *sbi, const char *cmpr,
75272 +                                size_t cmpr_size, void *unc, size_t unc_size,
75273 +                                u32 frame_size)
75274 +{
75275 +       int err;
75276 +       void *ctx;
75278 +       if (cmpr_size == unc_size) {
75279 +               /* frame not compressed */
75280 +               memcpy(unc, cmpr, unc_size);
75281 +               return 0;
75282 +       }
75284 +       err = 0;
75285 +       if (frame_size == 0x8000) {
75286 +               mutex_lock(&sbi->compress.mtx_lzx);
75287 +               /* LZX: frame compressed */
75288 +               ctx = sbi->compress.lzx;
75289 +               if (!ctx) {
75290 +                       /* Lazy initialize lzx decompress context */
75291 +                       ctx = lzx_allocate_decompressor();
75292 +                       if (!ctx) {
75293 +                               err = -ENOMEM;
75294 +                               goto out1;
75295 +                       }
75297 +                       sbi->compress.lzx = ctx;
75298 +               }
75300 +               if (lzx_decompress(ctx, cmpr, cmpr_size, unc, unc_size)) {
75301 +                       /* treat all errors as "invalid argument" */
75302 +                       err = -EINVAL;
75303 +               }
75304 +out1:
75305 +               mutex_unlock(&sbi->compress.mtx_lzx);
75306 +       } else {
75307 +               /* XPRESS: frame compressed */
75308 +               mutex_lock(&sbi->compress.mtx_xpress);
75309 +               ctx = sbi->compress.xpress;
75310 +               if (!ctx) {
75311 +                       /* Lazy initialize xpress decompress context */
75312 +                       ctx = xpress_allocate_decompressor();
75313 +                       if (!ctx) {
75314 +                               err = -ENOMEM;
75315 +                               goto out2;
75316 +                       }
75318 +                       sbi->compress.xpress = ctx;
75319 +               }
75321 +               if (xpress_decompress(ctx, cmpr, cmpr_size, unc, unc_size)) {
75322 +                       /* treat all errors as "invalid argument" */
75323 +                       err = -EINVAL;
75324 +               }
75325 +out2:
75326 +               mutex_unlock(&sbi->compress.mtx_xpress);
75327 +       }
75328 +       return err;
75329 +}
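+/*
+ * Note: WOF frames are 4K/8K/16K (XPRESS) or 32K (LZX), which is why
+ * frame_size == 0x8000 selects the LZX path above and every smaller
+ * frame size is decompressed as XPRESS.
+ */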
75330 +#endif
75332 +/*
75333 + * ni_read_frame
75334 + *
75335 + * pages - array of locked pages
75336 + */
75337 +int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
75338 +                 u32 pages_per_frame)
75339 +{
75340 +       int err;
75341 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
75342 +       u8 cluster_bits = sbi->cluster_bits;
75343 +       char *frame_ondisk = NULL;
75344 +       char *frame_mem = NULL;
75345 +       struct page **pages_disk = NULL;
75346 +       struct ATTR_LIST_ENTRY *le = NULL;
75347 +       struct runs_tree *run = &ni->file.run;
75348 +       u64 valid_size = ni->i_valid;
75349 +       u64 vbo_disk;
75350 +       size_t unc_size;
75351 +       u32 frame_size, i, npages_disk, ondisk_size;
75352 +       struct page *pg;
75353 +       struct ATTRIB *attr;
75354 +       CLST frame, clst_data;
75356 +       /*
75357 +        * To simplify the decompress algorithm, vmap the source and target pages
75358 +        */
75359 +       for (i = 0; i < pages_per_frame; i++)
75360 +               kmap(pages[i]);
75362 +       frame_size = pages_per_frame << PAGE_SHIFT;
75363 +       frame_mem = vmap(pages, pages_per_frame, VM_MAP, PAGE_KERNEL);
75364 +       if (!frame_mem) {
75365 +               err = -ENOMEM;
75366 +               goto out;
75367 +       }
75369 +       attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL, NULL);
75370 +       if (!attr) {
75371 +               err = -ENOENT;
75372 +               goto out1;
75373 +       }
75375 +       if (!attr->non_res) {
75376 +               u32 data_size = le32_to_cpu(attr->res.data_size);
75378 +               memset(frame_mem, 0, frame_size);
75379 +               if (frame_vbo < data_size) {
75380 +                       ondisk_size = data_size - frame_vbo;
75381 +                       memcpy(frame_mem, resident_data(attr) + frame_vbo,
75382 +                              min(ondisk_size, frame_size));
75383 +               }
75384 +               err = 0;
75385 +               goto out1;
75386 +       }
75388 +       if (frame_vbo >= valid_size) {
75389 +               memset(frame_mem, 0, frame_size);
75390 +               err = 0;
75391 +               goto out1;
75392 +       }
75394 +       if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
75395 +#ifndef CONFIG_NTFS3_LZX_XPRESS
75396 +               err = -EOPNOTSUPP;
75397 +               goto out1;
75398 +#else
75399 +               u32 frame_bits = ni_ext_compress_bits(ni);
75400 +               u64 frame64 = frame_vbo >> frame_bits;
75401 +               u64 frames, vbo_data;
75403 +               if (frame_size != (1u << frame_bits)) {
75404 +                       err = -EINVAL;
75405 +                       goto out1;
75406 +               }
75407 +               switch (frame_size) {
75408 +               case 0x1000:
75409 +               case 0x2000:
75410 +               case 0x4000:
75411 +               case 0x8000:
75412 +                       break;
75413 +               default:
75414 +                       /* unknown compression */
75415 +                       err = -EOPNOTSUPP;
75416 +                       goto out1;
75417 +               }
75419 +               attr = ni_find_attr(ni, attr, &le, ATTR_DATA, WOF_NAME,
75420 +                                   ARRAY_SIZE(WOF_NAME), NULL, NULL);
75421 +               if (!attr) {
75422 +                       ntfs_inode_err(
75423 +                               &ni->vfs_inode,
75424 +                               "externally compressed file should contain data attribute \"WofCompressedData\"");
75425 +                       err = -EINVAL;
75426 +                       goto out1;
75427 +               }
75429 +               if (!attr->non_res) {
75430 +                       run = NULL;
75431 +               } else {
75432 +                       run = run_alloc();
75433 +                       if (!run) {
75434 +                               err = -ENOMEM;
75435 +                               goto out1;
75436 +                       }
75437 +               }
75439 +               frames = (ni->vfs_inode.i_size - 1) >> frame_bits;
75441 +               err = attr_wof_frame_info(ni, attr, run, frame64, frames,
75442 +                                         frame_bits, &ondisk_size, &vbo_data);
75443 +               if (err)
75444 +                       goto out2;
75446 +               if (frame64 == frames) {
75447 +                       unc_size = 1 + ((ni->vfs_inode.i_size - 1) &
75448 +                                       (frame_size - 1));
75449 +                       ondisk_size = attr_size(attr) - vbo_data;
75450 +               } else {
75451 +                       unc_size = frame_size;
75452 +               }
75454 +               if (ondisk_size > frame_size) {
75455 +                       err = -EINVAL;
75456 +                       goto out2;
75457 +               }
75459 +               if (!attr->non_res) {
75460 +                       if (vbo_data + ondisk_size >
75461 +                           le32_to_cpu(attr->res.data_size)) {
75462 +                               err = -EINVAL;
75463 +                               goto out1;
75464 +                       }
75466 +                       err = decompress_lzx_xpress(
75467 +                               sbi, Add2Ptr(resident_data(attr), vbo_data),
75468 +                               ondisk_size, frame_mem, unc_size, frame_size);
75469 +                       goto out1;
75470 +               }
75471 +               vbo_disk = vbo_data;
75472 +               /* load all runs to read [vbo_disk, vbo_data + ondisk_size) */
75473 +               err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
75474 +                                          ARRAY_SIZE(WOF_NAME), run, vbo_disk,
75475 +                                          vbo_data + ondisk_size);
75476 +               if (err)
75477 +                       goto out2;
75478 +               npages_disk = (ondisk_size + (vbo_disk & (PAGE_SIZE - 1)) +
75479 +                              PAGE_SIZE - 1) >>
75480 +                             PAGE_SHIFT;
75481 +#endif
75482 +       } else if (is_attr_compressed(attr)) {
75483 +               /* LZNT compression */
75484 +               if (sbi->cluster_size > NTFS_LZNT_MAX_CLUSTER) {
75485 +                       err = -EOPNOTSUPP;
75486 +                       goto out1;
75487 +               }
75489 +               if (attr->nres.c_unit != NTFS_LZNT_CUNIT) {
75490 +                       err = -EOPNOTSUPP;
75491 +                       goto out1;
75492 +               }
75494 +               down_write(&ni->file.run_lock);
75495 +               run_truncate_around(run, le64_to_cpu(attr->nres.svcn));
75496 +               frame = frame_vbo >> (cluster_bits + NTFS_LZNT_CUNIT);
75497 +               err = attr_is_frame_compressed(ni, attr, frame, &clst_data);
75498 +               up_write(&ni->file.run_lock);
75499 +               if (err)
75500 +                       goto out1;
75502 +               if (!clst_data) {
75503 +                       memset(frame_mem, 0, frame_size);
75504 +                       goto out1;
75505 +               }
75507 +               frame_size = sbi->cluster_size << NTFS_LZNT_CUNIT;
75508 +               ondisk_size = clst_data << cluster_bits;
75510 +               if (clst_data >= NTFS_LZNT_CLUSTERS) {
75511 +                       /* frame is not compressed */
75512 +                       down_read(&ni->file.run_lock);
75513 +                       err = ntfs_bio_pages(sbi, run, pages, pages_per_frame,
75514 +                                            frame_vbo, ondisk_size,
75515 +                                            REQ_OP_READ);
75516 +                       up_read(&ni->file.run_lock);
75517 +                       goto out1;
75518 +               }
75519 +               vbo_disk = frame_vbo;
75520 +               npages_disk = (ondisk_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
75521 +       } else {
75523 +               err = -EINVAL;
75524 +               goto out1;
75525 +       }
75527 +       pages_disk = ntfs_zalloc(npages_disk * sizeof(struct page *));
75528 +       if (!pages_disk) {
75529 +               err = -ENOMEM;
75530 +               goto out2;
75531 +       }
75533 +       for (i = 0; i < npages_disk; i++) {
75534 +               pg = alloc_page(GFP_KERNEL);
75535 +               if (!pg) {
75536 +                       err = -ENOMEM;
75537 +                       goto out3;
75538 +               }
75539 +               pages_disk[i] = pg;
75540 +               lock_page(pg);
75541 +               kmap(pg);
75542 +       }
75544 +       /* read 'ondisk_size' bytes from disk */
75545 +       down_read(&ni->file.run_lock);
75546 +       err = ntfs_bio_pages(sbi, run, pages_disk, npages_disk, vbo_disk,
75547 +                            ondisk_size, REQ_OP_READ);
75548 +       up_read(&ni->file.run_lock);
75549 +       if (err)
75550 +               goto out3;
75552 +       /*
75553 +        * To simplify the decompress algorithm, vmap the source and target pages
75554 +        */
75555 +       frame_ondisk = vmap(pages_disk, npages_disk, VM_MAP, PAGE_KERNEL_RO);
75556 +       if (!frame_ondisk) {
75557 +               err = -ENOMEM;
75558 +               goto out3;
75559 +       }
75561 +       /* decompress: frame_ondisk -> frame_mem */
75562 +#ifdef CONFIG_NTFS3_LZX_XPRESS
75563 +       if (run != &ni->file.run) {
75564 +               /* LZX or XPRESS */
75565 +               err = decompress_lzx_xpress(
75566 +                       sbi, frame_ondisk + (vbo_disk & (PAGE_SIZE - 1)),
75567 +                       ondisk_size, frame_mem, unc_size, frame_size);
75568 +       } else
75569 +#endif
75570 +       {
75571 +               /* LZNT - native ntfs compression */
75572 +               unc_size = decompress_lznt(frame_ondisk, ondisk_size, frame_mem,
75573 +                                          frame_size);
75574 +               if ((ssize_t)unc_size < 0)
75575 +                       err = unc_size;
75576 +               else if (!unc_size || unc_size > frame_size)
75577 +                       err = -EINVAL;
75578 +       }
75579 +       if (!err && valid_size < frame_vbo + frame_size) {
75580 +               size_t ok = valid_size - frame_vbo;
75582 +               memset(frame_mem + ok, 0, frame_size - ok);
75583 +       }
75585 +       vunmap(frame_ondisk);
75587 +out3:
75588 +       for (i = 0; i < npages_disk; i++) {
75589 +               pg = pages_disk[i];
75590 +               if (pg) {
75591 +                       kunmap(pg);
75592 +                       unlock_page(pg);
75593 +                       put_page(pg);
75594 +               }
75595 +       }
75596 +       ntfs_free(pages_disk);
75598 +out2:
75599 +#ifdef CONFIG_NTFS3_LZX_XPRESS
75600 +       if (run != &ni->file.run)
75601 +               run_free(run);
75602 +#endif
75603 +out1:
75604 +       vunmap(frame_mem);
75605 +out:
75606 +       for (i = 0; i < pages_per_frame; i++) {
75607 +               pg = pages[i];
75608 +               kunmap(pg);
75609 +               ClearPageError(pg);
75610 +               SetPageUptodate(pg);
75611 +       }
75613 +       return err;
75614 +}
75616 +/*
75617 + * ni_write_frame
75618 + *
75619 + * pages - array of locked pages
75620 + */
75621 +int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
75622 +                  u32 pages_per_frame)
75623 +{
75624 +       int err;
75625 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
75626 +       u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
75627 +       u32 frame_size = sbi->cluster_size << NTFS_LZNT_CUNIT;
75628 +       u64 frame_vbo = (u64)pages[0]->index << PAGE_SHIFT;
75629 +       CLST frame = frame_vbo >> frame_bits;
75630 +       char *frame_ondisk = NULL;
75631 +       struct page **pages_disk = NULL;
75632 +       struct ATTR_LIST_ENTRY *le = NULL;
75633 +       char *frame_mem;
75634 +       struct ATTRIB *attr;
75635 +       struct mft_inode *mi;
75636 +       u32 i;
75637 +       struct page *pg;
75638 +       size_t compr_size, ondisk_size;
75639 +       struct lznt *lznt;
75641 +       attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL, &mi);
75642 +       if (!attr) {
75643 +               err = -ENOENT;
75644 +               goto out;
75645 +       }
75647 +       if (WARN_ON(!is_attr_compressed(attr))) {
75648 +               err = -EINVAL;
75649 +               goto out;
75650 +       }
75652 +       if (sbi->cluster_size > NTFS_LZNT_MAX_CLUSTER) {
75653 +               err = -EOPNOTSUPP;
75654 +               goto out;
75655 +       }
75657 +       if (!attr->non_res) {
75658 +               down_write(&ni->file.run_lock);
75659 +               err = attr_make_nonresident(ni, attr, le, mi,
75660 +                                           le32_to_cpu(attr->res.data_size),
75661 +                                           &ni->file.run, &attr, pages[0]);
75662 +               up_write(&ni->file.run_lock);
75663 +               if (err)
75664 +                       goto out;
75665 +       }
75667 +       if (attr->nres.c_unit != NTFS_LZNT_CUNIT) {
75668 +               err = -EOPNOTSUPP;
75669 +               goto out;
75670 +       }
75672 +       pages_disk = ntfs_zalloc(pages_per_frame * sizeof(struct page *));
75673 +       if (!pages_disk) {
75674 +               err = -ENOMEM;
75675 +               goto out;
75676 +       }
75678 +       for (i = 0; i < pages_per_frame; i++) {
75679 +               pg = alloc_page(GFP_KERNEL);
75680 +               if (!pg) {
75681 +                       err = -ENOMEM;
75682 +                       goto out1;
75683 +               }
75684 +               pages_disk[i] = pg;
75685 +               lock_page(pg);
75686 +               kmap(pg);
75687 +       }
75689 +       /*
75690 +        * To simplify the compress algorithm, vmap the source and target pages
75691 +        */
75692 +       frame_ondisk = vmap(pages_disk, pages_per_frame, VM_MAP, PAGE_KERNEL);
75693 +       if (!frame_ondisk) {
75694 +               err = -ENOMEM;
75695 +               goto out1;
75696 +       }
75698 +       for (i = 0; i < pages_per_frame; i++)
75699 +               kmap(pages[i]);
75701 +       /* map in-memory frame for read-only */
75702 +       frame_mem = vmap(pages, pages_per_frame, VM_MAP, PAGE_KERNEL_RO);
75703 +       if (!frame_mem) {
75704 +               err = -ENOMEM;
75705 +               goto out2;
75706 +       }
75708 +       mutex_lock(&sbi->compress.mtx_lznt);
75709 +       lznt = NULL;
75710 +       if (!sbi->compress.lznt) {
75711 +               /*
75712 +                * lznt implements two levels of compression:
75713 +                * 0 - standard compression
75714 +                * 1 - best compression, requires a lot of CPU
75715 +                * TODO: select the level via a mount option?
75716 +                */
75717 +               lznt = get_lznt_ctx(0);
75718 +               if (!lznt) {
75719 +                       mutex_unlock(&sbi->compress.mtx_lznt);
75720 +                       err = -ENOMEM;
75721 +                       goto out3;
75722 +               }
75724 +               sbi->compress.lznt = lznt;
75725 +               lznt = NULL;
75726 +       }
75728 +       /* compress: frame_mem -> frame_ondisk */
75729 +       compr_size = compress_lznt(frame_mem, frame_size, frame_ondisk,
75730 +                                  frame_size, sbi->compress.lznt);
75731 +       mutex_unlock(&sbi->compress.mtx_lznt);
75732 +       ntfs_free(lznt);
75734 +       if (compr_size + sbi->cluster_size > frame_size) {
75735 +               /* frame is not compressed */
75736 +               compr_size = frame_size;
75737 +               ondisk_size = frame_size;
75738 +       } else if (compr_size) {
75739 +               /* frame is compressed */
75740 +               ondisk_size = ntfs_up_cluster(sbi, compr_size);
75741 +               memset(frame_ondisk + compr_size, 0, ondisk_size - compr_size);
75742 +       } else {
75743 +               /* frame is sparse */
75744 +               ondisk_size = 0;
75745 +       }
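+       /*
+        * Example with 4K clusters (frame_size = 0x1000 << NTFS_LZNT_CUNIT = 0x10000):
+        * compr_size 0xf800 -> 0xf800 + 0x1000 > 0x10000, frame stored as is;
+        * compr_size 0x8123 -> ondisk_size = 0x9000, rounded up to a cluster;
+        * compr_size 0      -> ondisk_size = 0, nothing is written (sparse frame).
+        */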
75747 +       down_write(&ni->file.run_lock);
75748 +       run_truncate_around(&ni->file.run, le64_to_cpu(attr->nres.svcn));
75749 +       err = attr_allocate_frame(ni, frame, compr_size, ni->i_valid);
75750 +       up_write(&ni->file.run_lock);
75751 +       if (err)
75752 +               goto out3;
75754 +       if (!ondisk_size)
75755 +               goto out3;
75757 +       down_read(&ni->file.run_lock);
75758 +       err = ntfs_bio_pages(sbi, &ni->file.run,
75759 +                            ondisk_size < frame_size ? pages_disk : pages,
75760 +                            pages_per_frame, frame_vbo, ondisk_size,
75761 +                            REQ_OP_WRITE);
75762 +       up_read(&ni->file.run_lock);
75764 +out3:
75765 +       vunmap(frame_mem);
75767 +out2:
75768 +       for (i = 0; i < pages_per_frame; i++)
75769 +               kunmap(pages[i]);
75771 +       vunmap(frame_ondisk);
75772 +out1:
75773 +       for (i = 0; i < pages_per_frame; i++) {
75774 +               pg = pages_disk[i];
75775 +               if (pg) {
75776 +                       kunmap(pg);
75777 +                       unlock_page(pg);
75778 +                       put_page(pg);
75779 +               }
75780 +       }
75781 +       ntfs_free(pages_disk);
75782 +out:
75783 +       return err;
75784 +}
75786 +/*
75787 + * ni_update_parent - update duplicate info of ATTR_FILE_NAME in MFT and in parent directories
75788 + */
75789 +static bool ni_update_parent(struct ntfs_inode *ni, struct NTFS_DUP_INFO *dup,
75790 +                            int sync)
75791 +{
75792 +       struct ATTRIB *attr;
75793 +       struct mft_inode *mi;
75794 +       struct ATTR_LIST_ENTRY *le = NULL;
75795 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
75796 +       struct super_block *sb = sbi->sb;
75797 +       bool re_dirty = false;
75798 +       bool active = sb->s_flags & SB_ACTIVE;
75799 +       bool upd_parent = ni->ni_flags & NI_FLAG_UPDATE_PARENT;
75801 +       if (ni->mi.mrec->flags & RECORD_FLAG_DIR) {
75802 +               dup->fa |= FILE_ATTRIBUTE_DIRECTORY;
75803 +               attr = NULL;
75804 +               dup->alloc_size = 0;
75805 +               dup->data_size = 0;
75806 +       } else {
75807 +               dup->fa &= ~FILE_ATTRIBUTE_DIRECTORY;
75809 +               attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL,
75810 +                                   &mi);
75811 +               if (!attr) {
75812 +                       dup->alloc_size = dup->data_size = 0;
75813 +               } else if (!attr->non_res) {
75814 +                       u32 data_size = le32_to_cpu(attr->res.data_size);
75816 +                       dup->alloc_size = cpu_to_le64(QuadAlign(data_size));
75817 +                       dup->data_size = cpu_to_le64(data_size);
75818 +               } else {
75819 +                       u64 new_valid = ni->i_valid;
75820 +                       u64 data_size = le64_to_cpu(attr->nres.data_size);
75821 +                       __le64 valid_le;
75823 +                       dup->alloc_size = is_attr_ext(attr)
75824 +                                                 ? attr->nres.total_size
75825 +                                                 : attr->nres.alloc_size;
75826 +                       dup->data_size = attr->nres.data_size;
75828 +                       if (new_valid > data_size)
75829 +                               new_valid = data_size;
75831 +                       valid_le = cpu_to_le64(new_valid);
75832 +                       if (valid_le != attr->nres.valid_size) {
75833 +                               attr->nres.valid_size = valid_le;
75834 +                               mi->dirty = true;
75835 +                       }
75836 +               }
75837 +       }
75839 +       /* TODO: fill reparse info */
75840 +       dup->reparse = 0;
75841 +       dup->ea_size = 0;
75843 +       if (ni->ni_flags & NI_FLAG_EA) {
75844 +               attr = ni_find_attr(ni, attr, &le, ATTR_EA_INFO, NULL, 0, NULL,
75845 +                                   NULL);
75846 +               if (attr) {
75847 +                       const struct EA_INFO *info;
75849 +                       info = resident_data_ex(attr, sizeof(struct EA_INFO));
+                       /* info may be NULL if the EA_INFO attribute is corrupt */
+                       if (info)
75850 +                               dup->ea_size = info->size_pack;
75851 +               }
75852 +       }
75854 +       attr = NULL;
75855 +       le = NULL;
75857 +       while ((attr = ni_find_attr(ni, attr, &le, ATTR_NAME, NULL, 0, NULL,
75858 +                                   &mi))) {
75859 +               struct inode *dir;
75860 +               struct ATTR_FILE_NAME *fname;
75862 +               fname = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
75863 +               if (!fname)
75864 +                       continue;
75866 +               if (memcmp(&fname->dup, dup, sizeof(fname->dup))) {
75867 +                       memcpy(&fname->dup, dup, sizeof(fname->dup));
75868 +                       mi->dirty = true;
75869 +               } else if (!upd_parent) {
75870 +                       continue;
75871 +               }
75873 +               if (!active)
75874 +                       continue; /* avoid __wait_on_freeing_inode(inode); */
75876 +               /* ntfs_iget5 may sleep */
75877 +               dir = ntfs_iget5(sb, &fname->home, NULL);
75878 +               if (IS_ERR(dir)) {
75879 +                       ntfs_inode_warn(
75880 +                               &ni->vfs_inode,
75881 +                               "failed to open parent directory r=%lx to update",
75882 +                               (long)ino_get(&fname->home));
75883 +                       continue;
75884 +               }
75886 +               if (!is_bad_inode(dir)) {
75887 +                       struct ntfs_inode *dir_ni = ntfs_i(dir);
75889 +                       if (!ni_trylock(dir_ni)) {
75890 +                               re_dirty = true;
75891 +                       } else {
75892 +                               indx_update_dup(dir_ni, sbi, fname, dup, sync);
75893 +                               ni_unlock(dir_ni);
75894 +                       }
75895 +               }
75896 +               iput(dir);
75897 +       }
75899 +       return re_dirty;
75900 +}
75902 +/*
75903 + * ni_write_inode
75904 + *
75905 + * write mft base record and all subrecords to disk
75906 + */
75907 +int ni_write_inode(struct inode *inode, int sync, const char *hint)
75908 +{
75909 +       int err = 0, err2;
75910 +       struct ntfs_inode *ni = ntfs_i(inode);
75911 +       struct super_block *sb = inode->i_sb;
75912 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
75913 +       bool re_dirty = false;
75914 +       struct ATTR_STD_INFO *std;
75915 +       struct rb_node *node, *next;
75916 +       struct NTFS_DUP_INFO dup;
75918 +       if (is_bad_inode(inode) || sb_rdonly(sb))
75919 +               return 0;
75921 +       if (!ni_trylock(ni)) {
75922 +               /* 'ni' is under modification, skip for now */
75923 +               mark_inode_dirty_sync(inode);
75924 +               return 0;
75925 +       }
75927 +       if (is_rec_inuse(ni->mi.mrec) &&
75928 +           !(sbi->flags & NTFS_FLAGS_LOG_REPLAYING) && inode->i_nlink) {
75929 +               bool modified = false;
75931 +               /* update times in standard attribute */
75932 +               std = ni_std(ni);
75933 +               if (!std) {
75934 +                       err = -EINVAL;
75935 +                       goto out;
75936 +               }
75938 +               /* Update the times if they have changed. */
75939 +               dup.m_time = kernel2nt(&inode->i_mtime);
75940 +               if (std->m_time != dup.m_time) {
75941 +                       std->m_time = dup.m_time;
75942 +                       modified = true;
75943 +               }
75945 +               dup.c_time = kernel2nt(&inode->i_ctime);
75946 +               if (std->c_time != dup.c_time) {
75947 +                       std->c_time = dup.c_time;
75948 +                       modified = true;
75949 +               }
75951 +               dup.a_time = kernel2nt(&inode->i_atime);
75952 +               if (std->a_time != dup.a_time) {
75953 +                       std->a_time = dup.a_time;
75954 +                       modified = true;
75955 +               }
75957 +               dup.fa = ni->std_fa;
75958 +               if (std->fa != dup.fa) {
75959 +                       std->fa = dup.fa;
75960 +                       modified = true;
75961 +               }
75963 +               if (modified)
75964 +                       ni->mi.dirty = true;
75966 +               if (!ntfs_is_meta_file(sbi, inode->i_ino) &&
75967 +                   (modified || (ni->ni_flags & NI_FLAG_UPDATE_PARENT))) {
75968 +                       dup.cr_time = std->cr_time;
75969 +                       /* Not critical if this function fails */
75970 +                       re_dirty = ni_update_parent(ni, &dup, sync);
75972 +                       if (re_dirty)
75973 +                               ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
75974 +                       else
75975 +                               ni->ni_flags &= ~NI_FLAG_UPDATE_PARENT;
75976 +               }
75978 +               /* update attribute list */
75979 +               if (ni->attr_list.size && ni->attr_list.dirty) {
75980 +                       if (inode->i_ino != MFT_REC_MFT || sync) {
75981 +                               err = ni_try_remove_attr_list(ni);
75982 +                               if (err)
75983 +                                       goto out;
75984 +                       }
75986 +                       err = al_update(ni);
75987 +                       if (err)
75988 +                               goto out;
75989 +               }
75990 +       }
75992 +       for (node = rb_first(&ni->mi_tree); node; node = next) {
75993 +               struct mft_inode *mi = rb_entry(node, struct mft_inode, node);
75994 +               bool is_empty;
75996 +               next = rb_next(node);
75998 +               if (!mi->dirty)
75999 +                       continue;
76001 +               is_empty = !mi_enum_attr(mi, NULL);
76003 +               if (is_empty)
76004 +                       clear_rec_inuse(mi->mrec);
76006 +               err2 = mi_write(mi, sync);
76007 +               if (!err && err2)
76008 +                       err = err2;
76010 +               if (is_empty) {
76011 +                       ntfs_mark_rec_free(sbi, mi->rno);
76012 +                       rb_erase(node, &ni->mi_tree);
76013 +                       mi_put(mi);
76014 +               }
76015 +       }
76017 +       if (ni->mi.dirty) {
76018 +               err2 = mi_write(&ni->mi, sync);
76019 +               if (!err && err2)
76020 +                       err = err2;
76021 +       }
76022 +out:
76023 +       ni_unlock(ni);
76025 +       if (err) {
76026 +               ntfs_err(sb, "%s r=%lx failed, %d.", hint, inode->i_ino, err);
76027 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
76028 +               return err;
76029 +       }
76031 +       if (re_dirty && (sb->s_flags & SB_ACTIVE))
76032 +               mark_inode_dirty_sync(inode);
76034 +       return 0;
76035 +}
76036 diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
76037 new file mode 100644
76038 index 000000000000..53da12252408
76039 --- /dev/null
76040 +++ b/fs/ntfs3/fslog.c
76041 @@ -0,0 +1,5181 @@
76042 +// SPDX-License-Identifier: GPL-2.0
76043 +/*
76044 + *
76045 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
76046 + *
76047 + */
76049 +#include <linux/blkdev.h>
76050 +#include <linux/buffer_head.h>
76051 +#include <linux/fs.h>
76052 +#include <linux/hash.h>
76053 +#include <linux/nls.h>
76054 +#include <linux/random.h>
76055 +#include <linux/ratelimit.h>
76056 +#include <linux/slab.h>
76058 +#include "debug.h"
76059 +#include "ntfs.h"
76060 +#include "ntfs_fs.h"
76062 +/*
76063 + * LOG FILE structs
76064 + */
76066 +// clang-format off
76068 +#define MaxLogFileSize     0x100000000ull
76069 +#define DefaultLogPageSize 4096
76070 +#define MinLogRecordPages  0x30
76072 +struct RESTART_HDR {
76073 +       struct NTFS_RECORD_HEADER rhdr; // 'RSTR'
76074 +       __le32 sys_page_size; // 0x10: Page size of the system which initialized the log
76075 +       __le32 page_size;     // 0x14: Log page size used for this log file
76076 +       __le16 ra_off;        // 0x18:
76077 +       __le16 minor_ver;     // 0x1A:
76078 +       __le16 major_ver;     // 0x1C:
76079 +       __le16 fixups[];
76080 +};
76082 +#define LFS_NO_CLIENT 0xffff
76083 +#define LFS_NO_CLIENT_LE cpu_to_le16(0xffff)
76085 +struct CLIENT_REC {
76086 +       __le64 oldest_lsn;
76087 +       __le64 restart_lsn; // 0x08:
76088 +       __le16 prev_client; // 0x10:
76089 +       __le16 next_client; // 0x12:
76090 +       __le16 seq_num;     // 0x14:
76091 +       u8 align[6];        // 0x16
76092 +       __le32 name_bytes;  // 0x1C: in bytes
76093 +       __le16 name[32];    // 0x20: name of client
76094 +};
76096 +static_assert(sizeof(struct CLIENT_REC) == 0x60);
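+/*
+ * Size check: 8 + 8 + 2 + 2 + 2 + 6 + 4 + 2 * 32 == 0x60 bytes; note that
+ * is_rst_area_valid() below only accepts restart areas with at most one
+ * client record.
+ */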
76098 +/* Two copies of these will exist at the beginning of the log file */
76099 +struct RESTART_AREA {
76100 +       __le64 current_lsn;    // 0x00: Current logical end of log file
76101 +       __le16 log_clients;    // 0x08: Maximum number of clients
76102 +       __le16 client_idx[2];  // 0x0A: free/use index into the client record arrays
76103 +       __le16 flags;          // 0x0E: See RESTART_SINGLE_PAGE_IO
76104 +       __le32 seq_num_bits;   // 0x10: the number of bits in sequence number.
76105 +       __le16 ra_len;         // 0x14:
76106 +       __le16 client_off;     // 0x16:
76107 +       __le64 l_size;         // 0x18: Usable log file size.
76108 +       __le32 last_lsn_data_len; // 0x20:
76109 +       __le16 rec_hdr_len;    // 0x24: log record header length
76110 +       __le16 data_off;       // 0x26: log page data offset
76111 +       __le32 open_log_count; // 0x28:
76112 +       __le32 align[5];       // 0x2C:
76113 +       struct CLIENT_REC clients[]; // 0x40:
76114 +};
76116 +struct LOG_REC_HDR {
76117 +       __le16 redo_op;      // 0x00:  NTFS_LOG_OPERATION
76118 +       __le16 undo_op;      // 0x02:  NTFS_LOG_OPERATION
76119 +       __le16 redo_off;     // 0x04:  Offset to Redo record
76120 +       __le16 redo_len;     // 0x06:  Redo length
76121 +       __le16 undo_off;     // 0x08:  Offset to Undo record
76122 +       __le16 undo_len;     // 0x0A:  Undo length
76123 +       __le16 target_attr;  // 0x0C:
76124 +       __le16 lcns_follow;  // 0x0E:
76125 +       __le16 record_off;   // 0x10:
76126 +       __le16 attr_off;     // 0x12:
76127 +       __le16 cluster_off;  // 0x14:
76128 +       __le16 reserved;     // 0x16:
76129 +       __le64 target_vcn;   // 0x18:
76130 +       __le64 page_lcns[];  // 0x20:
76131 +};
76133 +static_assert(sizeof(struct LOG_REC_HDR) == 0x20);
76135 +#define RESTART_ENTRY_ALLOCATED    0xFFFFFFFF
76136 +#define RESTART_ENTRY_ALLOCATED_LE cpu_to_le32(0xFFFFFFFF)
76138 +struct RESTART_TABLE {
76139 +       __le16 size;       // 0x00:  In bytes
76140 +       __le16 used;       // 0x02: entries
76141 +       __le16 total;      // 0x04: entries
76142 +       __le16 res[3];     // 0x06:
76143 +       __le32 free_goal;  // 0x0C:
76144 +       __le32 first_free; // 0x10
76145 +       __le32 last_free;  // 0x14
76147 +};
76149 +static_assert(sizeof(struct RESTART_TABLE) == 0x18);
76151 +struct ATTR_NAME_ENTRY {
76152 +       __le16 off; // offset in the Open attribute Table
76153 +       __le16 name_bytes;
76154 +       __le16 name[];
76155 +};
76157 +struct OPEN_ATTR_ENRTY {
76158 +       __le32 next;            // 0x00: RESTART_ENTRY_ALLOCATED if allocated
76159 +       __le32 bytes_per_index; // 0x04:
76160 +       enum ATTR_TYPE type;    // 0x08:
76161 +       u8 is_dirty_pages;      // 0x0C:
76162 +       u8 is_attr_name;        // 0x0D: Faked field to manage 'ptr'
76163 +       u8 name_len;            // 0x0E: Faked field to manage 'ptr'
76164 +       u8 res;
76165 +       struct MFT_REF ref; // 0x10: File Reference of file containing attribute
76166 +       __le64 open_record_lsn; // 0x18:
76167 +       void *ptr;              // 0x20:
76168 +};
76170 +/* 32 bit version of 'struct OPEN_ATTR_ENRTY' */
76171 +struct OPEN_ATTR_ENRTY_32 {
76172 +       __le32 next;            // 0x00: RESTART_ENTRY_ALLOCATED if allocated
76173 +       __le32 ptr;             // 0x04:
76174 +       struct MFT_REF ref;     // 0x08:
76175 +       __le64 open_record_lsn; // 0x10:
76176 +       u8 is_dirty_pages;      // 0x18:
76177 +       u8 is_attr_name;        // 0x19
76178 +       u8 res1[2];
76179 +       enum ATTR_TYPE type;    // 0x1C:
76180 +       u8 name_len;            // 0x20:  in wchar
76181 +       u8 res2[3];
76182 +       __le32 AttributeName;   // 0x24:
76183 +       __le32 bytes_per_index; // 0x28:
76184 +};
76186 +#define SIZEOF_OPENATTRIBUTEENTRY0 0x2c
76187 +// static_assert( 0x2C == sizeof(struct OPEN_ATTR_ENRTY_32) );
76188 +static_assert(sizeof(struct OPEN_ATTR_ENRTY) < SIZEOF_OPENATTRIBUTEENTRY0);
76190 +/*
76191 + * One entry exists in the Dirty Pages Table for each page which is dirty at the
76192 + * time the Restart Area is written
76193 + */
76194 +struct DIR_PAGE_ENTRY {
76195 +       __le32 next;         // 0x00:  RESTART_ENTRY_ALLOCATED if allocated
76196 +       __le32 target_attr;  // 0x04:  Index into the Open attribute Table
76197 +       __le32 transfer_len; // 0x08:
76198 +       __le32 lcns_follow;  // 0x0C:
76199 +       __le64 vcn;          // 0x10:  Vcn of dirty page
76200 +       __le64 oldest_lsn;   // 0x18:
76201 +       __le64 page_lcns[];  // 0x20:
76202 +};
76204 +static_assert(sizeof(struct DIR_PAGE_ENTRY) == 0x20);
76206 +/* 32 bit version of 'struct DIR_PAGE_ENTRY' */
76207 +struct DIR_PAGE_ENTRY_32 {
76208 +       __le32 next;         // 0x00:  RESTART_ENTRY_ALLOCATED if allocated
76209 +       __le32 target_attr;  // 0x04:  Index into the Open attribute Table
76210 +       __le32 transfer_len; // 0x08:
76211 +       __le32 lcns_follow;  // 0x0C:
76212 +       __le32 reserved;     // 0x10:
76213 +       __le32 vcn_low;      // 0x14:  Vcn of dirty page
76214 +       __le32 vcn_hi;       // 0x18:  Vcn of dirty page
76215 +       __le32 oldest_lsn_low; // 0x1C:
76216 +       __le32 oldest_lsn_hi; // 0x20:
76217 +       __le32 page_lcns_low; // 0x24:
76218 +       __le32 page_lcns_hi; // 0x28:
76219 +};
76221 +static_assert(offsetof(struct DIR_PAGE_ENTRY_32, vcn_low) == 0x14);
76222 +static_assert(sizeof(struct DIR_PAGE_ENTRY_32) == 0x2c);
76224 +enum transact_state {
76225 +       TransactionUninitialized = 0,
76226 +       TransactionActive,
76227 +       TransactionPrepared,
76228 +       TransactionCommitted
76229 +};
76231 +struct TRANSACTION_ENTRY {
76232 +       __le32 next;          // 0x00: RESTART_ENTRY_ALLOCATED if allocated
76233 +       u8 transact_state;    // 0x04:
76234 +       u8 reserved[3];       // 0x05:
76235 +       __le64 first_lsn;     // 0x08:
76236 +       __le64 prev_lsn;      // 0x10:
76237 +       __le64 undo_next_lsn; // 0x18:
76238 +       __le32 undo_records;  // 0x20: Number of undo log records pending abort
76239 +       __le32 undo_len;      // 0x24: Total undo size
76240 +};
76242 +static_assert(sizeof(struct TRANSACTION_ENTRY) == 0x28);
76244 +struct NTFS_RESTART {
76245 +       __le32 major_ver;             // 0x00:
76246 +       __le32 minor_ver;             // 0x04:
76247 +       __le64 check_point_start;     // 0x08:
76248 +       __le64 open_attr_table_lsn;   // 0x10:
76249 +       __le64 attr_names_lsn;        // 0x18:
76250 +       __le64 dirty_pages_table_lsn; // 0x20:
76251 +       __le64 transact_table_lsn;    // 0x28:
76252 +       __le32 open_attr_len;         // 0x30: In bytes
76253 +       __le32 attr_names_len;        // 0x34: In bytes
76254 +       __le32 dirty_pages_len;       // 0x38: In bytes
76255 +       __le32 transact_table_len;    // 0x3C: In bytes
76256 +};
76258 +static_assert(sizeof(struct NTFS_RESTART) == 0x40);
76260 +struct NEW_ATTRIBUTE_SIZES {
76261 +       __le64 alloc_size;
76262 +       __le64 valid_size;
76263 +       __le64 data_size;
76264 +       __le64 total_size;
76265 +};
76267 +struct BITMAP_RANGE {
76268 +       __le32 bitmap_off;
76269 +       __le32 bits;
76270 +};
76272 +struct LCN_RANGE {
76273 +       __le64 lcn;
76274 +       __le64 len;
76275 +};
76277 +/* The following type defines the different log record types */
76278 +#define LfsClientRecord  cpu_to_le32(1)
76279 +#define LfsClientRestart cpu_to_le32(2)
76281 +/* This is used to uniquely identify a client for a particular log file */
76282 +struct CLIENT_ID {
76283 +       __le16 seq_num;
76284 +       __le16 client_idx;
76285 +};
76287 +/* This is the header that begins every Log Record in the log file */
76288 +struct LFS_RECORD_HDR {
76289 +       __le64 this_lsn;    // 0x00:
76290 +       __le64 client_prev_lsn;  // 0x08:
76291 +       __le64 client_undo_next_lsn; // 0x10:
76292 +       __le32 client_data_len;  // 0x18:
76293 +       struct CLIENT_ID client; // 0x1C: Owner of this log record
76294 +       __le32 record_type; // 0x20: LfsClientRecord or LfsClientRestart
76295 +       __le32 transact_id; // 0x24:
76296 +       __le16 flags;       // 0x28:    LOG_RECORD_MULTI_PAGE
76297 +       u8 align[6];        // 0x2A:
76298 +};
76300 +#define LOG_RECORD_MULTI_PAGE cpu_to_le16(1)
76302 +static_assert(sizeof(struct LFS_RECORD_HDR) == 0x30);
76304 +struct LFS_RECORD {
76305 +       __le16 next_record_off; // 0x00: Offset of the free space in the page
76306 +       u8 align[6];         // 0x02:
76307 +       __le64 last_end_lsn; // 0x08: lsn for the last log record which ends on the page
76308 +};
76310 +static_assert(sizeof(struct LFS_RECORD) == 0x10);
76312 +struct RECORD_PAGE_HDR {
76313 +       struct NTFS_RECORD_HEADER rhdr; // 'RCRD'
76314 +       __le32 rflags;     // 0x10:  See LOG_PAGE_LOG_RECORD_END
76315 +       __le16 page_count; // 0x14:
76316 +       __le16 page_pos;   // 0x16:
76317 +       struct LFS_RECORD record_hdr; // 0x18
76318 +       __le16 fixups[10]; // 0x28
76319 +       __le32 file_off;   // 0x3c: used when major version >= 2
76320 +};
76322 +// clang-format on
76324 +// Page contains the end of a log record
76325 +#define LOG_PAGE_LOG_RECORD_END cpu_to_le32(0x00000001)
76327 +static inline bool is_log_record_end(const struct RECORD_PAGE_HDR *hdr)
76328 +{
76329 +       return hdr->rflags & LOG_PAGE_LOG_RECORD_END;
76330 +}
76332 +static_assert(offsetof(struct RECORD_PAGE_HDR, file_off) == 0x3c);
76334 +/*
76335 + * END of NTFS LOG structures
76336 + */
76338 +/* Define some tuning parameters to keep the restart tables a reasonable size */
76339 +#define INITIAL_NUMBER_TRANSACTIONS 5
76341 +enum NTFS_LOG_OPERATION {
76343 +       Noop = 0x00,
76344 +       CompensationLogRecord = 0x01,
76345 +       InitializeFileRecordSegment = 0x02,
76346 +       DeallocateFileRecordSegment = 0x03,
76347 +       WriteEndOfFileRecordSegment = 0x04,
76348 +       CreateAttribute = 0x05,
76349 +       DeleteAttribute = 0x06,
76350 +       UpdateResidentValue = 0x07,
76351 +       UpdateNonresidentValue = 0x08,
76352 +       UpdateMappingPairs = 0x09,
76353 +       DeleteDirtyClusters = 0x0A,
76354 +       SetNewAttributeSizes = 0x0B,
76355 +       AddIndexEntryRoot = 0x0C,
76356 +       DeleteIndexEntryRoot = 0x0D,
76357 +       AddIndexEntryAllocation = 0x0E,
76358 +       DeleteIndexEntryAllocation = 0x0F,
76359 +       WriteEndOfIndexBuffer = 0x10,
76360 +       SetIndexEntryVcnRoot = 0x11,
76361 +       SetIndexEntryVcnAllocation = 0x12,
76362 +       UpdateFileNameRoot = 0x13,
76363 +       UpdateFileNameAllocation = 0x14,
76364 +       SetBitsInNonresidentBitMap = 0x15,
76365 +       ClearBitsInNonresidentBitMap = 0x16,
76366 +       HotFix = 0x17,
76367 +       EndTopLevelAction = 0x18,
76368 +       PrepareTransaction = 0x19,
76369 +       CommitTransaction = 0x1A,
76370 +       ForgetTransaction = 0x1B,
76371 +       OpenNonresidentAttribute = 0x1C,
76372 +       OpenAttributeTableDump = 0x1D,
76373 +       AttributeNamesDump = 0x1E,
76374 +       DirtyPageTableDump = 0x1F,
76375 +       TransactionTableDump = 0x20,
76376 +       UpdateRecordDataRoot = 0x21,
76377 +       UpdateRecordDataAllocation = 0x22,
76379 +       UpdateRelativeDataInIndex =
76380 +               0x23, // NtOfsRestartUpdateRelativeDataInIndex
76381 +       UpdateRelativeDataInIndex2 = 0x24,
76382 +       ZeroEndOfFileRecord = 0x25,
76383 +};
76385 +/*
76386 + * Bit array for log records which require a target attribute.
76387 + * A set bit means the corresponding log operation requires a target attribute.
76388 + */
76389 +static const u8 AttributeRequired[] = {
76390 +       0xFC, 0xFB, 0xFF, 0x10, 0x06,
76391 +};
76393 +static inline bool is_target_required(u16 op)
76394 +{
76395 +       bool ret = op <= UpdateRecordDataAllocation &&
76396 +                  (AttributeRequired[op >> 3] >> (op & 7) & 1);
76397 +       return ret;
76398 +}
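+/*
+ * Example: op == InitializeFileRecordSegment (0x02) indexes
+ * AttributeRequired[0] == 0xFC == 0b11111100, where bit 2 is set, so a
+ * target attribute is required; op == Noop (0x00) hits bit 0, which is
+ * clear.
+ */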
76400 +static inline bool can_skip_action(enum NTFS_LOG_OPERATION op)
76401 +{
76402 +       switch (op) {
76403 +       case Noop:
76404 +       case DeleteDirtyClusters:
76405 +       case HotFix:
76406 +       case EndTopLevelAction:
76407 +       case PrepareTransaction:
76408 +       case CommitTransaction:
76409 +       case ForgetTransaction:
76410 +       case CompensationLogRecord:
76411 +       case OpenNonresidentAttribute:
76412 +       case OpenAttributeTableDump:
76413 +       case AttributeNamesDump:
76414 +       case DirtyPageTableDump:
76415 +       case TransactionTableDump:
76416 +               return true;
76417 +       default:
76418 +               return false;
76419 +       }
76420 +}
76422 +enum { lcb_ctx_undo_next, lcb_ctx_prev, lcb_ctx_next };
76424 +/* bytes per restart table */
76425 +static inline u32 bytes_per_rt(const struct RESTART_TABLE *rt)
76426 +{
76427 +       return le16_to_cpu(rt->used) * le16_to_cpu(rt->size) +
76428 +              sizeof(struct RESTART_TABLE);
76429 +}
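+/*
+ * Example: a table with used == 5 entries of size == 0x28 bytes takes
+ * 5 * 0x28 + 0x18 == 0xe0 bytes including the header.
+ */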
76431 +/* log record length */
76432 +static inline u32 lrh_length(const struct LOG_REC_HDR *lr)
76433 +{
76434 +       u16 t16 = le16_to_cpu(lr->lcns_follow);
76436 +       return struct_size(lr, page_lcns, max_t(u16, 1, t16));
76437 +}
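+/*
+ * Example: at least one page_lcns slot is always reserved, so
+ * lcns_follow == 0 still gives 0x20 + 8 == 0x28 bytes, while
+ * lcns_follow == 3 gives 0x20 + 3 * 8 == 0x38 bytes.
+ */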
76439 +struct lcb {
76440 +       struct LFS_RECORD_HDR *lrh; // Log record header of the current lsn
76441 +       struct LOG_REC_HDR *log_rec;
76442 +       u32 ctx_mode; // lcb_ctx_undo_next/lcb_ctx_prev/lcb_ctx_next
76443 +       struct CLIENT_ID client;
76444 +       bool alloc; // if true then we should deallocate 'log_rec'
76445 +};
76447 +static void lcb_put(struct lcb *lcb)
76448 +{
76449 +       if (lcb->alloc)
76450 +               ntfs_free(lcb->log_rec);
76451 +       ntfs_free(lcb->lrh);
76452 +       ntfs_free(lcb);
76453 +}
76455 +/*
76456 + * oldest_client_lsn
76457 + *
76458 + * find the oldest lsn from active clients.
76459 + */
76460 +static inline void oldest_client_lsn(const struct CLIENT_REC *ca,
76461 +                                    __le16 next_client, u64 *oldest_lsn)
76462 +{
76463 +       while (next_client != LFS_NO_CLIENT_LE) {
76464 +               const struct CLIENT_REC *cr = ca + le16_to_cpu(next_client);
76465 +               u64 lsn = le64_to_cpu(cr->oldest_lsn);
76467 +               /* ignore this block if its oldest lsn is 0 */
76468 +               if (lsn && lsn < *oldest_lsn)
76469 +                       *oldest_lsn = lsn;
76471 +               next_client = cr->next_client;
76472 +       }
76473 +}
76475 +static inline bool is_rst_page_hdr_valid(u32 file_off,
76476 +                                        const struct RESTART_HDR *rhdr)
76477 +{
76478 +       u32 sys_page = le32_to_cpu(rhdr->sys_page_size);
76479 +       u32 page_size = le32_to_cpu(rhdr->page_size);
76480 +       u32 end_usa;
76481 +       u16 ro;
76483 +       if (sys_page < SECTOR_SIZE || page_size < SECTOR_SIZE ||
76484 +           sys_page & (sys_page - 1) || page_size & (page_size - 1)) {
76485 +               return false;
76486 +       }
76488 +       /* Check that if the file offset isn't 0, it is the system page size */
76489 +       if (file_off && file_off != sys_page)
76490 +               return false;
76492 +       /* Check support version 1.1+ */
76493 +       if (le16_to_cpu(rhdr->major_ver) <= 1 && !rhdr->minor_ver)
76494 +               return false;
76496 +       if (le16_to_cpu(rhdr->major_ver) > 2)
76497 +               return false;
76499 +       ro = le16_to_cpu(rhdr->ra_off);
76500 +       if (!IsQuadAligned(ro) || ro > sys_page)
76501 +               return false;
76503 +       end_usa = ((sys_page >> SECTOR_SHIFT) + 1) * sizeof(short);
76504 +       end_usa += le16_to_cpu(rhdr->rhdr.fix_off);
76506 +       if (ro < end_usa)
76507 +               return false;
76509 +       return true;
76510 +}
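+/*
+ * Example: with a 4096-byte system page the update sequence array ends
+ * (4096 / 512 + 1) * 2 == 18 bytes past rhdr.fix_off, so ra_off must
+ * point at or beyond that.
+ */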
76512 +static inline bool is_rst_area_valid(const struct RESTART_HDR *rhdr)
76513 +{
76514 +       const struct RESTART_AREA *ra;
76515 +       u16 cl, fl, ul;
76516 +       u32 off, l_size, file_dat_bits, file_size_round;
76517 +       u16 ro = le16_to_cpu(rhdr->ra_off);
76518 +       u32 sys_page = le32_to_cpu(rhdr->sys_page_size);
76520 +       if (ro + offsetof(struct RESTART_AREA, l_size) >
76521 +           SECTOR_SIZE - sizeof(short))
76522 +               return false;
76524 +       ra = Add2Ptr(rhdr, ro);
76525 +       cl = le16_to_cpu(ra->log_clients);
76527 +       if (cl > 1)
76528 +               return false;
76530 +       off = le16_to_cpu(ra->client_off);
76532 +       if (!IsQuadAligned(off) || ro + off > SECTOR_SIZE - sizeof(short))
76533 +               return false;
76535 +       off += cl * sizeof(struct CLIENT_REC);
76537 +       if (off > sys_page)
76538 +               return false;
76540 +       /*
76541 +        * Check the restart length field and whether the entire
76542 +        * restart area is contained within that length
76543 +        */
76544 +       if (le16_to_cpu(rhdr->ra_off) + le16_to_cpu(ra->ra_len) > sys_page ||
76545 +           off > le16_to_cpu(ra->ra_len)) {
76546 +               return false;
76547 +       }
76549 +       /*
76550 +        * As a final check make sure that the use list and the free list
76551 +        * are either empty or point to a valid client
76552 +        */
76553 +       fl = le16_to_cpu(ra->client_idx[0]);
76554 +       ul = le16_to_cpu(ra->client_idx[1]);
76555 +       if ((fl != LFS_NO_CLIENT && fl >= cl) ||
76556 +           (ul != LFS_NO_CLIENT && ul >= cl))
76557 +               return false;
76559 +       /* Make sure the sequence number bits match the log file size */
76560 +       l_size = le64_to_cpu(ra->l_size);
76562 +       file_dat_bits = sizeof(u64) * 8 - le32_to_cpu(ra->seq_num_bits);
76563 +       file_size_round = 1u << (file_dat_bits + 3);
76564 +       if (file_size_round != l_size &&
76565 +           (file_size_round < l_size || (file_size_round / 2) > l_size)) {
76566 +               return false;
76567 +       }
76569 +       /* The log page data offset and record header length must be quad-aligned */
76570 +       if (!IsQuadAligned(le16_to_cpu(ra->data_off)) ||
76571 +           !IsQuadAligned(le16_to_cpu(ra->rec_hdr_len)))
76572 +               return false;
76574 +       return true;
76575 +}
76577 +static inline bool is_client_area_valid(const struct RESTART_HDR *rhdr,
76578 +                                       bool usa_error)
76579 +{
76580 +       u16 ro = le16_to_cpu(rhdr->ra_off);
76581 +       const struct RESTART_AREA *ra = Add2Ptr(rhdr, ro);
76582 +       u16 ra_len = le16_to_cpu(ra->ra_len);
76583 +       const struct CLIENT_REC *ca;
76584 +       u32 i;
76586 +       if (usa_error && ra_len + ro > SECTOR_SIZE - sizeof(short))
76587 +               return false;
76589 +       /* Find the start of the client array */
76590 +       ca = Add2Ptr(ra, le16_to_cpu(ra->client_off));
76592 +       /*
76593 +        * Start with the free list
76594 +        * Check that all the clients are valid and that there isn't a cycle
76595 +        * Do the in-use list on the second pass
76596 +        */
76597 +       for (i = 0; i < 2; i++) {
76598 +               u16 client_idx = le16_to_cpu(ra->client_idx[i]);
76599 +               bool first_client = true;
76600 +               u16 clients = le16_to_cpu(ra->log_clients);
76602 +               while (client_idx != LFS_NO_CLIENT) {
76603 +                       const struct CLIENT_REC *cr;
76605 +                       if (!clients ||
76606 +                           client_idx >= le16_to_cpu(ra->log_clients))
76607 +                               return false;
76609 +                       clients -= 1;
76610 +                       cr = ca + client_idx;
76612 +                       client_idx = le16_to_cpu(cr->next_client);
76614 +                       if (first_client) {
76615 +                               first_client = false;
76616 +                               if (cr->prev_client != LFS_NO_CLIENT_LE)
76617 +                                       return false;
76618 +                       }
76619 +               }
76620 +       }
76622 +       return true;
76623 +}
76625 +/*
76626 + * remove_client
76627 + *
76628 + * remove a client record from a client record list in a restart area
76629 + */
76630 +static inline void remove_client(struct CLIENT_REC *ca,
76631 +                                const struct CLIENT_REC *cr, __le16 *head)
76632 +{
76633 +       if (cr->prev_client == LFS_NO_CLIENT_LE)
76634 +               *head = cr->next_client;
76635 +       else
76636 +               ca[le16_to_cpu(cr->prev_client)].next_client = cr->next_client;
76638 +       if (cr->next_client != LFS_NO_CLIENT_LE)
76639 +               ca[le16_to_cpu(cr->next_client)].prev_client = cr->prev_client;
76640 +}
76642 +/*
76643 + * add_client
76644 + *
76645 + * add a client record to the start of a list
76646 + */
76647 +static inline void add_client(struct CLIENT_REC *ca, u16 index, __le16 *head)
76648 +{
76649 +       struct CLIENT_REC *cr = ca + index;
76651 +       cr->prev_client = LFS_NO_CLIENT_LE;
76652 +       cr->next_client = *head;
76654 +       if (*head != LFS_NO_CLIENT_LE)
76655 +               ca[le16_to_cpu(*head)].prev_client = cpu_to_le16(index);
76657 +       *head = cpu_to_le16(index);
76658 +}
76660 +/*
76661 + * enum_rstbl
76662 + * returns the next allocated entry in a restart table, or NULL
76663 + */
76664 +static inline void *enum_rstbl(struct RESTART_TABLE *t, void *c)
76665 +{
76666 +       __le32 *e;
76667 +       u32 bprt;
76668 +       u16 rsize = t ? le16_to_cpu(t->size) : 0;
76670 +       if (!c) {
76671 +               if (!t || !t->total)
76672 +                       return NULL;
76673 +               e = Add2Ptr(t, sizeof(struct RESTART_TABLE));
76674 +       } else {
76675 +               e = Add2Ptr(c, rsize);
76676 +       }
76678 +       /* Loop until we hit the first one allocated, or the end of the list */
76679 +       for (bprt = bytes_per_rt(t); PtrOffset(t, e) < bprt;
76680 +            e = Add2Ptr(e, rsize)) {
76681 +               if (*e == RESTART_ENTRY_ALLOCATED_LE)
76682 +                       return e;
76683 +       }
76684 +       return NULL;
76685 +}
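+/*
+ * Typical usage (see find_dp() below): start with NULL and feed each
+ * return value back in to visit every allocated entry:
+ *
+ *     void *e = NULL;
+ *     while ((e = enum_rstbl(tbl, e)))
+ *             ...; // 'e' points at an allocated entry
+ */
76687 +/*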
76688 + * find_dp
76689 + *
76690 + * searches for a 'vcn' in the Dirty Page Table
76691 + */
76692 +static inline struct DIR_PAGE_ENTRY *find_dp(struct RESTART_TABLE *dptbl,
76693 +                                            u32 target_attr, u64 vcn)
76694 +{
76695 +       __le32 ta = cpu_to_le32(target_attr);
76696 +       struct DIR_PAGE_ENTRY *dp = NULL;
76698 +       while ((dp = enum_rstbl(dptbl, dp))) {
76699 +               u64 dp_vcn = le64_to_cpu(dp->vcn);
76701 +               if (dp->target_attr == ta && vcn >= dp_vcn &&
76702 +                   vcn < dp_vcn + le32_to_cpu(dp->lcns_follow)) {
76703 +                       return dp;
76704 +               }
76705 +       }
76706 +       return NULL;
76707 +}
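+/*
+ * Example: an entry with vcn == 0x100 and lcns_follow == 8 covers the
+ * half-open range [0x100, 0x108), so looking up vcn 0x107 matches it
+ * while 0x108 does not.
+ */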
76709 +static inline u32 norm_file_page(u32 page_size, u32 *l_size, bool use_default)
76710 +{
76711 +       if (use_default)
76712 +               page_size = DefaultLogPageSize;
76714 +       /* Round the file size down to a system page boundary */
76715 +       *l_size &= ~(page_size - 1);
76717 +       /* File should contain at least 2 restart pages and MinLogRecordPages pages */
76718 +       if (*l_size < (MinLogRecordPages + 2) * page_size)
76719 +               return 0;
76721 +       return page_size;
76722 +}
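+/*
+ * Example: with the default 4096-byte log page the file must keep at
+ * least (0x30 + 2) * 4096 == 0x32000 bytes after rounding down, else 0
+ * is returned.
+ */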
76724 +static bool check_log_rec(const struct LOG_REC_HDR *lr, u32 bytes, u32 tr,
76725 +                         u32 bytes_per_attr_entry)
76726 +{
76727 +       u16 t16;
76729 +       if (bytes < sizeof(struct LOG_REC_HDR))
76730 +               return false;
76731 +       if (!tr)
76732 +               return false;
76734 +       if ((tr - sizeof(struct RESTART_TABLE)) %
76735 +           sizeof(struct TRANSACTION_ENTRY))
76736 +               return false;
76738 +       if (le16_to_cpu(lr->redo_off) & 7)
76739 +               return false;
76741 +       if (le16_to_cpu(lr->undo_off) & 7)
76742 +               return false;
76744 +       if (lr->target_attr)
76745 +               goto check_lcns;
76747 +       if (is_target_required(le16_to_cpu(lr->redo_op)))
76748 +               return false;
76750 +       if (is_target_required(le16_to_cpu(lr->undo_op)))
76751 +               return false;
76753 +check_lcns:
76754 +       if (!lr->lcns_follow)
76755 +               goto check_length;
76757 +       t16 = le16_to_cpu(lr->target_attr);
76758 +       if ((t16 - sizeof(struct RESTART_TABLE)) % bytes_per_attr_entry)
76759 +               return false;
76761 +check_length:
76762 +       if (bytes < lrh_length(lr))
76763 +               return false;
76765 +       return true;
76766 +}
76768 +static bool check_rstbl(const struct RESTART_TABLE *rt, size_t bytes)
76769 +{
76770 +       u32 ts;
76771 +       u32 i, off;
76772 +       u16 rsize = le16_to_cpu(rt->size);
76773 +       u16 ne = le16_to_cpu(rt->used);
76774 +       u32 ff = le32_to_cpu(rt->first_free);
76775 +       u32 lf = le32_to_cpu(rt->last_free);
76777 +       ts = rsize * ne + sizeof(struct RESTART_TABLE);
76779 +       if (!rsize || rsize > bytes ||
76780 +           rsize + sizeof(struct RESTART_TABLE) > bytes || bytes < ts ||
76781 +           le16_to_cpu(rt->total) > ne || ff > ts || lf > ts ||
76782 +           (ff && ff < sizeof(struct RESTART_TABLE)) ||
76783 +           (lf && lf < sizeof(struct RESTART_TABLE))) {
76784 +               return false;
76785 +       }
76787 +       /* Verify each entry is either allocated or points
76788 +        * to a valid offset in the table
76789 +        */
76790 +       for (i = 0; i < ne; i++) {
76791 +               off = le32_to_cpu(*(__le32 *)Add2Ptr(
76792 +                       rt, i * rsize + sizeof(struct RESTART_TABLE)));
76794 +               if (off != RESTART_ENTRY_ALLOCATED && off &&
76795 +                   (off < sizeof(struct RESTART_TABLE) ||
76796 +                    ((off - sizeof(struct RESTART_TABLE)) % rsize))) {
76797 +                       return false;
76798 +               }
76799 +       }
76801 +       /* Walk through the list headed by the first entry to make
76802 +        * sure none of the entries are currently being used
76803 +        */
76804 +       for (off = ff; off;) {
76805 +               if (off == RESTART_ENTRY_ALLOCATED)
76806 +                       return false;
76808 +               off = le32_to_cpu(*(__le32 *)Add2Ptr(rt, off));
76809 +       }
76811 +       return true;
76812 +}
76814 +/*
76815 + * free_rsttbl_idx
76816 + *
76817 + * frees a previously allocated index in a Restart Table.
76818 + */
76819 +static inline void free_rsttbl_idx(struct RESTART_TABLE *rt, u32 off)
76820 +{
76821 +       __le32 *e;
76822 +       u32 lf = le32_to_cpu(rt->last_free);
76823 +       __le32 off_le = cpu_to_le32(off);
76825 +       e = Add2Ptr(rt, off);
76827 +       if (off < le32_to_cpu(rt->free_goal)) {
76828 +               *e = rt->first_free;
76829 +               rt->first_free = off_le;
76830 +               if (!lf)
76831 +                       rt->last_free = off_le;
76832 +       } else {
76833 +               if (lf)
76834 +                       *(__le32 *)Add2Ptr(rt, lf) = off_le;
76835 +               else
76836 +                       rt->first_free = off_le;
76838 +               rt->last_free = off_le;
76839 +               *e = 0;
76840 +       }
76842 +       le16_sub_cpu(&rt->total, 1);
76843 +}
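+/*
+ * Note: offsets below free_goal are pushed on the head of the free list
+ * so they are reused first; offsets at or above it go to the tail, which
+ * keeps the area past free_goal unused for as long as possible.
+ */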
76845 +static inline struct RESTART_TABLE *init_rsttbl(u16 esize, u16 used)
76846 +{
76847 +       __le32 *e, *last_free;
76848 +       u32 off;
76849 +       u32 bytes = esize * used + sizeof(struct RESTART_TABLE);
76850 +       u32 lf = sizeof(struct RESTART_TABLE) + (used - 1) * esize;
76851 +       struct RESTART_TABLE *t = ntfs_zalloc(bytes);
+
+       /* ntfs_zalloc() can fail */
+       if (!t)
+               return NULL;
76853 +       t->size = cpu_to_le16(esize);
76854 +       t->used = cpu_to_le16(used);
76855 +       t->free_goal = cpu_to_le32(~0u);
76856 +       t->first_free = cpu_to_le32(sizeof(struct RESTART_TABLE));
76857 +       t->last_free = cpu_to_le32(lf);
76859 +       e = (__le32 *)(t + 1);
76860 +       last_free = Add2Ptr(t, lf);
76862 +       for (off = sizeof(struct RESTART_TABLE) + esize; e < last_free;
76863 +            e = Add2Ptr(e, esize), off += esize) {
76864 +               *e = cpu_to_le32(off);
76865 +       }
76866 +       return t;
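
A worked example of how init_rsttbl() threads the free list, with all sizes assumed for illustration (24-byte header, esize = 8, used = 3; ntfs_zalloc() leaves the last entry's link zero):

#include <stdio.h>

int main(void)
{
	unsigned hdr = 24, esize = 8, used = 3; /* assumed sizes */
	unsigned lf = hdr + (used - 1) * esize; /* last_free = 40 */

	/* Each free entry stores the byte offset of the next free entry */
	for (unsigned off = hdr; off <= lf; off += esize)
		printf("entry@%u -> %u\n", off, off < lf ? off + esize : 0);
	printf("first_free=%u last_free=%u\n", hdr, lf);
	return 0;
}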
76869 +static inline struct RESTART_TABLE *extend_rsttbl(struct RESTART_TABLE *tbl,
76870 +                                                 u32 add, u32 free_goal)
76872 +       u16 esize = le16_to_cpu(tbl->size);
76873 +       __le32 osize = cpu_to_le32(bytes_per_rt(tbl));
76874 +       u32 used = le16_to_cpu(tbl->used);
76875 +       struct RESTART_TABLE *rt = init_rsttbl(esize, used + add);
76877 +       memcpy(rt + 1, tbl + 1, esize * used);
76879 +       rt->free_goal = free_goal == ~0u
76880 +                               ? cpu_to_le32(~0u)
76881 +                               : cpu_to_le32(sizeof(struct RESTART_TABLE) +
76882 +                                             free_goal * esize);
76884 +       if (tbl->first_free) {
76885 +               rt->first_free = tbl->first_free;
76886 +               *(__le32 *)Add2Ptr(rt, le32_to_cpu(tbl->last_free)) = osize;
76887 +       } else {
76888 +               rt->first_free = osize;
76889 +       }
76891 +       rt->total = tbl->total;
76893 +       ntfs_free(tbl);
76894 +       return rt;
76898 + * alloc_rsttbl_idx
76899 + *
76900 + * allocates an index from within a previously initialized Restart Table
76901 + */
76902 +static inline void *alloc_rsttbl_idx(struct RESTART_TABLE **tbl)
76904 +       u32 off;
76905 +       __le32 *e;
76906 +       struct RESTART_TABLE *t = *tbl;
76908 +       if (!t->first_free)
76909 +               *tbl = t = extend_rsttbl(t, 16, ~0u);
76911 +       off = le32_to_cpu(t->first_free);
76913 +       /* Dequeue this entry and zero it. */
76914 +       e = Add2Ptr(t, off);
76916 +       t->first_free = *e;
76918 +       memset(e, 0, le16_to_cpu(t->size));
76920 +       *e = RESTART_ENTRY_ALLOCATED_LE;
76922 +       /* If the list is going empty, then we fix last_free as well. */
76923 +       if (!t->first_free)
76924 +               t->last_free = 0;
76926 +       le16_add_cpu(&t->total, 1);
76928 +       return Add2Ptr(t, off);
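
A hedged usage sketch of the table helpers above (hypothetical caller, not from the patch; error checks omitted for brevity). PtrOffset() is assumed to be the companion macro to Add2Ptr() in this driver:

static void rsttbl_roundtrip_example(void)
{
	/* Eight 16-byte slots; alloc may replace the table if it extends */
	struct RESTART_TABLE *tbl = init_rsttbl(16, 8);
	void *slot = alloc_rsttbl_idx(&tbl);
	u32 off = PtrOffset(tbl, slot);

	/* ... fill the 16-byte entry at 'slot' ... */

	free_rsttbl_idx(tbl, off);
	ntfs_free(tbl);
}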
76932 + * alloc_rsttbl_from_idx
76933 + *
76934 + * allocates a specific index from within a previously initialized Restart Table
76935 + */
76936 +static inline void *alloc_rsttbl_from_idx(struct RESTART_TABLE **tbl, u32 vbo)
76938 +       u32 off;
76939 +       __le32 *e;
76940 +       struct RESTART_TABLE *rt = *tbl;
76941 +       u32 bytes = bytes_per_rt(rt);
76942 +       u16 esize = le16_to_cpu(rt->size);
76944 +       /* If the entry is not in the table, we will have to extend the table */
76945 +       if (vbo >= bytes) {
76946 +               /*
76947 +                * extend the size by computing the number of entries between
76948 +                * the existing size and the desired index and adding
76949 +                * 1 to that
76950 +                */
76951 +               u32 bytes2idx = vbo - bytes;
76953 +               /* There should always be an integral number of entries being added */
76954 +               /* Now extend the table */
76955 +               *tbl = rt = extend_rsttbl(rt, bytes2idx / esize + 1, bytes);
76956 +               if (!rt)
76957 +                       return NULL;
76958 +       }
76960 +       /* see if the entry is already allocated, and just return if it is. */
76961 +       e = Add2Ptr(rt, vbo);
76963 +       if (*e == RESTART_ENTRY_ALLOCATED_LE)
76964 +               return e;
76966 +       /*
76967 +        * Walk through the table, looking for the entry we're
76968 +        * interested in and the previous entry
76969 +        */
76970 +       off = le32_to_cpu(rt->first_free);
76971 +       e = Add2Ptr(rt, off);
76973 +       if (off == vbo) {
76974 +               /* this is a match */
76975 +               rt->first_free = *e;
76976 +               goto skip_looking;
76977 +       }
76979 +       /*
76980 +        * We need to walk through the list looking for the predecessor of our entry
76981 +        */
76982 +       for (;;) {
76983 +               /* Remember the entry just found */
76984 +               u32 last_off = off;
76985 +               __le32 *last_e = e;
76987 +               /* We should never run out of entries. */
76989 +               /* Look up the next entry in the list */
76990 +               off = le32_to_cpu(*last_e);
76991 +               e = Add2Ptr(rt, off);
76993 +               /* If this is our match we are done */
76994 +               if (off == vbo) {
76995 +                       *last_e = *e;
76997 +                       /* If this was the last entry, we update the table as well */
76998 +                       if (le32_to_cpu(rt->last_free) == off)
76999 +                               rt->last_free = cpu_to_le32(last_off);
77000 +                       break;
77001 +               }
77002 +       }
77004 +skip_looking:
77005 +       /* If the list is now empty, we fix the last_free as well */
77006 +       if (!rt->first_free)
77007 +               rt->last_free = 0;
77009 +       /* Zero this entry */
77010 +       memset(e, 0, esize);
77011 +       *e = RESTART_ENTRY_ALLOCATED_LE;
77013 +       le16_add_cpu(&rt->total, 1);
77015 +       return e;
77018 +#define RESTART_SINGLE_PAGE_IO cpu_to_le16(0x0001)
77020 +#define NTFSLOG_WRAPPED 0x00000001
77021 +#define NTFSLOG_MULTIPLE_PAGE_IO 0x00000002
77022 +#define NTFSLOG_NO_LAST_LSN 0x00000004
77023 +#define NTFSLOG_REUSE_TAIL 0x00000010
77024 +#define NTFSLOG_NO_OLDEST_LSN 0x00000020
77027 + * Helper struct to work with NTFS LogFile
77028 + */
77029 +struct ntfs_log {
77030 +       struct ntfs_inode *ni;
77032 +       u32 l_size;
77033 +       u32 sys_page_size;
77034 +       u32 sys_page_mask;
77035 +       u32 page_size;
77036 +       u32 page_mask; // page_size - 1
77037 +       u8 page_bits;
77038 +       struct RECORD_PAGE_HDR *one_page_buf;
77040 +       struct RESTART_TABLE *open_attr_tbl;
77041 +       u32 transaction_id;
77042 +       u32 clst_per_page;
77044 +       u32 first_page;
77045 +       u32 next_page;
77046 +       u32 ra_off;
77047 +       u32 data_off;
77048 +       u32 restart_size;
77049 +       u32 data_size;
77050 +       u16 record_header_len;
77051 +       u64 seq_num;
77052 +       u32 seq_num_bits;
77053 +       u32 file_data_bits;
77054 +       u32 seq_num_mask; /* (1 << file_data_bits) - 1 */
77056 +       struct RESTART_AREA *ra; /* in-memory image of the next restart area */
77057 +       u32 ra_size; /* the usable size of the restart area */
77059 +       /*
77060 +        * If true, then the in-memory restart area is to be written
77061 +        * to the first position on the disk
77062 +        */
77063 +       bool init_ra;
77064 +       bool set_dirty; /* true if we need to set dirty flag */
77066 +       u64 oldest_lsn;
77068 +       u32 oldest_lsn_off;
77069 +       u64 last_lsn;
77071 +       u32 total_avail;
77072 +       u32 total_avail_pages;
77073 +       u32 total_undo_commit;
77074 +       u32 max_current_avail;
77075 +       u32 current_avail;
77076 +       u32 reserved;
77078 +       short major_ver;
77079 +       short minor_ver;
77081 +       u32 l_flags; /* See NTFSLOG_XXX */
77082 +       u32 current_openlog_count; /* On-disk value for open_log_count */
77084 +       struct CLIENT_ID client_id;
77085 +       u32 client_undo_commit;
77088 +static inline u32 lsn_to_vbo(struct ntfs_log *log, const u64 lsn)
77090 +       u32 vbo = (lsn << log->seq_num_bits) >> (log->seq_num_bits - 3);
77092 +       return vbo;
77095 +/* compute the offset in the log file of the next log page */
77096 +static inline u32 next_page_off(struct ntfs_log *log, u32 off)
77098 +       off = (off & ~log->sys_page_mask) + log->page_size;
77099 +       return off >= log->l_size ? log->first_page : off;
77102 +static inline u32 lsn_to_page_off(struct ntfs_log *log, u64 lsn)
77104 +       return (((u32)lsn) << 3) & log->page_mask;
77107 +static inline u64 vbo_to_lsn(struct ntfs_log *log, u32 off, u64 Seq)
77109 +       return (off >> 3) + (Seq << log->file_data_bits);
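
The two conversions above are inverses: an lsn counts 8-byte quadwords, with the sequence number stored in the top seq_num_bits. A self-contained round-trip check, assuming a 32 MiB log (so file_data_bits = blksize_bits(32 MiB) - 3 = 22; the geometry is an assumption, not from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned file_data_bits = 22;               /* assumed geometry */
	unsigned seq_num_bits = 64 - file_data_bits;
	uint64_t seq = 5, lsn = (seq << file_data_bits) + 0x1230;

	/* lsn_to_vbo: drop the sequence bits, scale by 8 */
	uint32_t vbo = (uint32_t)((lsn << seq_num_bits) >> (seq_num_bits - 3));
	/* vbo_to_lsn: divide by 8, put the sequence bits back */
	uint64_t back = (vbo >> 3) + (seq << file_data_bits);

	printf("vbo=0x%x (0x1230 * 8)\n", vbo);     /* 0x9180 */
	printf("round trip ok: %d\n", back == lsn); /* 1 */
	return 0;
}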
77112 +static inline bool is_lsn_in_file(struct ntfs_log *log, u64 lsn)
77114 +       return lsn >= log->oldest_lsn &&
77115 +              lsn <= le64_to_cpu(log->ra->current_lsn);
77118 +static inline u32 hdr_file_off(struct ntfs_log *log,
77119 +                              struct RECORD_PAGE_HDR *hdr)
77121 +       if (log->major_ver < 2)
77122 +               return le64_to_cpu(hdr->rhdr.lsn);
77124 +       return le32_to_cpu(hdr->file_off);
77127 +static inline u64 base_lsn(struct ntfs_log *log,
77128 +                          const struct RECORD_PAGE_HDR *hdr, u64 lsn)
77130 +       u64 h_lsn = le64_to_cpu(hdr->rhdr.lsn);
77131 +       u64 ret = (((h_lsn >> log->file_data_bits) +
77132 +                   (lsn < (lsn_to_vbo(log, h_lsn) & ~log->page_mask) ? 1 : 0))
77133 +                  << log->file_data_bits) +
77134 +                 ((((is_log_record_end(hdr) &&
77135 +                     h_lsn <= le64_to_cpu(hdr->record_hdr.last_end_lsn))
77136 +                            ? le16_to_cpu(hdr->record_hdr.next_record_off)
77137 +                            : log->page_size) +
77138 +                   lsn) >>
77139 +                  3);
77141 +       return ret;
77144 +static inline bool verify_client_lsn(struct ntfs_log *log,
77145 +                                    const struct CLIENT_REC *client, u64 lsn)
77147 +       return lsn >= le64_to_cpu(client->oldest_lsn) &&
77148 +              lsn <= le64_to_cpu(log->ra->current_lsn) && lsn;
77151 +struct restart_info {
77152 +       u64 last_lsn;
77153 +       struct RESTART_HDR *r_page;
77154 +       u32 vbo;
77155 +       bool chkdsk_was_run;
77156 +       bool valid_page;
77157 +       bool initialized;
77158 +       bool restart;
77161 +static int read_log_page(struct ntfs_log *log, u32 vbo,
77162 +                        struct RECORD_PAGE_HDR **buffer, bool *usa_error)
77164 +       int err = 0;
77165 +       u32 page_idx = vbo >> log->page_bits;
77166 +       u32 page_off = vbo & log->page_mask;
77167 +       u32 bytes = log->page_size - page_off;
77168 +       void *to_free = NULL;
77169 +       u32 page_vbo = page_idx << log->page_bits;
77170 +       struct RECORD_PAGE_HDR *page_buf;
77171 +       struct ntfs_inode *ni = log->ni;
77172 +       bool bBAAD;
77174 +       if (vbo >= log->l_size)
77175 +               return -EINVAL;
77177 +       if (!*buffer) {
77178 +               to_free = ntfs_malloc(bytes);
77179 +               if (!to_free)
77180 +                       return -ENOMEM;
77181 +               *buffer = to_free;
77182 +       }
77184 +       page_buf = page_off ? log->one_page_buf : *buffer;
77186 +       err = ntfs_read_run_nb(ni->mi.sbi, &ni->file.run, page_vbo, page_buf,
77187 +                              log->page_size, NULL);
77188 +       if (err)
77189 +               goto out;
77191 +       if (page_buf->rhdr.sign != NTFS_FFFF_SIGNATURE)
77192 +               ntfs_fix_post_read(&page_buf->rhdr, PAGE_SIZE, false);
77194 +       if (page_buf != *buffer)
77195 +               memcpy(*buffer, Add2Ptr(page_buf, page_off), bytes);
77197 +       bBAAD = page_buf->rhdr.sign == NTFS_BAAD_SIGNATURE;
77199 +       if (usa_error)
77200 +               *usa_error = bBAAD;
77201 +       /* Check that the update sequence array for this page is valid */
77202 +       /* If we don't allow errors, raise an error status */
77203 +       else if (bBAAD)
77204 +               err = -EINVAL;
77206 +out:
77207 +       if (err && to_free) {
77208 +               ntfs_free(to_free);
77209 +               *buffer = NULL;
77210 +       }
77212 +       return err;
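
A hedged caller sketch for read_log_page() (hypothetical helper, not from the patch): *buffer is allocated on demand and freed again on error, so a NULL-initialized pointer plus ntfs_free() covers every path:

static int peek_page_lsn(struct ntfs_log *log, u32 vbo, u64 *lsn)
{
	struct RECORD_PAGE_HDR *page = NULL;
	bool usa_error;
	int err = read_log_page(log, vbo, &page, &usa_error);

	if (!err && !usa_error)
		*lsn = le64_to_cpu(page->rhdr.lsn);

	ntfs_free(page); /* NULL-safe, matching usage elsewhere in this file */
	return err;
}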
77216 + * log_read_rst
77217 + *
77218 + * It walks through 512 blocks of the file looking for a valid restart page header.
77219 + * It stops the first time it finds a valid page header.
77220 + */
77221 +static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
77222 +                       struct restart_info *info)
77224 +       u32 skip, vbo;
77225 +       struct RESTART_HDR *r_page = ntfs_malloc(DefaultLogPageSize);
77227 +       if (!r_page)
77228 +               return -ENOMEM;
77230 +       memset(info, 0, sizeof(struct restart_info));
77232 +       /* Determine which restart area we are looking for */
77233 +       if (first) {
77234 +               vbo = 0;
77235 +               skip = 512;
77236 +       } else {
77237 +               vbo = 512;
77238 +               skip = 0;
77239 +       }
77241 +       /* loop continuously until we succeed */
77242 +       for (; vbo < l_size; vbo = 2 * vbo + skip, skip = 0) {
77243 +               bool usa_error;
77244 +               u32 sys_page_size;
77245 +               bool brst, bchk;
77246 +               struct RESTART_AREA *ra;
77248 +               /* Read a page header at the current offset */
77249 +               if (read_log_page(log, vbo, (struct RECORD_PAGE_HDR **)&r_page,
77250 +                                 &usa_error)) {
77251 +                       /* ignore any errors */
77252 +                       continue;
77253 +               }
77255 +               /* exit if the signature is a log record page */
77256 +               if (r_page->rhdr.sign == NTFS_RCRD_SIGNATURE) {
77257 +                       info->initialized = true;
77258 +                       break;
77259 +               }
77261 +               brst = r_page->rhdr.sign == NTFS_RSTR_SIGNATURE;
77262 +               bchk = r_page->rhdr.sign == NTFS_CHKD_SIGNATURE;
77264 +               if (!bchk && !brst) {
77265 +                       if (r_page->rhdr.sign != NTFS_FFFF_SIGNATURE) {
77266 +                               /*
77267 +                                * Remember if the signature does not
77268 +                                * indicate an uninitialized file
77269 +                                */
77270 +                               info->initialized = true;
77271 +                       }
77272 +                       continue;
77273 +               }
77275 +               ra = NULL;
77276 +               info->valid_page = false;
77277 +               info->initialized = true;
77278 +               info->vbo = vbo;
77280 +               /* Let's check the restart area if this is a valid page */
77281 +               if (!is_rst_page_hdr_valid(vbo, r_page))
77282 +                       goto check_result;
77283 +               ra = Add2Ptr(r_page, le16_to_cpu(r_page->ra_off));
77285 +               if (!is_rst_area_valid(r_page))
77286 +                       goto check_result;
77288 +               /*
77289 +                * We have a valid restart page header and restart area.
77290 +                * If chkdsk was run or we have no clients then we have
77291 +                * no more checking to do
77292 +                */
77293 +               if (bchk || ra->client_idx[1] == LFS_NO_CLIENT_LE) {
77294 +                       info->valid_page = true;
77295 +                       goto check_result;
77296 +               }
77298 +               /* Read the entire restart area */
77299 +               sys_page_size = le32_to_cpu(r_page->sys_page_size);
77300 +               if (DefaultLogPageSize != sys_page_size) {
77301 +                       ntfs_free(r_page);
77302 +                       r_page = ntfs_zalloc(sys_page_size);
77303 +                       if (!r_page)
77304 +                               return -ENOMEM;
77306 +                       if (read_log_page(log, vbo,
77307 +                                         (struct RECORD_PAGE_HDR **)&r_page,
77308 +                                         &usa_error)) {
77309 +                               /* ignore any errors */
77310 +                               ntfs_free(r_page);
77311 +                               r_page = NULL;
77312 +                               continue;
77313 +                       }
77314 +               }
77316 +               if (is_client_area_valid(r_page, usa_error)) {
77317 +                       info->valid_page = true;
77318 +                       ra = Add2Ptr(r_page, le16_to_cpu(r_page->ra_off));
77319 +               }
77321 +check_result:
77322 +               /* If chkdsk was run then update the caller's values and return */
77323 +               if (r_page->rhdr.sign == NTFS_CHKD_SIGNATURE) {
77324 +                       info->chkdsk_was_run = true;
77325 +                       info->last_lsn = le64_to_cpu(r_page->rhdr.lsn);
77326 +                       info->restart = true;
77327 +                       info->r_page = r_page;
77328 +                       return 0;
77329 +               }
77331 +               /* If we have a valid page then copy the values we need from it */
77332 +               if (info->valid_page) {
77333 +                       info->last_lsn = le64_to_cpu(ra->current_lsn);
77334 +                       info->restart = true;
77335 +                       info->r_page = r_page;
77336 +                       return 0;
77337 +               }
77338 +       }
77340 +       ntfs_free(r_page);
77342 +       return 0;
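
The probing loop above doubles vbo on each pass (vbo = 2 * vbo + skip), so only a handful of offsets are ever tried. A standalone sketch of the offsets visited, assuming a hypothetical 64 KiB log:

#include <stdio.h>

int main(void)
{
	unsigned l_size = 0x10000; /* assumed log size */

	for (int first = 1; first >= 0; first--) {
		unsigned vbo = first ? 0 : 512;
		unsigned skip = first ? 512 : 0;

		printf("first=%d:", first);
		for (; vbo < l_size; vbo = 2 * vbo + skip, skip = 0)
			printf(" 0x%x", vbo);
		printf("\n");
	}
	return 0;
}
/* first=1: 0x0 0x200 0x400 0x800 0x1000 0x2000 0x4000 0x8000
 * first=0: 0x200 0x400 0x800 0x1000 0x2000 0x4000 0x8000
 */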
77346 + * log_init_pg_hdr
77347 + *
77348 + * init "log" from restart page header
77349 + */
77350 +static void log_init_pg_hdr(struct ntfs_log *log, u32 sys_page_size,
77351 +                           u32 page_size, u16 major_ver, u16 minor_ver)
77353 +       log->sys_page_size = sys_page_size;
77354 +       log->sys_page_mask = sys_page_size - 1;
77355 +       log->page_size = page_size;
77356 +       log->page_mask = page_size - 1;
77357 +       log->page_bits = blksize_bits(page_size);
77359 +       log->clst_per_page = log->page_size >> log->ni->mi.sbi->cluster_bits;
77360 +       if (!log->clst_per_page)
77361 +               log->clst_per_page = 1;
77363 +       log->first_page = major_ver >= 2
77364 +                                 ? 0x22 * page_size
77365 +                                 : ((sys_page_size << 1) + (page_size << 1));
77366 +       log->major_ver = major_ver;
77367 +       log->minor_ver = minor_ver;
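
With the common 4 KiB page sizes, the first_page computation above yields the usual layout constants. A quick arithmetic sketch (both page sizes assumed):

#include <stdio.h>

int main(void)
{
	unsigned sys_page = 0x1000, page = 0x1000; /* assumed 4 KiB pages */

	/* v2.x: data starts after 0x22 pages of restart/tail area */
	printf("v2 first_page = 0x%x\n", 0x22 * page);                   /* 0x22000 */
	/* v1.x: two restart pages plus two tail-copy pages */
	printf("v1 first_page = 0x%x\n", (sys_page << 1) + (page << 1)); /* 0x4000 */
	return 0;
}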
77371 + * log_create
77372 + *
77373 + * init "log" in cases when we don't have a restart area to use
77374 + */
77375 +static void log_create(struct ntfs_log *log, u32 l_size, const u64 last_lsn,
77376 +                      u32 open_log_count, bool wrapped, bool use_multi_page)
77378 +       log->l_size = l_size;
77379 +       /* All file offsets must be quadword aligned */
77380 +       log->file_data_bits = blksize_bits(l_size) - 3;
77381 +       log->seq_num_mask = (8 << log->file_data_bits) - 1;
77382 +       log->seq_num_bits = sizeof(u64) * 8 - log->file_data_bits;
77383 +       log->seq_num = (last_lsn >> log->file_data_bits) + 2;
77384 +       log->next_page = log->first_page;
77385 +       log->oldest_lsn = log->seq_num << log->file_data_bits;
77386 +       log->oldest_lsn_off = 0;
77387 +       log->last_lsn = log->oldest_lsn;
77389 +       log->l_flags |= NTFSLOG_NO_LAST_LSN | NTFSLOG_NO_OLDEST_LSN;
77391 +       /* Set the correct flags for the I/O and indicate if we have wrapped */
77392 +       if (wrapped)
77393 +               log->l_flags |= NTFSLOG_WRAPPED;
77395 +       if (use_multi_page)
77396 +               log->l_flags |= NTFSLOG_MULTIPLE_PAGE_IO;
77398 +       /* Compute the log page values */
77399 +       log->data_off = QuadAlign(
77400 +               offsetof(struct RECORD_PAGE_HDR, fixups) +
77401 +               sizeof(short) * ((log->page_size >> SECTOR_SHIFT) + 1));
77402 +       log->data_size = log->page_size - log->data_off;
77403 +       log->record_header_len = sizeof(struct LFS_RECORD_HDR);
77405 +       /* Remember the different page sizes for reservation */
77406 +       log->reserved = log->data_size - log->record_header_len;
77408 +       /* Compute the restart page values. */
77409 +       log->ra_off = QuadAlign(
77410 +               offsetof(struct RESTART_HDR, fixups) +
77411 +               sizeof(short) * ((log->sys_page_size >> SECTOR_SHIFT) + 1));
77412 +       log->restart_size = log->sys_page_size - log->ra_off;
77413 +       log->ra_size = struct_size(log->ra, clients, 1);
77414 +       log->current_openlog_count = open_log_count;
77416 +       /*
77417 +        * The total available log file space is the number of
77418 +        * log file pages times the space available on each page
77419 +        */
77420 +       log->total_avail_pages = log->l_size - log->first_page;
77421 +       log->total_avail = log->total_avail_pages >> log->page_bits;
77423 +       /*
77424 +        * We assume that we can't use the end of the page if less than
77425 +        * the file record size remains.
77426 +        * Then we won't need to reserve more than the caller asks for.
77427 +        */
77428 +       log->max_current_avail = log->total_avail * log->reserved;
77429 +       log->total_avail = log->total_avail * log->data_size;
77430 +       log->current_avail = log->max_current_avail;
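
A worked example of the space accounting above, with every number assumed for illustration (4 MiB log, 4 KiB pages, 0xfd8 data bytes per page, 0x30-byte record header):

#include <stdio.h>

int main(void)
{
	unsigned l_size = 0x400000, first_page = 0x4000; /* assumed */
	unsigned page_bits = 12, data_size = 0xfd8, rec_hdr = 0x30;

	unsigned pages = (l_size - first_page) >> page_bits; /* 1020 */

	printf("total_avail       = %u bytes\n", pages * data_size);
	printf("max_current_avail = %u bytes\n", pages * (data_size - rec_hdr));
	return 0;
}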
77434 + * log_create_ra
77435 + *
77436 + * This routine is called to fill a restart area from the values stored in 'log'
77437 + */
77438 +static struct RESTART_AREA *log_create_ra(struct ntfs_log *log)
77440 +       struct CLIENT_REC *cr;
77441 +       struct RESTART_AREA *ra = ntfs_zalloc(log->restart_size);
77443 +       if (!ra)
77444 +               return NULL;
77446 +       ra->current_lsn = cpu_to_le64(log->last_lsn);
77447 +       ra->log_clients = cpu_to_le16(1);
77448 +       ra->client_idx[1] = LFS_NO_CLIENT_LE;
77449 +       if (log->l_flags & NTFSLOG_MULTIPLE_PAGE_IO)
77450 +               ra->flags = RESTART_SINGLE_PAGE_IO;
77451 +       ra->seq_num_bits = cpu_to_le32(log->seq_num_bits);
77452 +       ra->ra_len = cpu_to_le16(log->ra_size);
77453 +       ra->client_off = cpu_to_le16(offsetof(struct RESTART_AREA, clients));
77454 +       ra->l_size = cpu_to_le64(log->l_size);
77455 +       ra->rec_hdr_len = cpu_to_le16(log->record_header_len);
77456 +       ra->data_off = cpu_to_le16(log->data_off);
77457 +       ra->open_log_count = cpu_to_le32(log->current_openlog_count + 1);
77459 +       cr = ra->clients;
77461 +       cr->prev_client = LFS_NO_CLIENT_LE;
77462 +       cr->next_client = LFS_NO_CLIENT_LE;
77464 +       return ra;
77467 +static u32 final_log_off(struct ntfs_log *log, u64 lsn, u32 data_len)
77469 +       u32 base_vbo = lsn << 3;
77470 +       u32 final_log_off = (base_vbo & log->seq_num_mask) & ~log->page_mask;
77471 +       u32 page_off = base_vbo & log->page_mask;
77472 +       u32 tail = log->page_size - page_off;
77474 +       page_off -= 1;
77476 +       /* Add the length of the header */
77477 +       data_len += log->record_header_len;
77479 +       /*
77480 +        * If this lsn is contained in this log page, we are done.
77481 +        * Otherwise we need to walk through several log pages
77482 +        */
77483 +       if (data_len > tail) {
77484 +               data_len -= tail;
77485 +               tail = log->data_size;
77486 +               page_off = log->data_off - 1;
77488 +               for (;;) {
77489 +                       final_log_off = next_page_off(log, final_log_off);
77491 +                       /* We are done if the remaining bytes fit on this page */
77492 +                       if (data_len <= tail)
77493 +                               break;
77494 +                       data_len -= tail;
77495 +               }
77496 +       }
77498 +       /*
77499 +        * We add the remaining bytes to our starting position on this page
77500 +        * and then add that value to the file offset of this log page
77501 +        */
77502 +       return final_log_off + data_len + page_off;
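
A standalone sketch of the page walk inside final_log_off(), with all sizes assumed: the starting page only has 'tail' bytes left, and every later page holds data_size bytes. (Only the walk is shown; the function itself also folds the result back into a file offset.)

#include <stdio.h>

int main(void)
{
	/* Assumed: 0x200 bytes left on the first page, 0xfa8 data bytes
	 * per page, 0x1830 bytes of client data plus a 0x30-byte header.
	 */
	unsigned tail = 0x200, data_size = 0xfa8;
	unsigned data_len = 0x1830 + 0x30, pages = 0;

	if (data_len > tail) {
		data_len -= tail;
		tail = data_size;
		for (;;) {
			pages++;
			if (data_len <= tail)
				break;
			data_len -= tail;
		}
	}
	printf("record ends %u page(s) later, 0x%x bytes into the data\n",
	       pages, data_len);
	return 0;
}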
77505 +static int next_log_lsn(struct ntfs_log *log, const struct LFS_RECORD_HDR *rh,
77506 +                       u64 *lsn)
77508 +       int err;
77509 +       u64 this_lsn = le64_to_cpu(rh->this_lsn);
77510 +       u32 vbo = lsn_to_vbo(log, this_lsn);
77511 +       u32 end =
77512 +               final_log_off(log, this_lsn, le32_to_cpu(rh->client_data_len));
77513 +       u32 hdr_off = end & ~log->sys_page_mask;
77514 +       u64 seq = this_lsn >> log->file_data_bits;
77515 +       struct RECORD_PAGE_HDR *page = NULL;
77517 +       /* Remember if we wrapped */
77518 +       if (end <= vbo)
77519 +               seq += 1;
77521 +       /* Read the log page header for this page */
77522 +       err = read_log_page(log, hdr_off, &page, NULL);
77523 +       if (err)
77524 +               return err;
77526 +       /*
77527 +        * If the lsn we were given was not the last lsn on this page,
77528 +        * then the starting offset for the next lsn is on a quad word
77529 +        * boundary following the last file offset for the current lsn
77530 +        * Otherwise the file offset is the start of the data on the next page
77531 +        */
77532 +       if (this_lsn == le64_to_cpu(page->rhdr.lsn)) {
77533 +               /* If we wrapped, we need to increment the sequence number */
77534 +               hdr_off = next_page_off(log, hdr_off);
77535 +               if (hdr_off == log->first_page)
77536 +                       seq += 1;
77538 +               vbo = hdr_off + log->data_off;
77539 +       } else {
77540 +               vbo = QuadAlign(end);
77541 +       }
77543 +       /* Compute the lsn based on the file offset and the sequence count */
77544 +       *lsn = vbo_to_lsn(log, vbo, seq);
77546 +       /*
77547 +        * If this lsn is within the legal range for the file, we keep it.
77548 +        * Otherwise zero indicates that there are no more lsns.
77549 +        */
77550 +       if (!is_lsn_in_file(log, *lsn))
77551 +               *lsn = 0;
77553 +       ntfs_free(page);
77555 +       return 0;
77559 + * current_log_avail
77560 + *
77561 + * calculate the number of bytes available for log records
77562 + */
77563 +static u32 current_log_avail(struct ntfs_log *log)
77565 +       u32 oldest_off, next_free_off, free_bytes;
77567 +       if (log->l_flags & NTFSLOG_NO_LAST_LSN) {
77568 +               /* The entire file is available */
77569 +               return log->max_current_avail;
77570 +       }
77572 +       /*
77573 +        * If there is a last lsn in the restart area, then we know that we will
77574 +        * have to compute the free range
77575 +        * If there is no oldest lsn then start at the first page of the file
77576 +        */
77577 +       oldest_off = (log->l_flags & NTFSLOG_NO_OLDEST_LSN)
77578 +                            ? log->first_page
77579 +                            : (log->oldest_lsn_off & ~log->sys_page_mask);
77581 +       /*
77582 +        * We will use the next log page offset to compute the next free page.
77583 +        * If we are going to reuse this page go to the next page
77584 +        * If we are at the first page then use the end of the file
77585 +        */
77586 +       next_free_off = (log->l_flags & NTFSLOG_REUSE_TAIL)
77587 +                               ? log->next_page + log->page_size
77588 +                       : log->next_page == log->first_page ? log->l_size
77589 +                                                           : log->next_page;
77591 +       /* If the two offsets are the same then there is no available space */
77592 +       if (oldest_off == next_free_off)
77593 +               return 0;
77594 +       /*
77595 +        * If the free offset follows the oldest offset then subtract
77596 +        * this range from the total available pages
77597 +        */
77598 +       free_bytes =
77599 +               oldest_off < next_free_off
77600 +                       ? log->total_avail_pages - (next_free_off - oldest_off)
77601 +                       : oldest_off - next_free_off;
77603 +       free_bytes >>= log->page_bits;
77604 +       return free_bytes * log->reserved;
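
The free-range computation above reduces to simple interval arithmetic. A sketch with assumed offsets (oldest data at 0x6000, next free page at 0x9000, 1020 data pages of 4 KiB, 0xfa8 reservable bytes per page):

#include <stdio.h>

int main(void)
{
	unsigned total_avail_pages = 1020 << 12; /* bytes, assumed */
	unsigned page_bits = 12, reserved = 0xfa8;
	unsigned oldest_off = 0x6000, next_free_off = 0x9000;

	/* Used range is [oldest_off, next_free_off); the rest is free */
	unsigned free_bytes = oldest_off < next_free_off
		? total_avail_pages - (next_free_off - oldest_off)
		: oldest_off - next_free_off;

	printf("avail = %u bytes\n", (free_bytes >> page_bits) * reserved);
	return 0;
}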
77607 +static bool check_subseq_log_page(struct ntfs_log *log,
77608 +                                 const struct RECORD_PAGE_HDR *rp, u32 vbo,
77609 +                                 u64 seq)
77611 +       u64 lsn_seq;
77612 +       const struct NTFS_RECORD_HEADER *rhdr = &rp->rhdr;
77613 +       u64 lsn = le64_to_cpu(rhdr->lsn);
77615 +       if (rhdr->sign == NTFS_FFFF_SIGNATURE || !rhdr->sign)
77616 +               return false;
77618 +       /*
77619 +        * If the last lsn on the page was written after the page
77620 +        * that caused the original error then we have a fatal error
77621 +        */
77622 +       lsn_seq = lsn >> log->file_data_bits;
77624 +       /*
77625 +        * If the sequence number for the lsn on the page is equal to or greater
77626 +        * than the lsn we expect, then this is a subsequent write
77627 +        */
77628 +       return lsn_seq >= seq ||
77629 +              (lsn_seq == seq - 1 && log->first_page == vbo &&
77630 +               vbo != (lsn_to_vbo(log, lsn) & ~log->page_mask));
77634 + * last_log_lsn
77635 + *
77636 + * This routine walks through the log pages for a file, searching for the
77637 + * last log page written to the file
77638 + */
77639 +static int last_log_lsn(struct ntfs_log *log)
77641 +       int err;
77642 +       bool usa_error = false;
77643 +       bool replace_page = false;
77644 +       bool reuse_page = log->l_flags & NTFSLOG_REUSE_TAIL;
77645 +       bool wrapped_file, wrapped;
77647 +       u32 page_cnt = 1, page_pos = 1;
77648 +       u32 page_off = 0, page_off1 = 0, saved_off = 0;
77649 +       u32 final_off, second_off, final_off_prev = 0, second_off_prev = 0;
77650 +       u32 first_file_off = 0, second_file_off = 0;
77651 +       u32 part_io_count = 0;
77652 +       u32 tails = 0;
77653 +       u32 this_off, curpage_off, nextpage_off, remain_pages;
77655 +       u64 expected_seq, seq_base = 0, lsn_base = 0;
77656 +       u64 best_lsn, best_lsn1, best_lsn2;
77657 +       u64 lsn_cur, lsn1, lsn2;
77658 +       u64 last_ok_lsn = reuse_page ? log->last_lsn : 0;
77660 +       u16 cur_pos, best_page_pos;
77662 +       struct RECORD_PAGE_HDR *page = NULL;
77663 +       struct RECORD_PAGE_HDR *tst_page = NULL;
77664 +       struct RECORD_PAGE_HDR *first_tail = NULL;
77665 +       struct RECORD_PAGE_HDR *second_tail = NULL;
77666 +       struct RECORD_PAGE_HDR *tail_page = NULL;
77667 +       struct RECORD_PAGE_HDR *second_tail_prev = NULL;
77668 +       struct RECORD_PAGE_HDR *first_tail_prev = NULL;
77669 +       struct RECORD_PAGE_HDR *page_bufs = NULL;
77670 +       struct RECORD_PAGE_HDR *best_page;
77672 +       if (log->major_ver >= 2) {
77673 +               final_off = 0x02 * log->page_size;
77674 +               second_off = 0x12 * log->page_size;
77676 +               // 0x10 == 0x12 - 0x2
77677 +               page_bufs = ntfs_malloc(log->page_size * 0x10);
77678 +               if (!page_bufs)
77679 +                       return -ENOMEM;
77680 +       } else {
77681 +               second_off = log->first_page - log->page_size;
77682 +               final_off = second_off - log->page_size;
77683 +       }
77685 +next_tail:
77686 +       /* Read second tail page (at pos 3/0x12000) */
77687 +       if (read_log_page(log, second_off, &second_tail, &usa_error) ||
77688 +           usa_error || second_tail->rhdr.sign != NTFS_RCRD_SIGNATURE) {
77689 +               ntfs_free(second_tail);
77690 +               second_tail = NULL;
77691 +               second_file_off = 0;
77692 +               lsn2 = 0;
77693 +       } else {
77694 +               second_file_off = hdr_file_off(log, second_tail);
77695 +               lsn2 = le64_to_cpu(second_tail->record_hdr.last_end_lsn);
77696 +       }
77698 +       /* Read first tail page (at pos 2/0x2000) */
77699 +       if (read_log_page(log, final_off, &first_tail, &usa_error) ||
77700 +           usa_error || first_tail->rhdr.sign != NTFS_RCRD_SIGNATURE) {
77701 +               ntfs_free(first_tail);
77702 +               first_tail = NULL;
77703 +               first_file_off = 0;
77704 +               lsn1 = 0;
77705 +       } else {
77706 +               first_file_off = hdr_file_off(log, first_tail);
77707 +               lsn1 = le64_to_cpu(first_tail->record_hdr.last_end_lsn);
77708 +       }
77710 +       if (log->major_ver < 2) {
77711 +               int best_page;
77713 +               first_tail_prev = first_tail;
77714 +               final_off_prev = first_file_off;
77715 +               second_tail_prev = second_tail;
77716 +               second_off_prev = second_file_off;
77717 +               tails = 1;
77719 +               if (!first_tail && !second_tail)
77720 +                       goto tail_read;
77722 +               if (first_tail && second_tail)
77723 +                       best_page = lsn1 < lsn2 ? 1 : 0;
77724 +               else if (first_tail)
77725 +                       best_page = 0;
77726 +               else
77727 +                       best_page = 1;
77729 +               page_off = best_page ? second_file_off : first_file_off;
77730 +               seq_base = (best_page ? lsn2 : lsn1) >> log->file_data_bits;
77731 +               goto tail_read;
77732 +       }
77734 +       best_lsn1 = first_tail ? base_lsn(log, first_tail, first_file_off) : 0;
77735 +       best_lsn2 =
77736 +               second_tail ? base_lsn(log, second_tail, second_file_off) : 0;
77738 +       if (first_tail && second_tail) {
77739 +               if (best_lsn1 > best_lsn2) {
77740 +                       best_lsn = best_lsn1;
77741 +                       best_page = first_tail;
77742 +                       this_off = first_file_off;
77743 +               } else {
77744 +                       best_lsn = best_lsn2;
77745 +                       best_page = second_tail;
77746 +                       this_off = second_file_off;
77747 +               }
77748 +       } else if (first_tail) {
77749 +               best_lsn = best_lsn1;
77750 +               best_page = first_tail;
77751 +               this_off = first_file_off;
77752 +       } else if (second_tail) {
77753 +               best_lsn = best_lsn2;
77754 +               best_page = second_tail;
77755 +               this_off = second_file_off;
77756 +       } else {
77757 +               goto tail_read;
77758 +       }
77760 +       best_page_pos = le16_to_cpu(best_page->page_pos);
77762 +       if (!tails) {
77763 +               if (best_page_pos == page_pos) {
77764 +                       seq_base = best_lsn >> log->file_data_bits;
77765 +                       saved_off = page_off = le32_to_cpu(best_page->file_off);
77766 +                       lsn_base = best_lsn;
77768 +                       memmove(page_bufs, best_page, log->page_size);
77770 +                       page_cnt = le16_to_cpu(best_page->page_count);
77771 +                       if (page_cnt > 1)
77772 +                               page_pos += 1;
77774 +                       tails = 1;
77775 +               }
77776 +       } else if (seq_base == (best_lsn >> log->file_data_bits) &&
77777 +                  saved_off + log->page_size == this_off &&
77778 +                  lsn_base < best_lsn &&
77779 +                  (page_pos != page_cnt || best_page_pos == page_pos ||
77780 +                   best_page_pos == 1) &&
77781 +                  (page_pos >= page_cnt || best_page_pos == page_pos)) {
77782 +               u16 bppc = le16_to_cpu(best_page->page_count);
77784 +               saved_off += log->page_size;
77785 +               lsn_base = best_lsn;
77787 +               memmove(Add2Ptr(page_bufs, tails * log->page_size), best_page,
77788 +                       log->page_size);
77790 +               tails += 1;
77792 +               if (best_page_pos != bppc) {
77793 +                       page_cnt = bppc;
77794 +                       page_pos = best_page_pos;
77796 +                       if (page_cnt > 1)
77797 +                               page_pos += 1;
77798 +               } else {
77799 +                       page_pos = page_cnt = 1;
77800 +               }
77801 +       } else {
77802 +               ntfs_free(first_tail);
77803 +               ntfs_free(second_tail);
77804 +               goto tail_read;
77805 +       }
77807 +       ntfs_free(first_tail_prev);
77808 +       first_tail_prev = first_tail;
77809 +       final_off_prev = first_file_off;
77810 +       first_tail = NULL;
77812 +       ntfs_free(second_tail_prev);
77813 +       second_tail_prev = second_tail;
77814 +       second_off_prev = second_file_off;
77815 +       second_tail = NULL;
77817 +       final_off += log->page_size;
77818 +       second_off += log->page_size;
77820 +       if (tails < 0x10)
77821 +               goto next_tail;
77822 +tail_read:
77823 +       first_tail = first_tail_prev;
77824 +       final_off = final_off_prev;
77826 +       second_tail = second_tail_prev;
77827 +       second_off = second_off_prev;
77829 +       page_cnt = page_pos = 1;
77831 +       curpage_off = seq_base == log->seq_num ? min(log->next_page, page_off)
77832 +                                              : log->next_page;
77834 +       wrapped_file =
77835 +               curpage_off == log->first_page &&
77836 +               !(log->l_flags & (NTFSLOG_NO_LAST_LSN | NTFSLOG_REUSE_TAIL));
77838 +       expected_seq = wrapped_file ? (log->seq_num + 1) : log->seq_num;
77840 +       nextpage_off = curpage_off;
77842 +next_page:
77843 +       tail_page = NULL;
77844 +       /* Read the next log page */
77845 +       err = read_log_page(log, curpage_off, &page, &usa_error);
77847 +       /* Compute the next log page offset in the file */
77848 +       nextpage_off = next_page_off(log, curpage_off);
77849 +       wrapped = nextpage_off == log->first_page;
77851 +       if (tails > 1) {
77852 +               struct RECORD_PAGE_HDR *cur_page =
77853 +                       Add2Ptr(page_bufs, curpage_off - page_off);
77855 +               if (curpage_off == saved_off) {
77856 +                       tail_page = cur_page;
77857 +                       goto use_tail_page;
77858 +               }
77860 +               if (page_off > curpage_off || curpage_off >= saved_off)
77861 +                       goto use_tail_page;
77863 +               if (page_off1)
77864 +                       goto use_cur_page;
77866 +               if (!err && !usa_error &&
77867 +                   page->rhdr.sign == NTFS_RCRD_SIGNATURE &&
77868 +                   cur_page->rhdr.lsn == page->rhdr.lsn &&
77869 +                   cur_page->record_hdr.next_record_off ==
77870 +                           page->record_hdr.next_record_off &&
77871 +                   ((page_pos == page_cnt &&
77872 +                     le16_to_cpu(page->page_pos) == 1) ||
77873 +                    (page_pos != page_cnt &&
77874 +                     le16_to_cpu(page->page_pos) == page_pos + 1 &&
77875 +                     le16_to_cpu(page->page_count) == page_cnt))) {
77876 +                       cur_page = NULL;
77877 +                       goto use_tail_page;
77878 +               }
77880 +               page_off1 = page_off;
77882 +use_cur_page:
77884 +               lsn_cur = le64_to_cpu(cur_page->rhdr.lsn);
77886 +               if (last_ok_lsn !=
77887 +                           le64_to_cpu(cur_page->record_hdr.last_end_lsn) &&
77888 +                   ((lsn_cur >> log->file_data_bits) +
77889 +                    ((curpage_off <
77890 +                      (lsn_to_vbo(log, lsn_cur) & ~log->page_mask))
77891 +                             ? 1
77892 +                             : 0)) != expected_seq) {
77893 +                       goto check_tail;
77894 +               }
77896 +               if (!is_log_record_end(cur_page)) {
77897 +                       tail_page = NULL;
77898 +                       last_ok_lsn = lsn_cur;
77899 +                       goto next_page_1;
77900 +               }
77902 +               log->seq_num = expected_seq;
77903 +               log->l_flags &= ~NTFSLOG_NO_LAST_LSN;
77904 +               log->last_lsn = le64_to_cpu(cur_page->record_hdr.last_end_lsn);
77905 +               log->ra->current_lsn = cur_page->record_hdr.last_end_lsn;
77907 +               if (log->record_header_len <=
77908 +                   log->page_size -
77909 +                           le16_to_cpu(cur_page->record_hdr.next_record_off)) {
77910 +                       log->l_flags |= NTFSLOG_REUSE_TAIL;
77911 +                       log->next_page = curpage_off;
77912 +               } else {
77913 +                       log->l_flags &= ~NTFSLOG_REUSE_TAIL;
77914 +                       log->next_page = nextpage_off;
77915 +               }
77917 +               if (wrapped_file)
77918 +                       log->l_flags |= NTFSLOG_WRAPPED;
77920 +               last_ok_lsn = le64_to_cpu(cur_page->record_hdr.last_end_lsn);
77921 +               goto next_page_1;
77922 +       }
77924 +       /*
77925 +        * If we are at the expected first page of a transfer check to see
77926 +        * if either tail copy is at this offset
77927 +        * If this page is the last page of a transfer, check if we wrote
77928 +        * a subsequent tail copy
77929 +        */
77930 +       if (page_cnt == page_pos || page_cnt == page_pos + 1) {
77931 +               /*
77932 +                * Check if the offset matches either the first or second
77933 +                * tail copy. It is possible it will match both
77934 +                */
77935 +               if (curpage_off == final_off)
77936 +                       tail_page = first_tail;
77938 +               /*
77939 +                * If we already matched on the first page then
77940 +                * check the ending lsns.
77941 +                */
77942 +               if (curpage_off == second_off) {
77943 +                       if (!tail_page ||
77944 +                           (second_tail &&
77945 +                            le64_to_cpu(second_tail->record_hdr.last_end_lsn) >
77946 +                                    le64_to_cpu(first_tail->record_hdr
77947 +                                                        .last_end_lsn))) {
77948 +                               tail_page = second_tail;
77949 +                       }
77950 +               }
77951 +       }
77953 +use_tail_page:
77954 +       if (tail_page) {
77955 +               /* we have a candidate for a tail copy */
77956 +               lsn_cur = le64_to_cpu(tail_page->record_hdr.last_end_lsn);
77958 +               if (last_ok_lsn < lsn_cur) {
77959 +                       /*
77960 +                        * If the sequence number is not expected,
77961 +                        * then don't use the tail copy
77962 +                        */
77963 +                       if (expected_seq != (lsn_cur >> log->file_data_bits))
77964 +                               tail_page = NULL;
77965 +               } else if (last_ok_lsn > lsn_cur) {
77966 +                       /*
77967 +                        * If the last lsn is greater than the one on
77968 +                        * this page then forget this tail
77969 +                        */
77970 +                       tail_page = NULL;
77971 +               }
77972 +       }
77974 +       /* If we have an error on the current page, we will break out of this loop */
77975 +       if (err || usa_error)
77976 +               goto check_tail;
77978 +       /*
77979 +        * Done if the last lsn on this page doesn't match the previous known
77980 +        * last lsn or the sequence number is not expected
77981 +        */
77982 +       lsn_cur = le64_to_cpu(page->rhdr.lsn);
77983 +       if (last_ok_lsn != lsn_cur &&
77984 +           expected_seq != (lsn_cur >> log->file_data_bits)) {
77985 +               goto check_tail;
77986 +       }
77988 +       /*
77989 +        * Check that the page position and page count values are correct
77990 +        * If this is the first page of a transfer the position must be 1
77991 +        * and the count will be unknown
77992 +        */
77993 +       if (page_cnt == page_pos) {
77994 +               if (page->page_pos != cpu_to_le16(1) &&
77995 +                   (!reuse_page || page->page_pos != page->page_count)) {
77996 +                       /*
77997 +                        * If the current page is the first page we are
77998 +                        * looking at and we are reusing this page then
77999 +                        * it can be either the first or last page of a
78000 +                        * transfer. Otherwise it can only be the first.
78001 +                        */
78002 +                       goto check_tail;
78003 +               }
78004 +       } else if (le16_to_cpu(page->page_count) != page_cnt ||
78005 +                  le16_to_cpu(page->page_pos) != page_pos + 1) {
78006 +               /*
78007 +                * The page position better be 1 more than the last page
78008 +                * position and the page count better match
78009 +                */
78010 +               goto check_tail;
78011 +       }
78013 +       /*
78014 +        * We have a valid page in the file and may have a valid page in
78015 +        * the tail copy area.
78016 +        * If the tail page was written after the page in the file, then
78017 +        * break out of the loop.
78018 +        */
78019 +       if (tail_page &&
78020 +           le64_to_cpu(tail_page->record_hdr.last_end_lsn) > lsn_cur) {
78021 +               /* Remember if we will replace the page */
78022 +               replace_page = true;
78023 +               goto check_tail;
78024 +       }
78026 +       tail_page = NULL;
78028 +       if (is_log_record_end(page)) {
78029 +               /*
78030 +                * Since we have read this page we know the sequence number
78031 +                * is the same as our expected value
78032 +                */
78033 +               log->seq_num = expected_seq;
78034 +               log->last_lsn = le64_to_cpu(page->record_hdr.last_end_lsn);
78035 +               log->ra->current_lsn = page->record_hdr.last_end_lsn;
78036 +               log->l_flags &= ~NTFSLOG_NO_LAST_LSN;
78038 +               /*
78039 +                * If there is room on this page for another header then
78040 +                * remember we want to reuse the page
78041 +                */
78042 +               if (log->record_header_len <=
78043 +                   log->page_size -
78044 +                           le16_to_cpu(page->record_hdr.next_record_off)) {
78045 +                       log->l_flags |= NTFSLOG_REUSE_TAIL;
78046 +                       log->next_page = curpage_off;
78047 +               } else {
78048 +                       log->l_flags &= ~NTFSLOG_REUSE_TAIL;
78049 +                       log->next_page = nextpage_off;
78050 +               }
78052 +               /* Remember if we wrapped the log file */
78053 +               if (wrapped_file)
78054 +                       log->l_flags |= NTFSLOG_WRAPPED;
78055 +       }
78057 +       /*
78058 +        * Remember the last page count and position.
78059 +        * Also remember the last known lsn
78060 +        */
78061 +       page_cnt = le16_to_cpu(page->page_count);
78062 +       page_pos = le16_to_cpu(page->page_pos);
78063 +       last_ok_lsn = le64_to_cpu(page->rhdr.lsn);
78065 +next_page_1:
78067 +       if (wrapped) {
78068 +               expected_seq += 1;
78069 +               wrapped_file = 1;
78070 +       }
78072 +       curpage_off = nextpage_off;
78073 +       ntfs_free(page);
78074 +       page = NULL;
78075 +       reuse_page = 0;
78076 +       goto next_page;
78078 +check_tail:
78079 +       if (tail_page) {
78080 +               log->seq_num = expected_seq;
78081 +               log->last_lsn = le64_to_cpu(tail_page->record_hdr.last_end_lsn);
78082 +               log->ra->current_lsn = tail_page->record_hdr.last_end_lsn;
78083 +               log->l_flags &= ~NTFSLOG_NO_LAST_LSN;
78085 +               if (log->page_size -
78086 +                           le16_to_cpu(
78087 +                                   tail_page->record_hdr.next_record_off) >=
78088 +                   log->record_header_len) {
78089 +                       log->l_flags |= NTFSLOG_REUSE_TAIL;
78090 +                       log->next_page = curpage_off;
78091 +               } else {
78092 +                       log->l_flags &= ~NTFSLOG_REUSE_TAIL;
78093 +                       log->next_page = nextpage_off;
78094 +               }
78096 +               if (wrapped)
78097 +                       log->l_flags |= NTFSLOG_WRAPPED;
78098 +       }
78100 +       /* Remember that the partial IO will start at the next page */
78101 +       second_off = nextpage_off;
78103 +       /*
78104 +        * If the next page is the first page of the file then update
78105 +        * the sequence number for log records which begin on the next page
78106 +        */
78107 +       if (wrapped)
78108 +               expected_seq += 1;
78110 +       /*
78111 +        * If we have a tail copy or are performing single page I/O we can
78112 +        * immediately look at the next page
78113 +        */
78114 +       if (replace_page || (log->ra->flags & RESTART_SINGLE_PAGE_IO)) {
78115 +               page_cnt = 2;
78116 +               page_pos = 1;
78117 +               goto check_valid;
78118 +       }
78120 +       if (page_pos != page_cnt)
78121 +               goto check_valid;
78122 +       /*
78123 +        * If the next page causes us to wrap to the beginning of the log
78124 +        * file then we know which page to check next.
78125 +        */
78126 +       if (wrapped) {
78127 +               page_cnt = 2;
78128 +               page_pos = 1;
78129 +               goto check_valid;
78130 +       }
78132 +       cur_pos = 2;
78134 +next_test_page:
78135 +       ntfs_free(tst_page);
78136 +       tst_page = NULL;
78138 +       /* Walk through the file, reading log pages */
78139 +       err = read_log_page(log, nextpage_off, &tst_page, &usa_error);
78141 +       /*
78142 +        * If we get a USA error then assume that we correctly found
78143 +        * the end of the original transfer
78144 +        */
78145 +       if (usa_error)
78146 +               goto file_is_valid;
78148 +       /*
78149 +        * If we were able to read the page, we examine it to see if it
78150 +        * is the same or a different IO block
78151 +        */
78152 +       if (err)
78153 +               goto next_test_page_1;
78155 +       if (le16_to_cpu(tst_page->page_pos) == cur_pos &&
78156 +           check_subseq_log_page(log, tst_page, nextpage_off, expected_seq)) {
78157 +               page_cnt = le16_to_cpu(tst_page->page_count) + 1;
78158 +               page_pos = le16_to_cpu(tst_page->page_pos);
78159 +               goto check_valid;
78160 +       } else {
78161 +               goto file_is_valid;
78162 +       }
78164 +next_test_page_1:
78166 +       nextpage_off = next_page_off(log, curpage_off);
78167 +       wrapped = nextpage_off == log->first_page;
78169 +       if (wrapped) {
78170 +               expected_seq += 1;
78171 +               page_cnt = 2;
78172 +               page_pos = 1;
78173 +       }
78175 +       cur_pos += 1;
78176 +       part_io_count += 1;
78177 +       if (!wrapped)
78178 +               goto next_test_page;
78180 +check_valid:
78181 +       /* Skip over the remaining pages of this transfer */
78182 +       remain_pages = page_cnt - page_pos - 1;
78183 +       part_io_count += remain_pages;
78185 +       while (remain_pages--) {
78186 +               nextpage_off = next_page_off(log, curpage_off);
78187 +               wrapped = nextpage_off == log->first_page;
78189 +               if (wrapped)
78190 +                       expected_seq += 1;
78191 +       }
78193 +       /* Call our routine to check this log page */
78194 +       ntfs_free(tst_page);
78195 +       tst_page = NULL;
78197 +       err = read_log_page(log, nextpage_off, &tst_page, &usa_error);
78198 +       if (!err && !usa_error &&
78199 +           check_subseq_log_page(log, tst_page, nextpage_off, expected_seq)) {
78200 +               err = -EINVAL;
78201 +               goto out;
78202 +       }
78204 +file_is_valid:
78206 +       /* We have a valid file */
78207 +       if (page_off1 || tail_page) {
78208 +               struct RECORD_PAGE_HDR *tmp_page;
78210 +               if (sb_rdonly(log->ni->mi.sbi->sb)) {
78211 +                       err = -EROFS;
78212 +                       goto out;
78213 +               }
78215 +               if (page_off1) {
78216 +                       tmp_page = Add2Ptr(page_bufs, page_off1 - page_off);
78217 +                       tails -= (page_off1 - page_off) / log->page_size;
78218 +                       if (!tail_page)
78219 +                               tails -= 1;
78220 +               } else {
78221 +                       tmp_page = tail_page;
78222 +                       tails = 1;
78223 +               }
78225 +               while (tails--) {
78226 +                       u64 off = hdr_file_off(log, tmp_page);
78228 +                       if (!page) {
78229 +                               page = ntfs_malloc(log->page_size);
78230 +                               if (!page)
78231 +                                       return -ENOMEM;
78232 +                       }
78234 +                       /*
78235 +                        * Correct the page: copy the data from this page
78236 +                        * into it and flush it to disk
78237 +                        */
78238 +                       memcpy(page, tmp_page, log->page_size);
78240 +                       /* Fill the last flushed lsn value and flush the page */
78241 +                       if (log->major_ver < 2)
78242 +                               page->rhdr.lsn = page->record_hdr.last_end_lsn;
78243 +                       else
78244 +                               page->file_off = 0;
78246 +                       page->page_pos = page->page_count = cpu_to_le16(1);
78248 +                       ntfs_fix_pre_write(&page->rhdr, log->page_size);
78250 +                       err = ntfs_sb_write_run(log->ni->mi.sbi,
78251 +                                               &log->ni->file.run, off, page,
78252 +                                               log->page_size);
78254 +                       if (err)
78255 +                               goto out;
78257 +                       if (part_io_count && second_off == off) {
78258 +                               second_off += log->page_size;
78259 +                               part_io_count -= 1;
78260 +                       }
78262 +                       tmp_page = Add2Ptr(tmp_page, log->page_size);
78263 +               }
78264 +       }
78266 +       if (part_io_count) {
78267 +               if (sb_rdonly(log->ni->mi.sbi->sb)) {
78268 +                       err = -EROFS;
78269 +                       goto out;
78270 +               }
78271 +       }
78273 +out:
78274 +       ntfs_free(second_tail);
78275 +       ntfs_free(first_tail);
78276 +       ntfs_free(page);
78277 +       ntfs_free(tst_page);
78278 +       ntfs_free(page_bufs);
78280 +       return err;
78284 + * read_log_rec_buf
78285 + *
78286 + * copies a log record from the file to a buffer
78287 + * The log record may span several log pages and may even wrap the file
78288 + */
78289 +static int read_log_rec_buf(struct ntfs_log *log,
78290 +                           const struct LFS_RECORD_HDR *rh, void *buffer)
78292 +       int err;
78293 +       struct RECORD_PAGE_HDR *ph = NULL;
78294 +       u64 lsn = le64_to_cpu(rh->this_lsn);
78295 +       u32 vbo = lsn_to_vbo(log, lsn) & ~log->page_mask;
78296 +       u32 off = lsn_to_page_off(log, lsn) + log->record_header_len;
78297 +       u32 data_len = le32_to_cpu(rh->client_data_len);
78299 +       /*
78300 +        * While there are more bytes to transfer,
78301 +        * we continue to attempt to perform the read
78302 +        */
78303 +       for (;;) {
78304 +               bool usa_error;
78305 +               u32 tail = log->page_size - off;
78307 +               if (tail >= data_len)
78308 +                       tail = data_len;
78310 +               data_len -= tail;
78312 +               err = read_log_page(log, vbo, &ph, &usa_error);
78313 +               if (err)
78314 +                       goto out;
78316 +               /*
78317 +                * The last lsn on this page had better be greater than or equal
78318 +                * to the lsn we are copying
78319 +                */
78320 +               if (lsn > le64_to_cpu(ph->rhdr.lsn)) {
78321 +                       err = -EINVAL;
78322 +                       goto out;
78323 +               }
78325 +               memcpy(buffer, Add2Ptr(ph, off), tail);
78327 +               /* If there are no more bytes to transfer, we exit the loop */
78328 +               if (!data_len) {
78329 +                       if (!is_log_record_end(ph) ||
78330 +                           lsn > le64_to_cpu(ph->record_hdr.last_end_lsn)) {
78331 +                               err = -EINVAL;
78332 +                               goto out;
78333 +                       }
78334 +                       break;
78335 +               }
78337 +               if (ph->rhdr.lsn == ph->record_hdr.last_end_lsn ||
78338 +                   lsn > le64_to_cpu(ph->rhdr.lsn)) {
78339 +                       err = -EINVAL;
78340 +                       goto out;
78341 +               }
78343 +               vbo = next_page_off(log, vbo);
78344 +               off = log->data_off;
78346 +               /*
78347 +                * Adjust our pointer into the user's buffer to transfer
78348 +                * the next block to.
78349 +                */
78350 +               buffer = Add2Ptr(buffer, tail);
78351 +       }
78353 +out:
78354 +       ntfs_free(ph);
78355 +       return err;
78356 +}
78358 +static int read_rst_area(struct ntfs_log *log, struct NTFS_RESTART **rst_,
78359 +                        u64 *lsn)
78360 +{
78361 +       int err;
78362 +       struct LFS_RECORD_HDR *rh = NULL;
78363 +       const struct CLIENT_REC *cr =
78364 +               Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off));
78365 +       u64 lsnr, lsnc = le64_to_cpu(cr->restart_lsn);
78366 +       u32 len;
78367 +       struct NTFS_RESTART *rst;
78369 +       *lsn = 0;
78370 +       *rst_ = NULL;
78372 +       /* If the client doesn't have a restart area, go ahead and exit now */
78373 +       if (!lsnc)
78374 +               return 0;
78376 +       err = read_log_page(log, lsn_to_vbo(log, lsnc),
78377 +                           (struct RECORD_PAGE_HDR **)&rh, NULL);
78378 +       if (err)
78379 +               return err;
78381 +       rst = NULL;
78382 +       lsnr = le64_to_cpu(rh->this_lsn);
78384 +       if (lsnc != lsnr) {
78385 +               /* If the lsn values don't match, then the disk is corrupt */
78386 +               err = -EINVAL;
78387 +               goto out;
78388 +       }
78390 +       *lsn = lsnr;
78391 +       len = le32_to_cpu(rh->client_data_len);
78393 +       if (!len) {
78394 +               err = 0;
78395 +               goto out;
78396 +       }
78398 +       if (len < sizeof(struct NTFS_RESTART)) {
78399 +               err = -EINVAL;
78400 +               goto out;
78401 +       }
78403 +       rst = ntfs_malloc(len);
78404 +       if (!rst) {
78405 +               err = -ENOMEM;
78406 +               goto out;
78407 +       }
78409 +       /* Copy the data into the 'rst' buffer */
78410 +       err = read_log_rec_buf(log, rh, rst);
78411 +       if (err)
78412 +               goto out;
78414 +       *rst_ = rst;
78415 +       rst = NULL;
78417 +out:
78418 +       ntfs_free(rh);
78419 +       ntfs_free(rst);
78421 +       return err;
78422 +}
78424 +static int find_log_rec(struct ntfs_log *log, u64 lsn, struct lcb *lcb)
78425 +{
78426 +       int err;
78427 +       struct LFS_RECORD_HDR *rh = lcb->lrh;
78428 +       u32 rec_len, len;
78430 +       /* Read the record header for this lsn */
78431 +       if (!rh) {
78432 +               err = read_log_page(log, lsn_to_vbo(log, lsn),
78433 +                                   (struct RECORD_PAGE_HDR **)&rh, NULL);
78435 +               lcb->lrh = rh;
78436 +               if (err)
78437 +                       return err;
78438 +       }
78440 +       /*
78441 +        * If the lsn of the log record doesn't match the desired
78442 +        * lsn, then the disk is corrupt.
78443 +        */
78444 +       if (lsn != le64_to_cpu(rh->this_lsn))
78445 +               return -EINVAL;
78447 +       len = le32_to_cpu(rh->client_data_len);
78449 +       /*
78450 +        * Check that the length field isn't greater than the total
78451 +        * available space in the log file.
78452 +        */
78453 +       rec_len = len + log->record_header_len;
78454 +       if (rec_len >= log->total_avail)
78455 +               return -EINVAL;
78457 +       /*
78458 +        * If the record spans several pages, copy it into a buffer;
78459 +        * otherwise put a pointer to it in the context block.
78460 +        */
78461 +       if (rh->flags & LOG_RECORD_MULTI_PAGE) {
78462 +               void *lr = ntfs_malloc(len);
78464 +               if (!lr)
78465 +                       return -ENOMEM;
78467 +               lcb->log_rec = lr;
78468 +               lcb->alloc = true;
78470 +               /* Copy the data into the buffer returned */
78471 +               err = read_log_rec_buf(log, rh, lr);
78472 +               if (err)
78473 +                       return err;
78474 +       } else {
78475 +               /* If beyond the end of the current page -> an error */
78476 +               u32 page_off = lsn_to_page_off(log, lsn);
78478 +               if (page_off + len + log->record_header_len > log->page_size)
78479 +                       return -EINVAL;
78481 +               lcb->log_rec = Add2Ptr(rh, sizeof(struct LFS_RECORD_HDR));
78482 +               lcb->alloc = false;
78483 +       }
78485 +       return 0;
78486 +}
78488 +/*
78489 + * read_log_rec_lcb
78490 + *
78491 + * Initiates the query operation.
78492 + */
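+/*
+ * The context block ('struct lcb') keeps the client id, the walk
+ * direction (lcb_ctx_next, lcb_ctx_prev or lcb_ctx_undo_next) and the
+ * header of the current record, so that read_next_log_rec() can
+ * continue iterating from it.
+ */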
78493 +static int read_log_rec_lcb(struct ntfs_log *log, u64 lsn, u32 ctx_mode,
78494 +                           struct lcb **lcb_)
78495 +{
78496 +       int err;
78497 +       const struct CLIENT_REC *cr;
78498 +       struct lcb *lcb;
78500 +       switch (ctx_mode) {
78501 +       case lcb_ctx_undo_next:
78502 +       case lcb_ctx_prev:
78503 +       case lcb_ctx_next:
78504 +               break;
78505 +       default:
78506 +               return -EINVAL;
78507 +       }
78509 +       /* Check that the given lsn is in the legal range for this client */
78510 +       cr = Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off));
78512 +       if (!verify_client_lsn(log, cr, lsn))
78513 +               return -EINVAL;
78515 +       lcb = ntfs_zalloc(sizeof(struct lcb));
78516 +       if (!lcb)
78517 +               return -ENOMEM;
78518 +       lcb->client = log->client_id;
78519 +       lcb->ctx_mode = ctx_mode;
78521 +       /* Find the log record indicated by the given lsn */
78522 +       err = find_log_rec(log, lsn, lcb);
78523 +       if (err)
78524 +               goto out;
78526 +       *lcb_ = lcb;
78527 +       return 0;
78529 +out:
78530 +       lcb_put(lcb);
78531 +       *lcb_ = NULL;
78532 +       return err;
78533 +}
78535 +/*
78536 + * find_client_next_lsn
78537 + *
78538 + * Attempt to find the next lsn to return to a client, based on the context mode.
78539 + */
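+/*
+ * For lcb_ctx_next the log is scanned forward with next_log_lsn() until
+ * an LfsClientRecord belonging to this client is found; for
+ * lcb_ctx_undo_next and lcb_ctx_prev the client_undo_next_lsn /
+ * client_prev_lsn links in the current record header are followed.
+ */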
78540 +static int find_client_next_lsn(struct ntfs_log *log, struct lcb *lcb, u64 *lsn)
78541 +{
78542 +       int err;
78543 +       u64 next_lsn;
78544 +       struct LFS_RECORD_HDR *hdr;
78546 +       hdr = lcb->lrh;
78547 +       *lsn = 0;
78549 +       if (lcb_ctx_next != lcb->ctx_mode)
78550 +               goto check_undo_next;
78552 +       /* Loop as long as another lsn can be found */
78553 +       for (;;) {
78554 +               u64 current_lsn;
78556 +               err = next_log_lsn(log, hdr, &current_lsn);
78557 +               if (err)
78558 +                       goto out;
78560 +               if (!current_lsn)
78561 +                       break;
78563 +               if (hdr != lcb->lrh)
78564 +                       ntfs_free(hdr);
78566 +               hdr = NULL;
78567 +               err = read_log_page(log, lsn_to_vbo(log, current_lsn),
78568 +                                   (struct RECORD_PAGE_HDR **)&hdr, NULL);
78569 +               if (err)
78570 +                       goto out;
78572 +               if (memcmp(&hdr->client, &lcb->client,
78573 +                          sizeof(struct CLIENT_ID))) {
78574 +                       /*err = -EINVAL; */
78575 +               } else if (LfsClientRecord == hdr->record_type) {
78576 +                       ntfs_free(lcb->lrh);
78577 +                       lcb->lrh = hdr;
78578 +                       *lsn = current_lsn;
78579 +                       return 0;
78580 +               }
78581 +       }
78583 +out:
78584 +       if (hdr != lcb->lrh)
78585 +               ntfs_free(hdr);
78586 +       return err;
78588 +check_undo_next:
78589 +       if (lcb_ctx_undo_next == lcb->ctx_mode)
78590 +               next_lsn = le64_to_cpu(hdr->client_undo_next_lsn);
78591 +       else if (lcb_ctx_prev == lcb->ctx_mode)
78592 +               next_lsn = le64_to_cpu(hdr->client_prev_lsn);
78593 +       else
78594 +               return 0;
78596 +       if (!next_lsn)
78597 +               return 0;
78599 +       if (!verify_client_lsn(
78600 +                   log, Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off)),
78601 +                   next_lsn))
78602 +               return 0;
78604 +       hdr = NULL;
78605 +       err = read_log_page(log, lsn_to_vbo(log, next_lsn),
78606 +                           (struct RECORD_PAGE_HDR **)&hdr, NULL);
78607 +       if (err)
78608 +               return err;
78609 +       ntfs_free(lcb->lrh);
78610 +       lcb->lrh = hdr;
78612 +       *lsn = next_lsn;
78614 +       return 0;
78615 +}
78617 +static int read_next_log_rec(struct ntfs_log *log, struct lcb *lcb, u64 *lsn)
78618 +{
78619 +       int err;
78621 +       err = find_client_next_lsn(log, lcb, lsn);
78622 +       if (err)
78623 +               return err;
78625 +       if (!*lsn)
78626 +               return 0;
78628 +       if (lcb->alloc)
78629 +               ntfs_free(lcb->log_rec);
78631 +       lcb->log_rec = NULL;
78632 +       lcb->alloc = false;
78633 +       ntfs_free(lcb->lrh);
78634 +       lcb->lrh = NULL;
78636 +       return find_log_rec(log, *lsn, lcb);
78637 +}
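+/*
+ * check_index_header
+ *
+ * Sanity-checks an INDEX_HDR: de_off/used/total must all stay inside
+ * 'bytes', and every entry in the chain must be at least min_de bytes
+ * (larger when it carries a subnode vcn) and agree with the header's
+ * subnode flag.
+ */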
78639 +static inline bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes)
78640 +{
78641 +       __le16 mask;
78642 +       u32 min_de, de_off, used, total;
78643 +       const struct NTFS_DE *e;
78645 +       if (hdr_has_subnode(hdr)) {
78646 +               min_de = sizeof(struct NTFS_DE) + sizeof(u64);
78647 +               mask = NTFS_IE_HAS_SUBNODES;
78648 +       } else {
78649 +               min_de = sizeof(struct NTFS_DE);
78650 +               mask = 0;
78651 +       }
78653 +       de_off = le32_to_cpu(hdr->de_off);
78654 +       used = le32_to_cpu(hdr->used);
78655 +       total = le32_to_cpu(hdr->total);
78657 +       if (de_off > bytes - min_de || used > bytes || total > bytes ||
78658 +           de_off + min_de > used || used > total) {
78659 +               return false;
78660 +       }
78662 +       e = Add2Ptr(hdr, de_off);
78663 +       for (;;) {
78664 +               u16 esize = le16_to_cpu(e->size);
78665 +               struct NTFS_DE *next = Add2Ptr(e, esize);
78667 +               if (esize < min_de || PtrOffset(hdr, next) > used ||
78668 +                   (e->flags & NTFS_IE_HAS_SUBNODES) != mask) {
78669 +                       return false;
78670 +               }
78672 +               if (de_is_last(e))
78673 +                       break;
78675 +               e = next;
78676 +       }
78678 +       return true;
78679 +}
78681 +static inline bool check_index_buffer(const struct INDEX_BUFFER *ib, u32 bytes)
78682 +{
78683 +       u16 fo;
78684 +       const struct NTFS_RECORD_HEADER *r = &ib->rhdr;
78686 +       if (r->sign != NTFS_INDX_SIGNATURE)
78687 +               return false;
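+       /*
+        * The update sequence array holds one __le16 per sector plus the
+        * update sequence number itself and must end inside the first
+        * sector, so 'fo' is the largest valid fixup offset.
+        */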
78689 +       fo = (SECTOR_SIZE - ((bytes >> SECTOR_SHIFT) + 1) * sizeof(short));
78691 +       if (le16_to_cpu(r->fix_off) > fo)
78692 +               return false;
78694 +       if ((le16_to_cpu(r->fix_num) - 1) * SECTOR_SIZE != bytes)
78695 +               return false;
78697 +       return check_index_header(&ib->ihdr,
78698 +                                 bytes - offsetof(struct INDEX_BUFFER, ihdr));
78699 +}
78701 +static inline bool check_index_root(const struct ATTRIB *attr,
78702 +                                   struct ntfs_sb_info *sbi)
78703 +{
78704 +       bool ret;
78705 +       const struct INDEX_ROOT *root = resident_data(attr);
78706 +       u8 index_bits = le32_to_cpu(root->index_block_size) >= sbi->cluster_size
78707 +                               ? sbi->cluster_bits
78708 +                               : SECTOR_SHIFT;
78709 +       u8 block_clst = root->index_block_clst;
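+       /*
+        * index_block_size must equal index_block_clst shifted by either
+        * cluster_bits or SECTOR_SHIFT (whichever unit the block is
+        * expressed in), and index_block_clst itself must be a power of
+        * two no larger than 0x80.
+        */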
78711 +       if (le32_to_cpu(attr->res.data_size) < sizeof(struct INDEX_ROOT) ||
78712 +           (root->type != ATTR_NAME && root->type != ATTR_ZERO) ||
78713 +           (root->type == ATTR_NAME &&
78714 +            root->rule != NTFS_COLLATION_TYPE_FILENAME) ||
78715 +           (le32_to_cpu(root->index_block_size) !=
78716 +            (block_clst << index_bits)) ||
78717 +           (block_clst != 1 && block_clst != 2 && block_clst != 4 &&
78718 +            block_clst != 8 && block_clst != 0x10 && block_clst != 0x20 &&
78719 +            block_clst != 0x40 && block_clst != 0x80)) {
78720 +               return false;
78721 +       }
78723 +       ret = check_index_header(&root->ihdr,
78724 +                                le32_to_cpu(attr->res.data_size) -
78725 +                                        offsetof(struct INDEX_ROOT, ihdr));
78726 +       return ret;
78727 +}
78729 +static inline bool check_attr(const struct MFT_REC *rec,
78730 +                             const struct ATTRIB *attr,
78731 +                             struct ntfs_sb_info *sbi)
78732 +{
78733 +       u32 asize = le32_to_cpu(attr->size);
78734 +       u32 rsize = 0;
78735 +       u64 dsize, svcn, evcn;
78736 +       u16 run_off;
78738 +       /* Check the fixed part of the attribute record header */
78739 +       if (asize >= sbi->record_size ||
78740 +           asize + PtrOffset(rec, attr) >= sbi->record_size ||
78741 +           (attr->name_len &&
78742 +            le16_to_cpu(attr->name_off) + attr->name_len * sizeof(short) >
78743 +                    asize)) {
78744 +               return false;
78745 +       }
78747 +       /* Check the attribute fields */
78748 +       switch (attr->non_res) {
78749 +       case 0:
78750 +               rsize = le32_to_cpu(attr->res.data_size);
78751 +               if (rsize >= asize ||
78752 +                   le16_to_cpu(attr->res.data_off) + rsize > asize) {
78753 +                       return false;
78754 +               }
78755 +               break;
78757 +       case 1:
78758 +               dsize = le64_to_cpu(attr->nres.data_size);
78759 +               svcn = le64_to_cpu(attr->nres.svcn);
78760 +               evcn = le64_to_cpu(attr->nres.evcn);
78761 +               run_off = le16_to_cpu(attr->nres.run_off);
78763 +               if (svcn > evcn + 1 || run_off >= asize ||
78764 +                   le64_to_cpu(attr->nres.valid_size) > dsize ||
78765 +                   dsize > le64_to_cpu(attr->nres.alloc_size)) {
78766 +                       return false;
78767 +               }
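+               /*
+                * Decode the mapping pairs without storing them (the run
+                * argument is NULL), purely to validate the run list.
+                */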
78769 +               if (run_unpack(NULL, sbi, 0, svcn, evcn, svcn,
78770 +                              Add2Ptr(attr, run_off), asize - run_off) < 0) {
78771 +                       return false;
78772 +               }
78774 +               return true;
78776 +       default:
78777 +               return false;
78778 +       }
78780 +       switch (attr->type) {
78781 +       case ATTR_NAME:
78782 +               if (fname_full_size(Add2Ptr(
78783 +                           attr, le16_to_cpu(attr->res.data_off))) > asize) {
78784 +                       return false;
78785 +               }
78786 +               break;
78788 +       case ATTR_ROOT:
78789 +               return check_index_root(attr, sbi);
78791 +       case ATTR_STD:
78792 +               if (rsize < sizeof(struct ATTR_STD_INFO5) &&
78793 +                   rsize != sizeof(struct ATTR_STD_INFO)) {
78794 +                       return false;
78795 +               }
78796 +               break;
78798 +       case ATTR_LIST:
78799 +       case ATTR_ID:
78800 +       case ATTR_SECURE:
78801 +       case ATTR_LABEL:
78802 +       case ATTR_VOL_INFO:
78803 +       case ATTR_DATA:
78804 +       case ATTR_ALLOC:
78805 +       case ATTR_BITMAP:
78806 +       case ATTR_REPARSE:
78807 +       case ATTR_EA_INFO:
78808 +       case ATTR_EA:
78809 +       case ATTR_PROPERTYSET:
78810 +       case ATTR_LOGGED_UTILITY_STREAM:
78811 +               break;
78813 +       default:
78814 +               return false;
78815 +       }
78817 +       return true;
78818 +}
78820 +static inline bool check_file_record(const struct MFT_REC *rec,
78821 +                                    const struct MFT_REC *rec2,
78822 +                                    struct ntfs_sb_info *sbi)
78823 +{
78824 +       const struct ATTRIB *attr;
78825 +       u16 fo = le16_to_cpu(rec->rhdr.fix_off);
78826 +       u16 fn = le16_to_cpu(rec->rhdr.fix_num);
78827 +       u16 ao = le16_to_cpu(rec->attr_off);
78828 +       u32 rs = sbi->record_size;
78830 +       /* check the file record header for consistency */
78831 +       if (rec->rhdr.sign != NTFS_FILE_SIGNATURE ||
78832 +           fo > (SECTOR_SIZE - ((rs >> SECTOR_SHIFT) + 1) * sizeof(short)) ||
78833 +           (fn - 1) * SECTOR_SIZE != rs || ao < MFTRECORD_FIXUP_OFFSET_1 ||
78834 +           ao > sbi->record_size - SIZEOF_RESIDENT || !is_rec_inuse(rec) ||
78835 +           le32_to_cpu(rec->total) != rs) {
78836 +               return false;
78837 +       }
78839 +       /* Loop to check all of the attributes */
78840 +       for (attr = Add2Ptr(rec, ao); attr->type != ATTR_END;
78841 +            attr = Add2Ptr(attr, le32_to_cpu(attr->size))) {
78842 +               if (check_attr(rec, attr, sbi))
78843 +                       continue;
78844 +               return false;
78845 +       }
78847 +       return true;
78848 +}
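+/*
+ * check_lsn
+ *
+ * On the redo pass (rlsn != NULL) an update is applied only when the
+ * on-disk record carries an lsn older than *rlsn; a hole signature or
+ * an lsn that is already up to date means the caller must skip it.
+ * With rlsn == NULL (the undo pass) the check always passes.
+ */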
78850 +static inline int check_lsn(const struct NTFS_RECORD_HEADER *hdr,
78851 +                           const u64 *rlsn)
78852 +{
78853 +       u64 lsn;
78855 +       if (!rlsn)
78856 +               return true;
78858 +       lsn = le64_to_cpu(hdr->lsn);
78860 +       if (hdr->sign == NTFS_HOLE_SIGNATURE)
78861 +               return false;
78863 +       if (*rlsn > lsn)
78864 +               return true;
78866 +       return false;
78867 +}
78869 +static inline bool check_if_attr(const struct MFT_REC *rec,
78870 +                                const struct LOG_REC_HDR *lrh)
78871 +{
78872 +       u16 ro = le16_to_cpu(lrh->record_off);
78873 +       u16 o = le16_to_cpu(rec->attr_off);
78874 +       const struct ATTRIB *attr = Add2Ptr(rec, o);
78876 +       while (o < ro) {
78877 +               u32 asize;
78879 +               if (attr->type == ATTR_END)
78880 +                       break;
78882 +               asize = le32_to_cpu(attr->size);
78883 +               if (!asize)
78884 +                       break;
78886 +               o += asize;
78887 +               attr = Add2Ptr(attr, asize);
78888 +       }
78890 +       return o == ro;
78891 +}
78893 +static inline bool check_if_index_root(const struct MFT_REC *rec,
78894 +                                      const struct LOG_REC_HDR *lrh)
78895 +{
78896 +       u16 ro = le16_to_cpu(lrh->record_off);
78897 +       u16 o = le16_to_cpu(rec->attr_off);
78898 +       const struct ATTRIB *attr = Add2Ptr(rec, o);
78900 +       while (o < ro) {
78901 +               u32 asize;
78903 +               if (attr->type == ATTR_END)
78904 +                       break;
78906 +               asize = le32_to_cpu(attr->size);
78907 +               if (!asize)
78908 +                       break;
78910 +               o += asize;
78911 +               attr = Add2Ptr(attr, asize);
78912 +       }
78914 +       return o == ro && attr->type == ATTR_ROOT;
78915 +}
78917 +static inline bool check_if_root_index(const struct ATTRIB *attr,
78918 +                                      const struct INDEX_HDR *hdr,
78919 +                                      const struct LOG_REC_HDR *lrh)
78920 +{
78921 +       u16 ao = le16_to_cpu(lrh->attr_off);
78922 +       u32 de_off = le32_to_cpu(hdr->de_off);
78923 +       u32 o = PtrOffset(attr, hdr) + de_off;
78924 +       const struct NTFS_DE *e = Add2Ptr(hdr, de_off);
78925 +       u32 asize = le32_to_cpu(attr->size);
78927 +       while (o < ao) {
78928 +               u16 esize;
78930 +               if (o >= asize)
78931 +                       break;
78933 +               esize = le16_to_cpu(e->size);
78934 +               if (!esize)
78935 +                       break;
78937 +               o += esize;
78938 +               e = Add2Ptr(e, esize);
78939 +       }
78941 +       return o == ao;
78942 +}
78944 +static inline bool check_if_alloc_index(const struct INDEX_HDR *hdr,
78945 +                                       u32 attr_off)
78946 +{
78947 +       u32 de_off = le32_to_cpu(hdr->de_off);
78948 +       u32 o = offsetof(struct INDEX_BUFFER, ihdr) + de_off;
78949 +       const struct NTFS_DE *e = Add2Ptr(hdr, de_off);
78950 +       u32 used = le32_to_cpu(hdr->used);
78952 +       while (o < attr_off) {
78953 +               u16 esize;
78955 +               if (de_off >= used)
78956 +                       break;
78958 +               esize = le16_to_cpu(e->size);
78959 +               if (!esize)
78960 +                       break;
78962 +               o += esize;
78963 +               de_off += esize;
78964 +               e = Add2Ptr(e, esize);
78965 +       }
78967 +       return o == attr_off;
78968 +}
78970 +static inline void change_attr_size(struct MFT_REC *rec, struct ATTRIB *attr,
78971 +                                   u32 nsize)
78972 +{
78973 +       u32 asize = le32_to_cpu(attr->size);
78974 +       int dsize = nsize - asize;
78975 +       u8 *next = Add2Ptr(attr, asize);
78976 +       u32 used = le32_to_cpu(rec->used);
78978 +       memmove(Add2Ptr(attr, nsize), next, used - PtrOffset(rec, next));
78980 +       rec->used = cpu_to_le32(used + dsize);
78981 +       attr->size = cpu_to_le32(nsize);
78982 +}
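+/*
+ * An OpenAttr pairs a private copy of the on-disk ATTRIB with the run
+ * list used while replaying updates against that attribute; 'run1'
+ * normally points at the embedded 'run0'.
+ */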
78984 +struct OpenAttr {
78985 +       struct ATTRIB *attr;
78986 +       struct runs_tree *run1;
78987 +       struct runs_tree run0;
78988 +       struct ntfs_inode *ni;
78989 +       // CLST rno;
78990 +};
78992 +/* Return 0 if 'a1' and 'a2' have the same type and name */
78993 +static inline int cmp_type_and_name(const struct ATTRIB *a1,
78994 +                                   const struct ATTRIB *a2)
78995 +{
78996 +       return a1->type != a2->type || a1->name_len != a2->name_len ||
78997 +              (a1->name_len && memcmp(attr_name(a1), attr_name(a2),
78998 +                                      a1->name_len * sizeof(short)));
78999 +}
79001 +static struct OpenAttr *find_loaded_attr(struct ntfs_log *log,
79002 +                                        const struct ATTRIB *attr, CLST rno)
79003 +{
79004 +       struct OPEN_ATTR_ENRTY *oe = NULL;
79006 +       while ((oe = enum_rstbl(log->open_attr_tbl, oe))) {
79007 +               struct OpenAttr *op_attr;
79009 +               if (ino_get(&oe->ref) != rno)
79010 +                       continue;
79012 +               op_attr = (struct OpenAttr *)oe->ptr;
79013 +               if (!cmp_type_and_name(op_attr->attr, attr))
79014 +                       return op_attr;
79015 +       }
79016 +       return NULL;
79017 +}
79019 +static struct ATTRIB *attr_create_nonres_log(struct ntfs_sb_info *sbi,
79020 +                                            enum ATTR_TYPE type, u64 size,
79021 +                                            const u16 *name, size_t name_len,
79022 +                                            __le16 flags)
79023 +{
79024 +       struct ATTRIB *attr;
79025 +       u32 name_size = QuadAlign(name_len * sizeof(short));
79026 +       bool is_ext = flags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED);
79027 +       u32 asize = name_size +
79028 +                   (is_ext ? SIZEOF_NONRESIDENT_EX : SIZEOF_NONRESIDENT);
79030 +       attr = ntfs_zalloc(asize);
79031 +       if (!attr)
79032 +               return NULL;
79034 +       attr->type = type;
79035 +       attr->size = cpu_to_le32(asize);
79036 +       attr->flags = flags;
79037 +       attr->non_res = 1;
79038 +       attr->name_len = name_len;
79040 +       attr->nres.evcn = cpu_to_le64((u64)bytes_to_cluster(sbi, size) - 1);
79041 +       attr->nres.alloc_size = cpu_to_le64(ntfs_up_cluster(sbi, size));
79042 +       attr->nres.data_size = cpu_to_le64(size);
79043 +       attr->nres.valid_size = attr->nres.data_size;
79044 +       if (is_ext) {
79045 +               attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
79046 +               if (is_attr_compressed(attr))
79047 +                       attr->nres.c_unit = COMPRESSION_UNIT;
79049 +               attr->nres.run_off =
79050 +                       cpu_to_le16(SIZEOF_NONRESIDENT_EX + name_size);
79051 +               memcpy(Add2Ptr(attr, SIZEOF_NONRESIDENT_EX), name,
79052 +                      name_len * sizeof(short));
79053 +       } else {
79054 +               attr->name_off = SIZEOF_NONRESIDENT_LE;
79055 +               attr->nres.run_off =
79056 +                       cpu_to_le16(SIZEOF_NONRESIDENT + name_size);
79057 +               memcpy(Add2Ptr(attr, SIZEOF_NONRESIDENT), name,
79058 +                      name_len * sizeof(short));
79059 +       }
79061 +       return attr;
79062 +}
79064 +/*
79065 + * do_action
79066 + *
79067 + * Common routine for the Redo and Undo Passes.
79068 + * If rlsn is NULL, then undo.
79069 + */
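+/*
+ * The first switch below loads the target of the operation (an MFT
+ * record, or a buffer read from a non-resident attribute), and the
+ * second switch applies the logged update to it. On the redo pass the
+ * redo lsn is then stamped into the header before write-back. Any
+ * failed consistency check jumps to 'dirty_vol', which just marks the
+ * volume dirty.
+ */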
79070 +static int do_action(struct ntfs_log *log, struct OPEN_ATTR_ENRTY *oe,
79071 +                    const struct LOG_REC_HDR *lrh, u32 op, void *data,
79072 +                    u32 dlen, u32 rec_len, const u64 *rlsn)
79073 +{
79074 +       int err = 0;
79075 +       struct ntfs_sb_info *sbi = log->ni->mi.sbi;
79076 +       struct inode *inode = NULL, *inode_parent;
79077 +       struct mft_inode *mi = NULL, *mi2_child = NULL;
79078 +       CLST rno = 0, rno_base = 0;
79079 +       struct INDEX_BUFFER *ib = NULL;
79080 +       struct MFT_REC *rec = NULL;
79081 +       struct ATTRIB *attr = NULL, *attr2;
79082 +       struct INDEX_HDR *hdr;
79083 +       struct INDEX_ROOT *root;
79084 +       struct NTFS_DE *e, *e1, *e2;
79085 +       struct NEW_ATTRIBUTE_SIZES *new_sz;
79086 +       struct ATTR_FILE_NAME *fname;
79087 +       struct OpenAttr *oa, *oa2;
79088 +       u32 nsize, t32, asize, used, esize, bmp_off, bmp_bits;
79089 +       u16 id, id2;
79090 +       u32 record_size = sbi->record_size;
79091 +       u64 t64;
79092 +       u16 roff = le16_to_cpu(lrh->record_off);
79093 +       u16 aoff = le16_to_cpu(lrh->attr_off);
79094 +       u64 lco = 0;
79095 +       u64 cbo = (u64)le16_to_cpu(lrh->cluster_off) << SECTOR_SHIFT;
79096 +       u64 tvo = le64_to_cpu(lrh->target_vcn) << sbi->cluster_bits;
79097 +       u64 vbo = cbo + tvo;
79098 +       void *buffer_le = NULL;
79099 +       u32 bytes = 0;
79100 +       bool a_dirty = false;
79101 +       u16 data_off;
79103 +       oa = oe->ptr;
79105 +       /* Big switch to prepare */
79106 +       switch (op) {
79107 +       /* ============================================================
79108 +        * Process MFT records, as described by the current log record
79109 +        * ============================================================
79110 +        */
79111 +       case InitializeFileRecordSegment:
79112 +       case DeallocateFileRecordSegment:
79113 +       case WriteEndOfFileRecordSegment:
79114 +       case CreateAttribute:
79115 +       case DeleteAttribute:
79116 +       case UpdateResidentValue:
79117 +       case UpdateMappingPairs:
79118 +       case SetNewAttributeSizes:
79119 +       case AddIndexEntryRoot:
79120 +       case DeleteIndexEntryRoot:
79121 +       case SetIndexEntryVcnRoot:
79122 +       case UpdateFileNameRoot:
79123 +       case UpdateRecordDataRoot:
79124 +       case ZeroEndOfFileRecord:
79125 +               rno = vbo >> sbi->record_bits;
79126 +               inode = ilookup(sbi->sb, rno);
79127 +               if (inode) {
79128 +                       mi = &ntfs_i(inode)->mi;
79129 +               } else if (op == InitializeFileRecordSegment) {
79130 +                       mi = ntfs_zalloc(sizeof(struct mft_inode));
79131 +                       if (!mi)
79132 +                               return -ENOMEM;
79133 +                       err = mi_format_new(mi, sbi, rno, 0, false);
79134 +                       if (err)
79135 +                               goto out;
79136 +               } else {
79137 +                       /* read from disk */
79138 +                       err = mi_get(sbi, rno, &mi);
79139 +                       if (err)
79140 +                               return err;
79141 +               }
79142 +               rec = mi->mrec;
79144 +               if (op == DeallocateFileRecordSegment)
79145 +                       goto skip_load_parent;
79147 +               if (InitializeFileRecordSegment != op) {
79148 +                       if (rec->rhdr.sign == NTFS_BAAD_SIGNATURE)
79149 +                               goto dirty_vol;
79150 +                       if (!check_lsn(&rec->rhdr, rlsn))
79151 +                               goto out;
79152 +                       if (!check_file_record(rec, NULL, sbi))
79153 +                               goto dirty_vol;
79154 +                       attr = Add2Ptr(rec, roff);
79155 +               }
79157 +               if (is_rec_base(rec) || InitializeFileRecordSegment == op) {
79158 +                       rno_base = rno;
79159 +                       goto skip_load_parent;
79160 +               }
79162 +               rno_base = ino_get(&rec->parent_ref);
79163 +               inode_parent = ntfs_iget5(sbi->sb, &rec->parent_ref, NULL);
79164 +               if (IS_ERR(inode_parent))
79165 +                       goto skip_load_parent;
79167 +               if (is_bad_inode(inode_parent)) {
79168 +                       iput(inode_parent);
79169 +                       goto skip_load_parent;
79170 +               }
79172 +               if (ni_load_mi_ex(ntfs_i(inode_parent), rno, &mi2_child)) {
79173 +                       iput(inode_parent);
79174 +               } else {
79175 +                       if (mi2_child->mrec != mi->mrec)
79176 +                               memcpy(mi2_child->mrec, mi->mrec,
79177 +                                      sbi->record_size);
79179 +                       if (inode)
79180 +                               iput(inode);
79181 +                       else if (mi)
79182 +                               mi_put(mi);
79184 +                       inode = inode_parent;
79185 +                       mi = mi2_child;
79186 +                       rec = mi2_child->mrec;
79187 +                       attr = Add2Ptr(rec, roff);
79188 +               }
79190 +skip_load_parent:
79191 +               inode_parent = NULL;
79192 +               break;
79194 +       /* ============================================================
79195 +        * Process attributes, as described by the current log record
79196 +        * ============================================================
79197 +        */
79198 +       case UpdateNonresidentValue:
79199 +       case AddIndexEntryAllocation:
79200 +       case DeleteIndexEntryAllocation:
79201 +       case WriteEndOfIndexBuffer:
79202 +       case SetIndexEntryVcnAllocation:
79203 +       case UpdateFileNameAllocation:
79204 +       case SetBitsInNonresidentBitMap:
79205 +       case ClearBitsInNonresidentBitMap:
79206 +       case UpdateRecordDataAllocation:
79207 +               attr = oa->attr;
79208 +               bytes = UpdateNonresidentValue == op ? dlen : 0;
79209 +               lco = (u64)le16_to_cpu(lrh->lcns_follow) << sbi->cluster_bits;
79211 +               if (attr->type == ATTR_ALLOC) {
79212 +                       t32 = le32_to_cpu(oe->bytes_per_index);
79213 +                       if (bytes < t32)
79214 +                               bytes = t32;
79215 +               }
79217 +               if (!bytes)
79218 +                       bytes = lco - cbo;
79220 +               bytes += roff;
79221 +               if (attr->type == ATTR_ALLOC)
79222 +                       bytes = (bytes + 511) & ~511; // align up to a 512-byte sector boundary
79224 +               buffer_le = ntfs_malloc(bytes);
79225 +               if (!buffer_le)
79226 +                       return -ENOMEM;
79228 +               err = ntfs_read_run_nb(sbi, oa->run1, vbo, buffer_le, bytes,
79229 +                                      NULL);
79230 +               if (err)
79231 +                       goto out;
79233 +               if (attr->type == ATTR_ALLOC && *(int *)buffer_le)
79234 +                       ntfs_fix_post_read(buffer_le, bytes, false);
79235 +               break;
79237 +       default:
79238 +               WARN_ON(1);
79239 +       }
79241 +       /* Big switch to do operation */
79242 +       switch (op) {
79243 +       case InitializeFileRecordSegment:
79244 +               if (roff + dlen > record_size)
79245 +                       goto dirty_vol;
79247 +               memcpy(Add2Ptr(rec, roff), data, dlen);
79248 +               mi->dirty = true;
79249 +               break;
79251 +       case DeallocateFileRecordSegment:
79252 +               clear_rec_inuse(rec);
79253 +               le16_add_cpu(&rec->seq, 1);
79254 +               mi->dirty = true;
79255 +               break;
79257 +       case WriteEndOfFileRecordSegment:
79258 +               attr2 = (struct ATTRIB *)data;
79259 +               if (!check_if_attr(rec, lrh) || roff + dlen > record_size)
79260 +                       goto dirty_vol;
79262 +               memmove(attr, attr2, dlen);
79263 +               rec->used = cpu_to_le32(QuadAlign(roff + dlen));
79265 +               mi->dirty = true;
79266 +               break;
79268 +       case CreateAttribute:
79269 +               attr2 = (struct ATTRIB *)data;
79270 +               asize = le32_to_cpu(attr2->size);
79271 +               used = le32_to_cpu(rec->used);
79273 +               if (!check_if_attr(rec, lrh) || dlen < SIZEOF_RESIDENT ||
79274 +                   !IsQuadAligned(asize) ||
79275 +                   Add2Ptr(attr2, asize) > Add2Ptr(lrh, rec_len) ||
79276 +                   dlen > record_size - used) {
79277 +                       goto dirty_vol;
79278 +               }
79280 +               memmove(Add2Ptr(attr, asize), attr, used - roff);
79281 +               memcpy(attr, attr2, asize);
79283 +               rec->used = cpu_to_le32(used + asize);
79284 +               id = le16_to_cpu(rec->next_attr_id);
79285 +               id2 = le16_to_cpu(attr2->id);
79286 +               if (id <= id2)
79287 +                       rec->next_attr_id = cpu_to_le16(id2 + 1);
79288 +               if (is_attr_indexed(attr))
79289 +                       le16_add_cpu(&rec->hard_links, 1);
79291 +               oa2 = find_loaded_attr(log, attr, rno_base);
79292 +               if (oa2) {
79293 +                       void *p2 = ntfs_memdup(attr, le32_to_cpu(attr->size));
79295 +                       if (p2) {
79296 +                               // run_close(oa2->run1);
79297 +                               ntfs_free(oa2->attr);
79298 +                               oa2->attr = p2;
79299 +                       }
79300 +               }
79302 +               mi->dirty = true;
79303 +               break;
79305 +       case DeleteAttribute:
79306 +               asize = le32_to_cpu(attr->size);
79307 +               used = le32_to_cpu(rec->used);
79309 +               if (!check_if_attr(rec, lrh))
79310 +                       goto dirty_vol;
79312 +               rec->used = cpu_to_le32(used - asize);
79313 +               if (is_attr_indexed(attr))
79314 +                       le16_add_cpu(&rec->hard_links, -1);
79316 +               memmove(attr, Add2Ptr(attr, asize), used - asize - roff);
79318 +               mi->dirty = true;
79319 +               break;
79321 +       case UpdateResidentValue:
79322 +               nsize = aoff + dlen;
79324 +               if (!check_if_attr(rec, lrh))
79325 +                       goto dirty_vol;
79327 +               asize = le32_to_cpu(attr->size);
79328 +               used = le32_to_cpu(rec->used);
79330 +               if (lrh->redo_len == lrh->undo_len) {
79331 +                       if (nsize > asize)
79332 +                               goto dirty_vol;
79333 +                       goto move_data;
79334 +               }
79336 +               if (nsize > asize && nsize - asize > record_size - used)
79337 +                       goto dirty_vol;
79339 +               nsize = QuadAlign(nsize);
79340 +               data_off = le16_to_cpu(attr->res.data_off);
79342 +               if (nsize < asize) {
79343 +                       memmove(Add2Ptr(attr, aoff), data, dlen);
79344 +                       data = NULL; // to skip the memmove below
79345 +               }
79347 +               memmove(Add2Ptr(attr, nsize), Add2Ptr(attr, asize),
79348 +                       used - le16_to_cpu(lrh->record_off) - asize);
79350 +               rec->used = cpu_to_le32(used + nsize - asize);
79351 +               attr->size = cpu_to_le32(nsize);
79352 +               attr->res.data_size = cpu_to_le32(aoff + dlen - data_off);
79354 +move_data:
79355 +               if (data)
79356 +                       memmove(Add2Ptr(attr, aoff), data, dlen);
79358 +               oa2 = find_loaded_attr(log, attr, rno_base);
79359 +               if (oa2) {
79360 +                       void *p2 = ntfs_memdup(attr, le32_to_cpu(attr->size));
79362 +                       if (p2) {
79363 +                               // run_close(&oa2->run0);
79364 +                               oa2->run1 = &oa2->run0;
79365 +                               ntfs_free(oa2->attr);
79366 +                               oa2->attr = p2;
79367 +                       }
79368 +               }
79370 +               mi->dirty = true;
79371 +               break;
79373 +       case UpdateMappingPairs:
79374 +               nsize = aoff + dlen;
79375 +               asize = le32_to_cpu(attr->size);
79376 +               used = le32_to_cpu(rec->used);
79378 +               if (!check_if_attr(rec, lrh) || !attr->non_res ||
79379 +                   aoff < le16_to_cpu(attr->nres.run_off) || aoff > asize ||
79380 +                   (nsize > asize && nsize - asize > record_size - used)) {
79381 +                       goto dirty_vol;
79382 +               }
79384 +               nsize = QuadAlign(nsize);
79386 +               memmove(Add2Ptr(attr, nsize), Add2Ptr(attr, asize),
79387 +                       used - le16_to_cpu(lrh->record_off) - asize);
79388 +               rec->used = cpu_to_le32(used + nsize - asize);
79389 +               attr->size = cpu_to_le32(nsize);
79390 +               memmove(Add2Ptr(attr, aoff), data, dlen);
79392 +               if (run_get_highest_vcn(le64_to_cpu(attr->nres.svcn),
79393 +                                       attr_run(attr), &t64)) {
79394 +                       goto dirty_vol;
79395 +               }
79397 +               attr->nres.evcn = cpu_to_le64(t64);
79398 +               oa2 = find_loaded_attr(log, attr, rno_base);
79399 +               if (oa2 && oa2->attr->non_res)
79400 +                       oa2->attr->nres.evcn = attr->nres.evcn;
79402 +               mi->dirty = true;
79403 +               break;
79405 +       case SetNewAttributeSizes:
79406 +               new_sz = data;
79407 +               if (!check_if_attr(rec, lrh) || !attr->non_res)
79408 +                       goto dirty_vol;
79410 +               attr->nres.alloc_size = new_sz->alloc_size;
79411 +               attr->nres.data_size = new_sz->data_size;
79412 +               attr->nres.valid_size = new_sz->valid_size;
79414 +               if (dlen >= sizeof(struct NEW_ATTRIBUTE_SIZES))
79415 +                       attr->nres.total_size = new_sz->total_size;
79417 +               oa2 = find_loaded_attr(log, attr, rno_base);
79418 +               if (oa2) {
79419 +                       void *p2 = ntfs_memdup(attr, le32_to_cpu(attr->size));
79421 +                       if (p2) {
79422 +                               ntfs_free(oa2->attr);
79423 +                               oa2->attr = p2;
79424 +                       }
79425 +               }
79426 +               mi->dirty = true;
79427 +               break;
79429 +       case AddIndexEntryRoot:
79430 +               e = (struct NTFS_DE *)data;
79431 +               esize = le16_to_cpu(e->size);
79432 +               root = resident_data(attr);
79433 +               hdr = &root->ihdr;
79434 +               used = le32_to_cpu(hdr->used);
79436 +               if (!check_if_index_root(rec, lrh) ||
79437 +                   !check_if_root_index(attr, hdr, lrh) ||
79438 +                   Add2Ptr(data, esize) > Add2Ptr(lrh, rec_len) ||
79439 +                   esize > le32_to_cpu(rec->total) - le32_to_cpu(rec->used)) {
79440 +                       goto dirty_vol;
79441 +               }
79443 +               e1 = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
79445 +               change_attr_size(rec, attr, le32_to_cpu(attr->size) + esize);
79447 +               memmove(Add2Ptr(e1, esize), e1,
79448 +                       PtrOffset(e1, Add2Ptr(hdr, used)));
79449 +               memmove(e1, e, esize);
79451 +               le32_add_cpu(&attr->res.data_size, esize);
79452 +               hdr->used = cpu_to_le32(used + esize);
79453 +               le32_add_cpu(&hdr->total, esize);
79455 +               mi->dirty = true;
79456 +               break;
79458 +       case DeleteIndexEntryRoot:
79459 +               root = resident_data(attr);
79460 +               hdr = &root->ihdr;
79461 +               used = le32_to_cpu(hdr->used);
79463 +               if (!check_if_index_root(rec, lrh) ||
79464 +                   !check_if_root_index(attr, hdr, lrh)) {
79465 +                       goto dirty_vol;
79466 +               }
79468 +               e1 = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
79469 +               esize = le16_to_cpu(e1->size);
79470 +               e2 = Add2Ptr(e1, esize);
79472 +               memmove(e1, e2, PtrOffset(e2, Add2Ptr(hdr, used)));
79474 +               le32_sub_cpu(&attr->res.data_size, esize);
79475 +               hdr->used = cpu_to_le32(used - esize);
79476 +               le32_sub_cpu(&hdr->total, esize);
79478 +               change_attr_size(rec, attr, le32_to_cpu(attr->size) - esize);
79480 +               mi->dirty = true;
79481 +               break;
79483 +       case SetIndexEntryVcnRoot:
79484 +               root = resident_data(attr);
79485 +               hdr = &root->ihdr;
79487 +               if (!check_if_index_root(rec, lrh) ||
79488 +                   !check_if_root_index(attr, hdr, lrh)) {
79489 +                       goto dirty_vol;
79490 +               }
79492 +               e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
79494 +               de_set_vbn_le(e, *(__le64 *)data);
79495 +               mi->dirty = true;
79496 +               break;
79498 +       case UpdateFileNameRoot:
79499 +               root = resident_data(attr);
79500 +               hdr = &root->ihdr;
79502 +               if (!check_if_index_root(rec, lrh) ||
79503 +                   !check_if_root_index(attr, hdr, lrh)) {
79504 +                       goto dirty_vol;
79505 +               }
79507 +               e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
79508 +               fname = (struct ATTR_FILE_NAME *)(e + 1);
79509 +               memmove(&fname->dup, data, sizeof(fname->dup));
79510 +               mi->dirty = true;
79511 +               break;
79513 +       case UpdateRecordDataRoot:
79514 +               root = resident_data(attr);
79515 +               hdr = &root->ihdr;
79517 +               if (!check_if_index_root(rec, lrh) ||
79518 +                   !check_if_root_index(attr, hdr, lrh)) {
79519 +                       goto dirty_vol;
79520 +               }
79522 +               e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
79524 +               memmove(Add2Ptr(e, le16_to_cpu(e->view.data_off)), data, dlen);
79526 +               mi->dirty = true;
79527 +               break;
79529 +       case ZeroEndOfFileRecord:
79530 +               if (roff + dlen > record_size)
79531 +                       goto dirty_vol;
79533 +               memset(attr, 0, dlen);
79534 +               mi->dirty = true;
79535 +               break;
79537 +       case UpdateNonresidentValue:
79538 +               if (lco < cbo + roff + dlen)
79539 +                       goto dirty_vol;
79541 +               memcpy(Add2Ptr(buffer_le, roff), data, dlen);
79543 +               a_dirty = true;
79544 +               if (attr->type == ATTR_ALLOC)
79545 +                       ntfs_fix_pre_write(buffer_le, bytes);
79546 +               break;
79548 +       case AddIndexEntryAllocation:
79549 +               ib = Add2Ptr(buffer_le, roff);
79550 +               hdr = &ib->ihdr;
79551 +               e = data;
79552 +               esize = le16_to_cpu(e->size);
79553 +               e1 = Add2Ptr(ib, aoff);
79555 +               if (is_baad(&ib->rhdr))
79556 +                       goto dirty_vol;
79557 +               if (!check_lsn(&ib->rhdr, rlsn))
79558 +                       goto out;
79560 +               used = le32_to_cpu(hdr->used);
79562 +               if (!check_index_buffer(ib, bytes) ||
79563 +                   !check_if_alloc_index(hdr, aoff) ||
79564 +                   Add2Ptr(e, esize) > Add2Ptr(lrh, rec_len) ||
79565 +                   used + esize > le32_to_cpu(hdr->total)) {
79566 +                       goto dirty_vol;
79567 +               }
79569 +               memmove(Add2Ptr(e1, esize), e1,
79570 +                       PtrOffset(e1, Add2Ptr(hdr, used)));
79571 +               memcpy(e1, e, esize);
79573 +               hdr->used = cpu_to_le32(used + esize);
79575 +               a_dirty = true;
79577 +               ntfs_fix_pre_write(&ib->rhdr, bytes);
79578 +               break;
79580 +       case DeleteIndexEntryAllocation:
79581 +               ib = Add2Ptr(buffer_le, roff);
79582 +               hdr = &ib->ihdr;
79583 +               e = Add2Ptr(ib, aoff);
79584 +               esize = le16_to_cpu(e->size);
79586 +               if (is_baad(&ib->rhdr))
79587 +                       goto dirty_vol;
79588 +               if (!check_lsn(&ib->rhdr, rlsn))
79589 +                       goto out;
79591 +               if (!check_index_buffer(ib, bytes) ||
79592 +                   !check_if_alloc_index(hdr, aoff)) {
79593 +                       goto dirty_vol;
79594 +               }
79596 +               e1 = Add2Ptr(e, esize);
79597 +               nsize = esize;
79598 +               used = le32_to_cpu(hdr->used);
79600 +               memmove(e, e1, PtrOffset(e1, Add2Ptr(hdr, used)));
79602 +               hdr->used = cpu_to_le32(used - nsize);
79604 +               a_dirty = true;
79606 +               ntfs_fix_pre_write(&ib->rhdr, bytes);
79607 +               break;
79609 +       case WriteEndOfIndexBuffer:
79610 +               ib = Add2Ptr(buffer_le, roff);
79611 +               hdr = &ib->ihdr;
79612 +               e = Add2Ptr(ib, aoff);
79614 +               if (is_baad(&ib->rhdr))
79615 +                       goto dirty_vol;
79616 +               if (!check_lsn(&ib->rhdr, rlsn))
79617 +                       goto out;
79618 +               if (!check_index_buffer(ib, bytes) ||
79619 +                   !check_if_alloc_index(hdr, aoff) ||
79620 +                   aoff + dlen > offsetof(struct INDEX_BUFFER, ihdr) +
79621 +                                         le32_to_cpu(hdr->total)) {
79622 +                       goto dirty_vol;
79623 +               }
79625 +               hdr->used = cpu_to_le32(dlen + PtrOffset(hdr, e));
79626 +               memmove(e, data, dlen);
79628 +               a_dirty = true;
79629 +               ntfs_fix_pre_write(&ib->rhdr, bytes);
79630 +               break;
79632 +       case SetIndexEntryVcnAllocation:
79633 +               ib = Add2Ptr(buffer_le, roff);
79634 +               hdr = &ib->ihdr;
79635 +               e = Add2Ptr(ib, aoff);
79637 +               if (is_baad(&ib->rhdr))
79638 +                       goto dirty_vol;
79640 +               if (!check_lsn(&ib->rhdr, rlsn))
79641 +                       goto out;
79642 +               if (!check_index_buffer(ib, bytes) ||
79643 +                   !check_if_alloc_index(hdr, aoff)) {
79644 +                       goto dirty_vol;
79645 +               }
79647 +               de_set_vbn_le(e, *(__le64 *)data);
79649 +               a_dirty = true;
79650 +               ntfs_fix_pre_write(&ib->rhdr, bytes);
79651 +               break;
79653 +       case UpdateFileNameAllocation:
79654 +               ib = Add2Ptr(buffer_le, roff);
79655 +               hdr = &ib->ihdr;
79656 +               e = Add2Ptr(ib, aoff);
79658 +               if (is_baad(&ib->rhdr))
79659 +                       goto dirty_vol;
79661 +               if (!check_lsn(&ib->rhdr, rlsn))
79662 +                       goto out;
79663 +               if (!check_index_buffer(ib, bytes) ||
79664 +                   !check_if_alloc_index(hdr, aoff)) {
79665 +                       goto dirty_vol;
79666 +               }
79668 +               fname = (struct ATTR_FILE_NAME *)(e + 1);
79669 +               memmove(&fname->dup, data, sizeof(fname->dup));
79671 +               a_dirty = true;
79672 +               ntfs_fix_pre_write(&ib->rhdr, bytes);
79673 +               break;
79675 +       case SetBitsInNonresidentBitMap:
79676 +               bmp_off =
79677 +                       le32_to_cpu(((struct BITMAP_RANGE *)data)->bitmap_off);
79678 +               bmp_bits = le32_to_cpu(((struct BITMAP_RANGE *)data)->bits);
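+               /*
+                * The bytes covering both the first and the last touched
+                * bit must lie inside the clusters loaded for this record
+                * ('lco' is the end of the loaded range).
+                */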
79680 +               if (cbo + (bmp_off + 7) / 8 > lco ||
79681 +                   cbo + ((bmp_off + bmp_bits + 7) / 8) > lco) {
79682 +                       goto dirty_vol;
79683 +               }
79685 +               __bitmap_set(Add2Ptr(buffer_le, roff), bmp_off, bmp_bits);
79686 +               a_dirty = true;
79687 +               break;
79689 +       case ClearBitsInNonresidentBitMap:
79690 +               bmp_off =
79691 +                       le32_to_cpu(((struct BITMAP_RANGE *)data)->bitmap_off);
79692 +               bmp_bits = le32_to_cpu(((struct BITMAP_RANGE *)data)->bits);
79694 +               if (cbo + (bmp_off + 7) / 8 > lco ||
79695 +                   cbo + ((bmp_off + bmp_bits + 7) / 8) > lco) {
79696 +                       goto dirty_vol;
79697 +               }
79699 +               __bitmap_clear(Add2Ptr(buffer_le, roff), bmp_off, bmp_bits);
79700 +               a_dirty = true;
79701 +               break;
79703 +       case UpdateRecordDataAllocation:
79704 +               ib = Add2Ptr(buffer_le, roff);
79705 +               hdr = &ib->ihdr;
79706 +               e = Add2Ptr(ib, aoff);
79708 +               if (is_baad(&ib->rhdr))
79709 +                       goto dirty_vol;
79711 +               if (!check_lsn(&ib->rhdr, rlsn))
79712 +                       goto out;
79713 +               if (!check_index_buffer(ib, bytes) ||
79714 +                   !check_if_alloc_index(hdr, aoff)) {
79715 +                       goto dirty_vol;
79716 +               }
79718 +               memmove(Add2Ptr(e, le16_to_cpu(e->view.data_off)), data, dlen);
79720 +               a_dirty = true;
79721 +               ntfs_fix_pre_write(&ib->rhdr, bytes);
79722 +               break;
79724 +       default:
79725 +               WARN_ON(1);
79726 +       }
79728 +       if (rlsn) {
79729 +               __le64 t64 = cpu_to_le64(*rlsn);
79731 +               if (rec)
79732 +                       rec->rhdr.lsn = t64;
79733 +               if (ib)
79734 +                       ib->rhdr.lsn = t64;
79735 +       }
79737 +       if (mi && mi->dirty) {
79738 +               err = mi_write(mi, 0);
79739 +               if (err)
79740 +                       goto out;
79741 +       }
79743 +       if (a_dirty) {
79744 +               attr = oa->attr;
79745 +               err = ntfs_sb_write_run(sbi, oa->run1, vbo, buffer_le, bytes);
79746 +               if (err)
79747 +                       goto out;
79748 +       }
79750 +out:
79752 +       if (inode)
79753 +               iput(inode);
79754 +       else if (mi != mi2_child)
79755 +               mi_put(mi);
79757 +       ntfs_free(buffer_le);
79759 +       return err;
79761 +dirty_vol:
79762 +       log->set_dirty = true;
79763 +       goto out;
79764 +}
79766 +/*
79767 + * log_replay
79768 + *
79769 + * This function is called during the mount operation.
79770 + * It replays the log and empties it.
79771 + * 'initialized' is set to false if the logfile contains '-1'.
79772 + */
79773 +int log_replay(struct ntfs_inode *ni, bool *initialized)
79774 +{
79775 +       int err;
79776 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
79777 +       struct ntfs_log *log;
79779 +       struct restart_info rst_info, rst_info2;
79780 +       u64 rec_lsn, ra_lsn, checkpt_lsn = 0, rlsn = 0;
79781 +       struct ATTR_NAME_ENTRY *attr_names = NULL;
79782 +       struct ATTR_NAME_ENTRY *ane;
79783 +       struct RESTART_TABLE *dptbl = NULL;
79784 +       struct RESTART_TABLE *trtbl = NULL;
79785 +       const struct RESTART_TABLE *rt;
79786 +       struct RESTART_TABLE *oatbl = NULL;
79787 +       struct inode *inode;
79788 +       struct OpenAttr *oa;
79789 +       struct ntfs_inode *ni_oe;
79790 +       struct ATTRIB *attr = NULL;
79791 +       u64 size, vcn, undo_next_lsn;
79792 +       CLST rno, lcn, lcn0, len0, clen;
79793 +       void *data;
79794 +       struct NTFS_RESTART *rst = NULL;
79795 +       struct lcb *lcb = NULL;
79796 +       struct OPEN_ATTR_ENRTY *oe;
79797 +       struct TRANSACTION_ENTRY *tr;
79798 +       struct DIR_PAGE_ENTRY *dp;
79799 +       u32 i, bytes_per_attr_entry;
79800 +       u32 l_size = ni->vfs_inode.i_size;
79801 +       u32 orig_file_size = l_size;
79802 +       u32 page_size, vbo, tail, off, dlen;
79803 +       u32 saved_len, rec_len, transact_id;
79804 +       bool use_second_page;
79805 +       struct RESTART_AREA *ra2, *ra = NULL;
79806 +       struct CLIENT_REC *ca, *cr;
79807 +       __le16 client;
79808 +       struct RESTART_HDR *rh;
79809 +       const struct LFS_RECORD_HDR *frh;
79810 +       const struct LOG_REC_HDR *lrh;
79811 +       bool is_mapped;
79812 +       bool is_ro = sb_rdonly(sbi->sb);
79813 +       u64 t64;
79814 +       u16 t16;
79815 +       u32 t32;
79817 +       /* Get the size of the page. NOTE: To replay we can use the default page. */
79818 +#if PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <= DefaultLogPageSize * 2
79819 +       page_size = norm_file_page(PAGE_SIZE, &l_size, true);
79820 +#else
79821 +       page_size = norm_file_page(PAGE_SIZE, &l_size, false);
79822 +#endif
79823 +       if (!page_size)
79824 +               return -EINVAL;
79826 +       log = ntfs_zalloc(sizeof(struct ntfs_log));
79827 +       if (!log)
79828 +               return -ENOMEM;
79830 +       log->ni = ni;
79831 +       log->l_size = l_size;
79832 +       log->one_page_buf = ntfs_malloc(page_size);
79834 +       if (!log->one_page_buf) {
79835 +               err = -ENOMEM;
79836 +               goto out;
79837 +       }
79839 +       log->page_size = page_size;
79840 +       log->page_mask = page_size - 1;
79841 +       log->page_bits = blksize_bits(page_size);
79843 +       /* Look for a restart area on the disk */
79844 +       err = log_read_rst(log, l_size, true, &rst_info);
79845 +       if (err)
79846 +               goto out;
79848 +       /* remember 'initialized' */
79849 +       *initialized = rst_info.initialized;
79851 +       if (!rst_info.restart) {
79852 +               if (rst_info.initialized) {
79853 +                       /* No restart area, yet the log is initialized -> corrupt */
79854 +                       err = -EINVAL;
79855 +                       goto out;
79856 +               }
79858 +               log_init_pg_hdr(log, page_size, page_size, 1, 1);
79859 +               log_create(log, l_size, 0, get_random_int(), false, false);
79861 +               log->ra = ra;
79863 +               ra = log_create_ra(log);
79864 +               if (!ra) {
79865 +                       err = -ENOMEM;
79866 +                       goto out;
79867 +               }
79868 +               log->ra = ra;
79869 +               log->init_ra = true;
79871 +               goto process_log;
79872 +       }
79874 +       /*
79875 +        * If the restart offset above wasn't zero then we won't
79876 +        * look for a second restart
79877 +        */
79878 +       if (rst_info.vbo)
79879 +               goto check_restart_area;
79881 +       err = log_read_rst(log, l_size, false, &rst_info2);
79883 +       /* Determine which restart area to use */
79884 +       if (!rst_info2.restart || rst_info2.last_lsn <= rst_info.last_lsn)
79885 +               goto use_first_page;
79887 +       use_second_page = true;
79889 +       if (rst_info.chkdsk_was_run && page_size != rst_info.vbo) {
79890 +               struct RECORD_PAGE_HDR *sp = NULL;
79891 +               bool usa_error;
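+               /*
+                * A restart page already processed (presumably by chkdsk)
+                * carries the CHKD signature; if the page at offset
+                * 'page_size' has it, keep using the first restart area.
+                */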
79893 +               if (!read_log_page(log, page_size, &sp, &usa_error) &&
79894 +                   sp->rhdr.sign == NTFS_CHKD_SIGNATURE) {
79895 +                       use_second_page = false;
79896 +               }
79897 +               ntfs_free(sp);
79898 +       }
79900 +       if (use_second_page) {
79901 +               ntfs_free(rst_info.r_page);
79902 +               memcpy(&rst_info, &rst_info2, sizeof(struct restart_info));
79903 +               rst_info2.r_page = NULL;
79904 +       }
79906 +use_first_page:
79907 +       ntfs_free(rst_info2.r_page);
79909 +check_restart_area:
79910 +       /* If the restart area is at offset 0, we want to write the second restart area first */
79911 +       log->init_ra = !!rst_info.vbo;
79913 +       /* If we have a valid page then grab a pointer to the restart area */
79914 +       ra2 = rst_info.valid_page
79915 +                     ? Add2Ptr(rst_info.r_page,
79916 +                               le16_to_cpu(rst_info.r_page->ra_off))
79917 +                     : NULL;
79919 +       if (rst_info.chkdsk_was_run ||
79920 +           (ra2 && ra2->client_idx[1] == LFS_NO_CLIENT_LE)) {
79921 +               bool wrapped = false;
79922 +               bool use_multi_page = false;
79923 +               u32 open_log_count;
79925 +               /* Do some checks based on whether we have a valid log page */
79926 +               if (!rst_info.valid_page) {
79927 +                       open_log_count = get_random_int();
79928 +                       goto init_log_instance;
79929 +               }
79930 +               open_log_count = le32_to_cpu(ra2->open_log_count);
79932 +               /*
79933 +                * If the restart page size isn't changing then we want to
79934 +                * check how much work we need to do
79935 +                */
79936 +               if (page_size != le32_to_cpu(rst_info.r_page->sys_page_size))
79937 +                       goto init_log_instance;
79939 +init_log_instance:
79940 +               log_init_pg_hdr(log, page_size, page_size, 1, 1);
79942 +               log_create(log, l_size, rst_info.last_lsn, open_log_count,
79943 +                          wrapped, use_multi_page);
79945 +               ra = log_create_ra(log);
79946 +               if (!ra) {
79947 +                       err = -ENOMEM;
79948 +                       goto out;
79949 +               }
79950 +               log->ra = ra;
79952 +               /* Put the restart areas and initialize the log file as required */
79953 +               goto process_log;
79954 +       }
79956 +       if (!ra2) {
79957 +               err = -EINVAL;
79958 +               goto out;
79959 +       }
79961 +       /*
79962 +        * If the log page or the system page sizes have changed, we can't use the log file.
79963 +        * We must use the system page size instead of the default size
79964 +        * if there was not a clean shutdown.
79965 +        */
79966 +       t32 = le32_to_cpu(rst_info.r_page->sys_page_size);
79967 +       if (page_size != t32) {
79968 +               l_size = orig_file_size;
79969 +               page_size =
79970 +                       norm_file_page(t32, &l_size, t32 == DefaultLogPageSize);
79971 +       }
79973 +       if (page_size != t32 ||
79974 +           page_size != le32_to_cpu(rst_info.r_page->page_size)) {
79975 +               err = -EINVAL;
79976 +               goto out;
79977 +       }
79979 +       /* If the file size has shrunk then we won't mount it */
79980 +       if (l_size < le64_to_cpu(ra2->l_size)) {
79981 +               err = -EINVAL;
79982 +               goto out;
79983 +       }
79985 +       log_init_pg_hdr(log, page_size, page_size,
79986 +                       le16_to_cpu(rst_info.r_page->major_ver),
79987 +                       le16_to_cpu(rst_info.r_page->minor_ver));
79989 +       log->l_size = le64_to_cpu(ra2->l_size);
79990 +       log->seq_num_bits = le32_to_cpu(ra2->seq_num_bits);
79991 +       log->file_data_bits = sizeof(u64) * 8 - log->seq_num_bits;
79992 +       log->seq_num_mask = (8 << log->file_data_bits) - 1;
79993 +       log->last_lsn = le64_to_cpu(ra2->current_lsn);
79994 +       log->seq_num = log->last_lsn >> log->file_data_bits;
79995 +       log->ra_off = le16_to_cpu(rst_info.r_page->ra_off);
79996 +       log->restart_size = log->sys_page_size - log->ra_off;
79997 +       log->record_header_len = le16_to_cpu(ra2->rec_hdr_len);
79998 +       log->ra_size = le16_to_cpu(ra2->ra_len);
79999 +       log->data_off = le16_to_cpu(ra2->data_off);
80000 +       log->data_size = log->page_size - log->data_off;
80001 +       log->reserved = log->data_size - log->record_header_len;
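+       /*
+        * Illustrative sketch of the lsn layout implied by the fields above
+        * (example values assumed, not taken from this log): the high
+        * seq_num_bits of an lsn count file wraps, the low file_data_bits
+        * address data inside the log file. With seq_num_bits == 40,
+        * file_data_bits == 64 - 40 == 24 and
+        * last_lsn == ((u64)7 << 24) | 0x1200, the code above computes
+        * seq_num == last_lsn >> 24 == 7, i.e. the log has wrapped 7 times.
+        */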
80003 +       vbo = lsn_to_vbo(log, log->last_lsn);
80005 +       if (vbo < log->first_page) {
80006 +               /* This is a pseudo lsn */
80007 +               log->l_flags |= NTFSLOG_NO_LAST_LSN;
80008 +               log->next_page = log->first_page;
80009 +               goto find_oldest;
80010 +       }
80012 +       /* Find the end of this log record */
80013 +       off = final_log_off(log, log->last_lsn,
80014 +                           le32_to_cpu(ra2->last_lsn_data_len));
80016 +       /* If we wrapped the file then increment the sequence number */
80017 +       if (off <= vbo) {
80018 +               log->seq_num += 1;
80019 +               log->l_flags |= NTFSLOG_WRAPPED;
80020 +       }
80022 +       /* Now compute the next log page to use */
80023 +       vbo &= ~log->sys_page_mask;
80024 +       tail = log->page_size - (off & log->page_mask) - 1;
80026 +       /* If we can fit another log record on the page, move back a page in the log file */
80027 +       if (tail >= log->record_header_len) {
80028 +               log->l_flags |= NTFSLOG_REUSE_TAIL;
80029 +               log->next_page = vbo;
80030 +       } else {
80031 +               log->next_page = next_page_off(log, vbo);
80032 +       }
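+       /*
+        * Worked example for the tail check above (numbers assumed): with
+        * page_size == 0x1000 and off == 0x5F80, off & page_mask == 0xF80
+        * and tail == 0x1000 - 0xF80 - 1 == 0x7F bytes are left in the page.
+        * If record_header_len <= 0x7F, another record header still fits and
+        * the tail page is reused; otherwise writing resumes on the next page.
+        */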
80034 +find_oldest:
80035 +       /* Find the oldest client lsn. Use the last flushed lsn as a starting point */
80036 +       log->oldest_lsn = log->last_lsn;
80037 +       oldest_client_lsn(Add2Ptr(ra2, le16_to_cpu(ra2->client_off)),
80038 +                         ra2->client_idx[1], &log->oldest_lsn);
80039 +       log->oldest_lsn_off = lsn_to_vbo(log, log->oldest_lsn);
80041 +       if (log->oldest_lsn_off < log->first_page)
80042 +               log->l_flags |= NTFSLOG_NO_OLDEST_LSN;
80044 +       if (!(ra2->flags & RESTART_SINGLE_PAGE_IO))
80045 +               log->l_flags |= NTFSLOG_WRAPPED | NTFSLOG_MULTIPLE_PAGE_IO;
80047 +       log->current_openlog_count = le32_to_cpu(ra2->open_log_count);
80048 +       log->total_avail_pages = log->l_size - log->first_page;
80049 +       log->total_avail = log->total_avail_pages >> log->page_bits;
80050 +       log->max_current_avail = log->total_avail * log->reserved;
80051 +       log->total_avail = log->total_avail * log->data_size;
80053 +       log->current_avail = current_log_avail(log);
80055 +       ra = ntfs_zalloc(log->restart_size);
80056 +       if (!ra) {
80057 +               err = -ENOMEM;
80058 +               goto out;
80059 +       }
80060 +       log->ra = ra;
80062 +       t16 = le16_to_cpu(ra2->client_off);
80063 +       if (t16 == offsetof(struct RESTART_AREA, clients)) {
80064 +               memcpy(ra, ra2, log->ra_size);
80065 +       } else {
80066 +               memcpy(ra, ra2, offsetof(struct RESTART_AREA, clients));
80067 +               memcpy(ra->clients, Add2Ptr(ra2, t16),
80068 +                      le16_to_cpu(ra2->ra_len) - t16);
80070 +               log->current_openlog_count = get_random_int();
80071 +               ra->open_log_count = cpu_to_le32(log->current_openlog_count);
80072 +               log->ra_size = offsetof(struct RESTART_AREA, clients) +
80073 +                              sizeof(struct CLIENT_REC);
80074 +               ra->client_off =
80075 +                       cpu_to_le16(offsetof(struct RESTART_AREA, clients));
80076 +               ra->ra_len = cpu_to_le16(log->ra_size);
80077 +       }
80079 +       le32_add_cpu(&ra->open_log_count, 1);
80081 +       /* Now we need to walk through looking for the last lsn */
80082 +       err = last_log_lsn(log);
80083 +       if (err)
80084 +               goto out;
80086 +       log->current_avail = current_log_avail(log);
80088 +       /* Remember which restart area to write first */
80089 +       log->init_ra = rst_info.vbo;
80091 +process_log:
80092 +       /* 1.0, 1.1, 2.0 log->major_ver/minor_ver - short values */
80093 +       switch ((log->major_ver << 16) + log->minor_ver) {
80094 +       case 0x10000:
80095 +       case 0x10001:
80096 +       case 0x20000:
80097 +               break;
80098 +       default:
80099 +               ntfs_warn(sbi->sb, "\x24LogFile version %d.%d is not supported",
80100 +                         log->major_ver, log->minor_ver);
80101 +               err = -EOPNOTSUPP;
80102 +               log->set_dirty = true;
80103 +               goto out;
80104 +       }
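+       /*
+        * The switch above packs both version numbers into a single word,
+        * e.g. major 1, minor 1 gives (1 << 16) + 1 == 0x10001, so only
+        * versions 1.0, 1.1 and 2.0 of the LogFile format are accepted.
+        */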
80106 +       /* One client "NTFS" per logfile */
80107 +       ca = Add2Ptr(ra, le16_to_cpu(ra->client_off));
80109 +       for (client = ra->client_idx[1];; client = cr->next_client) {
80110 +               if (client == LFS_NO_CLIENT_LE) {
80111 +                       /* Insert "NTFS" client into the LogFile */
80112 +                       client = ra->client_idx[0];
80113 +                       if (client == LFS_NO_CLIENT_LE)
80114 +                               return -EINVAL;
80116 +                       t16 = le16_to_cpu(client);
80117 +                       cr = ca + t16;
80119 +                       remove_client(ca, cr, &ra->client_idx[0]);
80121 +                       cr->restart_lsn = 0;
80122 +                       cr->oldest_lsn = cpu_to_le64(log->oldest_lsn);
80123 +                       cr->name_bytes = cpu_to_le32(8);
80124 +                       cr->name[0] = cpu_to_le16('N');
80125 +                       cr->name[1] = cpu_to_le16('T');
80126 +                       cr->name[2] = cpu_to_le16('F');
80127 +                       cr->name[3] = cpu_to_le16('S');
80129 +                       add_client(ca, t16, &ra->client_idx[1]);
80130 +                       break;
80131 +               }
80133 +               cr = ca + le16_to_cpu(client);
80135 +               if (cpu_to_le32(8) == cr->name_bytes &&
80136 +                   cpu_to_le16('N') == cr->name[0] &&
80137 +                   cpu_to_le16('T') == cr->name[1] &&
80138 +                   cpu_to_le16('F') == cr->name[2] &&
80139 +                   cpu_to_le16('S') == cr->name[3])
80140 +                       break;
80141 +       }
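+       /*
+        * Sketch of what the loop above guarantees (assuming the usual
+        * single-client layout): ra->client_idx[1] heads the in-use client
+        * list, ra->client_idx[0] the free list. Either an existing in-use
+        * record already named "NTFS" was found, or a free record was
+        * unlinked, renamed to "NTFS" and linked into the in-use list, so
+        * 'cr' always ends up pointing at the "NTFS" client record.
+        */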
80143 +       /* Update the client handle with the client block information */
80144 +       log->client_id.seq_num = cr->seq_num;
80145 +       log->client_id.client_idx = client;
80147 +       err = read_rst_area(log, &rst, &ra_lsn);
80148 +       if (err)
80149 +               goto out;
80151 +       if (!rst)
80152 +               goto out;
80154 +       bytes_per_attr_entry = !rst->major_ver ? 0x2C : 0x28;
80156 +       checkpt_lsn = le64_to_cpu(rst->check_point_start);
80157 +       if (!checkpt_lsn)
80158 +               checkpt_lsn = ra_lsn;
80160 +       /* Allocate and Read the Transaction Table */
80161 +       if (!rst->transact_table_len)
80162 +               goto check_dirty_page_table;
80164 +       t64 = le64_to_cpu(rst->transact_table_lsn);
80165 +       err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
80166 +       if (err)
80167 +               goto out;
80169 +       lrh = lcb->log_rec;
80170 +       frh = lcb->lrh;
80171 +       rec_len = le32_to_cpu(frh->client_data_len);
80173 +       if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
80174 +                          bytes_per_attr_entry)) {
80175 +               err = -EINVAL;
80176 +               goto out;
80177 +       }
80179 +       t16 = le16_to_cpu(lrh->redo_off);
80181 +       rt = Add2Ptr(lrh, t16);
80182 +       t32 = rec_len - t16;
80184 +       /* Now check that this is a valid restart table */
80185 +       if (!check_rstbl(rt, t32)) {
80186 +               err = -EINVAL;
80187 +               goto out;
80188 +       }
80190 +       trtbl = ntfs_memdup(rt, t32);
80191 +       if (!trtbl) {
80192 +               err = -ENOMEM;
80193 +               goto out;
80194 +       }
80196 +       lcb_put(lcb);
80197 +       lcb = NULL;
80199 +check_dirty_page_table:
80200 +       /* The next record back should be the Dirty Pages Table */
80201 +       if (!rst->dirty_pages_len)
80202 +               goto check_attribute_names;
80204 +       t64 = le64_to_cpu(rst->dirty_pages_table_lsn);
80205 +       err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
80206 +       if (err)
80207 +               goto out;
80209 +       lrh = lcb->log_rec;
80210 +       frh = lcb->lrh;
80211 +       rec_len = le32_to_cpu(frh->client_data_len);
80213 +       if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
80214 +                          bytes_per_attr_entry)) {
80215 +               err = -EINVAL;
80216 +               goto out;
80217 +       }
80219 +       t16 = le16_to_cpu(lrh->redo_off);
80221 +       rt = Add2Ptr(lrh, t16);
80222 +       t32 = rec_len - t16;
80224 +       /* Now check that this is a valid restart table */
80225 +       if (!check_rstbl(rt, t32)) {
80226 +               err = -EINVAL;
80227 +               goto out;
80228 +       }
80230 +       dptbl = ntfs_memdup(rt, t32);
80231 +       if (!dptbl) {
80232 +               err = -ENOMEM;
80233 +               goto out;
80234 +       }
80236 +       /* Convert Ra version '0' into version '1' */
80237 +       if (rst->major_ver)
80238 +               goto end_conv_1;
80240 +       dp = NULL;
80241 +       while ((dp = enum_rstbl(dptbl, dp))) {
80242 +               struct DIR_PAGE_ENTRY_32 *dp0 = (struct DIR_PAGE_ENTRY_32 *)dp;
80243 +               // NOTE: Danger. Check for out-of-bounds access
80244 +               memmove(&dp->vcn, &dp0->vcn_low,
80245 +                       2 * sizeof(u64) +
80246 +                               le32_to_cpu(dp->lcns_follow) * sizeof(u64));
80247 +       }
80249 +end_conv_1:
80250 +       lcb_put(lcb);
80251 +       lcb = NULL;
80253 +       /* Go through the table and remove the duplicates, remembering the oldest lsn values */
80254 +       if (sbi->cluster_size <= log->page_size)
80255 +               goto trace_dp_table;
80257 +       dp = NULL;
80258 +       while ((dp = enum_rstbl(dptbl, dp))) {
80259 +               struct DIR_PAGE_ENTRY *next = dp;
80261 +               while ((next = enum_rstbl(dptbl, next))) {
80262 +                       if (next->target_attr == dp->target_attr &&
80263 +                           next->vcn == dp->vcn) {
80264 +                               if (le64_to_cpu(next->oldest_lsn) <
80265 +                                   le64_to_cpu(dp->oldest_lsn)) {
80266 +                                       dp->oldest_lsn = next->oldest_lsn;
80267 +                               }
80269 +                               free_rsttbl_idx(dptbl, PtrOffset(dptbl, next));
80270 +                       }
80271 +               }
80272 +       }
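+       /*
+        * Dedup example (entries assumed): two dirty page entries with
+        * target_attr == 0x18 and vcn == 0x40, carrying oldest_lsn 0x500
+        * and 0x300, collapse into one entry that keeps oldest_lsn == 0x300;
+        * the duplicate slot is handed back via free_rsttbl_idx().
+        */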
80273 +trace_dp_table:
80274 +check_attribute_names:
80275 +       /* The next record should be the Attribute Names */
80276 +       if (!rst->attr_names_len)
80277 +               goto check_attr_table;
80279 +       t64 = le64_to_cpu(rst->attr_names_lsn);
80280 +       err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
80281 +       if (err)
80282 +               goto out;
80284 +       lrh = lcb->log_rec;
80285 +       frh = lcb->lrh;
80286 +       rec_len = le32_to_cpu(frh->client_data_len);
80288 +       if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
80289 +                          bytes_per_attr_entry)) {
80290 +               err = -EINVAL;
80291 +               goto out;
80292 +       }
80294 +       t32 = lrh_length(lrh);
80295 +       rec_len -= t32;
80297 +       attr_names = ntfs_memdup(Add2Ptr(lrh, t32), rec_len);
80299 +       lcb_put(lcb);
80300 +       lcb = NULL;
80302 +check_attr_table:
80303 +       /* The next record should be the attribute Table */
80304 +       if (!rst->open_attr_len)
80305 +               goto check_attribute_names2;
80307 +       t64 = le64_to_cpu(rst->open_attr_table_lsn);
80308 +       err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
80309 +       if (err)
80310 +               goto out;
80312 +       lrh = lcb->log_rec;
80313 +       frh = lcb->lrh;
80314 +       rec_len = le32_to_cpu(frh->client_data_len);
80316 +       if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
80317 +                          bytes_per_attr_entry)) {
80318 +               err = -EINVAL;
80319 +               goto out;
80320 +       }
80322 +       t16 = le16_to_cpu(lrh->redo_off);
80324 +       rt = Add2Ptr(lrh, t16);
80325 +       t32 = rec_len - t16;
80327 +       if (!check_rstbl(rt, t32)) {
80328 +               err = -EINVAL;
80329 +               goto out;
80330 +       }
80332 +       oatbl = ntfs_memdup(rt, t32);
80333 +       if (!oatbl) {
80334 +               err = -ENOMEM;
80335 +               goto out;
80336 +       }
80338 +       log->open_attr_tbl = oatbl;
80340 +       /* Clear all of the Attr pointers */
80341 +       oe = NULL;
80342 +       while ((oe = enum_rstbl(oatbl, oe))) {
80343 +               if (!rst->major_ver) {
80344 +                       struct OPEN_ATTR_ENRTY_32 oe0;
80346 +                       /* Really 'oe' points to OPEN_ATTR_ENRTY_32 */
80347 +                       memcpy(&oe0, oe, SIZEOF_OPENATTRIBUTEENTRY0);
80349 +                       oe->bytes_per_index = oe0.bytes_per_index;
80350 +                       oe->type = oe0.type;
80351 +                       oe->is_dirty_pages = oe0.is_dirty_pages;
80352 +                       oe->name_len = 0;
80353 +                       oe->ref = oe0.ref;
80354 +                       oe->open_record_lsn = oe0.open_record_lsn;
80355 +               }
80357 +               oe->is_attr_name = 0;
80358 +               oe->ptr = NULL;
80359 +       }
80361 +       lcb_put(lcb);
80362 +       lcb = NULL;
80364 +check_attribute_names2:
80365 +       if (!rst->attr_names_len)
80366 +               goto trace_attribute_table;
80368 +       ane = attr_names;
80369 +       if (!oatbl)
80370 +               goto trace_attribute_table;
80371 +       while (ane->off) {
80372 +               /* TODO: Clear table on exit! */
80373 +               oe = Add2Ptr(oatbl, le16_to_cpu(ane->off));
80374 +               t16 = le16_to_cpu(ane->name_bytes);
80375 +               oe->name_len = t16 / sizeof(short);
80376 +               oe->ptr = ane->name;
80377 +               oe->is_attr_name = 2;
80378 +               ane = Add2Ptr(ane, sizeof(struct ATTR_NAME_ENTRY) + t16);
80379 +       }
80381 +trace_attribute_table:
80382 +       /*
80383 +        * If the checkpt_lsn is zero, then this is a freshly
80384 +        * formatted disk and we have no work to do
80385 +        */
80386 +       if (!checkpt_lsn) {
80387 +               err = 0;
80388 +               goto out;
80389 +       }
80391 +       if (!oatbl) {
80392 +               oatbl = init_rsttbl(bytes_per_attr_entry, 8);
80393 +               if (!oatbl) {
80394 +                       err = -ENOMEM;
80395 +                       goto out;
80396 +               }
80397 +       }
80399 +       log->open_attr_tbl = oatbl;
80401 +       /* Start the analysis pass from the Checkpoint lsn. */
80402 +       rec_lsn = checkpt_lsn;
80404 +       /* Read the first lsn */
80405 +       err = read_log_rec_lcb(log, checkpt_lsn, lcb_ctx_next, &lcb);
80406 +       if (err)
80407 +               goto out;
80409 +       /* Loop to read all subsequent records to the end of the log file */
80410 +next_log_record_analyze:
80411 +       err = read_next_log_rec(log, lcb, &rec_lsn);
80412 +       if (err)
80413 +               goto out;
80415 +       if (!rec_lsn)
80416 +               goto end_log_records_enumerate;
80418 +       frh = lcb->lrh;
80419 +       transact_id = le32_to_cpu(frh->transact_id);
80420 +       rec_len = le32_to_cpu(frh->client_data_len);
80421 +       lrh = lcb->log_rec;
80423 +       if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) {
80424 +               err = -EINVAL;
80425 +               goto out;
80426 +       }
80428 +       /*
80429 +        * The first lsn after the previous lsn remembered by
80430 +        * the checkpoint is the first candidate for the rlsn
80431 +        */
80432 +       if (!rlsn)
80433 +               rlsn = rec_lsn;
80435 +       if (LfsClientRecord != frh->record_type)
80436 +               goto next_log_record_analyze;
80438 +       /*
80439 +        * Now update the Transaction Table for this transaction.
80440 +        * If there is no entry present, or it is unallocated, we allocate the entry
80441 +        */
80442 +       if (!trtbl) {
80443 +               trtbl = init_rsttbl(sizeof(struct TRANSACTION_ENTRY),
80444 +                                   INITIAL_NUMBER_TRANSACTIONS);
80445 +               if (!trtbl) {
80446 +                       err = -ENOMEM;
80447 +                       goto out;
80448 +               }
80449 +       }
80451 +       tr = Add2Ptr(trtbl, transact_id);
80453 +       if (transact_id >= bytes_per_rt(trtbl) ||
80454 +           tr->next != RESTART_ENTRY_ALLOCATED_LE) {
80455 +               tr = alloc_rsttbl_from_idx(&trtbl, transact_id);
80456 +               if (!tr) {
80457 +                       err = -ENOMEM;
80458 +                       goto out;
80459 +               }
80460 +               tr->transact_state = TransactionActive;
80461 +               tr->first_lsn = cpu_to_le64(rec_lsn);
80462 +       }
80464 +       tr->prev_lsn = tr->undo_next_lsn = cpu_to_le64(rec_lsn);
80466 +       /*
80467 +        * If this is a compensation log record, then change
80468 +        * the undo_next_lsn to be the undo_next_lsn of this record
80469 +        */
80470 +       if (lrh->undo_op == cpu_to_le16(CompensationLogRecord))
80471 +               tr->undo_next_lsn = frh->client_undo_next_lsn;
80473 +       /* Dispatch to handle log record depending on type */
80474 +       switch (le16_to_cpu(lrh->redo_op)) {
80475 +       case InitializeFileRecordSegment:
80476 +       case DeallocateFileRecordSegment:
80477 +       case WriteEndOfFileRecordSegment:
80478 +       case CreateAttribute:
80479 +       case DeleteAttribute:
80480 +       case UpdateResidentValue:
80481 +       case UpdateNonresidentValue:
80482 +       case UpdateMappingPairs:
80483 +       case SetNewAttributeSizes:
80484 +       case AddIndexEntryRoot:
80485 +       case DeleteIndexEntryRoot:
80486 +       case AddIndexEntryAllocation:
80487 +       case DeleteIndexEntryAllocation:
80488 +       case WriteEndOfIndexBuffer:
80489 +       case SetIndexEntryVcnRoot:
80490 +       case SetIndexEntryVcnAllocation:
80491 +       case UpdateFileNameRoot:
80492 +       case UpdateFileNameAllocation:
80493 +       case SetBitsInNonresidentBitMap:
80494 +       case ClearBitsInNonresidentBitMap:
80495 +       case UpdateRecordDataRoot:
80496 +       case UpdateRecordDataAllocation:
80497 +       case ZeroEndOfFileRecord:
80498 +               t16 = le16_to_cpu(lrh->target_attr);
80499 +               t64 = le64_to_cpu(lrh->target_vcn);
80500 +               dp = find_dp(dptbl, t16, t64);
80502 +               if (dp)
80503 +                       goto copy_lcns;
80505 +               /*
80506 +                * Calculate the number of clusters per page for the system
80507 +                * which wrote the checkpoint, possibly creating the table
80508 +                */
80509 +               if (dptbl) {
80510 +                       t32 = (le16_to_cpu(dptbl->size) -
80511 +                              sizeof(struct DIR_PAGE_ENTRY)) /
80512 +                             sizeof(u64);
80513 +               } else {
80514 +                       t32 = log->clst_per_page;
80515 +                       ntfs_free(dptbl);
80516 +                       dptbl = init_rsttbl(struct_size(dp, page_lcns, t32),
80517 +                                           32);
80518 +                       if (!dptbl) {
80519 +                               err = -ENOMEM;
80520 +                               goto out;
80521 +                       }
80522 +               }
80524 +               dp = alloc_rsttbl_idx(&dptbl);
80525 +               dp->target_attr = cpu_to_le32(t16);
80526 +               dp->transfer_len = cpu_to_le32(t32 << sbi->cluster_bits);
80527 +               dp->lcns_follow = cpu_to_le32(t32);
80528 +               dp->vcn = cpu_to_le64(t64 & ~((u64)t32 - 1));
80529 +               dp->oldest_lsn = cpu_to_le64(rec_lsn);
80531 +copy_lcns:
80532 +               /*
80533 +                * Copy the Lcns from the log record into the Dirty Page Entry
80534 +                * TODO: for different page size support, must somehow make
80535 +                * whole routine a loop, in case Lcns do not fit below
80536 +                */
80537 +               t16 = le16_to_cpu(lrh->lcns_follow);
80538 +               for (i = 0; i < t16; i++) {
80539 +                       size_t j = (size_t)(le64_to_cpu(lrh->target_vcn) -
80540 +                                           le64_to_cpu(dp->vcn));
80541 +                       dp->page_lcns[j + i] = lrh->page_lcns[i];
80542 +               }
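+               /*
+                * Index sketch for the copy above (values assumed): with
+                * dp->vcn == 0x20, lrh->target_vcn == 0x22 and
+                * lcns_follow == 2, j == 0x22 - 0x20 == 2, so page_lcns[2]
+                * and page_lcns[3] receive the two lcns of this record.
+                */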
80544 +               goto next_log_record_analyze;
80546 +       case DeleteDirtyClusters: {
80547 +               u32 range_count =
80548 +                       le16_to_cpu(lrh->redo_len) / sizeof(struct LCN_RANGE);
80549 +               const struct LCN_RANGE *r =
80550 +                       Add2Ptr(lrh, le16_to_cpu(lrh->redo_off));
80552 +               /* Loop through all of the Lcn ranges in this log record */
80553 +               for (i = 0; i < range_count; i++, r++) {
80554 +                       u64 lcn0 = le64_to_cpu(r->lcn);
80555 +                       u64 lcn_e = lcn0 + le64_to_cpu(r->len) - 1;
80557 +                       dp = NULL;
80558 +                       while ((dp = enum_rstbl(dptbl, dp))) {
80559 +                               u32 j;
80561 +                               t32 = le32_to_cpu(dp->lcns_follow);
80562 +                               for (j = 0; j < t32; j++) {
80563 +                                       t64 = le64_to_cpu(dp->page_lcns[j]);
80564 +                                       if (t64 >= lcn0 && t64 <= lcn_e)
80565 +                                               dp->page_lcns[j] = 0;
80566 +                               }
80567 +                       }
80568 +               }
80569 +               goto next_log_record_analyze;
80571 +       }
80573 +       case OpenNonresidentAttribute:
80574 +               t16 = le16_to_cpu(lrh->target_attr);
80575 +               if (t16 >= bytes_per_rt(oatbl)) {
80576 +                       /*
80577 +                        * Compute how big the table needs to be.
80578 +                        * Add 10 extra entries for some cushion
80579 +                        */
80580 +                       u32 new_e = t16 / le16_to_cpu(oatbl->size);
80582 +                       new_e += 10 - le16_to_cpu(oatbl->used);
80584 +                       oatbl = extend_rsttbl(oatbl, new_e, ~0u);
80585 +                       log->open_attr_tbl = oatbl;
80586 +                       if (!oatbl) {
80587 +                               err = -ENOMEM;
80588 +                               goto out;
80589 +                       }
80590 +               }
80592 +               /* Point to the entry being opened */
80593 +               oe = alloc_rsttbl_from_idx(&oatbl, t16);
80594 +               log->open_attr_tbl = oatbl;
80595 +               if (!oe) {
80596 +                       err = -ENOMEM;
80597 +                       goto out;
80598 +               }
80600 +               /* Initialize this entry from the log record */
80601 +               t16 = le16_to_cpu(lrh->redo_off);
80602 +               if (!rst->major_ver) {
80603 +                       /* Convert version '0' into version '1' */
80604 +                       struct OPEN_ATTR_ENRTY_32 *oe0 = Add2Ptr(lrh, t16);
80606 +                       oe->bytes_per_index = oe0->bytes_per_index;
80607 +                       oe->type = oe0->type;
80608 +                       oe->is_dirty_pages = oe0->is_dirty_pages;
80609 +                       oe->name_len = 0; //oe0.name_len;
80610 +                       oe->ref = oe0->ref;
80611 +                       oe->open_record_lsn = oe0->open_record_lsn;
80612 +               } else {
80613 +                       memcpy(oe, Add2Ptr(lrh, t16), bytes_per_attr_entry);
80614 +               }
80616 +               t16 = le16_to_cpu(lrh->undo_len);
80617 +               if (t16) {
80618 +                       oe->ptr = ntfs_malloc(t16);
80619 +                       if (!oe->ptr) {
80620 +                               err = -ENOMEM;
80621 +                               goto out;
80622 +                       }
80623 +                       oe->name_len = t16 / sizeof(short);
80624 +                       memcpy(oe->ptr,
80625 +                              Add2Ptr(lrh, le16_to_cpu(lrh->undo_off)), t16);
80626 +                       oe->is_attr_name = 1;
80627 +               } else {
80628 +                       oe->ptr = NULL;
80629 +                       oe->is_attr_name = 0;
80630 +               }
80632 +               goto next_log_record_analyze;
80634 +       case HotFix:
80635 +               t16 = le16_to_cpu(lrh->target_attr);
80636 +               t64 = le64_to_cpu(lrh->target_vcn);
80637 +               dp = find_dp(dptbl, t16, t64);
80638 +               if (dp) {
80639 +                       size_t j = le64_to_cpu(lrh->target_vcn) -
80640 +                                  le64_to_cpu(dp->vcn);
80641 +                       if (dp->page_lcns[j])
80642 +                               dp->page_lcns[j] = lrh->page_lcns[0];
80643 +               }
80644 +               goto next_log_record_analyze;
80646 +       case EndTopLevelAction:
80647 +               tr = Add2Ptr(trtbl, transact_id);
80648 +               tr->prev_lsn = cpu_to_le64(rec_lsn);
80649 +               tr->undo_next_lsn = frh->client_undo_next_lsn;
80650 +               goto next_log_record_analyze;
80652 +       case PrepareTransaction:
80653 +               tr = Add2Ptr(trtbl, transact_id);
80654 +               tr->transact_state = TransactionPrepared;
80655 +               goto next_log_record_analyze;
80657 +       case CommitTransaction:
80658 +               tr = Add2Ptr(trtbl, transact_id);
80659 +               tr->transact_state = TransactionCommitted;
80660 +               goto next_log_record_analyze;
80662 +       case ForgetTransaction:
80663 +               free_rsttbl_idx(trtbl, transact_id);
80664 +               goto next_log_record_analyze;
80666 +       case Noop:
80667 +       case OpenAttributeTableDump:
80668 +       case AttributeNamesDump:
80669 +       case DirtyPageTableDump:
80670 +       case TransactionTableDump:
80671 +               /* The following cases require no action in the Analysis Pass */
80672 +               goto next_log_record_analyze;
80674 +       default:
80675 +               /*
80676 +                * All codes will be explicitly handled.
80677 +                * If we see a code we do not expect, then we are in trouble
80678 +                */
80679 +               goto next_log_record_analyze;
80680 +       }
80682 +end_log_records_enumerate:
80683 +       lcb_put(lcb);
80684 +       lcb = NULL;
80686 +       /*
80687 +        * Scan the Dirty Page Table and Transaction Table for
80688 +        * the lowest lsn, and return it as the Redo lsn
80689 +        */
80690 +       dp = NULL;
80691 +       while ((dp = enum_rstbl(dptbl, dp))) {
80692 +               t64 = le64_to_cpu(dp->oldest_lsn);
80693 +               if (t64 && t64 < rlsn)
80694 +                       rlsn = t64;
80695 +       }
80697 +       tr = NULL;
80698 +       while ((tr = enum_rstbl(trtbl, tr))) {
80699 +               t64 = le64_to_cpu(tr->first_lsn);
80700 +               if (t64 && t64 < rlsn)
80701 +                       rlsn = t64;
80702 +       }
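+       /*
+        * At this point rlsn is the minimum of the first candidate lsn,
+        * every dirty page's oldest_lsn and every transaction's first_lsn;
+        * e.g. candidates 0x800, 0x500 and 0x650 leave rlsn == 0x500
+        * (illustrative values).
+        */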
80704 +       /* Only proceed if the Dirty Page Table or the Transaction Table is not empty */
80705 +       if ((!dptbl || !dptbl->total) && (!trtbl || !trtbl->total))
80706 +               goto end_reply;
80708 +       sbi->flags |= NTFS_FLAGS_NEED_REPLAY;
80709 +       if (is_ro)
80710 +               goto out;
80712 +       /* Reopen all of the attributes with dirty pages */
80713 +       oe = NULL;
80714 +next_open_attribute:
80716 +       oe = enum_rstbl(oatbl, oe);
80717 +       if (!oe) {
80718 +               err = 0;
80719 +               dp = NULL;
80720 +               goto next_dirty_page;
80721 +       }
80723 +       oa = ntfs_zalloc(sizeof(struct OpenAttr));
80724 +       if (!oa) {
80725 +               err = -ENOMEM;
80726 +               goto out;
80727 +       }
80729 +       inode = ntfs_iget5(sbi->sb, &oe->ref, NULL);
80730 +       if (IS_ERR(inode))
80731 +               goto fake_attr;
80733 +       if (is_bad_inode(inode)) {
80734 +               iput(inode);
80735 +fake_attr:
80736 +               if (oa->ni) {
80737 +                       iput(&oa->ni->vfs_inode);
80738 +                       oa->ni = NULL;
80739 +               }
80741 +               attr = attr_create_nonres_log(sbi, oe->type, 0, oe->ptr,
80742 +                                             oe->name_len, 0);
80743 +               if (!attr) {
80744 +                       ntfs_free(oa);
80745 +                       err = -ENOMEM;
80746 +                       goto out;
80747 +               }
80748 +               oa->attr = attr;
80749 +               oa->run1 = &oa->run0;
80750 +               goto final_oe;
80751 +       }
80753 +       ni_oe = ntfs_i(inode);
80754 +       oa->ni = ni_oe;
80756 +       attr = ni_find_attr(ni_oe, NULL, NULL, oe->type, oe->ptr, oe->name_len,
80757 +                           NULL, NULL);
80759 +       if (!attr)
80760 +               goto fake_attr;
80762 +       t32 = le32_to_cpu(attr->size);
80763 +       oa->attr = ntfs_memdup(attr, t32);
80764 +       if (!oa->attr)
80765 +               goto fake_attr;
80767 +       if (!S_ISDIR(inode->i_mode)) {
80768 +               if (attr->type == ATTR_DATA && !attr->name_len) {
80769 +                       oa->run1 = &ni_oe->file.run;
80770 +                       goto final_oe;
80771 +               }
80772 +       } else {
80773 +               if (attr->type == ATTR_ALLOC &&
80774 +                   attr->name_len == ARRAY_SIZE(I30_NAME) &&
80775 +                   !memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME))) {
80776 +                       oa->run1 = &ni_oe->dir.alloc_run;
80777 +                       goto final_oe;
80778 +               }
80779 +       }
80781 +       if (attr->non_res) {
80782 +               u16 roff = le16_to_cpu(attr->nres.run_off);
80783 +               CLST svcn = le64_to_cpu(attr->nres.svcn);
80785 +               err = run_unpack(&oa->run0, sbi, inode->i_ino, svcn,
80786 +                                le64_to_cpu(attr->nres.evcn), svcn,
80787 +                                Add2Ptr(attr, roff), t32 - roff);
80788 +               if (err < 0) {
80789 +                       ntfs_free(oa->attr);
80790 +                       oa->attr = NULL;
80791 +                       goto fake_attr;
80792 +               }
80793 +               err = 0;
80794 +       }
80795 +       oa->run1 = &oa->run0;
80796 +       attr = oa->attr;
80798 +final_oe:
80799 +       if (oe->is_attr_name == 1)
80800 +               ntfs_free(oe->ptr);
80801 +       oe->is_attr_name = 0;
80802 +       oe->ptr = oa;
80803 +       oe->name_len = attr->name_len;
80805 +       goto next_open_attribute;
80807 +       /*
80808 +        * Now loop through the dirty page table to extract all of the Vcn/Lcn
80809 +        * Mapping that we have, and insert it into the appropriate run
80810 +        */
80811 +next_dirty_page:
80812 +       dp = enum_rstbl(dptbl, dp);
80813 +       if (!dp)
80814 +               goto do_redo_1;
80816 +       oe = Add2Ptr(oatbl, le32_to_cpu(dp->target_attr));
80818 +       if (oe->next != RESTART_ENTRY_ALLOCATED_LE)
80819 +               goto next_dirty_page;
80821 +       oa = oe->ptr;
80822 +       if (!oa)
80823 +               goto next_dirty_page;
80825 +       i = -1;
80826 +next_dirty_page_vcn:
80827 +       i += 1;
80828 +       if (i >= le32_to_cpu(dp->lcns_follow))
80829 +               goto next_dirty_page;
80831 +       vcn = le64_to_cpu(dp->vcn) + i;
80832 +       size = (vcn + 1) << sbi->cluster_bits;
80834 +       if (!dp->page_lcns[i])
80835 +               goto next_dirty_page_vcn;
80837 +       rno = ino_get(&oe->ref);
80838 +       if (rno <= MFT_REC_MIRR &&
80839 +           size < (MFT_REC_VOL + 1) * sbi->record_size &&
80840 +           oe->type == ATTR_DATA) {
80841 +               goto next_dirty_page_vcn;
80842 +       }
80844 +       lcn = le64_to_cpu(dp->page_lcns[i]);
80846 +       if ((!run_lookup_entry(oa->run1, vcn, &lcn0, &len0, NULL) ||
80847 +            lcn0 != lcn) &&
80848 +           !run_add_entry(oa->run1, vcn, lcn, 1, false)) {
80849 +               err = -ENOMEM;
80850 +               goto out;
80851 +       }
80852 +       attr = oa->attr;
80853 +       t64 = le64_to_cpu(attr->nres.alloc_size);
80854 +       if (size > t64) {
80855 +               attr->nres.valid_size = attr->nres.data_size =
80856 +                       attr->nres.alloc_size = cpu_to_le64(size);
80857 +       }
80858 +       goto next_dirty_page_vcn;
80860 +do_redo_1:
80861 +       /*
80862 +        * Perform the Redo Pass, to restore all of the dirty pages to the same
80863 +        * contents that they had immediately before the crash
80864 +        * If the dirty page table is empty, then we can skip the entire Redo Pass
80865 +        */
80866 +       if (!dptbl || !dptbl->total)
80867 +               goto do_undo_action;
80869 +       rec_lsn = rlsn;
80871 +       /*
80872 +        * Read the record at the Redo lsn, before falling
80873 +        * into common code to handle each record
80874 +        */
80875 +       err = read_log_rec_lcb(log, rlsn, lcb_ctx_next, &lcb);
80876 +       if (err)
80877 +               goto out;
80879 +       /*
80880 +        * Now loop to read all of our log records forwards,
80881 +        * until we hit the end of the file, cleaning up at the end
80882 +        */
80883 +do_action_next:
80884 +       frh = lcb->lrh;
80886 +       if (LfsClientRecord != frh->record_type)
80887 +               goto read_next_log_do_action;
80889 +       transact_id = le32_to_cpu(frh->transact_id);
80890 +       rec_len = le32_to_cpu(frh->client_data_len);
80891 +       lrh = lcb->log_rec;
80893 +       if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) {
80894 +               err = -EINVAL;
80895 +               goto out;
80896 +       }
80898 +       /* Ignore log records that do not update pages */
80899 +       if (lrh->lcns_follow)
80900 +               goto find_dirty_page;
80902 +       goto read_next_log_do_action;
80904 +find_dirty_page:
80905 +       t16 = le16_to_cpu(lrh->target_attr);
80906 +       t64 = le64_to_cpu(lrh->target_vcn);
80907 +       dp = find_dp(dptbl, t16, t64);
80909 +       if (!dp)
80910 +               goto read_next_log_do_action;
80912 +       if (rec_lsn < le64_to_cpu(dp->oldest_lsn))
80913 +               goto read_next_log_do_action;
80915 +       t16 = le16_to_cpu(lrh->target_attr);
80916 +       if (t16 >= bytes_per_rt(oatbl)) {
80917 +               err = -EINVAL;
80918 +               goto out;
80919 +       }
80921 +       oe = Add2Ptr(oatbl, t16);
80923 +       if (oe->next != RESTART_ENTRY_ALLOCATED_LE) {
80924 +               err = -EINVAL;
80925 +               goto out;
80926 +       }
80928 +       oa = oe->ptr;
80930 +       if (!oa) {
80931 +               err = -EINVAL;
80932 +               goto out;
80933 +       }
80934 +       attr = oa->attr;
80936 +       vcn = le64_to_cpu(lrh->target_vcn);
80938 +       if (!run_lookup_entry(oa->run1, vcn, &lcn, NULL, NULL) ||
80939 +           lcn == SPARSE_LCN) {
80940 +               goto read_next_log_do_action;
80941 +       }
80943 +       /* Point to the Redo data and get its length */
80944 +       data = Add2Ptr(lrh, le16_to_cpu(lrh->redo_off));
80945 +       dlen = le16_to_cpu(lrh->redo_len);
80947 +       /* Shorten length by any Lcns which were deleted */
80948 +       saved_len = dlen;
80950 +       for (i = le16_to_cpu(lrh->lcns_follow); i; i--) {
80951 +               size_t j;
80952 +               u32 alen, voff;
80954 +               voff = le16_to_cpu(lrh->record_off) +
80955 +                      le16_to_cpu(lrh->attr_off);
80956 +               voff += le16_to_cpu(lrh->cluster_off) << SECTOR_SHIFT;
80958 +               /* If the Vcn in question is allocated, we can just get out. */
80959 +               j = le64_to_cpu(lrh->target_vcn) - le64_to_cpu(dp->vcn);
80960 +               if (dp->page_lcns[j + i - 1])
80961 +                       break;
80963 +               if (!saved_len)
80964 +                       saved_len = 1;
80966 +               /*
80967 +                * Calculate the allocated space left relative to the
80968 +                * log record Vcn, after removing this unallocated Vcn
80969 +                */
80970 +               alen = (i - 1) << sbi->cluster_bits;
80972 +               /*
80973 +                * If the update described by this log record goes beyond
80974 +                * the allocated space, then we will have to reduce the length
80975 +                */
80976 +               if (voff >= alen)
80977 +                       dlen = 0;
80978 +               else if (voff + dlen > alen)
80979 +                       dlen = alen - voff;
80980 +       }
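+       /*
+        * Numeric sketch of the truncation above (values assumed): with
+        * cluster_bits == 12, i == 2 leaves alen == 0x1000; a record with
+        * voff == 0xF80 and dlen == 0x100 would spill past that allocation,
+        * so dlen is trimmed to alen - voff == 0x80.
+        */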
80982 +       /* If the resulting dlen from above is now zero, we can skip this log record */
80983 +       if (!dlen && saved_len)
80984 +               goto read_next_log_do_action;
80986 +       t16 = le16_to_cpu(lrh->redo_op);
80987 +       if (can_skip_action(t16))
80988 +               goto read_next_log_do_action;
80990 +       /* Apply the Redo operation in a common routine */
80991 +       err = do_action(log, oe, lrh, t16, data, dlen, rec_len, &rec_lsn);
80992 +       if (err)
80993 +               goto out;
80995 +       /* Keep reading and looping back until end of file */
80996 +read_next_log_do_action:
80997 +       err = read_next_log_rec(log, lcb, &rec_lsn);
80998 +       if (!err && rec_lsn)
80999 +               goto do_action_next;
81001 +       lcb_put(lcb);
81002 +       lcb = NULL;
81004 +do_undo_action:
81005 +       /* Scan Transaction Table */
81006 +       tr = NULL;
81007 +transaction_table_next:
81008 +       tr = enum_rstbl(trtbl, tr);
81009 +       if (!tr)
81010 +               goto undo_action_done;
81012 +       if (TransactionActive != tr->transact_state || !tr->undo_next_lsn) {
81013 +               free_rsttbl_idx(trtbl, PtrOffset(trtbl, tr));
81014 +               goto transaction_table_next;
81015 +       }
81017 +       log->transaction_id = PtrOffset(trtbl, tr);
81018 +       undo_next_lsn = le64_to_cpu(tr->undo_next_lsn);
81020 +       /*
81021 +        * We only have to do anything if the transaction has
81022 +        * something in its undo_next_lsn field
81023 +        */
81024 +       if (!undo_next_lsn)
81025 +               goto commit_undo;
81027 +       /* Read the first record to be undone by this transaction */
81028 +       err = read_log_rec_lcb(log, undo_next_lsn, lcb_ctx_undo_next, &lcb);
81029 +       if (err)
81030 +               goto out;
81032 +       /*
81033 +        * Now loop to read all of our log records forwards,
81034 +        * until we hit the end of the file, cleaning up at the end
81035 +        */
81036 +undo_action_next:
81038 +       lrh = lcb->log_rec;
81039 +       frh = lcb->lrh;
81040 +       transact_id = le32_to_cpu(frh->transact_id);
81041 +       rec_len = le32_to_cpu(frh->client_data_len);
81043 +       if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) {
81044 +               err = -EINVAL;
81045 +               goto out;
81046 +       }
81048 +       if (lrh->undo_op == cpu_to_le16(Noop))
81049 +               goto read_next_log_undo_action;
81051 +       oe = Add2Ptr(oatbl, le16_to_cpu(lrh->target_attr));
81052 +       oa = oe->ptr;
81054 +       t16 = le16_to_cpu(lrh->lcns_follow);
81055 +       if (!t16)
81056 +               goto add_allocated_vcns;
81058 +       is_mapped = run_lookup_entry(oa->run1, le64_to_cpu(lrh->target_vcn),
81059 +                                    &lcn, &clen, NULL);
81061 +       /*
81062 +        * If the mapping isn't already in the table, or the mapping
81063 +        * corresponds to a hole, we need to make sure
81064 +        * there is no partial page already in memory
81065 +        */
81066 +       if (is_mapped && lcn != SPARSE_LCN && clen >= t16)
81067 +               goto add_allocated_vcns;
81069 +       vcn = le64_to_cpu(lrh->target_vcn);
81070 +       vcn &= ~(log->clst_per_page - 1);
81072 +add_allocated_vcns:
81073 +       for (i = 0, vcn = le64_to_cpu(lrh->target_vcn),
81074 +           size = (vcn + 1) << sbi->cluster_bits;
81075 +            i < t16; i++, vcn += 1, size += sbi->cluster_size) {
81076 +               attr = oa->attr;
81077 +               if (!attr->non_res) {
81078 +                       if (size > le32_to_cpu(attr->res.data_size))
81079 +                               attr->res.data_size = cpu_to_le32(size);
81080 +               } else {
81081 +                       if (size > le64_to_cpu(attr->nres.data_size))
81082 +                               attr->nres.valid_size = attr->nres.data_size =
81083 +                                       attr->nres.alloc_size =
81084 +                                               cpu_to_le64(size);
81085 +               }
81086 +       }
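+       /*
+        * Size sketch for the loop above (values assumed): cluster_bits == 12
+        * and target_vcn == 3 start size at (3 + 1) << 12 == 0x4000; each
+        * following vcn adds one cluster_size, and the attribute sizes are
+        * only ever grown, never shrunk.
+        */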
81088 +       t16 = le16_to_cpu(lrh->undo_op);
81089 +       if (can_skip_action(t16))
81090 +               goto read_next_log_undo_action;
81093 +       /* Point to the Undo data and get its length */
81093 +       data = Add2Ptr(lrh, le16_to_cpu(lrh->undo_off));
81094 +       dlen = le16_to_cpu(lrh->undo_len);
81096 +       /* It is time to apply the undo action */
81097 +       err = do_action(log, oe, lrh, t16, data, dlen, rec_len, NULL);
81099 +read_next_log_undo_action:
81100 +       /*
81101 +        * Keep reading and looping back until we have read the
81102 +        * last record for this transaction
81103 +        */
81104 +       err = read_next_log_rec(log, lcb, &rec_lsn);
81105 +       if (err)
81106 +               goto out;
81108 +       if (rec_lsn)
81109 +               goto undo_action_next;
81111 +       lcb_put(lcb);
81112 +       lcb = NULL;
81114 +commit_undo:
81115 +       free_rsttbl_idx(trtbl, log->transaction_id);
81117 +       log->transaction_id = 0;
81119 +       goto transaction_table_next;
81121 +undo_action_done:
81123 +       ntfs_update_mftmirr(sbi, 0);
81125 +       sbi->flags &= ~NTFS_FLAGS_NEED_REPLAY;
81127 +end_reply:
81129 +       err = 0;
81130 +       if (is_ro)
81131 +               goto out;
81133 +       rh = ntfs_zalloc(log->page_size);
81134 +       if (!rh) {
81135 +               err = -ENOMEM;
81136 +               goto out;
81137 +       }
81139 +       rh->rhdr.sign = NTFS_RSTR_SIGNATURE;
81140 +       rh->rhdr.fix_off = cpu_to_le16(offsetof(struct RESTART_HDR, fixups));
81141 +       t16 = (log->page_size >> SECTOR_SHIFT) + 1;
81142 +       rh->rhdr.fix_num = cpu_to_le16(t16);
81143 +       rh->sys_page_size = cpu_to_le32(log->page_size);
81144 +       rh->page_size = cpu_to_le32(log->page_size);
81146 +       t16 = QuadAlign(offsetof(struct RESTART_HDR, fixups) +
81147 +                       sizeof(short) * t16);
81148 +       rh->ra_off = cpu_to_le16(t16);
81149 +       rh->minor_ver = cpu_to_le16(1); // 0x1A:
81150 +       rh->major_ver = cpu_to_le16(1); // 0x1C:
81152 +       ra2 = Add2Ptr(rh, t16);
81153 +       memcpy(ra2, ra, sizeof(struct RESTART_AREA));
81155 +       ra2->client_idx[0] = 0;
81156 +       ra2->client_idx[1] = LFS_NO_CLIENT_LE;
81157 +       ra2->flags = cpu_to_le16(2);
81159 +       le32_add_cpu(&ra2->open_log_count, 1);
81161 +       ntfs_fix_pre_write(&rh->rhdr, log->page_size);
81163 +       err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rh, log->page_size);
81164 +       if (!err)
81165 +               err = ntfs_sb_write_run(sbi, &log->ni->file.run, log->page_size,
81166 +                                       rh, log->page_size);
81168 +       ntfs_free(rh);
81169 +       if (err)
81170 +               goto out;
81172 +out:
81173 +       ntfs_free(rst);
81174 +       if (lcb)
81175 +               lcb_put(lcb);
81177 +       /* Scan the Open Attribute Table to close all of the open attributes */
81178 +       oe = NULL;
81179 +       while ((oe = enum_rstbl(oatbl, oe))) {
81180 +               rno = ino_get(&oe->ref);
81182 +               if (oe->is_attr_name == 1) {
81183 +                       ntfs_free(oe->ptr);
81184 +                       oe->ptr = NULL;
81185 +                       continue;
81186 +               }
81188 +               if (oe->is_attr_name)
81189 +                       continue;
81191 +               oa = oe->ptr;
81192 +               if (!oa)
81193 +                       continue;
81195 +               run_close(&oa->run0);
81196 +               ntfs_free(oa->attr);
81197 +               if (oa->ni)
81198 +                       iput(&oa->ni->vfs_inode);
81199 +               ntfs_free(oa);
81200 +       }
81202 +       ntfs_free(trtbl);
81203 +       ntfs_free(oatbl);
81204 +       ntfs_free(dptbl);
81205 +       ntfs_free(attr_names);
81206 +       ntfs_free(rst_info.r_page);
81208 +       ntfs_free(ra);
81209 +       ntfs_free(log->one_page_buf);
81211 +       if (err)
81212 +               sbi->flags |= NTFS_FLAGS_NEED_REPLAY;
81214 +       if (err == -EROFS)
81215 +               err = 0;
81216 +       else if (log->set_dirty)
81217 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
81219 +       ntfs_free(log);
81221 +       return err;
81222 +}
81223 diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
81224 new file mode 100644
81225 index 000000000000..327356b08187
81226 --- /dev/null
81227 +++ b/fs/ntfs3/fsntfs.c
81228 @@ -0,0 +1,2542 @@
81229 +// SPDX-License-Identifier: GPL-2.0
81230 +/*
81231 + *
81232 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
81233 + *
81234 + */
81236 +#include <linux/blkdev.h>
81237 +#include <linux/buffer_head.h>
81238 +#include <linux/fs.h>
81239 +#include <linux/nls.h>
81241 +#include "debug.h"
81242 +#include "ntfs.h"
81243 +#include "ntfs_fs.h"
81245 +// clang-format off
81246 +const struct cpu_str NAME_MFT = {
81247 +       4, 0, { '$', 'M', 'F', 'T' },
81248 +};
81249 +const struct cpu_str NAME_MIRROR = {
81250 +       8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
81251 +};
81252 +const struct cpu_str NAME_LOGFILE = {
81253 +       8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
81254 +};
81255 +const struct cpu_str NAME_VOLUME = {
81256 +       7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
81257 +};
81258 +const struct cpu_str NAME_ATTRDEF = {
81259 +       8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
81260 +};
81261 +const struct cpu_str NAME_ROOT = {
81262 +       1, 0, { '.' },
81263 +};
81264 +const struct cpu_str NAME_BITMAP = {
81265 +       7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
81266 +};
81267 +const struct cpu_str NAME_BOOT = {
81268 +       5, 0, { '$', 'B', 'o', 'o', 't' },
81269 +};
81270 +const struct cpu_str NAME_BADCLUS = {
81271 +       8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
81272 +};
81273 +const struct cpu_str NAME_QUOTA = {
81274 +       6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
81275 +};
81276 +const struct cpu_str NAME_SECURE = {
81277 +       7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
81278 +};
81279 +const struct cpu_str NAME_UPCASE = {
81280 +       7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
81281 +};
81282 +const struct cpu_str NAME_EXTEND = {
81283 +       7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
81284 +};
81285 +const struct cpu_str NAME_OBJID = {
81286 +       6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
81287 +};
81288 +const struct cpu_str NAME_REPARSE = {
81289 +       8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
81290 +};
81291 +const struct cpu_str NAME_USNJRNL = {
81292 +       8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
81293 +};
81294 +const __le16 BAD_NAME[4] = {
81295 +       cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
81296 +};
81297 +const __le16 I30_NAME[4] = {
81298 +       cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
81299 +};
81300 +const __le16 SII_NAME[4] = {
81301 +       cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
81302 +};
81303 +const __le16 SDH_NAME[4] = {
81304 +       cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
81305 +};
81306 +const __le16 SDS_NAME[4] = {
81307 +       cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
81308 +};
81309 +const __le16 SO_NAME[2] = {
81310 +       cpu_to_le16('$'), cpu_to_le16('O'),
81311 +};
81312 +const __le16 SQ_NAME[2] = {
81313 +       cpu_to_le16('$'), cpu_to_le16('Q'),
81314 +};
81315 +const __le16 SR_NAME[2] = {
81316 +       cpu_to_le16('$'), cpu_to_le16('R'),
81317 +};
81319 +#ifdef CONFIG_NTFS3_LZX_XPRESS
81320 +const __le16 WOF_NAME[17] = {
81321 +       cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
81322 +       cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
81323 +       cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
81324 +       cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
81325 +       cpu_to_le16('a'),
81326 +};
81327 +#endif
81329 +// clang-format on
81331 +/*
81332 + * ntfs_fix_pre_write
81333 + *
81334 + * inserts fixups into 'rhdr' before writing to disk
81335 + */
81336 +bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
81337 +{
81338 +       u16 *fixup, *ptr;
81339 +       u16 sample;
81340 +       u16 fo = le16_to_cpu(rhdr->fix_off);
81341 +       u16 fn = le16_to_cpu(rhdr->fix_num);
81343 +       if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
81344 +           fn * SECTOR_SIZE > bytes) {
81345 +               return false;
81346 +       }
81348 +       /* Get fixup pointer */
81349 +       fixup = Add2Ptr(rhdr, fo);
81351 +       if (*fixup >= 0x7FFF)
81352 +               *fixup = 1;
81353 +       else
81354 +               *fixup += 1;
81356 +       sample = *fixup;
81358 +       ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
81360 +       while (fn--) {
81361 +               *++fixup = *ptr;
81362 +               *ptr = sample;
81363 +               ptr += SECTOR_SIZE / sizeof(short);
81364 +       }
81365 +       return true;
81366 +}
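+/*
+ * Round-trip sketch of the fixup scheme (example geometry assumed): for a
+ * 0x1000 byte record with 0x200 byte sectors, fix_num covers eight sectors
+ * plus the sample word. ntfs_fix_pre_write() bumps the sample, saves the
+ * last u16 of every sector into the fixup array and overwrites it with the
+ * sample; ntfs_fix_post_read() then checks that every sector still ends
+ * with the sample (catching torn writes) and restores the saved words.
+ */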
81368 +/*
81369 + * ntfs_fix_post_read
81370 + *
81371 + * remove fixups after reading from disk
81372 + * Returns < 0 if error, 0 if ok, 1 if need to update fixups
81373 + */
81374 +int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
81375 +                      bool simple)
81376 +{
81377 +       int ret;
81378 +       u16 *fixup, *ptr;
81379 +       u16 sample, fo, fn;
81381 +       fo = le16_to_cpu(rhdr->fix_off);
81382 +       fn = simple ? ((bytes >> SECTOR_SHIFT) + 1)
81383 +                   : le16_to_cpu(rhdr->fix_num);
81385 +       /* Check errors */
81386 +       if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
81387 +           fn * SECTOR_SIZE > bytes) {
81388 +               return -EINVAL; /* native chkntfs returns ok! */
81389 +       }
81391 +       /* Get fixup pointer */
81392 +       fixup = Add2Ptr(rhdr, fo);
81393 +       sample = *fixup;
81394 +       ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
81395 +       ret = 0;
81397 +       while (fn--) {
81398 +               /* Test current word */
81399 +               if (*ptr != sample) {
81400 +                       /* Fixup does not match! Is it serious error? */
81401 +                       ret = -E_NTFS_FIXUP;
81402 +               }
81404 +               /* Replace fixup */
81405 +               *ptr = *++fixup;
81406 +               ptr += SECTOR_SIZE / sizeof(short);
81407 +       }
81409 +       return ret;
81410 +}
81412 +/*
81413 + * ntfs_extend_init
81414 + *
81415 + * loads $Extend file
81416 + */
81417 +int ntfs_extend_init(struct ntfs_sb_info *sbi)
81418 +{
81419 +       int err;
81420 +       struct super_block *sb = sbi->sb;
81421 +       struct inode *inode, *inode2;
81422 +       struct MFT_REF ref;
81424 +       if (sbi->volume.major_ver < 3) {
81425 +               ntfs_notice(sb, "Skip $Extend 'cause NTFS version");
81426 +               return 0;
81427 +       }
81429 +       ref.low = cpu_to_le32(MFT_REC_EXTEND);
81430 +       ref.high = 0;
81431 +       ref.seq = cpu_to_le16(MFT_REC_EXTEND);
81432 +       inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
81433 +       if (IS_ERR(inode)) {
81434 +               err = PTR_ERR(inode);
81435 +               ntfs_err(sb, "Failed to load $Extend.");
81436 +               inode = NULL;
81437 +               goto out;
81438 +       }
81440 +       /* If ntfs_iget5 reads from disk, it never returns a bad inode. */
81441 +       if (!S_ISDIR(inode->i_mode)) {
81442 +               err = -EINVAL;
81443 +               goto out;
81444 +       }
81446 +       /* Try to find $ObjId */
81447 +       inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
81448 +       if (inode2 && !IS_ERR(inode2)) {
81449 +               if (is_bad_inode(inode2)) {
81450 +                       iput(inode2);
81451 +               } else {
81452 +                       sbi->objid.ni = ntfs_i(inode2);
81453 +                       sbi->objid_no = inode2->i_ino;
81454 +               }
81455 +       }
81457 +       /* Try to find $Quota */
81458 +       inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
81459 +       if (inode2 && !IS_ERR(inode2)) {
81460 +               sbi->quota_no = inode2->i_ino;
81461 +               iput(inode2);
81462 +       }
81464 +       /* Try to find $Reparse */
81465 +       inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
81466 +       if (inode2 && !IS_ERR(inode2)) {
81467 +               sbi->reparse.ni = ntfs_i(inode2);
81468 +               sbi->reparse_no = inode2->i_ino;
81469 +       }
81471 +       /* Try to find $UsnJrnl */
81472 +       inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
81473 +       if (inode2 && !IS_ERR(inode2)) {
81474 +               sbi->usn_jrnl_no = inode2->i_ino;
81475 +               iput(inode2);
81476 +       }
81478 +       err = 0;
81479 +out:
81480 +       iput(inode);
81481 +       return err;
81482 +}
81483 +
81484 +int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
81485 +{
81486 +       int err = 0;
81487 +       struct super_block *sb = sbi->sb;
81488 +       bool initialized = false;
81489 +       struct MFT_REF ref;
81490 +       struct inode *inode;
81492 +       /* Check for 4GB */
81493 +       if (ni->vfs_inode.i_size >= 0x100000000ull) {
81494 +               ntfs_err(sb, "\x24LogFile is too big");
81495 +               err = -EINVAL;
81496 +               goto out;
81497 +       }
81499 +       sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;
81501 +       ref.low = cpu_to_le32(MFT_REC_MFT);
81502 +       ref.high = 0;
81503 +       ref.seq = cpu_to_le16(1);
81505 +       inode = ntfs_iget5(sb, &ref, NULL);
81507 +       if (IS_ERR(inode))
81508 +               inode = NULL;
81510 +       if (!inode) {
81511 +               /* Try to use mft copy */
81512 +               u64 t64 = sbi->mft.lbo;
81514 +               sbi->mft.lbo = sbi->mft.lbo2;
81515 +               inode = ntfs_iget5(sb, &ref, NULL);
81516 +               sbi->mft.lbo = t64;
81517 +               if (IS_ERR(inode))
81518 +                       inode = NULL;
81519 +       }
81521 +       if (!inode) {
81522 +               err = -EINVAL;
81523 +               ntfs_err(sb, "Failed to load $MFT.");
81524 +               goto out;
81525 +       }
81527 +       sbi->mft.ni = ntfs_i(inode);
81529 +       /* LogFile should not contain an attribute list. */
81530 +       err = ni_load_all_mi(sbi->mft.ni);
81531 +       if (!err)
81532 +               err = log_replay(ni, &initialized);
81534 +       iput(inode);
81535 +       sbi->mft.ni = NULL;
81537 +       sync_blockdev(sb->s_bdev);
81538 +       invalidate_bdev(sb->s_bdev);
81540 +       if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
81541 +               err = 0;
81542 +               goto out;
81543 +       }
81545 +       if (sb_rdonly(sb) || !initialized)
81546 +               goto out;
81548 +       /* Fill LogFile with -1 if it is initialized. */
81549 +       err = ntfs_bio_fill_1(sbi, &ni->file.run);
81551 +out:
81552 +       sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;
81554 +       return err;
81555 +}
81556 +
81557 +/*
81558 + * ntfs_query_def
81559 + *
81560 + * Return the ATTR_DEF_ENTRY for the given attribute type.
81561 + */
81562 +const struct ATTR_DEF_ENTRY *ntfs_query_def(struct ntfs_sb_info *sbi,
81563 +                                           enum ATTR_TYPE type)
81564 +{
81565 +       int type_in = le32_to_cpu(type);
81566 +       size_t min_idx = 0;
81567 +       size_t max_idx = sbi->def_entries - 1;
81569 +       while (min_idx <= max_idx) {
81570 +               size_t i = min_idx + ((max_idx - min_idx) >> 1);
81571 +               const struct ATTR_DEF_ENTRY *entry = sbi->def_table + i;
81572 +               int diff = le32_to_cpu(entry->type) - type_in;
81574 +               if (!diff)
81575 +                       return entry;
81576 +               if (diff < 0)
81577 +                       min_idx = i + 1;
81578 +               else if (i)
81579 +                       max_idx = i - 1;
81580 +               else
81581 +                       return NULL;
81582 +       }
81583 +       return NULL;
81584 +}
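ntfs_query_def above is a plain binary search over the $AttrDef table (def_table is assumed to be sorted by attribute type at mount time); the `else if (i)` branch exists because the indices are size_t, so `max_idx = i - 1` at i == 0 would underflow. The same lookup via libc bsearch, purely as an illustration (struct attr_def and query_def are made-up names):

#include <stdint.h>
#include <stdlib.h>

struct attr_def { uint32_t type; /* ... other $AttrDef fields ... */ };

static int cmp_type(const void *key, const void *elem)
{
        uint32_t k = *(const uint32_t *)key;
        uint32_t t = ((const struct attr_def *)elem)->type;

        return (k > t) - (k < t);
}

/* tbl must be sorted by ->type, as the driver assumes for def_table. */
static const struct attr_def *query_def(const struct attr_def *tbl, size_t n,
                                        uint32_t type)
{
        return bsearch(&type, tbl, n, sizeof(*tbl), cmp_type);
}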
81585 +
81586 +/*
81587 + * ntfs_look_for_free_space
81588 + *
81589 + * Look for free space in the volume bitmap.
81590 + */
81591 +int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
81592 +                            CLST *new_lcn, CLST *new_len,
81593 +                            enum ALLOCATE_OPT opt)
81594 +{
81595 +       int err;
81596 +       struct super_block *sb = sbi->sb;
81597 +       size_t a_lcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
81598 +       struct wnd_bitmap *wnd = &sbi->used.bitmap;
81600 +       down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
81601 +       if (opt & ALLOCATE_MFT) {
81602 +               CLST alen;
81604 +               zlen = wnd_zone_len(wnd);
81606 +               if (!zlen) {
81607 +                       err = ntfs_refresh_zone(sbi);
81608 +                       if (err)
81609 +                               goto out;
81611 +                       zlen = wnd_zone_len(wnd);
81613 +                       if (!zlen) {
81614 +                               ntfs_err(sbi->sb,
81615 +                                        "no free space to extend mft");
81616 +                               err = -ENOSPC;
81617 +                               goto out;
81618 +                       }
81619 +               }
81621 +               lcn = wnd_zone_bit(wnd);
81622 +               alen = zlen > len ? len : zlen;
81624 +               wnd_zone_set(wnd, lcn + alen, zlen - alen);
81626 +               err = wnd_set_used(wnd, lcn, alen);
81627 +               if (err)
81628 +                       goto out;
81630 +               *new_lcn = lcn;
81631 +               *new_len = alen;
81632 +               goto ok;
81633 +       }
81635 +       /*
81636 +        * Because cluster 0 is always in use, lcn == 0 means that the caller
81637 +        * has no hint; use the cached 'next_free_lcn' to improve performance.
81638 +        */
81639 +       if (!lcn)
81640 +               lcn = sbi->used.next_free_lcn;
81642 +       if (lcn >= wnd->nbits)
81643 +               lcn = 0;
81645 +       *new_len = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &a_lcn);
81646 +       if (*new_len) {
81647 +               *new_lcn = a_lcn;
81648 +               goto ok;
81649 +       }
81651 +       /* Try to use clusters from MftZone */
81652 +       zlen = wnd_zone_len(wnd);
81653 +       zeroes = wnd_zeroes(wnd);
81655 +       /* Check whether the request is too large. */
81656 +       if (len > zeroes + zlen)
81657 +               goto no_space;
81659 +       if (zlen <= NTFS_MIN_MFT_ZONE)
81660 +               goto no_space;
81662 +       /* How many clusters to cut from the zone. */
81663 +       zlcn = wnd_zone_bit(wnd);
81664 +       zlen2 = zlen >> 1;
81665 +       ztrim = len > zlen ? zlen : (len > zlen2 ? len : zlen2);
81666 +       new_zlen = zlen - ztrim;
81668 +       if (new_zlen < NTFS_MIN_MFT_ZONE) {
81669 +               new_zlen = NTFS_MIN_MFT_ZONE;
81670 +               if (new_zlen > zlen)
81671 +                       new_zlen = zlen;
81672 +       }
81674 +       wnd_zone_set(wnd, zlcn, new_zlen);
81676 +       /* Allocate contiguous clusters. */
81677 +       *new_len =
81678 +               wnd_find(wnd, len, 0,
81679 +                        BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &a_lcn);
81680 +       if (*new_len) {
81681 +               *new_lcn = a_lcn;
81682 +               goto ok;
81683 +       }
81685 +no_space:
81686 +       up_write(&wnd->rw_lock);
81688 +       return -ENOSPC;
81690 +ok:
81691 +       err = 0;
81693 +       ntfs_unmap_meta(sb, *new_lcn, *new_len);
81695 +       if (opt & ALLOCATE_MFT)
81696 +               goto out;
81698 +       /* Set hint for next requests */
81699 +       sbi->used.next_free_lcn = *new_lcn + *new_len;
81701 +out:
81702 +       up_write(&wnd->rw_lock);
81703 +       return err;
81704 +}
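When the general pool is exhausted, the function above shrinks the reserved MftZone and retries. The ternary that computes ztrim reduces to a clamp: trim max(len, zlen / 2) clusters, but never more than the whole zone. A hedged restatement (zone_trim is an illustrative name):

#include <stddef.h>

/* ztrim = len > zlen ? zlen : (len > zlen2 ? len : zlen2), zlen2 = zlen / 2 */
static size_t zone_trim(size_t len, size_t zlen)
{
        size_t t = len > zlen / 2 ? len : zlen / 2; /* max(len, zlen / 2) */

        return t > zlen ? zlen : t;                 /* cap at the whole zone */
}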
81705 +
81706 +/*
81707 + * ntfs_extend_mft
81708 + *
81709 + * Allocate additional MFT records.
81710 + * sbi->mft.bitmap is locked for write.
81711 + *
81712 + * NOTE: recursive:
81713 + *     ntfs_look_free_mft ->
81714 + *     ntfs_extend_mft ->
81715 + *     attr_set_size ->
81716 + *     ni_insert_nonresident ->
81717 + *     ni_insert_attr ->
81718 + *     ni_ins_attr_ext ->
81719 + *     ntfs_look_free_mft ->
81720 + *     ntfs_extend_mft
81721 + * To avoid unbounded recursion, always allocate space for two new MFT
81722 + * records; see attrib.c: "at least two mft to avoid recursive loop".
81723 + */
81724 +static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
81725 +{
81726 +       int err;
81727 +       struct ntfs_inode *ni = sbi->mft.ni;
81728 +       size_t new_mft_total;
81729 +       u64 new_mft_bytes, new_bitmap_bytes;
81730 +       struct ATTRIB *attr;
81731 +       struct wnd_bitmap *wnd = &sbi->mft.bitmap;
81733 +       new_mft_total = (wnd->nbits + MFT_INCREASE_CHUNK + 127) & (CLST)~127;
81734 +       new_mft_bytes = (u64)new_mft_total << sbi->record_bits;
81736 +       /* Step 1: Resize $MFT::DATA */
81737 +       down_write(&ni->file.run_lock);
81738 +       err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
81739 +                           new_mft_bytes, NULL, false, &attr);
81741 +       if (err) {
81742 +               up_write(&ni->file.run_lock);
81743 +               goto out;
81744 +       }
81746 +       attr->nres.valid_size = attr->nres.data_size;
81747 +       new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
81748 +       ni->mi.dirty = true;
81750 +       /* Step 2: Resize $MFT::BITMAP */
81751 +       new_bitmap_bytes = bitmap_size(new_mft_total);
81753 +       err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
81754 +                           new_bitmap_bytes, &new_bitmap_bytes, true, NULL);
81756 +       /* Refresh Mft Zone if necessary */
81757 +       down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);
81759 +       ntfs_refresh_zone(sbi);
81761 +       up_write(&sbi->used.bitmap.rw_lock);
81762 +       up_write(&ni->file.run_lock);
81764 +       if (err)
81765 +               goto out;
81767 +       err = wnd_extend(wnd, new_mft_total);
81769 +       if (err)
81770 +               goto out;
81772 +       ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);
81774 +       err = _ni_write_inode(&ni->vfs_inode, 0);
81775 +out:
81776 +       return err;
81777 +}
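The sizing step above grows the MFT by MFT_INCREASE_CHUNK records and rounds the total up to a multiple of 128, which keeps $MFT::BITMAP a whole number of 16-byte units. A minimal restatement of the rounding (grow_mft_total is an illustrative name):

#include <stddef.h>

/* Round (nbits + chunk) up to the next multiple of 128 records. */
static size_t grow_mft_total(size_t nbits, size_t chunk)
{
        return (nbits + chunk + 127) & ~(size_t)127;
}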
81778 +
81779 +/*
81780 + * ntfs_look_free_mft
81781 + *
81782 + * Look for a free MFT record.
81783 + */
81784 +int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
81785 +                      struct ntfs_inode *ni, struct mft_inode **mi)
81786 +{
81787 +       int err = 0;
81788 +       size_t zbit, zlen, from, to, fr;
81789 +       size_t mft_total;
81790 +       struct MFT_REF ref;
81791 +       struct super_block *sb = sbi->sb;
81792 +       struct wnd_bitmap *wnd = &sbi->mft.bitmap;
81793 +       u32 ir;
81795 +       static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
81796 +                     MFT_REC_FREE - MFT_REC_RESERVED);
81798 +       if (!mft)
81799 +               down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
81801 +       zlen = wnd_zone_len(wnd);
81803 +       /* Always reserve space for MFT */
81804 +       if (zlen) {
81805 +               if (mft) {
81806 +                       zbit = wnd_zone_bit(wnd);
81807 +                       *rno = zbit;
81808 +                       wnd_zone_set(wnd, zbit + 1, zlen - 1);
81809 +               }
81810 +               goto found;
81811 +       }
81813 +       /* No MFT zone. Find the free MFT record nearest to 0. */
81814 +       if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
81815 +               /* Resize MFT */
81816 +               mft_total = wnd->nbits;
81818 +               err = ntfs_extend_mft(sbi);
81819 +               if (!err) {
81820 +                       zbit = mft_total;
81821 +                       goto reserve_mft;
81822 +               }
81824 +               if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
81825 +                       goto out;
81827 +               err = 0;
81829 +               /*
81830 +                * Look for a free record in the reserved area
81831 +                * [11-16) == [MFT_REC_RESERVED, MFT_REC_FREE); the MFT
81832 +                * bitmap always marks it as used.
81833 +                */
81834 +               if (!sbi->mft.reserved_bitmap) {
81835 +                       /* Once per session, create an internal bitmap for these 5 records. */
81836 +                       sbi->mft.reserved_bitmap = 0xFF;
81838 +                       ref.high = 0;
81839 +                       for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
81840 +                               struct inode *i;
81841 +                               struct ntfs_inode *ni;
81842 +                               struct MFT_REC *mrec;
81844 +                               ref.low = cpu_to_le32(ir);
81845 +                               ref.seq = cpu_to_le16(ir);
81847 +                               i = ntfs_iget5(sb, &ref, NULL);
81848 +                               if (IS_ERR(i)) {
81849 +next:
81850 +                                       ntfs_notice(
81851 +                                               sb,
81852 +                                               "Invalid reserved record %x",
81853 +                                               ref.low);
81854 +                                       continue;
81855 +                               }
81856 +                               if (is_bad_inode(i)) {
81857 +                                       iput(i);
81858 +                                       goto next;
81859 +                               }
81861 +                               ni = ntfs_i(i);
81863 +                               mrec = ni->mi.mrec;
81865 +                               if (!is_rec_base(mrec))
81866 +                                       goto next;
81868 +                               if (mrec->hard_links)
81869 +                                       goto next;
81871 +                               if (!ni_std(ni))
81872 +                                       goto next;
81874 +                               if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
81875 +                                                NULL, 0, NULL, NULL))
81876 +                                       goto next;
81878 +                               __clear_bit(ir - MFT_REC_RESERVED,
81879 +                                           &sbi->mft.reserved_bitmap);
81880 +                       }
81881 +               }
81883 +               /* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED */
81884 +               zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
81885 +                                         MFT_REC_FREE, MFT_REC_RESERVED);
81886 +               if (zbit >= MFT_REC_FREE) {
81887 +                       sbi->mft.next_reserved = MFT_REC_FREE;
81888 +                       goto out;
81889 +               }
81891 +               zlen = 1;
81892 +               sbi->mft.next_reserved = zbit;
81893 +       } else {
81894 +reserve_mft:
81895 +               zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
81896 +               if (zbit + zlen > wnd->nbits)
81897 +                       zlen = wnd->nbits - zbit;
81899 +               while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
81900 +                       zlen -= 1;
81902 +               /* [zbit, zbit + zlen) will be used for Mft itself */
81903 +               from = sbi->mft.used;
81904 +               if (from < zbit)
81905 +                       from = zbit;
81906 +               to = zbit + zlen;
81907 +               if (from < to) {
81908 +                       ntfs_clear_mft_tail(sbi, from, to);
81909 +                       sbi->mft.used = to;
81910 +               }
81911 +       }
81913 +       if (mft) {
81914 +               *rno = zbit;
81915 +               zbit += 1;
81916 +               zlen -= 1;
81917 +       }
81919 +       wnd_zone_set(wnd, zbit, zlen);
81921 +found:
81922 +       if (!mft) {
81923 +               /* This is a request for a general purpose record. */
81924 +               if (sbi->mft.next_free < MFT_REC_USER)
81925 +                       sbi->mft.next_free = MFT_REC_USER;
81927 +               for (;;) {
81928 +                       if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
81929 +                       } else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
81930 +                               sbi->mft.next_free = sbi->mft.bitmap.nbits;
81931 +                       } else {
81932 +                               *rno = fr;
81933 +                               sbi->mft.next_free = *rno + 1;
81934 +                               break;
81935 +                       }
81937 +                       err = ntfs_extend_mft(sbi);
81938 +                       if (err)
81939 +                               goto out;
81940 +               }
81941 +       }
81943 +       if (ni && !ni_add_subrecord(ni, *rno, mi)) {
81944 +               err = -ENOMEM;
81945 +               goto out;
81946 +       }
81948 +       /* We have found a record that is not reserved for the next MFT. */
81949 +       if (*rno >= MFT_REC_FREE)
81950 +               wnd_set_used(wnd, *rno, 1);
81951 +       else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
81952 +               __set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
81954 +out:
81955 +       if (!mft)
81956 +               up_write(&wnd->rw_lock);
81958 +       return err;
81959 +}
81960 +
81961 +/*
81962 + * ntfs_mark_rec_free
81963 + *
81964 + * Mark the record as free.
81965 + */
81966 +void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno)
81967 +{
81968 +       struct wnd_bitmap *wnd = &sbi->mft.bitmap;
81970 +       down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
81971 +       if (rno >= wnd->nbits)
81972 +               goto out;
81974 +       if (rno >= MFT_REC_FREE) {
81975 +               if (!wnd_is_used(wnd, rno, 1))
81976 +                       ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
81977 +               else
81978 +                       wnd_set_free(wnd, rno, 1);
81979 +       } else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
81980 +               __clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
81981 +       }
81983 +       if (rno < wnd_zone_bit(wnd))
81984 +               wnd_zone_set(wnd, rno, 1);
81985 +       else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
81986 +               sbi->mft.next_free = rno;
81988 +out:
81989 +       up_write(&wnd->rw_lock);
81990 +}
81991 +
81992 +/*
81993 + * ntfs_clear_mft_tail
81994 + *
81995 + * Format empty records [from, to).
81996 + * sbi->mft.bitmap is locked for write.
81997 + */
81998 +int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
81999 +{
82000 +       int err;
82001 +       u32 rs;
82002 +       u64 vbo;
82003 +       struct runs_tree *run;
82004 +       struct ntfs_inode *ni;
82006 +       if (from >= to)
82007 +               return 0;
82009 +       rs = sbi->record_size;
82010 +       ni = sbi->mft.ni;
82011 +       run = &ni->file.run;
82013 +       down_read(&ni->file.run_lock);
82014 +       vbo = (u64)from * rs;
82015 +       for (; from < to; from++, vbo += rs) {
82016 +               struct ntfs_buffers nb;
82018 +               err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
82019 +               if (err)
82020 +                       goto out;
82022 +               err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
82023 +               nb_put(&nb);
82024 +               if (err)
82025 +                       goto out;
82026 +       }
82028 +out:
82029 +       sbi->mft.used = from;
82030 +       up_read(&ni->file.run_lock);
82031 +       return err;
82032 +}
82033 +
82034 +/*
82035 + * ntfs_refresh_zone
82036 + *
82037 + * Refresh the MFT zone.
82038 + * sbi->used.bitmap is locked for rw.
82039 + * sbi->mft.bitmap is locked for write.
82040 + * sbi->mft.ni->file.run_lock is locked for write.
82041 + */
82042 +int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
82043 +{
82044 +       CLST zone_limit, zone_max, lcn, vcn, len;
82045 +       size_t lcn_s, zlen;
82046 +       struct wnd_bitmap *wnd = &sbi->used.bitmap;
82047 +       struct ntfs_inode *ni = sbi->mft.ni;
82049 +       /* Do not change anything while the MFT zone is non-empty. */
82050 +       if (wnd_zone_len(wnd))
82051 +               return 0;
82053 +       /*
82054 +        * Compute the MFT zone in two steps.
82055 +        * It would be nice to allocate 1/8 of the total clusters
82056 +        * for the MFT, but not more than 512 MB.
82057 +        */
82058 +       zone_limit = (512 * 1024 * 1024) >> sbi->cluster_bits;
82059 +       zone_max = wnd->nbits >> 3;
82060 +       if (zone_max > zone_limit)
82061 +               zone_max = zone_limit;
82063 +       vcn = bytes_to_cluster(sbi,
82064 +                              (u64)sbi->mft.bitmap.nbits << sbi->record_bits);
82066 +       if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
82067 +               lcn = SPARSE_LCN;
82069 +       /* We should always find the last LCN of the MFT. */
82070 +       if (lcn == SPARSE_LCN)
82071 +               return -EINVAL;
82073 +       lcn_s = lcn + 1;
82075 +       /* Try to allocate clusters after last MFT run */
82076 +       zlen = wnd_find(wnd, zone_max, lcn_s, 0, &lcn_s);
82077 +       if (!zlen) {
82078 +               ntfs_notice(sbi->sb, "MftZone: unavailable");
82079 +               return 0;
82080 +       }
82082 +       /* Truncate too large zone */
82083 +       wnd_zone_set(wnd, lcn_s, zlen);
82085 +       return 0;
82086 +}
82087 +
82088 +/*
82089 + * ntfs_update_mftmirr
82090 + *
82091 + * Update $MFTMirr data.
82092 + */
82093 +int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
82094 +{
82095 +       int err;
82096 +       struct super_block *sb = sbi->sb;
82097 +       u32 blocksize = sb->s_blocksize;
82098 +       sector_t block1, block2;
82099 +       u32 bytes;
82101 +       if (!(sbi->flags & NTFS_FLAGS_MFTMIRR))
82102 +               return 0;
82104 +       err = 0;
82105 +       bytes = sbi->mft.recs_mirr << sbi->record_bits;
82106 +       block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
82107 +       block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;
82109 +       for (; bytes >= blocksize; bytes -= blocksize) {
82110 +               struct buffer_head *bh1, *bh2;
82112 +               bh1 = sb_bread(sb, block1++);
82113 +               if (!bh1) {
82114 +                       err = -EIO;
82115 +                       goto out;
82116 +               }
82118 +               bh2 = sb_getblk(sb, block2++);
82119 +               if (!bh2) {
82120 +                       put_bh(bh1);
82121 +                       err = -EIO;
82122 +                       goto out;
82123 +               }
82125 +               if (buffer_locked(bh2))
82126 +                       __wait_on_buffer(bh2);
82128 +               lock_buffer(bh2);
82129 +               memcpy(bh2->b_data, bh1->b_data, blocksize);
82130 +               set_buffer_uptodate(bh2);
82131 +               mark_buffer_dirty(bh2);
82132 +               unlock_buffer(bh2);
82134 +               put_bh(bh1);
82135 +               bh1 = NULL;
82137 +               if (wait)
82138 +                       err = sync_dirty_buffer(bh2);
82140 +               put_bh(bh2);
82141 +               if (err)
82142 +                       goto out;
82143 +       }
82145 +       sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
82147 +out:
82148 +       return err;
82149 +}
82150 +
82151 +/*
82152 + * ntfs_set_state
82153 + *
82154 + * mount:      ntfs_set_state(NTFS_DIRTY_DIRTY)
82155 + * umount:     ntfs_set_state(NTFS_DIRTY_CLEAR)
82156 + * ntfs error: ntfs_set_state(NTFS_DIRTY_ERROR)
82157 + */
82158 +int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
82159 +{
82160 +       int err;
82161 +       struct ATTRIB *attr;
82162 +       struct VOLUME_INFO *info;
82163 +       struct mft_inode *mi;
82164 +       struct ntfs_inode *ni;
82166 +       /*
82167 +        * Do not change the state if the fs was real_dirty.
82168 +        * Do not change the state if the fs is already dirty (or clear).
82169 +        * Do not change anything if mounted read-only.
82170 +        */
82171 +       if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
82172 +               return 0;
82174 +       /* Check cached value */
82175 +       if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
82176 +           (sbi->volume.flags & VOLUME_FLAG_DIRTY))
82177 +               return 0;
82179 +       ni = sbi->volume.ni;
82180 +       if (!ni)
82181 +               return -EINVAL;
82183 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);
82185 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
82186 +       if (!attr) {
82187 +               err = -EINVAL;
82188 +               goto out;
82189 +       }
82191 +       info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
82192 +       if (!info) {
82193 +               err = -EINVAL;
82194 +               goto out;
82195 +       }
82197 +       switch (dirty) {
82198 +       case NTFS_DIRTY_ERROR:
82199 +               ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
82200 +               sbi->volume.real_dirty = true;
82201 +               fallthrough;
82202 +       case NTFS_DIRTY_DIRTY:
82203 +               info->flags |= VOLUME_FLAG_DIRTY;
82204 +               break;
82205 +       case NTFS_DIRTY_CLEAR:
82206 +               info->flags &= ~VOLUME_FLAG_DIRTY;
82207 +               break;
82208 +       }
82209 +       /* Cache current volume flags. */
82210 +       sbi->volume.flags = info->flags;
82211 +       mi->dirty = true;
82212 +       err = 0;
82214 +out:
82215 +       ni_unlock(ni);
82216 +       if (err)
82217 +               return err;
82219 +       mark_inode_dirty(&ni->vfs_inode);
82220 +       /*verify(!ntfs_update_mftmirr()); */
82221 +       err = sync_inode_metadata(&ni->vfs_inode, 1);
82223 +       return err;
82224 +}
82225 +
82226 +/*
82227 + * security_hash
82228 + *
82229 + * Calculate a hash of the security descriptor.
82230 + */
82231 +static inline __le32 security_hash(const void *sd, size_t bytes)
82232 +{
82233 +       u32 hash = 0;
82234 +       const __le32 *ptr = sd;
82236 +       bytes >>= 2;
82237 +       while (bytes--)
82238 +               hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
82239 +       return cpu_to_le32(hash);
82240 +}
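The hash above is a rotate-left-by-3 accumulator over the descriptor's 32-bit little-endian words ((hash >> 29) | (hash << 3) is a 32-bit left rotation by 3); NTFS uses this value as part of the $Secure:$SDH index key. A standalone sketch (sd_hash is an illustrative name; like the original, it ignores a trailing partial word and assumes a little-endian host):

#include <stddef.h>
#include <stdint.h>

static uint32_t sd_hash(const void *sd, size_t bytes)
{
        const uint32_t *ptr = sd;
        uint32_t hash = 0;

        for (bytes >>= 2; bytes--; ptr++)
                hash = ((hash >> 29) | (hash << 3)) + *ptr;
        return hash;
}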
82241 +
82242 +int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
82243 +{
82244 +       struct block_device *bdev = sb->s_bdev;
82245 +       u32 blocksize = sb->s_blocksize;
82246 +       u64 block = lbo >> sb->s_blocksize_bits;
82247 +       u32 off = lbo & (blocksize - 1);
82248 +       u32 op = blocksize - off;
82250 +       for (; bytes; block += 1, off = 0, op = blocksize) {
82251 +               struct buffer_head *bh = __bread(bdev, block, blocksize);
82253 +               if (!bh)
82254 +                       return -EIO;
82256 +               if (op > bytes)
82257 +                       op = bytes;
82259 +               memcpy(buffer, bh->b_data + off, op);
82261 +               put_bh(bh);
82263 +               bytes -= op;
82264 +               buffer = Add2Ptr(buffer, op);
82265 +       }
82267 +       return 0;
82268 +}
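The loop above advances block by block; the initial block/offset split is division and remainder by the power-of-two block size, which the driver writes as shifts and masks. A sketch of just that split (lbo_split is an illustrative name):

#include <stdint.h>

/* blocksize must be a power of two, so these are equivalent to
 * lbo >> s_blocksize_bits and lbo & (blocksize - 1). */
static void lbo_split(uint64_t lbo, uint32_t blocksize,
                      uint64_t *block, uint32_t *off)
{
        *block = lbo / blocksize;
        *off = lbo % blocksize;
}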
82269 +
82270 +int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
82271 +                 const void *buf, int wait)
82272 +{
82273 +       u32 blocksize = sb->s_blocksize;
82274 +       struct block_device *bdev = sb->s_bdev;
82275 +       sector_t block = lbo >> sb->s_blocksize_bits;
82276 +       u32 off = lbo & (blocksize - 1);
82277 +       u32 op = blocksize - off;
82278 +       struct buffer_head *bh;
82280 +       if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
82281 +               wait = 1;
82283 +       for (; bytes; block += 1, off = 0, op = blocksize) {
82284 +               if (op > bytes)
82285 +                       op = bytes;
82287 +               if (op < blocksize) {
82288 +                       bh = __bread(bdev, block, blocksize);
82289 +                       if (!bh) {
82290 +                               ntfs_err(sb, "failed to read block %llx",
82291 +                                        (u64)block);
82292 +                               return -EIO;
82293 +                       }
82294 +               } else {
82295 +                       bh = __getblk(bdev, block, blocksize);
82296 +                       if (!bh)
82297 +                               return -ENOMEM;
82298 +               }
82300 +               if (buffer_locked(bh))
82301 +                       __wait_on_buffer(bh);
82303 +               lock_buffer(bh);
82304 +               if (buf) {
82305 +                       memcpy(bh->b_data + off, buf, op);
82306 +                       buf = Add2Ptr(buf, op);
82307 +               } else {
82308 +                       memset(bh->b_data + off, -1, op);
82309 +               }
82311 +               set_buffer_uptodate(bh);
82312 +               mark_buffer_dirty(bh);
82313 +               unlock_buffer(bh);
82315 +               if (wait) {
82316 +                       int err = sync_dirty_buffer(bh);
82318 +                       if (err) {
82319 +                               ntfs_err(
82320 +                                       sb,
82321 +                                       "failed to sync buffer at block %llx, error %d",
82322 +                                       (u64)block, err);
82323 +                               put_bh(bh);
82324 +                               return err;
82325 +                       }
82326 +               }
82328 +               put_bh(bh);
82330 +               bytes -= op;
82331 +       }
82332 +       return 0;
82333 +}
82334 +
82335 +int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
82336 +                     u64 vbo, const void *buf, size_t bytes)
82337 +{
82338 +       struct super_block *sb = sbi->sb;
82339 +       u8 cluster_bits = sbi->cluster_bits;
82340 +       u32 off = vbo & sbi->cluster_mask;
82341 +       CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
82342 +       u64 lbo, len;
82343 +       size_t idx;
82345 +       if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
82346 +               return -ENOENT;
82348 +       if (lcn == SPARSE_LCN)
82349 +               return -EINVAL;
82351 +       lbo = ((u64)lcn << cluster_bits) + off;
82352 +       len = ((u64)clen << cluster_bits) - off;
82354 +       for (;;) {
82355 +               u32 op = len < bytes ? len : bytes;
82356 +               int err = ntfs_sb_write(sb, lbo, op, buf, 0);
82358 +               if (err)
82359 +                       return err;
82361 +               bytes -= op;
82362 +               if (!bytes)
82363 +                       break;
82365 +               vcn_next = vcn + clen;
82366 +               if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
82367 +                   vcn != vcn_next)
82368 +                       return -ENOENT;
82370 +               if (lcn == SPARSE_LCN)
82371 +                       return -EINVAL;
82373 +               if (buf)
82374 +                       buf = Add2Ptr(buf, op);
82376 +               lbo = ((u64)lcn << cluster_bits);
82377 +               len = ((u64)clen << cluster_bits);
82378 +       }
82380 +       return 0;
82381 +}
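ntfs_sb_write_run shows the run-list walking pattern used throughout this file: each entry maps a contiguous range [vcn, vcn + clen) of virtual clusters to logical cluster lcn, and the walk fails with -ENOENT if the next entry does not begin exactly at vcn + clen. A reduced sketch of the contiguity check (struct run and walk_runs are illustrative):

#include <stddef.h>
#include <stdint.h>

struct run { uint64_t vcn, lcn, clen; };

/* Return 0 if consecutive entries leave no hole, -1 otherwise. */
static int walk_runs(const struct run *r, size_t n)
{
        for (size_t i = 0; i + 1 < n; i++)
                if (r[i + 1].vcn != r[i].vcn + r[i].clen)
                        return -1;
        return 0;
}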
82382 +
82383 +struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
82384 +                                  const struct runs_tree *run, u64 vbo)
82385 +{
82386 +       struct super_block *sb = sbi->sb;
82387 +       u8 cluster_bits = sbi->cluster_bits;
82388 +       CLST lcn;
82389 +       u64 lbo;
82391 +       if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
82392 +               return ERR_PTR(-ENOENT);
82394 +       lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);
82396 +       return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
82397 +}
82398 +
82399 +int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
82400 +                    u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
82401 +{
82402 +       int err;
82403 +       struct super_block *sb = sbi->sb;
82404 +       u32 blocksize = sb->s_blocksize;
82405 +       u8 cluster_bits = sbi->cluster_bits;
82406 +       u32 off = vbo & sbi->cluster_mask;
82407 +       u32 nbh = 0;
82408 +       CLST vcn_next, vcn = vbo >> cluster_bits;
82409 +       CLST lcn, clen;
82410 +       u64 lbo, len;
82411 +       size_t idx;
82412 +       struct buffer_head *bh;
82414 +       if (!run) {
82415 +               /* First reading of $Volume + $MFTMirr + LogFile goes here. */
82416 +               if (vbo > MFT_REC_VOL * sbi->record_size) {
82417 +                       err = -ENOENT;
82418 +                       goto out;
82419 +               }
82421 +               /* Use the boot sector's absolute 'MFTCluster' to read the record. */
82422 +               lbo = vbo + sbi->mft.lbo;
82423 +               len = sbi->record_size;
82424 +       } else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
82425 +               err = -ENOENT;
82426 +               goto out;
82427 +       } else {
82428 +               if (lcn == SPARSE_LCN) {
82429 +                       err = -EINVAL;
82430 +                       goto out;
82431 +               }
82433 +               lbo = ((u64)lcn << cluster_bits) + off;
82434 +               len = ((u64)clen << cluster_bits) - off;
82435 +       }
82437 +       off = lbo & (blocksize - 1);
82438 +       if (nb) {
82439 +               nb->off = off;
82440 +               nb->bytes = bytes;
82441 +       }
82443 +       for (;;) {
82444 +               u32 len32 = len >= bytes ? bytes : len;
82445 +               sector_t block = lbo >> sb->s_blocksize_bits;
82447 +               do {
82448 +                       u32 op = blocksize - off;
82450 +                       if (op > len32)
82451 +                               op = len32;
82453 +                       bh = ntfs_bread(sb, block);
82454 +                       if (!bh) {
82455 +                               err = -EIO;
82456 +                               goto out;
82457 +                       }
82459 +                       if (buf) {
82460 +                               memcpy(buf, bh->b_data + off, op);
82461 +                               buf = Add2Ptr(buf, op);
82462 +                       }
82464 +                       if (!nb) {
82465 +                               put_bh(bh);
82466 +                       } else if (nbh >= ARRAY_SIZE(nb->bh)) {
82467 +                               err = -EINVAL;
82468 +                               goto out;
82469 +                       } else {
82470 +                               nb->bh[nbh++] = bh;
82471 +                               nb->nbufs = nbh;
82472 +                       }
82474 +                       bytes -= op;
82475 +                       if (!bytes)
82476 +                               return 0;
82477 +                       len32 -= op;
82478 +                       block += 1;
82479 +                       off = 0;
82481 +               } while (len32);
82483 +               vcn_next = vcn + clen;
82484 +               if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
82485 +                   vcn != vcn_next) {
82486 +                       err = -ENOENT;
82487 +                       goto out;
82488 +               }
82490 +               if (lcn == SPARSE_LCN) {
82491 +                       err = -EINVAL;
82492 +                       goto out;
82493 +               }
82495 +               lbo = ((u64)lcn << cluster_bits);
82496 +               len = ((u64)clen << cluster_bits);
82497 +       }
82499 +out:
82500 +       if (!nbh)
82501 +               return err;
82503 +       while (nbh) {
82504 +               put_bh(nb->bh[--nbh]);
82505 +               nb->bh[nbh] = NULL;
82506 +       }
82508 +       nb->nbufs = 0;
82509 +       return err;
82510 +}
82511 +
82512 +/* Returns < 0 on error, 0 if ok, -E_NTFS_FIXUP if fixups need updating. */
82513 +int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
82514 +                struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
82515 +                struct ntfs_buffers *nb)
82516 +{
82517 +       int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);
82519 +       if (err)
82520 +               return err;
82521 +       return ntfs_fix_post_read(rhdr, nb->bytes, true);
82522 +}
82523 +
82524 +int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
82525 +               u32 bytes, struct ntfs_buffers *nb)
82526 +{
82527 +       int err = 0;
82528 +       struct super_block *sb = sbi->sb;
82529 +       u32 blocksize = sb->s_blocksize;
82530 +       u8 cluster_bits = sbi->cluster_bits;
82531 +       CLST vcn_next, vcn = vbo >> cluster_bits;
82532 +       u32 off;
82533 +       u32 nbh = 0;
82534 +       CLST lcn, clen;
82535 +       u64 lbo, len;
82536 +       size_t idx;
82538 +       nb->bytes = bytes;
82540 +       if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
82541 +               err = -ENOENT;
82542 +               goto out;
82543 +       }
82545 +       off = vbo & sbi->cluster_mask;
82546 +       lbo = ((u64)lcn << cluster_bits) + off;
82547 +       len = ((u64)clen << cluster_bits) - off;
82549 +       nb->off = off = lbo & (blocksize - 1);
82551 +       for (;;) {
82552 +               u32 len32 = len < bytes ? len : bytes;
82553 +               sector_t block = lbo >> sb->s_blocksize_bits;
82555 +               do {
82556 +                       u32 op;
82557 +                       struct buffer_head *bh;
82559 +                       if (nbh >= ARRAY_SIZE(nb->bh)) {
82560 +                               err = -EINVAL;
82561 +                               goto out;
82562 +                       }
82564 +                       op = blocksize - off;
82565 +                       if (op > len32)
82566 +                               op = len32;
82568 +                       if (op == blocksize) {
82569 +                               bh = sb_getblk(sb, block);
82570 +                               if (!bh) {
82571 +                                       err = -ENOMEM;
82572 +                                       goto out;
82573 +                               }
82574 +                               if (buffer_locked(bh))
82575 +                                       __wait_on_buffer(bh);
82576 +                               set_buffer_uptodate(bh);
82577 +                       } else {
82578 +                               bh = ntfs_bread(sb, block);
82579 +                               if (!bh) {
82580 +                                       err = -EIO;
82581 +                                       goto out;
82582 +                               }
82583 +                       }
82585 +                       nb->bh[nbh++] = bh;
82586 +                       bytes -= op;
82587 +                       if (!bytes) {
82588 +                               nb->nbufs = nbh;
82589 +                               return 0;
82590 +                       }
82592 +                       block += 1;
82593 +                       len32 -= op;
82594 +                       off = 0;
82595 +               } while (len32);
82597 +               vcn_next = vcn + clen;
82598 +               if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
82599 +                   vcn != vcn_next) {
82600 +                       err = -ENOENT;
82601 +                       goto out;
82602 +               }
82604 +               lbo = ((u64)lcn << cluster_bits);
82605 +               len = ((u64)clen << cluster_bits);
82606 +       }
82608 +out:
82609 +       while (nbh) {
82610 +               put_bh(nb->bh[--nbh]);
82611 +               nb->bh[nbh] = NULL;
82612 +       }
82614 +       nb->nbufs = 0;
82616 +       return err;
82617 +}
82618 +
82619 +int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
82620 +                 struct ntfs_buffers *nb, int sync)
82621 +{
82622 +       int err = 0;
82623 +       struct super_block *sb = sbi->sb;
82624 +       u32 block_size = sb->s_blocksize;
82625 +       u32 bytes = nb->bytes;
82626 +       u32 off = nb->off;
82627 +       u16 fo = le16_to_cpu(rhdr->fix_off);
82628 +       u16 fn = le16_to_cpu(rhdr->fix_num);
82629 +       u32 idx;
82630 +       __le16 *fixup;
82631 +       __le16 sample;
82633 +       if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
82634 +           fn * SECTOR_SIZE > bytes) {
82635 +               return -EINVAL;
82636 +       }
82638 +       for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
82639 +               u32 op = block_size - off;
82640 +               char *bh_data;
82641 +               struct buffer_head *bh = nb->bh[idx];
82642 +               __le16 *ptr, *end_data;
82644 +               if (op > bytes)
82645 +                       op = bytes;
82647 +               if (buffer_locked(bh))
82648 +                       __wait_on_buffer(bh);
82650 +               lock_buffer(nb->bh[idx]);
82652 +               bh_data = bh->b_data + off;
82653 +               end_data = Add2Ptr(bh_data, op);
82654 +               memcpy(bh_data, rhdr, op);
82656 +               if (!idx) {
82657 +                       u16 t16;
82659 +                       fixup = Add2Ptr(bh_data, fo);
82660 +                       sample = *fixup;
82661 +                       t16 = le16_to_cpu(sample);
82662 +                       if (t16 >= 0x7FFF) {
82663 +                               sample = *fixup = cpu_to_le16(1);
82664 +                       } else {
82665 +                               sample = cpu_to_le16(t16 + 1);
82666 +                               *fixup = sample;
82667 +                       }
82669 +                       *(__le16 *)Add2Ptr(rhdr, fo) = sample;
82670 +               }
82672 +               ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));
82674 +               do {
82675 +                       *++fixup = *ptr;
82676 +                       *ptr = sample;
82677 +                       ptr += SECTOR_SIZE / sizeof(short);
82678 +               } while (ptr < end_data);
82680 +               set_buffer_uptodate(bh);
82681 +               mark_buffer_dirty(bh);
82682 +               unlock_buffer(bh);
82684 +               if (sync) {
82685 +                       int err2 = sync_dirty_buffer(bh);
82687 +                       if (!err && err2)
82688 +                               err = err2;
82689 +               }
82691 +               bytes -= op;
82692 +               rhdr = Add2Ptr(rhdr, op);
82693 +       }
82695 +       return err;
82696 +}
82697 +
82698 +static inline struct bio *ntfs_alloc_bio(u32 nr_vecs)
82699 +{
82700 +       struct bio *bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
82702 +       if (!bio && (current->flags & PF_MEMALLOC)) {
82703 +               while (!bio && (nr_vecs /= 2))
82704 +                       bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
82705 +       }
82706 +       return bio;
82707 +}
82708 +
82709 +/* Read/write pages from/to disk. */
82710 +int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
82711 +                  struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
82712 +                  u32 op)
82713 +{
82714 +       int err = 0;
82715 +       struct bio *new, *bio = NULL;
82716 +       struct super_block *sb = sbi->sb;
82717 +       struct block_device *bdev = sb->s_bdev;
82718 +       struct page *page;
82719 +       u8 cluster_bits = sbi->cluster_bits;
82720 +       CLST lcn, clen, vcn, vcn_next;
82721 +       u32 add, off, page_idx;
82722 +       u64 lbo, len;
82723 +       size_t run_idx;
82724 +       struct blk_plug plug;
82726 +       if (!bytes)
82727 +               return 0;
82729 +       blk_start_plug(&plug);
82731 +       /* Align vbo and bytes to 512-byte boundaries. */
82732 +       lbo = (vbo + bytes + 511) & ~511ull;
82733 +       vbo = vbo & ~511ull;
82734 +       bytes = lbo - vbo;
82736 +       vcn = vbo >> cluster_bits;
82737 +       if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
82738 +               err = -ENOENT;
82739 +               goto out;
82740 +       }
82741 +       off = vbo & sbi->cluster_mask;
82742 +       page_idx = 0;
82743 +       page = pages[0];
82745 +       for (;;) {
82746 +               lbo = ((u64)lcn << cluster_bits) + off;
82747 +               len = ((u64)clen << cluster_bits) - off;
82748 +new_bio:
82749 +               new = ntfs_alloc_bio(nr_pages - page_idx);
82750 +               if (!new) {
82751 +                       err = -ENOMEM;
82752 +                       goto out;
82753 +               }
82754 +               if (bio) {
82755 +                       bio_chain(bio, new);
82756 +                       submit_bio(bio);
82757 +               }
82758 +               bio = new;
82759 +               bio_set_dev(bio, bdev);
82760 +               bio->bi_iter.bi_sector = lbo >> 9;
82761 +               bio->bi_opf = op;
82763 +               while (len) {
82764 +                       off = vbo & (PAGE_SIZE - 1);
82765 +                       add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;
82767 +                       if (bio_add_page(bio, page, add, off) < add)
82768 +                               goto new_bio;
82770 +                       if (bytes <= add)
82771 +                               goto out;
82772 +                       bytes -= add;
82773 +                       vbo += add;
82775 +                       if (add + off == PAGE_SIZE) {
82776 +                               page_idx += 1;
82777 +                               if (WARN_ON(page_idx >= nr_pages)) {
82778 +                                       err = -EINVAL;
82779 +                                       goto out;
82780 +                               }
82781 +                               page = pages[page_idx];
82782 +                       }
82784 +                       if (len <= add)
82785 +                               break;
82786 +                       len -= add;
82787 +                       lbo += add;
82788 +               }
82790 +               vcn_next = vcn + clen;
82791 +               if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
82792 +                   vcn != vcn_next) {
82793 +                       err = -ENOENT;
82794 +                       goto out;
82795 +               }
82796 +               off = 0;
82797 +       }
82798 +out:
82799 +       if (bio) {
82800 +               if (!err)
82801 +                       err = submit_bio_wait(bio);
82802 +               bio_put(bio);
82803 +       }
82804 +       blk_finish_plug(&plug);
82806 +       return err;
82807 +}
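The alignment step near the top of ntfs_bio_pages widens [vbo, vbo + bytes) to whole 512-byte sectors, rounding the end up and the start down. As a sketch (sector_align is an illustrative name):

#include <stdint.h>

static void sector_align(uint64_t *vbo, uint32_t *bytes)
{
        uint64_t end = (*vbo + *bytes + 511) & ~511ull; /* round end up */

        *vbo &= ~511ull;                                /* round start down */
        *bytes = end - *vbo;
}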
82808 +
82809 +/*
82810 + * Helper for ntfs_loadlog_and_replay:
82811 + * fill the on-disk LogFile range with -1,
82812 + * which marks the log file as empty.
82813 + */
82814 +int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
82815 +{
82816 +       int err = 0;
82817 +       struct super_block *sb = sbi->sb;
82818 +       struct block_device *bdev = sb->s_bdev;
82819 +       u8 cluster_bits = sbi->cluster_bits;
82820 +       struct bio *new, *bio = NULL;
82821 +       CLST lcn, clen;
82822 +       u64 lbo, len;
82823 +       size_t run_idx;
82824 +       struct page *fill;
82825 +       void *kaddr;
82826 +       struct blk_plug plug;
82828 +       fill = alloc_page(GFP_KERNEL);
82829 +       if (!fill)
82830 +               return -ENOMEM;
82832 +       kaddr = kmap_atomic(fill);
82833 +       memset(kaddr, -1, PAGE_SIZE);
82834 +       kunmap_atomic(kaddr);
82835 +       flush_dcache_page(fill);
82836 +       lock_page(fill);
82838 +       if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
82839 +               err = -ENOENT;
82840 +               goto out;
82841 +       }
82843 +       /*
82844 +        * TODO: try blkdev_issue_write_same
82845 +        */
82846 +       blk_start_plug(&plug);
82847 +       do {
82848 +               lbo = (u64)lcn << cluster_bits;
82849 +               len = (u64)clen << cluster_bits;
82850 +new_bio:
82851 +               new = ntfs_alloc_bio(BIO_MAX_VECS);
82852 +               if (!new) {
82853 +                       err = -ENOMEM;
82854 +                       break;
82855 +               }
82856 +               if (bio) {
82857 +                       bio_chain(bio, new);
82858 +                       submit_bio(bio);
82859 +               }
82860 +               bio = new;
82861 +               bio_set_dev(bio, bdev);
82862 +               bio->bi_opf = REQ_OP_WRITE;
82863 +               bio->bi_iter.bi_sector = lbo >> 9;
82865 +               for (;;) {
82866 +                       u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;
82868 +                       if (bio_add_page(bio, fill, add, 0) < add)
82869 +                               goto new_bio;
82871 +                       lbo += add;
82872 +                       if (len <= add)
82873 +                               break;
82874 +                       len -= add;
82875 +               }
82876 +       } while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));
82878 +       if (bio) {
82879 +               if (!err)
82880 +                       err = submit_bio_wait(bio);
82881 +               bio_put(bio);
82882 +       }
82883 +       blk_finish_plug(&plug);
82884 +out:
82885 +       unlock_page(fill);
82886 +       put_page(fill);
82888 +       return err;
82889 +}
82890 +
82891 +int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
82892 +                   u64 vbo, u64 *lbo, u64 *bytes)
82893 +{
82894 +       u32 off;
82895 +       CLST lcn, len;
82896 +       u8 cluster_bits = sbi->cluster_bits;
82898 +       if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
82899 +               return -ENOENT;
82901 +       off = vbo & sbi->cluster_mask;
82902 +       *lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
82903 +       *bytes = ((u64)len << cluster_bits) - off;
82905 +       return 0;
82906 +}
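The translation above is pure cluster arithmetic: locate the run containing the virtual cluster, then add the offset within the cluster. A sketch of the offset math alone (vbo_to_lbo_off is an illustrative name; sparse runs, which the driver reports as *lbo == -1, are not handled here):

#include <stdint.h>

static uint64_t vbo_to_lbo_off(uint64_t lcn, uint64_t vbo,
                               unsigned int cluster_bits)
{
        uint64_t cluster_mask = ((uint64_t)1 << cluster_bits) - 1;

        return (lcn << cluster_bits) + (vbo & cluster_mask);
}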
82907 +
82908 +struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno, bool dir)
82909 +{
82910 +       int err = 0;
82911 +       struct super_block *sb = sbi->sb;
82912 +       struct inode *inode = new_inode(sb);
82913 +       struct ntfs_inode *ni;
82915 +       if (!inode)
82916 +               return ERR_PTR(-ENOMEM);
82918 +       ni = ntfs_i(inode);
82920 +       err = mi_format_new(&ni->mi, sbi, rno, dir ? RECORD_FLAG_DIR : 0,
82921 +                           false);
82922 +       if (err)
82923 +               goto out;
82925 +       inode->i_ino = rno;
82926 +       if (insert_inode_locked(inode) < 0) {
82927 +               err = -EIO;
82928 +               goto out;
82929 +       }
82931 +out:
82932 +       if (err) {
82933 +               iput(inode);
82934 +               ni = ERR_PTR(err);
82935 +       }
82936 +       return ni;
82937 +}
82938 +
82939 +/*
82940 + * O:BAG:BAD:(A;OICI;FA;;;WD)
82941 + * owner S-1-5-32-544 (Administrators)
82942 + * group S-1-5-32-544 (Administrators)
82943 + * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
82944 + */
82945 +const u8 s_default_security[] __aligned(8) = {
82946 +       0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
82947 +       0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
82948 +       0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
82949 +       0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
82950 +       0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
82951 +       0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
82952 +       0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
82953 +};
82954 +
82955 +static_assert(sizeof(s_default_security) == 0x50);
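Reading the blob: the 20-byte self-relative header is Revision 1, Control 0x8004 (SE_SELF_RELATIVE | SE_DACL_PRESENT), Owner at offset 0x30, Group at 0x40, no SACL, DACL at 0x14. The DACL (revision 2, size 0x1C) holds a single ACCESS_ALLOWED ACE with flags 0x03 (OBJECT_INHERIT | CONTAINER_INHERIT, the "OICI" above), mask 0x001F01FF (FILE_ALL_ACCESS) and SID S-1-1-0 (Everyone); the two trailing SIDs (S-1-5-32-544, Administrators) are the owner and group, matching the static_assert total of 0x50 bytes.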
82956 +
82957 +static inline u32 sid_length(const struct SID *sid)
82958 +{
82959 +       return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
82960 +}
82961 +
82962 +/*
82963 + * Thanks to Mark Harmstone for the idea.
82964 + */
82965 +static bool is_acl_valid(const struct ACL *acl, u32 len)
82966 +{
82967 +       const struct ACE_HEADER *ace;
82968 +       u32 i;
82969 +       u16 ace_count, ace_size;
82971 +       if (acl->AclRevision != ACL_REVISION &&
82972 +           acl->AclRevision != ACL_REVISION_DS) {
82973 +               /*
82974 +                * This value should be ACL_REVISION, unless the ACL contains an
82975 +                * object-specific ACE, in which case this value must be ACL_REVISION_DS.
82976 +                * All ACEs in an ACL must be at the same revision level.
82977 +                */
82978 +               return false;
82979 +       }
82981 +       if (acl->Sbz1)
82982 +               return false;
82984 +       if (le16_to_cpu(acl->AclSize) > len)
82985 +               return false;
82987 +       if (acl->Sbz2)
82988 +               return false;
82990 +       len -= sizeof(struct ACL);
82991 +       ace = (struct ACE_HEADER *)&acl[1];
82992 +       ace_count = le16_to_cpu(acl->AceCount);
82994 +       for (i = 0; i < ace_count; i++) {
82995 +               if (len < sizeof(struct ACE_HEADER))
82996 +                       return false;
82998 +               ace_size = le16_to_cpu(ace->AceSize);
82999 +               if (len < ace_size)
83000 +                       return false;
83002 +               len -= ace_size;
83003 +               ace = Add2Ptr(ace, ace_size);
83004 +       }
83006 +       return true;
83007 +}
83008 +
83009 +bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
83010 +{
83011 +       u32 sd_owner, sd_group, sd_sacl, sd_dacl;
83013 +       if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
83014 +               return false;
83016 +       if (sd->Revision != 1)
83017 +               return false;
83019 +       if (sd->Sbz1)
83020 +               return false;
83022 +       if (!(sd->Control & SE_SELF_RELATIVE))
83023 +               return false;
83025 +       sd_owner = le32_to_cpu(sd->Owner);
83026 +       if (sd_owner) {
83027 +               const struct SID *owner = Add2Ptr(sd, sd_owner);
83029 +               if (sd_owner + offsetof(struct SID, SubAuthority) > len)
83030 +                       return false;
83032 +               if (owner->Revision != 1)
83033 +                       return false;
83035 +               if (sd_owner + sid_length(owner) > len)
83036 +                       return false;
83037 +       }
83039 +       sd_group = le32_to_cpu(sd->Group);
83040 +       if (sd_group) {
83041 +               const struct SID *group = Add2Ptr(sd, sd_group);
83043 +               if (sd_group + offsetof(struct SID, SubAuthority) > len)
83044 +                       return false;
83046 +               if (group->Revision != 1)
83047 +                       return false;
83049 +               if (sd_group + sid_length(group) > len)
83050 +                       return false;
83051 +       }
83053 +       sd_sacl = le32_to_cpu(sd->Sacl);
83054 +       if (sd_sacl) {
83055 +               const struct ACL *sacl = Add2Ptr(sd, sd_sacl);
83057 +               if (sd_sacl + sizeof(struct ACL) > len)
83058 +                       return false;
83060 +               if (!is_acl_valid(sacl, len - sd_sacl))
83061 +                       return false;
83062 +       }
83064 +       sd_dacl = le32_to_cpu(sd->Dacl);
83065 +       if (sd_dacl) {
83066 +               const struct ACL *dacl = Add2Ptr(sd, sd_dacl);
83068 +               if (sd_dacl + sizeof(struct ACL) > len)
83069 +                       return false;
83071 +               if (!is_acl_valid(dacl, len - sd_dacl))
83072 +                       return false;
83073 +       }
83075 +       return true;
83076 +}
83077 +
83078 +/*
83079 + * ntfs_security_init
83080 + *
83081 + * Load and parse $Secure.
83082 + */
83083 +int ntfs_security_init(struct ntfs_sb_info *sbi)
83084 +{
83085 +       int err;
83086 +       struct super_block *sb = sbi->sb;
83087 +       struct inode *inode;
83088 +       struct ntfs_inode *ni;
83089 +       struct MFT_REF ref;
83090 +       struct ATTRIB *attr;
83091 +       struct ATTR_LIST_ENTRY *le;
83092 +       u64 sds_size;
83093 +       size_t cnt, off;
83094 +       struct NTFS_DE *ne;
83095 +       struct NTFS_DE_SII *sii_e;
83096 +       struct ntfs_fnd *fnd_sii = NULL;
83097 +       const struct INDEX_ROOT *root_sii;
83098 +       const struct INDEX_ROOT *root_sdh;
83099 +       struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
83100 +       struct ntfs_index *indx_sii = &sbi->security.index_sii;
83102 +       ref.low = cpu_to_le32(MFT_REC_SECURE);
83103 +       ref.high = 0;
83104 +       ref.seq = cpu_to_le16(MFT_REC_SECURE);
83106 +       inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
83107 +       if (IS_ERR(inode)) {
83108 +               err = PTR_ERR(inode);
83109 +               ntfs_err(sb, "Failed to load $Secure.");
83110 +               inode = NULL;
83111 +               goto out;
83112 +       }
83114 +       ni = ntfs_i(inode);
83116 +       le = NULL;
83118 +       attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
83119 +                           ARRAY_SIZE(SDH_NAME), NULL, NULL);
83120 +       if (!attr) {
83121 +               err = -EINVAL;
83122 +               goto out;
83123 +       }
83125 +       root_sdh = resident_data(attr);
83126 +       if (root_sdh->type != ATTR_ZERO ||
83127 +           root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH) {
83128 +               err = -EINVAL;
83129 +               goto out;
83130 +       }
83132 +       err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
83133 +       if (err)
83134 +               goto out;
83136 +       attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
83137 +                           ARRAY_SIZE(SII_NAME), NULL, NULL);
83138 +       if (!attr) {
83139 +               err = -EINVAL;
83140 +               goto out;
83141 +       }
83143 +       root_sii = resident_data(attr);
83144 +       if (root_sii->type != ATTR_ZERO ||
83145 +           root_sii->rule != NTFS_COLLATION_TYPE_UINT) {
83146 +               err = -EINVAL;
83147 +               goto out;
83148 +       }
83150 +       err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
83151 +       if (err)
83152 +               goto out;
83154 +       fnd_sii = fnd_get();
83155 +       if (!fnd_sii) {
83156 +               err = -ENOMEM;
83157 +               goto out;
83158 +       }
83160 +       sds_size = inode->i_size;
83162 +       /* Find the last valid Id */
83163 +       sbi->security.next_id = SECURITY_ID_FIRST;
83164 +       /* Always write new security descriptors at the end of the bucket */
83165 +       sbi->security.next_off =
83166 +               Quad2Align(sds_size - SecurityDescriptorsBlockSize);
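+       /*
+        * Example (assuming Quad2Align rounds up to an 8-byte boundary):
+        * the stream always ends with a full mirror block, so for
+        * sds_size == 0x40440 the next write position in the main copy
+        * is 0x40440 - 0x40000 == 0x440.
+        */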
83168 +       cnt = 0;
83169 +       off = 0;
83170 +       ne = NULL;
83172 +       for (;;) {
83173 +               u32 next_id;
83175 +               err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
83176 +               if (err || !ne)
83177 +                       break;
83179 +               sii_e = (struct NTFS_DE_SII *)ne;
83180 +               if (le16_to_cpu(ne->view.data_size) < SIZEOF_SECURITY_HDR)
83181 +                       continue;
83183 +               next_id = le32_to_cpu(sii_e->sec_id) + 1;
83184 +               if (next_id >= sbi->security.next_id)
83185 +                       sbi->security.next_id = next_id;
83187 +               cnt += 1;
83188 +       }
83190 +       sbi->security.ni = ni;
83191 +       inode = NULL;
83192 +out:
83193 +       iput(inode);
83194 +       fnd_put(fnd_sii);
83196 +       return err;
83200 + * ntfs_get_security_by_id
83201 + *
83202 + * reads a security descriptor by its id
83203 + */
83204 +int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
83205 +                           struct SECURITY_DESCRIPTOR_RELATIVE **sd,
83206 +                           size_t *size)
83208 +       int err;
83209 +       int diff;
83210 +       struct ntfs_inode *ni = sbi->security.ni;
83211 +       struct ntfs_index *indx = &sbi->security.index_sii;
83212 +       void *p = NULL;
83213 +       struct NTFS_DE_SII *sii_e;
83214 +       struct ntfs_fnd *fnd_sii;
83215 +       struct SECURITY_HDR d_security;
83216 +       const struct INDEX_ROOT *root_sii;
83217 +       u32 t32;
83219 +       *sd = NULL;
83221 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
83223 +       fnd_sii = fnd_get();
83224 +       if (!fnd_sii) {
83225 +               err = -ENOMEM;
83226 +               goto out;
83227 +       }
83229 +       root_sii = indx_get_root(indx, ni, NULL, NULL);
83230 +       if (!root_sii) {
83231 +               err = -EINVAL;
83232 +               goto out;
83233 +       }
83235 +       /* Try to find this security descriptor in the $SII index */
83236 +       err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
83237 +                       NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
83238 +       if (err)
83239 +               goto out;
83241 +       if (diff)
83242 +               goto out;
83244 +       t32 = le32_to_cpu(sii_e->sec_hdr.size);
83245 +       if (t32 < SIZEOF_SECURITY_HDR) {
83246 +               err = -EINVAL;
83247 +               goto out;
83248 +       }
83250 +       if (t32 > SIZEOF_SECURITY_HDR + 0x10000) {
83251 +               /*
83252 +                * The security descriptor looks too big; 0x10000 is an arbitrary upper bound
83253 +                */
83254 +               err = -EFBIG;
83255 +               goto out;
83256 +       }
83258 +       *size = t32 - SIZEOF_SECURITY_HDR;
83260 +       p = ntfs_malloc(*size);
83261 +       if (!p) {
83262 +               err = -ENOMEM;
83263 +               goto out;
83264 +       }
83266 +       err = ntfs_read_run_nb(sbi, &ni->file.run,
83267 +                              le64_to_cpu(sii_e->sec_hdr.off), &d_security,
83268 +                              sizeof(d_security), NULL);
83269 +       if (err)
83270 +               goto out;
83272 +       if (memcmp(&d_security, &sii_e->sec_hdr, SIZEOF_SECURITY_HDR)) {
83273 +               err = -EINVAL;
83274 +               goto out;
83275 +       }
83277 +       err = ntfs_read_run_nb(sbi, &ni->file.run,
83278 +                              le64_to_cpu(sii_e->sec_hdr.off) +
83279 +                                      SIZEOF_SECURITY_HDR,
83280 +                              p, *size, NULL);
83281 +       if (err)
83282 +               goto out;
83284 +       *sd = p;
83285 +       p = NULL;
83287 +out:
83288 +       ntfs_free(p);
83289 +       fnd_put(fnd_sii);
83290 +       ni_unlock(ni);
83292 +       return err;
83296 + * ntfs_insert_security
83297 + *
83298 + * inserts a security descriptor into $Secure::SDS
83299 + *
83300 + * The Security Descriptor Stream data is organized into chunks of 256K bytes,
83301 + * and it contains a mirror copy of each security descriptor.  When a
83302 + * security descriptor is written at location X, another copy is written at
83303 + * location (X+256K).
83304 + * When a security descriptor would cross a 256K boundary, the write
83305 + * pointer is instead advanced by 256K to skip
83306 + * over the mirror portion.
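+ *
+ * Worked example (illustrative offsets only): a descriptor written at
+ * offset 0x440 is mirrored at 0x40440. If next_off == 0x3FF00 and the
+ * aligned descriptor size is 0x200, only 0x100 bytes remain before the
+ * 256K boundary, so next_off is advanced by 0x40000 + 0x100 to the
+ * start of the next main block at 0x80000.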
83307 + */
83308 +int ntfs_insert_security(struct ntfs_sb_info *sbi,
83309 +                        const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
83310 +                        u32 size_sd, __le32 *security_id, bool *inserted)
83312 +       int err, diff;
83313 +       struct ntfs_inode *ni = sbi->security.ni;
83314 +       struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
83315 +       struct ntfs_index *indx_sii = &sbi->security.index_sii;
83316 +       struct NTFS_DE_SDH *e;
83317 +       struct NTFS_DE_SDH sdh_e;
83318 +       struct NTFS_DE_SII sii_e;
83319 +       struct SECURITY_HDR *d_security;
83320 +       u32 new_sec_size = size_sd + SIZEOF_SECURITY_HDR;
83321 +       u32 aligned_sec_size = Quad2Align(new_sec_size);
83322 +       struct SECURITY_KEY hash_key;
83323 +       struct ntfs_fnd *fnd_sdh = NULL;
83324 +       const struct INDEX_ROOT *root_sdh;
83325 +       const struct INDEX_ROOT *root_sii;
83326 +       u64 mirr_off, new_sds_size;
83327 +       u32 next, left;
83329 +       static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
83330 +                     SecurityDescriptorsBlockSize);
83332 +       hash_key.hash = security_hash(sd, size_sd);
83333 +       hash_key.sec_id = SECURITY_ID_INVALID;
83335 +       if (inserted)
83336 +               *inserted = false;
83337 +       *security_id = SECURITY_ID_INVALID;
83339 +       /* Allocate a temporary buffer */
83340 +       d_security = ntfs_zalloc(aligned_sec_size);
83341 +       if (!d_security)
83342 +               return -ENOMEM;
83344 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
83346 +       fnd_sdh = fnd_get();
83347 +       if (!fnd_sdh) {
83348 +               err = -ENOMEM;
83349 +               goto out;
83350 +       }
83352 +       root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
83353 +       if (!root_sdh) {
83354 +               err = -EINVAL;
83355 +               goto out;
83356 +       }
83358 +       root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
83359 +       if (!root_sii) {
83360 +               err = -EINVAL;
83361 +               goto out;
83362 +       }
83364 +       /*
83365 +        * Check whether such a security descriptor already exists:
83366 +        * use the "$SDH" index and the hash to get its offset in "$SDS"
83367 +        */
83368 +       err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
83369 +                       &d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
83370 +                       fnd_sdh);
83371 +       if (err)
83372 +               goto out;
83374 +       while (e) {
83375 +               if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
83376 +                       err = ntfs_read_run_nb(sbi, &ni->file.run,
83377 +                                              le64_to_cpu(e->sec_hdr.off),
83378 +                                              d_security, new_sec_size, NULL);
83379 +                       if (err)
83380 +                               goto out;
83382 +                       if (le32_to_cpu(d_security->size) == new_sec_size &&
83383 +                           d_security->key.hash == hash_key.hash &&
83384 +                           !memcmp(d_security + 1, sd, size_sd)) {
83385 +                               *security_id = d_security->key.sec_id;
83386 +                               /* Such a security descriptor already exists */
83387 +                               err = 0;
83388 +                               goto out;
83389 +                       }
83390 +               }
83392 +               err = indx_find_sort(indx_sdh, ni, root_sdh,
83393 +                                    (struct NTFS_DE **)&e, fnd_sdh);
83394 +               if (err)
83395 +                       goto out;
83397 +               if (!e || e->key.hash != hash_key.hash)
83398 +                       break;
83399 +       }
83401 +       /* Zero unused space */
83402 +       next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
83403 +       left = SecurityDescriptorsBlockSize - next;
83405 +       /* Zero gap until SecurityDescriptorsBlockSize */
83406 +       if (left < new_sec_size) {
83407 +               /* Does not fit: advance past the remaining "left" bytes and the mirror block */
83408 +               sbi->security.next_off += SecurityDescriptorsBlockSize + left;
83409 +       }
83411 +       /* TODO: zero the tail of the previous security descriptor. */
83412 +       //used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
83414 +       /*
83415 +        * Example: 0x40438 == ni->vfs_inode.i_size and
83416 +        * 0x00440 == sbi->security.next_off, so the range
83417 +        * [0x438, 0x440) needs zeroing:
83418 +        *   if (next > used) {
83419 +        *           u32 tozero = next - used;
83420 +        *           zero "tozero" bytes at (sbi->security.next_off - tozero)
83421 +        *   }
83422 +        */
83424 +       /* format new security descriptor */
83425 +       d_security->key.hash = hash_key.hash;
83426 +       d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
83427 +       d_security->off = cpu_to_le64(sbi->security.next_off);
83428 +       d_security->size = cpu_to_le32(new_sec_size);
83429 +       memcpy(d_security + 1, sd, size_sd);
83431 +       /* Write main SDS bucket */
83432 +       err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
83433 +                               d_security, aligned_sec_size);
83435 +       if (err)
83436 +               goto out;
83438 +       mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
83439 +       new_sds_size = mirr_off + aligned_sec_size;
83441 +       if (new_sds_size > ni->vfs_inode.i_size) {
83442 +               err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
83443 +                                   ARRAY_SIZE(SDS_NAME), &ni->file.run,
83444 +                                   new_sds_size, &new_sds_size, false, NULL);
83445 +               if (err)
83446 +                       goto out;
83447 +       }
83449 +       /* Write copy SDS bucket */
83450 +       err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
83451 +                               aligned_sec_size);
83452 +       if (err)
83453 +               goto out;
83455 +       /* Fill SII entry */
83456 +       sii_e.de.view.data_off =
83457 +               cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
83458 +       sii_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
83459 +       sii_e.de.view.res = 0;
83460 +       sii_e.de.size = cpu_to_le16(SIZEOF_SII_DIRENTRY);
83461 +       sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
83462 +       sii_e.de.flags = 0;
83463 +       sii_e.de.res = 0;
83464 +       sii_e.sec_id = d_security->key.sec_id;
83465 +       memcpy(&sii_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
83467 +       err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL);
83468 +       if (err)
83469 +               goto out;
83471 +       /* Fill SDH entry */
83472 +       sdh_e.de.view.data_off =
83473 +               cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
83474 +       sdh_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
83475 +       sdh_e.de.view.res = 0;
83476 +       sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
83477 +       sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
83478 +       sdh_e.de.flags = 0;
83479 +       sdh_e.de.res = 0;
83480 +       sdh_e.key.hash = d_security->key.hash;
83481 +       sdh_e.key.sec_id = d_security->key.sec_id;
83482 +       memcpy(&sdh_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
83483 +       sdh_e.magic[0] = cpu_to_le16('I');
83484 +       sdh_e.magic[1] = cpu_to_le16('I');
83486 +       fnd_clear(fnd_sdh);
83487 +       err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
83488 +                               fnd_sdh);
83489 +       if (err)
83490 +               goto out;
83492 +       *security_id = d_security->key.sec_id;
83493 +       if (inserted)
83494 +               *inserted = true;
83496 +       /* Update the id and offset for the next descriptor */
83497 +       sbi->security.next_id += 1;
83498 +       sbi->security.next_off += aligned_sec_size;
83500 +out:
83501 +       fnd_put(fnd_sdh);
83502 +       mark_inode_dirty(&ni->vfs_inode);
83503 +       ni_unlock(ni);
83504 +       ntfs_free(d_security);
83506 +       return err;
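+
+/*
+ * Caller-side sketch (illustrative only; "sd" and "sd_size" are assumed
+ * to hold a valid self-relative descriptor):
+ *
+ *     __le32 security_id;
+ *     bool inserted;
+ *     int err = ntfs_insert_security(sbi, sd, sd_size,
+ *                                    &security_id, &inserted);
+ *     if (!err && !inserted)
+ *             ;       // an identical descriptor was already present in $SDS
+ */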
83510 + * ntfs_reparse_init
83511 + *
83512 + * loads and parses $Extend/$Reparse
83513 + */
83514 +int ntfs_reparse_init(struct ntfs_sb_info *sbi)
83516 +       int err;
83517 +       struct ntfs_inode *ni = sbi->reparse.ni;
83518 +       struct ntfs_index *indx = &sbi->reparse.index_r;
83519 +       struct ATTRIB *attr;
83520 +       struct ATTR_LIST_ENTRY *le;
83521 +       const struct INDEX_ROOT *root_r;
83523 +       if (!ni)
83524 +               return 0;
83526 +       le = NULL;
83527 +       attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
83528 +                           ARRAY_SIZE(SR_NAME), NULL, NULL);
83529 +       if (!attr) {
83530 +               err = -EINVAL;
83531 +               goto out;
83532 +       }
83534 +       root_r = resident_data(attr);
83535 +       if (root_r->type != ATTR_ZERO ||
83536 +           root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
83537 +               err = -EINVAL;
83538 +               goto out;
83539 +       }
83541 +       err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
83542 +       if (err)
83543 +               goto out;
83545 +out:
83546 +       return err;
83550 + * ntfs_objid_init
83551 + *
83552 + * loads and parses $Extend/$ObjId
83553 + */
83554 +int ntfs_objid_init(struct ntfs_sb_info *sbi)
83556 +       int err;
83557 +       struct ntfs_inode *ni = sbi->objid.ni;
83558 +       struct ntfs_index *indx = &sbi->objid.index_o;
83559 +       struct ATTRIB *attr;
83560 +       struct ATTR_LIST_ENTRY *le;
83561 +       const struct INDEX_ROOT *root;
83563 +       if (!ni)
83564 +               return 0;
83566 +       le = NULL;
83567 +       attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
83568 +                           ARRAY_SIZE(SO_NAME), NULL, NULL);
83569 +       if (!attr) {
83570 +               err = -EINVAL;
83571 +               goto out;
83572 +       }
83574 +       root = resident_data(attr);
83575 +       if (root->type != ATTR_ZERO ||
83576 +           root->rule != NTFS_COLLATION_TYPE_UINTS) {
83577 +               err = -EINVAL;
83578 +               goto out;
83579 +       }
83581 +       err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
83582 +       if (err)
83583 +               goto out;
83585 +out:
83586 +       return err;
83589 +int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
83591 +       int err;
83592 +       struct ntfs_inode *ni = sbi->objid.ni;
83593 +       struct ntfs_index *indx = &sbi->objid.index_o;
83595 +       if (!ni)
83596 +               return -EINVAL;
83598 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);
83600 +       err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);
83602 +       mark_inode_dirty(&ni->vfs_inode);
83603 +       ni_unlock(ni);
83605 +       return err;
83608 +int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
83609 +                       const struct MFT_REF *ref)
83611 +       int err;
83612 +       struct ntfs_inode *ni = sbi->reparse.ni;
83613 +       struct ntfs_index *indx = &sbi->reparse.index_r;
83614 +       struct NTFS_DE_R re;
83616 +       if (!ni)
83617 +               return -EINVAL;
83619 +       memset(&re, 0, sizeof(re));
83621 +       re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
83622 +       re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
83623 +       re.de.key_size = cpu_to_le16(sizeof(re.key));
83625 +       re.key.ReparseTag = rtag;
83626 +       memcpy(&re.key.ref, ref, sizeof(*ref));
83628 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
83630 +       err = indx_insert_entry(indx, ni, &re.de, NULL, NULL);
83632 +       mark_inode_dirty(&ni->vfs_inode);
83633 +       ni_unlock(ni);
83635 +       return err;
83638 +int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
83639 +                       const struct MFT_REF *ref)
83641 +       int err, diff;
83642 +       struct ntfs_inode *ni = sbi->reparse.ni;
83643 +       struct ntfs_index *indx = &sbi->reparse.index_r;
83644 +       struct ntfs_fnd *fnd = NULL;
83645 +       struct REPARSE_KEY rkey;
83646 +       struct NTFS_DE_R *re;
83647 +       struct INDEX_ROOT *root_r;
83649 +       if (!ni)
83650 +               return -EINVAL;
83652 +       rkey.ReparseTag = rtag;
83653 +       rkey.ref = *ref;
83655 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
83657 +       if (rtag) {
83658 +               err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
83659 +               goto out1;
83660 +       }
83662 +       fnd = fnd_get();
83663 +       if (!fnd) {
83664 +               err = -ENOMEM;
83665 +               goto out1;
83666 +       }
83668 +       root_r = indx_get_root(indx, ni, NULL, NULL);
83669 +       if (!root_r) {
83670 +               err = -EINVAL;
83671 +               goto out;
83672 +       }
83674 +       /* Passing 1 as the context forces cmp_uints to ignore rkey.ReparseTag when comparing keys */
83675 +       err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
83676 +                       (struct NTFS_DE **)&re, fnd);
83677 +       if (err)
83678 +               goto out;
83680 +       if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
83681 +               /* Should be impossible; the volume may be corrupt */
83682 +               goto out;
83683 +       }
83685 +       memcpy(&rkey, &re->key, sizeof(rkey));
83687 +       fnd_put(fnd);
83688 +       fnd = NULL;
83690 +       err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
83691 +       if (err)
83692 +               goto out;
83694 +out:
83695 +       fnd_put(fnd);
83697 +out1:
83698 +       mark_inode_dirty(&ni->vfs_inode);
83699 +       ni_unlock(ni);
83701 +       return err;
83704 +static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
83705 +                                         CLST len)
83707 +       ntfs_unmap_meta(sbi->sb, lcn, len);
83708 +       ntfs_discard(sbi, lcn, len);
83711 +void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
83713 +       CLST end, i;
83714 +       struct wnd_bitmap *wnd = &sbi->used.bitmap;
83716 +       down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
83717 +       if (!wnd_is_used(wnd, lcn, len)) {
83718 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
83720 +               end = lcn + len;
83721 +               len = 0;
83722 +               for (i = lcn; i < end; i++) {
83723 +                       if (wnd_is_used(wnd, i, 1)) {
83724 +                               if (!len)
83725 +                                       lcn = i;
83726 +                               len += 1;
83727 +                               continue;
83728 +                       }
83730 +                       if (!len)
83731 +                               continue;
83733 +                       if (trim)
83734 +                               ntfs_unmap_and_discard(sbi, lcn, len);
83736 +                       wnd_set_free(wnd, lcn, len);
83737 +                       len = 0;
83738 +               }
83740 +               if (!len)
83741 +                       goto out;
83742 +       }
83744 +       if (trim)
83745 +               ntfs_unmap_and_discard(sbi, lcn, len);
83746 +       wnd_set_free(wnd, lcn, len);
83748 +out:
83749 +       up_write(&wnd->rw_lock);
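+
+/*
+ * Illustration of the split logic above: freeing lcn 10, len 4 when only
+ * clusters 10 and 12 are marked used flags the volume NTFS_DIRTY_ERROR
+ * and then frees the used sub-runs (lcn 10, len 1) and (lcn 12, len 1)
+ * individually.
+ */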
83753 + * run_deallocate
83754 + *
83755 + * deallocates clusters
83756 + */
83757 +int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim)
83759 +       CLST lcn, len;
83760 +       size_t idx = 0;
83762 +       while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
83763 +               if (lcn == SPARSE_LCN)
83764 +                       continue;
83766 +               mark_as_free_ex(sbi, lcn, len, trim);
83767 +       }
83769 +       return 0;
83771 diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
83772 new file mode 100644
83773 index 000000000000..931a7241ef00
83774 --- /dev/null
83775 +++ b/fs/ntfs3/index.c
83776 @@ -0,0 +1,2641 @@
83777 +// SPDX-License-Identifier: GPL-2.0
83779 + *
83780 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
83781 + *
83782 + */
83784 +#include <linux/blkdev.h>
83785 +#include <linux/buffer_head.h>
83786 +#include <linux/fs.h>
83787 +#include <linux/nls.h>
83789 +#include "debug.h"
83790 +#include "ntfs.h"
83791 +#include "ntfs_fs.h"
83793 +static const struct INDEX_NAMES {
83794 +       const __le16 *name;
83795 +       u8 name_len;
83796 +} s_index_names[INDEX_MUTEX_TOTAL] = {
83797 +       { I30_NAME, ARRAY_SIZE(I30_NAME) }, { SII_NAME, ARRAY_SIZE(SII_NAME) },
83798 +       { SDH_NAME, ARRAY_SIZE(SDH_NAME) }, { SO_NAME, ARRAY_SIZE(SO_NAME) },
83799 +       { SQ_NAME, ARRAY_SIZE(SQ_NAME) },   { SR_NAME, ARRAY_SIZE(SR_NAME) },
83803 + * compares two names in an index
83804 + * if l1 != 0:
83805 + *   both keys are little-endian on-disk ATTR_FILE_NAME structs
83806 + * else:
83807 + *   key1 is a cpu_str, key2 is an ATTR_FILE_NAME
83808 + */
83809 +static int cmp_fnames(const void *key1, size_t l1, const void *key2, size_t l2,
83810 +                     const void *data)
83812 +       const struct ATTR_FILE_NAME *f2 = key2;
83813 +       const struct ntfs_sb_info *sbi = data;
83814 +       const struct ATTR_FILE_NAME *f1;
83815 +       u16 fsize2;
83816 +       bool both_case;
83818 +       if (l2 <= offsetof(struct ATTR_FILE_NAME, name))
83819 +               return -1;
83821 +       fsize2 = fname_full_size(f2);
83822 +       if (l2 < fsize2)
83823 +               return -1;
83825 +       both_case = f2->type != FILE_NAME_DOS /*&& !sbi->options.nocase*/;
83826 +       if (!l1) {
83827 +               const struct le_str *s2 = (struct le_str *)&f2->name_len;
83829 +               /*
83830 +                * If names are equal (case insensitive)
83831 +                * try to compare it case sensitive
83832 +                */
83833 +               return ntfs_cmp_names_cpu(key1, s2, sbi->upcase, both_case);
83834 +       }
83836 +       f1 = key1;
83837 +       return ntfs_cmp_names(f1->name, f1->name_len, f2->name, f2->name_len,
83838 +                             sbi->upcase, both_case);
83841 +/* $SII of $Secure and $Q of $Quota */
83842 +static int cmp_uint(const void *key1, size_t l1, const void *key2, size_t l2,
83843 +                   const void *data)
83845 +       const u32 *k1 = key1;
83846 +       const u32 *k2 = key2;
83848 +       if (l2 < sizeof(u32))
83849 +               return -1;
83851 +       if (*k1 < *k2)
83852 +               return -1;
83853 +       if (*k1 > *k2)
83854 +               return 1;
83855 +       return 0;
83858 +/* $SDH of $Secure */
83859 +static int cmp_sdh(const void *key1, size_t l1, const void *key2, size_t l2,
83860 +                  const void *data)
83862 +       const struct SECURITY_KEY *k1 = key1;
83863 +       const struct SECURITY_KEY *k2 = key2;
83864 +       u32 t1, t2;
83866 +       if (l2 < sizeof(struct SECURITY_KEY))
83867 +               return -1;
83869 +       t1 = le32_to_cpu(k1->hash);
83870 +       t2 = le32_to_cpu(k2->hash);
83872 +       /* The first value is the hash itself */
83873 +       if (t1 < t2)
83874 +               return -1;
83875 +       if (t1 > t2)
83876 +               return 1;
83878 +       /* The second value is the security id */
83879 +       if (data) {
83880 +               t1 = le32_to_cpu(k1->sec_id);
83881 +               t2 = le32_to_cpu(k2->sec_id);
83882 +               if (t1 < t2)
83883 +                       return -1;
83884 +               if (t1 > t2)
83885 +                       return 1;
83886 +       }
83888 +       return 0;
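+
+/*
+ * Ordering example (illustrative): entries sort by hash first and by
+ * sec_id only when a non-NULL context is passed, as ntfs_insert_security
+ * does; in that mode (hash 5, id 2) < (hash 5, id 3) < (hash 6, id 1),
+ * while with a NULL context all (hash 5, *) keys compare equal.
+ */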
83891 +/* $O of $ObjId and $R of $Reparse */
83892 +static int cmp_uints(const void *key1, size_t l1, const void *key2, size_t l2,
83893 +                    const void *data)
83895 +       const __le32 *k1 = key1;
83896 +       const __le32 *k2 = key2;
83897 +       size_t count;
83899 +       if ((size_t)data == 1) {
83900 +               /*
83901 +                * ni_delete_all -> ntfs_remove_reparse -> delete all with this reference
83902 +                * k1, k2 - pointers to REPARSE_KEY
83903 +                */
83905 +               k1 += 1; // skip REPARSE_KEY.ReparseTag
83906 +               k2 += 1; // skip REPARSE_KEY.ReparseTag
83907 +               if (l2 <= sizeof(int))
83908 +                       return -1;
83909 +               l2 -= sizeof(int);
83910 +               if (l1 <= sizeof(int))
83911 +                       return 1;
83912 +               l1 -= sizeof(int);
83913 +       }
83915 +       if (l2 < sizeof(int))
83916 +               return -1;
83918 +       for (count = min(l1, l2) >> 2; count > 0; --count, ++k1, ++k2) {
83919 +               u32 t1 = le32_to_cpu(*k1);
83920 +               u32 t2 = le32_to_cpu(*k2);
83922 +               if (t1 > t2)
83923 +                       return 1;
83924 +               if (t1 < t2)
83925 +                       return -1;
83926 +       }
83928 +       if (l1 > l2)
83929 +               return 1;
83930 +       if (l1 < l2)
83931 +               return -1;
83933 +       return 0;
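+
+/*
+ * Example of the ctx == 1 mode (illustrative): ntfs_remove_reparse passes
+ * (void *)1 so that two REPARSE_KEYs with different ReparseTag but the
+ * same MFT_REF compare equal, which lets an entry be deleted by its
+ * reference alone.
+ */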
83936 +static inline NTFS_CMP_FUNC get_cmp_func(const struct INDEX_ROOT *root)
83938 +       switch (root->type) {
83939 +       case ATTR_NAME:
83940 +               if (root->rule == NTFS_COLLATION_TYPE_FILENAME)
83941 +                       return &cmp_fnames;
83942 +               break;
83943 +       case ATTR_ZERO:
83944 +               switch (root->rule) {
83945 +               case NTFS_COLLATION_TYPE_UINT:
83946 +                       return &cmp_uint;
83947 +               case NTFS_COLLATION_TYPE_SECURITY_HASH:
83948 +                       return &cmp_sdh;
83949 +               case NTFS_COLLATION_TYPE_UINTS:
83950 +                       return &cmp_uints;
83951 +               default:
83952 +                       break;
83953 +               }
83954 +       default:
83955 +               break;
83956 +       }
83958 +       return NULL;
83961 +struct bmp_buf {
83962 +       struct ATTRIB *b;
83963 +       struct mft_inode *mi;
83964 +       struct buffer_head *bh;
83965 +       ulong *buf;
83966 +       size_t bit;
83967 +       u32 nbits;
83968 +       u64 new_valid;
83971 +static int bmp_buf_get(struct ntfs_index *indx, struct ntfs_inode *ni,
83972 +                      size_t bit, struct bmp_buf *bbuf)
83974 +       struct ATTRIB *b;
83975 +       size_t data_size, valid_size, vbo, off = bit >> 3;
83976 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
83977 +       CLST vcn = off >> sbi->cluster_bits;
83978 +       struct ATTR_LIST_ENTRY *le = NULL;
83979 +       struct buffer_head *bh;
83980 +       struct super_block *sb;
83981 +       u32 blocksize;
83982 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
83984 +       bbuf->bh = NULL;
83986 +       b = ni_find_attr(ni, NULL, &le, ATTR_BITMAP, in->name, in->name_len,
83987 +                        &vcn, &bbuf->mi);
83988 +       bbuf->b = b;
83989 +       if (!b)
83990 +               return -EINVAL;
83992 +       if (!b->non_res) {
83993 +               data_size = le32_to_cpu(b->res.data_size);
83995 +               if (off >= data_size)
83996 +                       return -EINVAL;
83998 +               bbuf->buf = (ulong *)resident_data(b);
83999 +               bbuf->bit = 0;
84000 +               bbuf->nbits = data_size * 8;
84002 +               return 0;
84003 +       }
84005 +       data_size = le64_to_cpu(b->nres.data_size);
84006 +       if (WARN_ON(off >= data_size)) {
84007 +               /* Looks like a filesystem error */
84008 +               return -EINVAL;
84009 +       }
84011 +       valid_size = le64_to_cpu(b->nres.valid_size);
84013 +       bh = ntfs_bread_run(sbi, &indx->bitmap_run, off);
84014 +       if (!bh)
84015 +               return -EIO;
84017 +       if (IS_ERR(bh))
84018 +               return PTR_ERR(bh);
84020 +       bbuf->bh = bh;
84022 +       if (buffer_locked(bh))
84023 +               __wait_on_buffer(bh);
84025 +       lock_buffer(bh);
84027 +       sb = sbi->sb;
84028 +       blocksize = sb->s_blocksize;
84030 +       vbo = off & ~(size_t)sbi->block_mask;
84032 +       bbuf->new_valid = vbo + blocksize;
84033 +       if (bbuf->new_valid <= valid_size)
84034 +               bbuf->new_valid = 0;
84035 +       else if (bbuf->new_valid > data_size)
84036 +               bbuf->new_valid = data_size;
84038 +       if (vbo >= valid_size) {
84039 +               memset(bh->b_data, 0, blocksize);
84040 +       } else if (vbo + blocksize > valid_size) {
84041 +               u32 voff = valid_size & sbi->block_mask;
84043 +               memset(bh->b_data + voff, 0, blocksize - voff);
84044 +       }
84046 +       bbuf->buf = (ulong *)bh->b_data;
84047 +       bbuf->bit = 8 * (off & ~(size_t)sbi->block_mask);
84048 +       bbuf->nbits = 8 * blocksize;
84050 +       return 0;
84053 +static void bmp_buf_put(struct bmp_buf *bbuf, bool dirty)
84055 +       struct buffer_head *bh = bbuf->bh;
84056 +       struct ATTRIB *b = bbuf->b;
84058 +       if (!bh) {
84059 +               if (b && !b->non_res && dirty)
84060 +                       bbuf->mi->dirty = true;
84061 +               return;
84062 +       }
84064 +       if (!dirty)
84065 +               goto out;
84067 +       if (bbuf->new_valid) {
84068 +               b->nres.valid_size = cpu_to_le64(bbuf->new_valid);
84069 +               bbuf->mi->dirty = true;
84070 +       }
84072 +       set_buffer_uptodate(bh);
84073 +       mark_buffer_dirty(bh);
84075 +out:
84076 +       unlock_buffer(bh);
84077 +       put_bh(bh);
84081 + * indx_mark_used
84082 + *
84083 + * marks the bit 'bit' as used
84084 + */
84085 +static int indx_mark_used(struct ntfs_index *indx, struct ntfs_inode *ni,
84086 +                         size_t bit)
84088 +       int err;
84089 +       struct bmp_buf bbuf;
84091 +       err = bmp_buf_get(indx, ni, bit, &bbuf);
84092 +       if (err)
84093 +               return err;
84095 +       __set_bit(bit - bbuf.bit, bbuf.buf);
84097 +       bmp_buf_put(&bbuf, true);
84099 +       return 0;
84103 + * indx_mark_free
84104 + *
84105 + * marks the bit 'bit' as free
84106 + */
84107 +static int indx_mark_free(struct ntfs_index *indx, struct ntfs_inode *ni,
84108 +                         size_t bit)
84110 +       int err;
84111 +       struct bmp_buf bbuf;
84113 +       err = bmp_buf_get(indx, ni, bit, &bbuf);
84114 +       if (err)
84115 +               return err;
84117 +       __clear_bit(bit - bbuf.bit, bbuf.buf);
84119 +       bmp_buf_put(&bbuf, true);
84121 +       return 0;
84125 + * When ntfs_readdir calls this function (indx_used_bit -> scan_nres_bitmap),
84126 + * the inode is locked shared and ni_lock is not held, so an
84127 + * rw_semaphore is used for read/write access to bitmap_run
84128 + */
84129 +static int scan_nres_bitmap(struct ntfs_inode *ni, struct ATTRIB *bitmap,
84130 +                           struct ntfs_index *indx, size_t from,
84131 +                           bool (*fn)(const ulong *buf, u32 bit, u32 bits,
84132 +                                      size_t *ret),
84133 +                           size_t *ret)
84135 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
84136 +       struct super_block *sb = sbi->sb;
84137 +       struct runs_tree *run = &indx->bitmap_run;
84138 +       struct rw_semaphore *lock = &indx->run_lock;
84139 +       u32 nbits = sb->s_blocksize * 8;
84140 +       u32 blocksize = sb->s_blocksize;
84141 +       u64 valid_size = le64_to_cpu(bitmap->nres.valid_size);
84142 +       u64 data_size = le64_to_cpu(bitmap->nres.data_size);
84143 +       sector_t eblock = bytes_to_block(sb, data_size);
84144 +       size_t vbo = from >> 3;
84145 +       sector_t blk = (vbo & sbi->cluster_mask) >> sb->s_blocksize_bits;
84146 +       sector_t vblock = vbo >> sb->s_blocksize_bits;
84147 +       sector_t blen, block;
84148 +       CLST lcn, clen, vcn, vcn_next;
84149 +       size_t idx;
84150 +       struct buffer_head *bh;
84151 +       bool ok;
84153 +       *ret = MINUS_ONE_T;
84155 +       if (vblock >= eblock)
84156 +               return 0;
84158 +       from &= nbits - 1;
84159 +       vcn = vbo >> sbi->cluster_bits;
84161 +       down_read(lock);
84162 +       ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx);
84163 +       up_read(lock);
84165 +next_run:
84166 +       if (!ok) {
84167 +               int err;
84168 +               const struct INDEX_NAMES *name = &s_index_names[indx->type];
84170 +               down_write(lock);
84171 +               err = attr_load_runs_vcn(ni, ATTR_BITMAP, name->name,
84172 +                                        name->name_len, run, vcn);
84173 +               up_write(lock);
84174 +               if (err)
84175 +                       return err;
84176 +               down_read(lock);
84177 +               ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx);
84178 +               up_read(lock);
84179 +               if (!ok)
84180 +                       return -EINVAL;
84181 +       }
84183 +       blen = (sector_t)clen * sbi->blocks_per_cluster;
84184 +       block = (sector_t)lcn * sbi->blocks_per_cluster;
84186 +       for (; blk < blen; blk++, from = 0) {
84187 +               bh = ntfs_bread(sb, block + blk);
84188 +               if (!bh)
84189 +                       return -EIO;
84191 +               vbo = (u64)vblock << sb->s_blocksize_bits;
84192 +               if (vbo >= valid_size) {
84193 +                       memset(bh->b_data, 0, blocksize);
84194 +               } else if (vbo + blocksize > valid_size) {
84195 +                       u32 voff = valid_size & sbi->block_mask;
84197 +                       memset(bh->b_data + voff, 0, blocksize - voff);
84198 +               }
84200 +               if (vbo + blocksize > data_size)
84201 +                       nbits = 8 * (data_size - vbo);
84203 +               ok = nbits > from ? (*fn)((ulong *)bh->b_data, from, nbits, ret)
84204 +                                 : false;
84205 +               put_bh(bh);
84207 +               if (ok) {
84208 +                       *ret += 8 * vbo;
84209 +                       return 0;
84210 +               }
84212 +               if (++vblock >= eblock) {
84213 +                       *ret = MINUS_ONE_T;
84214 +                       return 0;
84215 +               }
84216 +       }
84217 +       blk = 0;
84218 +       vcn_next = vcn + clen;
84219 +       down_read(lock);
84220 +       ok = run_get_entry(run, ++idx, &vcn, &lcn, &clen) && vcn == vcn_next;
84221 +       if (!ok)
84222 +               vcn = vcn_next;
84223 +       up_read(lock);
84224 +       goto next_run;
84227 +static bool scan_for_free(const ulong *buf, u32 bit, u32 bits, size_t *ret)
84229 +       size_t pos = find_next_zero_bit(buf, bits, bit);
84231 +       if (pos >= bits)
84232 +               return false;
84233 +       *ret = pos;
84234 +       return true;
84238 + * indx_find_free
84239 + *
84240 + * looks for a free bit
84241 + * *bit is set to MINUS_ONE_T if there are no free bits
84242 + */
84243 +static int indx_find_free(struct ntfs_index *indx, struct ntfs_inode *ni,
84244 +                         size_t *bit, struct ATTRIB **bitmap)
84246 +       struct ATTRIB *b;
84247 +       struct ATTR_LIST_ENTRY *le = NULL;
84248 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
84249 +       int err;
84251 +       b = ni_find_attr(ni, NULL, &le, ATTR_BITMAP, in->name, in->name_len,
84252 +                        NULL, NULL);
84254 +       if (!b)
84255 +               return -ENOENT;
84257 +       *bitmap = b;
84258 +       *bit = MINUS_ONE_T;
84260 +       if (!b->non_res) {
84261 +               u32 nbits = 8 * le32_to_cpu(b->res.data_size);
84262 +               size_t pos = find_next_zero_bit(resident_data(b), nbits, 0);
84264 +               if (pos < nbits)
84265 +                       *bit = pos;
84266 +       } else {
84267 +               err = scan_nres_bitmap(ni, b, indx, 0, &scan_for_free, bit);
84269 +               if (err)
84270 +                       return err;
84271 +       }
84273 +       return 0;
84276 +static bool scan_for_used(const ulong *buf, u32 bit, u32 bits, size_t *ret)
84278 +       size_t pos = find_next_bit(buf, bits, bit);
84280 +       if (pos >= bits)
84281 +               return false;
84282 +       *ret = pos;
84283 +       return true;
84287 + * indx_used_bit
84288 + *
84289 + * looks for a used bit
84290 + * *bit is set to MINUS_ONE_T if there are no used bits
84291 + */
84292 +int indx_used_bit(struct ntfs_index *indx, struct ntfs_inode *ni, size_t *bit)
84294 +       struct ATTRIB *b;
84295 +       struct ATTR_LIST_ENTRY *le = NULL;
84296 +       size_t from = *bit;
84297 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
84298 +       int err;
84300 +       b = ni_find_attr(ni, NULL, &le, ATTR_BITMAP, in->name, in->name_len,
84301 +                        NULL, NULL);
84303 +       if (!b)
84304 +               return -ENOENT;
84306 +       *bit = MINUS_ONE_T;
84308 +       if (!b->non_res) {
84309 +               u32 nbits = le32_to_cpu(b->res.data_size) * 8;
84310 +               size_t pos = find_next_bit(resident_data(b), nbits, from);
84312 +               if (pos < nbits)
84313 +                       *bit = pos;
84314 +       } else {
84315 +               err = scan_nres_bitmap(ni, b, indx, from, &scan_for_used, bit);
84316 +               if (err)
84317 +                       return err;
84318 +       }
84320 +       return 0;
84324 + * hdr_find_split
84325 + *
84326 + * finds the point at which the index allocation buffer should
84327 + * be split.
84328 + * NOTE: this function never returns the 'END' entry; it returns NULL on error
84329 + */
84330 +static const struct NTFS_DE *hdr_find_split(const struct INDEX_HDR *hdr)
84332 +       size_t o;
84333 +       const struct NTFS_DE *e = hdr_first_de(hdr);
84334 +       u32 used_2 = le32_to_cpu(hdr->used) >> 1;
84335 +       u16 esize;
84337 +       if (!e || de_is_last(e))
84338 +               return NULL;
+
+       /* Read e->size only after the NULL check above */
+       esize = le16_to_cpu(e->size);
84340 +       for (o = le32_to_cpu(hdr->de_off) + esize; o < used_2; o += esize) {
84341 +               const struct NTFS_DE *p = e;
84343 +               e = Add2Ptr(hdr, o);
84345 +               /* We must not return END entry */
84346 +               if (de_is_last(e))
84347 +                       return p;
84349 +               esize = le16_to_cpu(e->size);
84350 +       }
84352 +       return e;
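+
+/*
+ * Example (illustrative): with hdr->used == 0x200 the loop above walks
+ * entries until their offset reaches half of that (0x100), so the split
+ * point lands near the middle of the buffer; if the walk hits the END
+ * entry, its predecessor is returned instead.
+ */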
84356 + * hdr_insert_head
84357 + *
84358 + * inserts some entries at the beginning of the buffer.
84359 + * It is used to insert entries into a newly-created buffer.
84360 + */
84361 +static const struct NTFS_DE *hdr_insert_head(struct INDEX_HDR *hdr,
84362 +                                            const void *ins, u32 ins_bytes)
84364 +       u32 to_move;
84365 +       struct NTFS_DE *e = hdr_first_de(hdr);
84366 +       u32 used = le32_to_cpu(hdr->used);
84368 +       if (!e)
84369 +               return NULL;
84371 +       /* Now we just make room for the inserted entries and jam it in. */
84372 +       to_move = used - le32_to_cpu(hdr->de_off);
84373 +       memmove(Add2Ptr(e, ins_bytes), e, to_move);
84374 +       memcpy(e, ins, ins_bytes);
84375 +       hdr->used = cpu_to_le32(used + ins_bytes);
84377 +       return e;
84380 +void fnd_clear(struct ntfs_fnd *fnd)
84382 +       int i;
84384 +       for (i = 0; i < fnd->level; i++) {
84385 +               struct indx_node *n = fnd->nodes[i];
84387 +               if (!n)
84388 +                       continue;
84390 +               put_indx_node(n);
84391 +               fnd->nodes[i] = NULL;
84392 +       }
84393 +       fnd->level = 0;
84394 +       fnd->root_de = NULL;
84397 +static int fnd_push(struct ntfs_fnd *fnd, struct indx_node *n,
84398 +                   struct NTFS_DE *e)
84400 +       int i;
84402 +       i = fnd->level;
84403 +       if (i < 0 || i >= ARRAY_SIZE(fnd->nodes))
84404 +               return -EINVAL;
84405 +       fnd->nodes[i] = n;
84406 +       fnd->de[i] = e;
84407 +       fnd->level += 1;
84408 +       return 0;
84411 +static struct indx_node *fnd_pop(struct ntfs_fnd *fnd)
84413 +       struct indx_node *n;
84414 +       int i = fnd->level;
84416 +       i -= 1;
84417 +       n = fnd->nodes[i];
84418 +       fnd->nodes[i] = NULL;
84419 +       fnd->level = i;
84421 +       return n;
84424 +static bool fnd_is_empty(struct ntfs_fnd *fnd)
84426 +       if (!fnd->level)
84427 +               return !fnd->root_de;
84429 +       return !fnd->de[fnd->level - 1];
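+
+/*
+ * Finder lifecycle sketch (illustrative; fnd_get/fnd_put are defined
+ * elsewhere in this patch): a lookup pushes one (node, entry) pair per
+ * tree level, and fnd_clear releases the pushed nodes.
+ *
+ *     struct ntfs_fnd *fnd = fnd_get();
+ *     if (fnd) {
+ *             err = indx_find(indx, ni, root, key, key_len, ctx,
+ *                             &diff, &e, fnd);
+ *             fnd_put(fnd);
+ *     }
+ */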
84433 + * hdr_find_e
84434 + *
84435 + * locates an entry in the index buffer.
84436 + * If no matching entry is found, it returns the first entry that is greater
84437 + * than the desired entry. If the search key is greater than all entries in
84438 + * the buffer, it returns the 'end' entry. This function does a binary search
84439 + * of the current index buffer to find the first entry that is <= the search
84440 + * value. Returns NULL on error.
84441 + */
84442 +static struct NTFS_DE *hdr_find_e(const struct ntfs_index *indx,
84443 +                                 const struct INDEX_HDR *hdr, const void *key,
84444 +                                 size_t key_len, const void *ctx, int *diff)
84446 +       struct NTFS_DE *e;
84447 +       NTFS_CMP_FUNC cmp = indx->cmp;
84448 +       u32 e_size, e_key_len;
84449 +       u32 end = le32_to_cpu(hdr->used);
84450 +       u32 off = le32_to_cpu(hdr->de_off);
84452 +#ifdef NTFS3_INDEX_BINARY_SEARCH
84453 +       int max_idx = 0, fnd, min_idx;
84454 +       int nslots = 64;
84455 +       u16 *offs;
84457 +       if (end > 0x10000)
84458 +               goto next;
84460 +       offs = ntfs_malloc(sizeof(u16) * nslots);
84461 +       if (!offs)
84462 +               goto next;
84464 +       /* use binary search algorithm */
84465 +next1:
84466 +       if (off + sizeof(struct NTFS_DE) > end) {
84467 +               e = NULL;
84468 +               goto out1;
84469 +       }
84470 +       e = Add2Ptr(hdr, off);
84471 +       e_size = le16_to_cpu(e->size);
84473 +       if (e_size < sizeof(struct NTFS_DE) || off + e_size > end) {
84474 +               e = NULL;
84475 +               goto out1;
84476 +       }
84478 +       if (max_idx >= nslots) {
84479 +               u16 *ptr;
84480 +               int new_slots = QuadAlign(2 * nslots);
84482 +               ptr = ntfs_malloc(sizeof(u16) * new_slots);
84483 +               if (ptr)
84484 +                       memcpy(ptr, offs, sizeof(u16) * max_idx);
84485 +               ntfs_free(offs);
84486 +               offs = ptr;
84487 +               nslots = new_slots;
84488 +               if (!ptr)
84489 +                       goto next;
84490 +       }
84492 +       /* Store entry table */
84493 +       offs[max_idx] = off;
84495 +       if (!de_is_last(e)) {
84496 +               off += e_size;
84497 +               max_idx += 1;
84498 +               goto next1;
84499 +       }
84501 +       /*
84502 +        * The table of pointers has been built;
84503 +        * use binary search to find the entry that is <= the search value
84504 +        */
84505 +       fnd = -1;
84506 +       min_idx = 0;
84508 +       while (min_idx <= max_idx) {
84509 +               int mid_idx = min_idx + ((max_idx - min_idx) >> 1);
84510 +               int diff2;
84512 +               e = Add2Ptr(hdr, offs[mid_idx]);
84514 +               e_key_len = le16_to_cpu(e->key_size);
84516 +               diff2 = (*cmp)(key, key_len, e + 1, e_key_len, ctx);
84518 +               if (!diff2) {
84519 +                       *diff = 0;
84520 +                       goto out1;
84521 +               }
84523 +               if (diff2 < 0) {
84524 +                       max_idx = mid_idx - 1;
84525 +                       fnd = mid_idx;
84526 +                       if (!fnd)
84527 +                               break;
84528 +               } else {
84529 +                       min_idx = mid_idx + 1;
84530 +               }
84531 +       }
84533 +       if (fnd == -1) {
84534 +               e = NULL;
84535 +               goto out1;
84536 +       }
84538 +       *diff = -1;
84539 +       e = Add2Ptr(hdr, offs[fnd]);
84541 +out1:
84542 +       ntfs_free(offs);
84544 +       return e;
84545 +#endif
84547 +next:
84548 +       /*
84549 +        * Index entries are sorted;
84550 +        * enumerate all entries until we find the entry that is <= the search value
84551 +        */
84552 +       if (off + sizeof(struct NTFS_DE) > end)
84553 +               return NULL;
84555 +       e = Add2Ptr(hdr, off);
84556 +       e_size = le16_to_cpu(e->size);
84558 +       if (e_size < sizeof(struct NTFS_DE) || off + e_size > end)
84559 +               return NULL;
84561 +       off += e_size;
84563 +       e_key_len = le16_to_cpu(e->key_size);
84565 +       *diff = (*cmp)(key, key_len, e + 1, e_key_len, ctx);
84566 +       if (!*diff)
84567 +               return e;
84569 +       if (*diff <= 0)
84570 +               return e;
84572 +       if (de_is_last(e)) {
84573 +               *diff = 1;
84574 +               return e;
84575 +       }
84576 +       goto next;
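+
+/*
+ * Example of hdr_find_e semantics (illustrative): for entries with keys
+ * {2, 5, 8, END}, searching for 5 returns the "5" entry with *diff == 0;
+ * searching for 6 returns the "8" entry with *diff < 0; searching for 9
+ * returns the END entry with a nonzero *diff.
+ */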
84580 + * hdr_insert_de
84581 + *
84582 + * inserts an index entry into the buffer.
84583 + * 'before' should be a pointer previously returned from hdr_find_e
84584 + */
84585 +static struct NTFS_DE *hdr_insert_de(const struct ntfs_index *indx,
84586 +                                    struct INDEX_HDR *hdr,
84587 +                                    const struct NTFS_DE *de,
84588 +                                    struct NTFS_DE *before, const void *ctx)
84590 +       int diff;
84591 +       size_t off = PtrOffset(hdr, before);
84592 +       u32 used = le32_to_cpu(hdr->used);
84593 +       u32 total = le32_to_cpu(hdr->total);
84594 +       u16 de_size = le16_to_cpu(de->size);
84596 +       /* First, check to see if there's enough room */
84597 +       if (used + de_size > total)
84598 +               return NULL;
84600 +       /* We know there's enough space, so we know we'll succeed. */
84601 +       if (before) {
84602 +               /* Check that before is inside Index */
84603 +               if (off >= used || off < le32_to_cpu(hdr->de_off) ||
84604 +                   off + le16_to_cpu(before->size) > total) {
84605 +                       return NULL;
84606 +               }
84607 +               goto ok;
84608 +       }
84609 +       /* No insertion point was supplied; find it manually */
84610 +       before = hdr_find_e(indx, hdr, de + 1, le16_to_cpu(de->key_size), ctx,
84611 +                           &diff);
84612 +       if (!before)
84613 +               return NULL;
84614 +       off = PtrOffset(hdr, before);
84616 +ok:
84617 +       /* Now we just make room for the entry and jam it in. */
84618 +       memmove(Add2Ptr(before, de_size), before, used - off);
84620 +       hdr->used = cpu_to_le32(used + de_size);
84621 +       memcpy(before, de, de_size);
84623 +       return before;
84627 + * hdr_delete_de
84628 + *
84629 + * removes an entry from the index buffer
84630 + */
84631 +static inline struct NTFS_DE *hdr_delete_de(struct INDEX_HDR *hdr,
84632 +                                           struct NTFS_DE *re)
84634 +       u32 used = le32_to_cpu(hdr->used);
84635 +       u16 esize = le16_to_cpu(re->size);
84636 +       u32 off = PtrOffset(hdr, re);
84637 +       int bytes = used - (off + esize);
84639 +       if (off >= used || esize < sizeof(struct NTFS_DE) ||
84640 +           bytes < sizeof(struct NTFS_DE))
84641 +               return NULL;
84643 +       hdr->used = cpu_to_le32(used - esize);
84644 +       memmove(re, Add2Ptr(re, esize), bytes);
84646 +       return re;
84649 +void indx_clear(struct ntfs_index *indx)
84651 +       run_close(&indx->alloc_run);
84652 +       run_close(&indx->bitmap_run);
84655 +int indx_init(struct ntfs_index *indx, struct ntfs_sb_info *sbi,
84656 +             const struct ATTRIB *attr, enum index_mutex_classed type)
84658 +       u32 t32;
84659 +       const struct INDEX_ROOT *root = resident_data(attr);
84661 +       /* Check root fields */
84662 +       if (!root->index_block_clst)
84663 +               return -EINVAL;
84665 +       indx->type = type;
84666 +       indx->idx2vbn_bits = __ffs(root->index_block_clst);
84668 +       t32 = le32_to_cpu(root->index_block_size);
84669 +       indx->index_bits = blksize_bits(t32);
84671 +       /* Check index record size */
84672 +       if (t32 < sbi->cluster_size) {
84673 +               /* The index record is smaller than a cluster; use 512-byte blocks */
84674 +               if (t32 != root->index_block_clst * SECTOR_SIZE)
84675 +                       return -EINVAL;
84677 +               /* Check alignment to a cluster */
84678 +               if ((sbi->cluster_size >> SECTOR_SHIFT) &
84679 +                   (root->index_block_clst - 1)) {
84680 +                       return -EINVAL;
84681 +               }
84683 +               indx->vbn2vbo_bits = SECTOR_SHIFT;
84684 +       } else {
84685 +               /* The index record size must be a multiple of the cluster size */
84686 +               if (t32 != root->index_block_clst << sbi->cluster_bits)
84687 +                       return -EINVAL;
84689 +               indx->vbn2vbo_bits = sbi->cluster_bits;
84690 +       }
84692 +       init_rwsem(&indx->run_lock);
84694 +       indx->cmp = get_cmp_func(root);
84695 +       return indx->cmp ? 0 : -EINVAL;
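+
+/*
+ * Geometry example (illustrative): with 4K clusters and a 4K index
+ * block, index_block_clst == 1, so idx2vbn_bits == 0 and vbn2vbo_bits
+ * == cluster_bits (12); with a 1K index block on the same volume,
+ * index_block_clst == 2 (512-byte blocks) and vbn2vbo_bits ==
+ * SECTOR_SHIFT (9).
+ */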
84698 +static struct indx_node *indx_new(struct ntfs_index *indx,
84699 +                                 struct ntfs_inode *ni, CLST vbn,
84700 +                                 const __le64 *sub_vbn)
84702 +       int err;
84703 +       struct NTFS_DE *e;
84704 +       struct indx_node *r;
84705 +       struct INDEX_HDR *hdr;
84706 +       struct INDEX_BUFFER *index;
84707 +       u64 vbo = (u64)vbn << indx->vbn2vbo_bits;
84708 +       u32 bytes = 1u << indx->index_bits;
84709 +       u16 fn;
84710 +       u32 eo;
84712 +       r = ntfs_zalloc(sizeof(struct indx_node));
84713 +       if (!r)
84714 +               return ERR_PTR(-ENOMEM);
84716 +       index = ntfs_zalloc(bytes);
84717 +       if (!index) {
84718 +               ntfs_free(r);
84719 +               return ERR_PTR(-ENOMEM);
84720 +       }
84722 +       err = ntfs_get_bh(ni->mi.sbi, &indx->alloc_run, vbo, bytes, &r->nb);
84724 +       if (err) {
84725 +               ntfs_free(index);
84726 +               ntfs_free(r);
84727 +               return ERR_PTR(err);
84728 +       }
84730 +       /* Create header */
84731 +       index->rhdr.sign = NTFS_INDX_SIGNATURE;
84732 +       index->rhdr.fix_off = cpu_to_le16(sizeof(struct INDEX_BUFFER)); // 0x28
84733 +       fn = (bytes >> SECTOR_SHIFT) + 1; // 9
84734 +       index->rhdr.fix_num = cpu_to_le16(fn);
84735 +       index->vbn = cpu_to_le64(vbn);
84736 +       hdr = &index->ihdr;
84737 +       eo = QuadAlign(sizeof(struct INDEX_BUFFER) + fn * sizeof(short));
84738 +       hdr->de_off = cpu_to_le32(eo);
84740 +       e = Add2Ptr(hdr, eo);
84742 +       if (sub_vbn) {
84743 +               e->flags = NTFS_IE_LAST | NTFS_IE_HAS_SUBNODES;
84744 +               e->size = cpu_to_le16(sizeof(struct NTFS_DE) + sizeof(u64));
84745 +               hdr->used =
84746 +                       cpu_to_le32(eo + sizeof(struct NTFS_DE) + sizeof(u64));
84747 +               de_set_vbn_le(e, *sub_vbn);
84748 +               hdr->flags = 1;
84749 +       } else {
84750 +               e->size = cpu_to_le16(sizeof(struct NTFS_DE));
84751 +               hdr->used = cpu_to_le32(eo + sizeof(struct NTFS_DE));
84752 +               e->flags = NTFS_IE_LAST;
84753 +       }
84755 +       hdr->total = cpu_to_le32(bytes - offsetof(struct INDEX_BUFFER, ihdr));
84757 +       r->index = index;
84758 +       return r;
84761 +struct INDEX_ROOT *indx_get_root(struct ntfs_index *indx, struct ntfs_inode *ni,
84762 +                                struct ATTRIB **attr, struct mft_inode **mi)
84764 +       struct ATTR_LIST_ENTRY *le = NULL;
84765 +       struct ATTRIB *a;
84766 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
84768 +       a = ni_find_attr(ni, NULL, &le, ATTR_ROOT, in->name, in->name_len, NULL,
84769 +                        mi);
84770 +       if (!a)
84771 +               return NULL;
84773 +       if (attr)
84774 +               *attr = a;
84776 +       return resident_data_ex(a, sizeof(struct INDEX_ROOT));
84779 +static int indx_write(struct ntfs_index *indx, struct ntfs_inode *ni,
84780 +                     struct indx_node *node, int sync)
84782 +       struct INDEX_BUFFER *ib = node->index;
84784 +       return ntfs_write_bh(ni->mi.sbi, &ib->rhdr, &node->nb, sync);
84788 + * When ntfs_readdir calls this function,
84789 + * the inode is locked shared and ni_lock is not held, so an
84790 + * rw_semaphore is used for read/write access to alloc_run
84791 + */
84792 +int indx_read(struct ntfs_index *indx, struct ntfs_inode *ni, CLST vbn,
84793 +             struct indx_node **node)
84795 +       int err;
84796 +       struct INDEX_BUFFER *ib;
84797 +       struct runs_tree *run = &indx->alloc_run;
84798 +       struct rw_semaphore *lock = &indx->run_lock;
84799 +       u64 vbo = (u64)vbn << indx->vbn2vbo_bits;
84800 +       u32 bytes = 1u << indx->index_bits;
84801 +       struct indx_node *in = *node;
84802 +       const struct INDEX_NAMES *name;
84804 +       if (!in) {
84805 +               in = ntfs_zalloc(sizeof(struct indx_node));
84806 +               if (!in)
84807 +                       return -ENOMEM;
84808 +       } else {
84809 +               nb_put(&in->nb);
84810 +       }
84812 +       ib = in->index;
84813 +       if (!ib) {
84814 +               ib = ntfs_malloc(bytes);
84815 +               if (!ib) {
84816 +                       err = -ENOMEM;
84817 +                       goto out;
84818 +               }
84819 +       }
84821 +       down_read(lock);
84822 +       err = ntfs_read_bh(ni->mi.sbi, run, vbo, &ib->rhdr, bytes, &in->nb);
84823 +       up_read(lock);
84824 +       if (!err)
84825 +               goto ok;
84827 +       if (err == -E_NTFS_FIXUP)
84828 +               goto ok;
84830 +       if (err != -ENOENT)
84831 +               goto out;
84833 +       name = &s_index_names[indx->type];
84834 +       down_write(lock);
84835 +       err = attr_load_runs_range(ni, ATTR_ALLOC, name->name, name->name_len,
84836 +                                  run, vbo, vbo + bytes);
84837 +       up_write(lock);
84838 +       if (err)
84839 +               goto out;
84841 +       down_read(lock);
84842 +       err = ntfs_read_bh(ni->mi.sbi, run, vbo, &ib->rhdr, bytes, &in->nb);
84843 +       up_read(lock);
84844 +       if (err == -E_NTFS_FIXUP)
84845 +               goto ok;
84847 +       if (err)
84848 +               goto out;
84850 +ok:
84851 +       if (err == -E_NTFS_FIXUP) {
84852 +               ntfs_write_bh(ni->mi.sbi, &ib->rhdr, &in->nb, 0);
84853 +               err = 0;
84854 +       }
84856 +       in->index = ib;
84857 +       *node = in;
84859 +out:
84860 +       if (ib != in->index)
84861 +               ntfs_free(ib);
84863 +       if (*node != in) {
84864 +               nb_put(&in->nb);
84865 +               ntfs_free(in);
84866 +       }
84868 +       return err;
84869 +}
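/*
 * A minimal sketch of the node-reuse contract indx_read() implements
 * above: pass *node == NULL on the first call to have the helper
 * allocate, and release the result with put_indx_node() when done.
 * example_read_block is a hypothetical caller, not part of the driver.
 */
static int example_read_block(struct ntfs_index *indx, struct ntfs_inode *ni,
                              CLST vbn)
{
        struct indx_node *node = NULL; /* NULL asks indx_read() to allocate */
        int err = indx_read(indx, ni, vbn, &node);

        if (err)
                return err;

        /* ... inspect node->index (struct INDEX_BUFFER) here ... */

        put_indx_node(node); /* frees the buffers taken by indx_read() */
        return 0;
}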
84870 +
84871 +/*
84872 + * indx_find
84873 + *
84874 + * Scans the NTFS directory index for the given entry.
84875 + */
84876 +int indx_find(struct ntfs_index *indx, struct ntfs_inode *ni,
84877 +             const struct INDEX_ROOT *root, const void *key, size_t key_len,
84878 +             const void *ctx, int *diff, struct NTFS_DE **entry,
84879 +             struct ntfs_fnd *fnd)
84880 +{
84881 +       int err;
84882 +       struct NTFS_DE *e;
84883 +       const struct INDEX_HDR *hdr;
84884 +       struct indx_node *node;
84886 +       if (!root)
84887 +               root = indx_get_root(&ni->dir, ni, NULL, NULL);
84889 +       if (!root) {
84890 +               err = -EINVAL;
84891 +               goto out;
84892 +       }
84894 +       hdr = &root->ihdr;
84896 +       /* Check cache */
84897 +       e = fnd->level ? fnd->de[fnd->level - 1] : fnd->root_de;
84898 +       if (e && !de_is_last(e) &&
84899 +           !(*indx->cmp)(key, key_len, e + 1, le16_to_cpu(e->key_size), ctx)) {
84900 +               *entry = e;
84901 +               *diff = 0;
84902 +               return 0;
84903 +       }
84905 +       /* Soft finder reset */
84906 +       fnd_clear(fnd);
84908 +       /* Look up the entry that is <= the search value. */
84909 +       e = hdr_find_e(indx, hdr, key, key_len, ctx, diff);
84910 +       if (!e)
84911 +               return -EINVAL;
84913 +       if (fnd)
84914 +               fnd->root_de = e;
84916 +       err = 0;
84918 +       for (;;) {
84919 +               node = NULL;
84920 +               if (*diff >= 0 || !de_has_vcn_ex(e)) {
84921 +                       *entry = e;
84922 +                       goto out;
84923 +               }
84925 +               /* Read next level. */
84926 +               err = indx_read(indx, ni, de_get_vbn(e), &node);
84927 +               if (err)
84928 +                       goto out;
84930 +               /* Look up the entry that is <= the search value. */
84931 +               e = hdr_find_e(indx, &node->index->ihdr, key, key_len, ctx,
84932 +                              diff);
84933 +               if (!e) {
84934 +                       err = -EINVAL;
84935 +                       put_indx_node(node);
84936 +                       goto out;
84937 +               }
84939 +               fnd_push(fnd, node, e);
84940 +       }
84942 +out:
84943 +       return err;
84944 +}
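/*
 * A sketch of a typical lookup through indx_find(), mirroring how
 * indx_insert_entry() below uses it. example_key_exists is a
 * hypothetical caller; the key format is whatever indx->cmp expects
 * for this index type.
 */
static bool example_key_exists(struct ntfs_index *indx, struct ntfs_inode *ni,
                               const void *key, size_t key_len,
                               const void *ctx)
{
        struct ntfs_fnd *fnd;
        struct NTFS_DE *e;
        int diff, err;
        bool found = false;

        fnd = fnd_get();
        if (!fnd)
                return false;

        /* A NULL root makes indx_find() load it via indx_get_root(). */
        err = indx_find(indx, ni, NULL, key, key_len, ctx, &diff, &e, fnd);
        if (!err && !diff)
                found = true; /* diff == 0 means 'e' is an exact match */

        fnd_put(fnd); /* releases any index nodes pinned during the descent */
        return found;
}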
84946 +int indx_find_sort(struct ntfs_index *indx, struct ntfs_inode *ni,
84947 +                  const struct INDEX_ROOT *root, struct NTFS_DE **entry,
84948 +                  struct ntfs_fnd *fnd)
84949 +{
84950 +       int err;
84951 +       struct indx_node *n = NULL;
84952 +       struct NTFS_DE *e;
84953 +       size_t iter = 0;
84954 +       int level = fnd->level;
84956 +       if (!*entry) {
84957 +               /* Start find */
84958 +               e = hdr_first_de(&root->ihdr);
84959 +               if (!e)
84960 +                       return 0;
84961 +               fnd_clear(fnd);
84962 +               fnd->root_de = e;
84963 +       } else if (!level) {
84964 +               if (de_is_last(fnd->root_de)) {
84965 +                       *entry = NULL;
84966 +                       return 0;
84967 +               }
84969 +               e = hdr_next_de(&root->ihdr, fnd->root_de);
84970 +               if (!e)
84971 +                       return -EINVAL;
84972 +               fnd->root_de = e;
84973 +       } else {
84974 +               n = fnd->nodes[level - 1];
84975 +               e = fnd->de[level - 1];
84977 +               if (de_is_last(e))
84978 +                       goto pop_level;
84980 +               e = hdr_next_de(&n->index->ihdr, e);
84981 +               if (!e)
84982 +                       return -EINVAL;
84984 +               fnd->de[level - 1] = e;
84985 +       }
84987 +       /* Bound the iterations to avoid looping on a corrupted tree. */
84988 +next_iter:
84989 +       if (iter++ >= 1000)
84990 +               return -EINVAL;
84992 +       while (de_has_vcn_ex(e)) {
84993 +               if (le16_to_cpu(e->size) <
84994 +                   sizeof(struct NTFS_DE) + sizeof(u64)) {
84995 +                       if (n) {
84996 +                               fnd_pop(fnd);
84997 +                               ntfs_free(n);
84998 +                       }
84999 +                       return -EINVAL;
85000 +               }
85002 +               /* Read next level */
85003 +               err = indx_read(indx, ni, de_get_vbn(e), &n);
85004 +               if (err)
85005 +                       return err;
85007 +               /* Try next level */
85008 +               e = hdr_first_de(&n->index->ihdr);
85009 +               if (!e) {
85010 +                       ntfs_free(n);
85011 +                       return -EINVAL;
85012 +               }
85014 +               fnd_push(fnd, n, e);
85015 +       }
85017 +       if (le16_to_cpu(e->size) > sizeof(struct NTFS_DE)) {
85018 +               *entry = e;
85019 +               return 0;
85020 +       }
85022 +pop_level:
85023 +       for (;;) {
85024 +               if (!de_is_last(e))
85025 +                       goto next_iter;
85027 +               /* Pop one level */
85028 +               if (n) {
85029 +                       fnd_pop(fnd);
85030 +                       ntfs_free(n);
85031 +               }
85033 +               level = fnd->level;
85035 +               if (level) {
85036 +                       n = fnd->nodes[level - 1];
85037 +                       e = fnd->de[level - 1];
85038 +               } else if (fnd->root_de) {
85039 +                       n = NULL;
85040 +                       e = fnd->root_de;
85041 +                       fnd->root_de = NULL;
85042 +               } else {
85043 +                       *entry = NULL;
85044 +                       return 0;
85045 +               }
85047 +               if (le16_to_cpu(e->size) > sizeof(struct NTFS_DE)) {
85048 +                       *entry = e;
85049 +                       if (!fnd->root_de)
85050 +                               fnd->root_de = e;
85051 +                       return 0;
85052 +               }
85053 +       }
85054 +}
85056 +int indx_find_raw(struct ntfs_index *indx, struct ntfs_inode *ni,
85057 +                 const struct INDEX_ROOT *root, struct NTFS_DE **entry,
85058 +                 size_t *off, struct ntfs_fnd *fnd)
85059 +{
85060 +       int err;
85061 +       struct indx_node *n = NULL;
85062 +       struct NTFS_DE *e = NULL;
85063 +       struct NTFS_DE *e2;
85064 +       size_t bit;
85065 +       CLST next_used_vbn;
85066 +       CLST next_vbn;
85067 +       u32 record_size = ni->mi.sbi->record_size;
85069 +       /* Use the non-sorted algorithm. */
85070 +       if (!*entry) {
85071 +               /* This is the first call */
85072 +               e = hdr_first_de(&root->ihdr);
85073 +               if (!e)
85074 +                       return 0;
85075 +               fnd_clear(fnd);
85076 +               fnd->root_de = e;
85078 +               /* The first call: set up the initial element. */
85079 +               if (*off >= record_size) {
85080 +                       next_vbn = (((*off - record_size) >> indx->index_bits))
85081 +                                  << indx->idx2vbn_bits;
85082 +                       /* Jump into the 'for' loop below. */
85083 +                       goto next;
85084 +               }
85086 +               /* Start enumeration from root */
85087 +               *off = 0;
85088 +       } else if (!fnd->root_de)
85089 +               return -EINVAL;
85091 +       for (;;) {
85092 +               /* Check if current entry can be used */
85093 +               if (e && le16_to_cpu(e->size) > sizeof(struct NTFS_DE))
85094 +                       goto ok;
85096 +               if (!fnd->level) {
85097 +                       /* Continue to enumerate root */
85098 +                       if (!de_is_last(fnd->root_de)) {
85099 +                               e = hdr_next_de(&root->ihdr, fnd->root_de);
85100 +                               if (!e)
85101 +                                       return -EINVAL;
85102 +                               fnd->root_de = e;
85103 +                               continue;
85104 +                       }
85106 +                       /* Start to enumerate indexes from 0 */
85107 +                       next_vbn = 0;
85108 +               } else {
85109 +                       /* Continue to enumerate indexes */
85110 +                       e2 = fnd->de[fnd->level - 1];
85112 +                       n = fnd->nodes[fnd->level - 1];
85114 +                       if (!de_is_last(e2)) {
85115 +                               e = hdr_next_de(&n->index->ihdr, e2);
85116 +                               if (!e)
85117 +                                       return -EINVAL;
85118 +                               fnd->de[fnd->level - 1] = e;
85119 +                               continue;
85120 +                       }
85122 +                       /* Continue with next index */
85123 +                       next_vbn = le64_to_cpu(n->index->vbn) +
85124 +                                  root->index_block_clst;
85125 +               }
85127 +next:
85128 +               /* Release current index */
85129 +               if (n) {
85130 +                       fnd_pop(fnd);
85131 +                       put_indx_node(n);
85132 +                       n = NULL;
85133 +               }
85135 +               /* Skip all free indexes */
85136 +               bit = next_vbn >> indx->idx2vbn_bits;
85137 +               err = indx_used_bit(indx, ni, &bit);
85138 +               if (err == -ENOENT || bit == MINUS_ONE_T) {
85139 +                       /* No used indexes */
85140 +                       *entry = NULL;
85141 +                       return 0;
85142 +               }
85144 +               next_used_vbn = bit << indx->idx2vbn_bits;
85146 +               /* Read buffer into memory */
85147 +               err = indx_read(indx, ni, next_used_vbn, &n);
85148 +               if (err)
85149 +                       return err;
85151 +               e = hdr_first_de(&n->index->ihdr);
85152 +               fnd_push(fnd, n, e);
85153 +               if (!e)
85154 +                       return -EINVAL;
85155 +       }
85157 +ok:
85158 +       /* Return the offset needed to restore the enumerator if necessary. */
85159 +       if (!n) {
85160 +               /* 'e' points into the root. */
85161 +               *off = PtrOffset(&root->ihdr, e);
85162 +       } else {
85163 +               /* 'e' points into an index buffer. */
85164 +               *off = (le64_to_cpu(n->index->vbn) << indx->vbn2vbo_bits) +
85165 +                      record_size + PtrOffset(&n->index->ihdr, e);
85166 +       }
85168 +       *entry = e;
85169 +       return 0;
85170 +}
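/*
 * The *off cookie returned above encodes where enumeration stopped:
 * values below record_size point into the index root, anything else is
 * (vbn << vbn2vbo_bits) + record_size + offset-within-buffer. A sketch
 * of the reverse mapping applied on re-entry (the "*off >= record_size"
 * branch above); example_off_to_vbn is a hypothetical helper.
 */
static CLST example_off_to_vbn(const struct ntfs_index *indx, size_t off,
                               u32 record_size)
{
        /* Caller guarantees off >= record_size here. */
        return ((off - record_size) >> indx->index_bits)
               << indx->idx2vbn_bits;
}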
85171 +
85172 +/*
85173 + * indx_create_allocate
85174 + *
85175 + * Creates the "Allocation + Bitmap" attributes for the index.
85176 + */
85177 +static int indx_create_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
85178 +                               CLST *vbn)
85179 +{
85180 +       int err = -ENOMEM;
85181 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
85182 +       struct ATTRIB *bitmap;
85183 +       struct ATTRIB *alloc;
85184 +       u32 data_size = 1u << indx->index_bits;
85185 +       u32 alloc_size = ntfs_up_cluster(sbi, data_size);
85186 +       CLST len = alloc_size >> sbi->cluster_bits;
85187 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
85188 +       CLST alen;
85189 +       struct runs_tree run;
85191 +       run_init(&run);
85193 +       err = attr_allocate_clusters(sbi, &run, 0, 0, len, NULL, 0, &alen, 0,
85194 +                                    NULL);
85195 +       if (err)
85196 +               goto out;
85198 +       err = ni_insert_nonresident(ni, ATTR_ALLOC, in->name, in->name_len,
85199 +                                   &run, 0, len, 0, &alloc, NULL);
85200 +       if (err)
85201 +               goto out1;
85203 +       alloc->nres.valid_size = alloc->nres.data_size = cpu_to_le64(data_size);
85205 +       err = ni_insert_resident(ni, bitmap_size(1), ATTR_BITMAP, in->name,
85206 +                                in->name_len, &bitmap, NULL);
85207 +       if (err)
85208 +               goto out2;
85210 +       if (in->name == I30_NAME) {
85211 +               ni->vfs_inode.i_size = data_size;
85212 +               inode_set_bytes(&ni->vfs_inode, alloc_size);
85213 +       }
85215 +       memcpy(&indx->alloc_run, &run, sizeof(run));
85217 +       *vbn = 0;
85219 +       return 0;
85221 +out2:
85222 +       mi_remove_attr(&ni->mi, alloc);
85224 +out1:
85225 +       run_deallocate(sbi, &run, false);
85227 +out:
85228 +       return err;
85229 +}
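/*
 * The size math above, worked for a common geometry (an assumption,
 * not taken from this patch): with 4K clusters and index_bits == 12,
 * data_size = 1 << 12 = 4096 bytes, alloc_size rounds up to one whole
 * cluster, and len = alloc_size >> cluster_bits = 1 cluster backs the
 * first index block. A hypothetical helper restating the shifts:
 */
static u32 example_index_bytes(const struct ntfs_index *indx,
                               struct ntfs_sb_info *sbi, CLST *len)
{
        u32 data_size = 1u << indx->index_bits;
        u32 alloc_size = ntfs_up_cluster(sbi, data_size); /* cluster-align */

        *len = alloc_size >> sbi->cluster_bits;
        return data_size;
}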
85230 +
85231 +/*
85232 + * indx_add_allocate
85233 + *
85234 + * Adds clusters to the index.
85235 + */
85236 +static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
85237 +                            CLST *vbn)
85238 +{
85239 +       int err;
85240 +       size_t bit;
85241 +       u64 data_size;
85242 +       u64 bmp_size, bmp_size_v;
85243 +       struct ATTRIB *bmp, *alloc;
85244 +       struct mft_inode *mi;
85245 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
85247 +       err = indx_find_free(indx, ni, &bit, &bmp);
85248 +       if (err)
85249 +               goto out1;
85251 +       if (bit != MINUS_ONE_T) {
85252 +               bmp = NULL;
85253 +       } else {
85254 +               if (bmp->non_res) {
85255 +                       bmp_size = le64_to_cpu(bmp->nres.data_size);
85256 +                       bmp_size_v = le64_to_cpu(bmp->nres.valid_size);
85257 +               } else {
85258 +                       bmp_size = bmp_size_v = le32_to_cpu(bmp->res.data_size);
85259 +               }
85261 +               bit = bmp_size << 3;
85262 +       }
85264 +       data_size = (u64)(bit + 1) << indx->index_bits;
85266 +       if (bmp) {
85267 +               /* Increase bitmap */
85268 +               err = attr_set_size(ni, ATTR_BITMAP, in->name, in->name_len,
85269 +                                   &indx->bitmap_run, bitmap_size(bit + 1),
85270 +                                   NULL, true, NULL);
85271 +               if (err)
85272 +                       goto out1;
85273 +       }
85275 +       alloc = ni_find_attr(ni, NULL, NULL, ATTR_ALLOC, in->name, in->name_len,
85276 +                            NULL, &mi);
85277 +       if (!alloc) {
85278 +               if (bmp)
85279 +                       goto out2;
85280 +               goto out1;
85281 +       }
85283 +       /* Increase allocation */
85284 +       err = attr_set_size(ni, ATTR_ALLOC, in->name, in->name_len,
85285 +                           &indx->alloc_run, data_size, &data_size, true,
85286 +                           NULL);
85287 +       if (err) {
85288 +               if (bmp)
85289 +                       goto out2;
85290 +               goto out1;
85291 +       }
85293 +       *vbn = bit << indx->idx2vbn_bits;
85295 +       return 0;
85297 +out2:
85298 +       /* Oops (no space?) */
85299 +       attr_set_size(ni, ATTR_BITMAP, in->name, in->name_len,
85300 +                     &indx->bitmap_run, bmp_size, &bmp_size_v, false, NULL);
85302 +out1:
85303 +       return err;
85304 +}
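/*
 * The bitmap bookkeeping indx_add_allocate() relies on: each bit in
 * $BITMAP covers one index block, so free bit 'bit' maps to virtual
 * block bit << idx2vbn_bits, and the bitmap must first be grown to at
 * least bitmap_size(bit + 1) bytes. A hypothetical one-line restatement:
 */
static CLST example_bit_to_vbn(const struct ntfs_index *indx, size_t bit)
{
        return (CLST)bit << indx->idx2vbn_bits; /* cf. *vbn above */
}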
85305 +
85306 +/*
85307 + * indx_insert_into_root
85308 + *
85309 + * Attempts to insert an entry into the index root.
85310 + * If necessary, it will twiddle the index b-tree.
85311 + */
85312 +static int indx_insert_into_root(struct ntfs_index *indx, struct ntfs_inode *ni,
85313 +                                const struct NTFS_DE *new_de,
85314 +                                struct NTFS_DE *root_de, const void *ctx,
85315 +                                struct ntfs_fnd *fnd)
85316 +{
85317 +       int err = 0;
85318 +       struct NTFS_DE *e, *e0, *re;
85319 +       struct mft_inode *mi;
85320 +       struct ATTRIB *attr;
85321 +       struct MFT_REC *rec;
85322 +       struct INDEX_HDR *hdr;
85323 +       struct indx_node *n;
85324 +       CLST new_vbn;
85325 +       __le64 *sub_vbn, t_vbn;
85326 +       u16 new_de_size;
85327 +       u32 hdr_used, hdr_total, asize, used, to_move;
85328 +       u32 root_size, new_root_size;
85329 +       struct ntfs_sb_info *sbi;
85330 +       int ds_root;
85331 +       struct INDEX_ROOT *root, *a_root = NULL;
85333 +       /* Get the record this root is placed in. */
85334 +       root = indx_get_root(indx, ni, &attr, &mi);
85335 +       if (!root)
85336 +               goto out;
85338 +       /*
85339 +        * Try easy case:
85340 +        * hdr_insert_de will succeed if there's room in the root for the new entry.
85341 +        */
85342 +       hdr = &root->ihdr;
85343 +       sbi = ni->mi.sbi;
85344 +       rec = mi->mrec;
85345 +       used = le32_to_cpu(rec->used);
85346 +       new_de_size = le16_to_cpu(new_de->size);
85347 +       hdr_used = le32_to_cpu(hdr->used);
85348 +       hdr_total = le32_to_cpu(hdr->total);
85349 +       asize = le32_to_cpu(attr->size);
85350 +       root_size = le32_to_cpu(attr->res.data_size);
85352 +       ds_root = new_de_size + hdr_used - hdr_total;
85354 +       if (used + ds_root < sbi->max_bytes_per_attr) {
85355 +               /* Make room for the new elements. */
85356 +               mi_resize_attr(mi, attr, ds_root);
85357 +               hdr->total = cpu_to_le32(hdr_total + ds_root);
85358 +               e = hdr_insert_de(indx, hdr, new_de, root_de, ctx);
85359 +               WARN_ON(!e);
85360 +               fnd_clear(fnd);
85361 +               fnd->root_de = e;
85363 +               return 0;
85364 +       }
85366 +       /* Make a copy of the root attribute to restore on error. */
85367 +       a_root = ntfs_memdup(attr, asize);
85368 +       if (!a_root) {
85369 +               err = -ENOMEM;
85370 +               goto out;
85371 +       }
85373 +       /* Copy all the non-end entries from the index root to the new buffer. */
85374 +       to_move = 0;
85375 +       e0 = hdr_first_de(hdr);
85377 +       /* Calculate the size to copy */
85378 +       for (e = e0;; e = hdr_next_de(hdr, e)) {
85379 +               if (!e) {
85380 +                       err = -EINVAL;
85381 +                       goto out;
85382 +               }
85384 +               if (de_is_last(e))
85385 +                       break;
85386 +               to_move += le16_to_cpu(e->size);
85387 +       }
85389 +       n = NULL;
85390 +       if (!to_move) {
85391 +               re = NULL;
85392 +       } else {
85393 +               re = ntfs_memdup(e0, to_move);
85394 +               if (!re) {
85395 +                       err = -ENOMEM;
85396 +                       goto out;
85397 +               }
85398 +       }
85400 +       sub_vbn = NULL;
85401 +       if (de_has_vcn(e)) {
85402 +               t_vbn = de_get_vbn_le(e);
85403 +               sub_vbn = &t_vbn;
85404 +       }
85406 +       new_root_size = sizeof(struct INDEX_ROOT) + sizeof(struct NTFS_DE) +
85407 +                       sizeof(u64);
85408 +       ds_root = new_root_size - root_size;
85410 +       if (ds_root > 0 && used + ds_root > sbi->max_bytes_per_attr) {
85411 +               /* make root external */
85412 +               err = -EOPNOTSUPP;
85413 +               goto out;
85414 +       }
85416 +       if (ds_root)
85417 +               mi_resize_attr(mi, attr, ds_root);
85419 +       /* Fill first entry (vcn will be set later) */
85420 +       e = (struct NTFS_DE *)(root + 1);
85421 +       memset(e, 0, sizeof(struct NTFS_DE));
85422 +       e->size = cpu_to_le16(sizeof(struct NTFS_DE) + sizeof(u64));
85423 +       e->flags = NTFS_IE_HAS_SUBNODES | NTFS_IE_LAST;
85425 +       hdr->flags = 1;
85426 +       hdr->used = hdr->total =
85427 +               cpu_to_le32(new_root_size - offsetof(struct INDEX_ROOT, ihdr));
85429 +       fnd->root_de = hdr_first_de(hdr);
85430 +       mi->dirty = true;
85432 +       /* Create alloc and bitmap attributes (if they do not exist). */
85433 +       err = run_is_empty(&indx->alloc_run)
85434 +                     ? indx_create_allocate(indx, ni, &new_vbn)
85435 +                     : indx_add_allocate(indx, ni, &new_vbn);
85437 +       /* The record layout may have changed, so rescan the root. */
85438 +       root = indx_get_root(indx, ni, &attr, &mi);
85439 +       if (!root) {
85440 +               /* bug? */
85441 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
85442 +               err = -EINVAL;
85443 +               goto out1;
85444 +       }
85446 +       if (err) {
85447 +               /* restore root */
85448 +               if (mi_resize_attr(mi, attr, -ds_root))
85449 +                       memcpy(attr, a_root, asize);
85450 +               else {
85451 +                       /* bug? */
85452 +                       ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
85453 +               }
85454 +               goto out1;
85455 +       }
85457 +       e = (struct NTFS_DE *)(root + 1);
85458 +       *(__le64 *)(e + 1) = cpu_to_le64(new_vbn);
85459 +       mi->dirty = true;
85461 +       /* Now we can create/format the new buffer and copy the entries into it. */
85462 +       n = indx_new(indx, ni, new_vbn, sub_vbn);
85463 +       if (IS_ERR(n)) {
85464 +               err = PTR_ERR(n);
85465 +               goto out1;
85466 +       }
85468 +       hdr = &n->index->ihdr;
85469 +       hdr_used = le32_to_cpu(hdr->used);
85470 +       hdr_total = le32_to_cpu(hdr->total);
85472 +       /* Copy root entries into new buffer */
85473 +       hdr_insert_head(hdr, re, to_move);
85475 +       /* Update bitmap attribute */
85476 +       indx_mark_used(indx, ni, new_vbn >> indx->idx2vbn_bits);
85478 +       /* Check if we can insert the new entry into the new index buffer. */
85479 +       if (hdr_used + new_de_size > hdr_total) {
85480 +               /*
85481 +                * This occurs if the mft record is the same size as or bigger
85482 +                * than the index buffer: the whole root moved to the new index
85483 +                * and there is no room for the new entry. In the classic case,
85484 +                * 1K mft record and 4K index buffer, this should not occur.
85485 +                */
85486 +               ntfs_free(re);
85487 +               indx_write(indx, ni, n, 0);
85489 +               put_indx_node(n);
85490 +               fnd_clear(fnd);
85491 +               err = indx_insert_entry(indx, ni, new_de, ctx, fnd);
85492 +               goto out;
85493 +       }
85495 +       /*
85496 +        * Now the root is a parent for the new index buffer.
85497 +        * Insert NewEntry into the new buffer.
85498 +        */
85499 +       e = hdr_insert_de(indx, hdr, new_de, NULL, ctx);
85500 +       if (!e) {
85501 +               err = -EINVAL;
85502 +               goto out1;
85503 +       }
85504 +       fnd_push(fnd, n, e);
85506 +       /* Just write the updated index to disk. */
85507 +       indx_write(indx, ni, n, 0);
85509 +       n = NULL;
85511 +out1:
85512 +       ntfs_free(re);
85513 +       if (n)
85514 +               put_indx_node(n);
85516 +out:
85517 +       ntfs_free(a_root);
85518 +       return err;
85519 +}
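/*
 * The "easy case" test above reduced to plain integers: the root grows
 * by ds_root = new_de_size + hdr_used - hdr_total, and the insert can
 * stay resident only while the enclosing MFT record stays below
 * max_bytes_per_attr. A hypothetical restatement of that check:
 */
static bool example_root_has_room(u32 rec_used, u32 hdr_used, u32 hdr_total,
                                  u16 new_de_size, u32 max_bytes_per_attr)
{
        int ds_root = (int)new_de_size + (int)hdr_used - (int)hdr_total;

        /* Mirrors: used + ds_root < sbi->max_bytes_per_attr */
        return rec_used + ds_root < max_bytes_per_attr;
}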
85520 +
85521 +/*
85522 + * indx_insert_into_buffer
85523 + *
85524 + * Attempts to insert an entry into an Index Allocation Buffer.
85525 + * If necessary, it will split the buffer.
85526 + */
85527 +static int
85528 +indx_insert_into_buffer(struct ntfs_index *indx, struct ntfs_inode *ni,
85529 +                       struct INDEX_ROOT *root, const struct NTFS_DE *new_de,
85530 +                       const void *ctx, int level, struct ntfs_fnd *fnd)
85531 +{
85532 +       int err;
85533 +       const struct NTFS_DE *sp;
85534 +       struct NTFS_DE *e, *de_t, *up_e = NULL;
85535 +       struct indx_node *n2 = NULL;
85536 +       struct indx_node *n1 = fnd->nodes[level];
85537 +       struct INDEX_HDR *hdr1 = &n1->index->ihdr;
85538 +       struct INDEX_HDR *hdr2;
85539 +       u32 to_copy, used;
85540 +       CLST new_vbn;
85541 +       __le64 t_vbn, *sub_vbn;
85542 +       u16 sp_size;
85544 +       /* Try the easiest case. */
85545 +       e = fnd->level - 1 == level ? fnd->de[level] : NULL;
85546 +       e = hdr_insert_de(indx, hdr1, new_de, e, ctx);
85547 +       fnd->de[level] = e;
85548 +       if (e) {
85549 +               /* Just write updated index into disk */
85550 +               indx_write(indx, ni, n1, 0);
85551 +               return 0;
85552 +       }
85554 +       /*
85555 +        * No space to insert into the buffer. Split it.
85556 +        * To split we:
85557 +        *  - Save the split point ('cause index buffers will be changed)
85558 +        *  - Allocate NewBuffer and copy all entries <= sp into the new buffer
85559 +        *  - Remove all entries (including sp) from TargetBuffer
85560 +        *  - Insert NewEntry into the left or right buffer (depending on
85561 +        *    sp <=> NewEntry)
85562 +        *  - Insert sp into the parent buffer (or root)
85563 +        *  - Make sp a parent for the new buffer
85564 +        */
85565 +       sp = hdr_find_split(hdr1);
85566 +       if (!sp)
85567 +               return -EINVAL;
85569 +       sp_size = le16_to_cpu(sp->size);
85570 +       up_e = ntfs_malloc(sp_size + sizeof(u64));
85571 +       if (!up_e)
85572 +               return -ENOMEM;
85573 +       memcpy(up_e, sp, sp_size);
85575 +       if (!hdr1->flags) {
85576 +               up_e->flags |= NTFS_IE_HAS_SUBNODES;
85577 +               up_e->size = cpu_to_le16(sp_size + sizeof(u64));
85578 +               sub_vbn = NULL;
85579 +       } else {
85580 +               t_vbn = de_get_vbn_le(up_e);
85581 +               sub_vbn = &t_vbn;
85582 +       }
85584 +       /* Allocate on disk a new index allocation buffer. */
85585 +       err = indx_add_allocate(indx, ni, &new_vbn);
85586 +       if (err)
85587 +               goto out;
85589 +       /* Allocate and format a new index buffer in memory. */
85590 +       n2 = indx_new(indx, ni, new_vbn, sub_vbn);
85591 +       if (IS_ERR(n2)) {
85592 +               err = PTR_ERR(n2);
85593 +               goto out;
85594 +       }
85596 +       hdr2 = &n2->index->ihdr;
85598 +       /* Make sp a parent for new buffer */
85599 +       de_set_vbn(up_e, new_vbn);
85601 +       /* copy all the entries <= sp into the new buffer. */
85602 +       de_t = hdr_first_de(hdr1);
85603 +       to_copy = PtrOffset(de_t, sp);
85604 +       hdr_insert_head(hdr2, de_t, to_copy);
85606 +       /* Remove all entries (including sp) from hdr1. */
85607 +       used = le32_to_cpu(hdr1->used) - to_copy - sp_size;
85608 +       memmove(de_t, Add2Ptr(sp, sp_size), used - le32_to_cpu(hdr1->de_off));
85609 +       hdr1->used = cpu_to_le32(used);
85611 +       /* Insert new entry into left or right buffer (depending on sp <=> new_de) */
85612 +       hdr_insert_de(indx,
85613 +                     (*indx->cmp)(new_de + 1, le16_to_cpu(new_de->key_size),
85614 +                                  up_e + 1, le16_to_cpu(up_e->key_size),
85615 +                                  ctx) < 0
85616 +                             ? hdr2
85617 +                             : hdr1,
85618 +                     new_de, NULL, ctx);
85620 +       indx_mark_used(indx, ni, new_vbn >> indx->idx2vbn_bits);
85622 +       indx_write(indx, ni, n1, 0);
85623 +       indx_write(indx, ni, n2, 0);
85625 +       put_indx_node(n2);
85627 +       /*
85628 +        * We've finished splitting everybody, so we are ready to
85629 +        * insert the promoted entry into the parent.
85630 +        */
85631 +       if (!level) {
85632 +               /* Insert in root */
85633 +               err = indx_insert_into_root(indx, ni, up_e, NULL, ctx, fnd);
85634 +               if (err)
85635 +                       goto out;
85636 +       } else {
85637 +               /*
85638 +                * The target buffer's parent is another index buffer
85639 +                * TODO: Remove recursion
85640 +                */
85641 +               err = indx_insert_into_buffer(indx, ni, root, up_e, ctx,
85642 +                                             level - 1, fnd);
85643 +               if (err)
85644 +                       goto out;
85645 +       }
85647 +out:
85648 +       ntfs_free(up_e);
85650 +       return err;
85651 +}
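/*
 * The split above in miniature: a full buffer [a b c d END] with split
 * point sp == c becomes a new buffer [a b END] plus the old buffer
 * [d END], while c is promoted into the parent with its down-pointer
 * aimed at the new buffer. A self-contained toy over an int array,
 * only to make the promoted-median shape visible (hypothetical code):
 */
static int example_split_point(const int *keys, int nkeys, int *left_cnt)
{
        int mid = nkeys / 2;    /* cf. hdr_find_split() */

        *left_cnt = mid;        /* keys[0..mid-1] go to the new buffer */
        return keys[mid];       /* keys[mid] is promoted into the parent */
}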
85652 +
85653 +/*
85654 + * indx_insert_entry
85655 + *
85656 + * Inserts a new entry into the index.
85657 + */
85658 +int indx_insert_entry(struct ntfs_index *indx, struct ntfs_inode *ni,
85659 +                     const struct NTFS_DE *new_de, const void *ctx,
85660 +                     struct ntfs_fnd *fnd)
85661 +{
85662 +       int err;
85663 +       int diff;
85664 +       struct NTFS_DE *e;
85665 +       struct ntfs_fnd *fnd_a = NULL;
85666 +       struct INDEX_ROOT *root;
85668 +       if (!fnd) {
85669 +               fnd_a = fnd_get();
85670 +               if (!fnd_a) {
85671 +                       err = -ENOMEM;
85672 +                       goto out1;
85673 +               }
85674 +               fnd = fnd_a;
85675 +       }
85677 +       root = indx_get_root(indx, ni, NULL, NULL);
85678 +       if (!root) {
85679 +               err = -EINVAL;
85680 +               goto out;
85681 +       }
85683 +       if (fnd_is_empty(fnd)) {
85684 +               /* Find the spot in the tree where we want to insert the new entry. */
85685 +               err = indx_find(indx, ni, root, new_de + 1,
85686 +                               le16_to_cpu(new_de->key_size), ctx, &diff, &e,
85687 +                               fnd);
85688 +               if (err)
85689 +                       goto out;
85691 +               if (!diff) {
85692 +                       err = -EEXIST;
85693 +                       goto out;
85694 +               }
85695 +       }
85697 +       if (!fnd->level) {
85698 +               /* The root is also a leaf, so we'll insert the new entry into it. */
85699 +               err = indx_insert_into_root(indx, ni, new_de, fnd->root_de, ctx,
85700 +                                           fnd);
85701 +               if (err)
85702 +                       goto out;
85703 +       } else {
85704 +               /* Found a leaf buffer, so we'll insert the new entry into it. */
85705 +               err = indx_insert_into_buffer(indx, ni, root, new_de, ctx,
85706 +                                             fnd->level - 1, fnd);
85707 +               if (err)
85708 +                       goto out;
85709 +       }
85711 +out:
85712 +       fnd_put(fnd_a);
85713 +out1:
85714 +       return err;
85715 +}
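/*
 * A sketch of the insert entry point in use; building a fully formed
 * struct NTFS_DE (key, key_size, size) is outside this fragment, and
 * example_insert is a hypothetical caller. With fnd == NULL the
 * function allocates and frees its own finder, as shown above.
 */
static int example_insert(struct ntfs_index *indx, struct ntfs_inode *ni,
                          const struct NTFS_DE *de, const void *ctx)
{
        int err = indx_insert_entry(indx, ni, de, ctx, NULL);

        if (err == -EEXIST) {
                /* An entry with an equal key is already present. */
        }
        return err;
}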
85716 +
85717 +/*
85718 + * indx_find_buffer
85719 + *
85720 + * Locates a buffer in the tree.
85721 + */
85722 +static struct indx_node *indx_find_buffer(struct ntfs_index *indx,
85723 +                                         struct ntfs_inode *ni,
85724 +                                         const struct INDEX_ROOT *root,
85725 +                                         __le64 vbn, struct indx_node *n)
85726 +{
85727 +       int err;
85728 +       const struct NTFS_DE *e;
85729 +       struct indx_node *r;
85730 +       const struct INDEX_HDR *hdr = n ? &n->index->ihdr : &root->ihdr;
85732 +       /* Step 1: Scan one level */
85733 +       for (e = hdr_first_de(hdr);; e = hdr_next_de(hdr, e)) {
85734 +               if (!e)
85735 +                       return ERR_PTR(-EINVAL);
85737 +               if (de_has_vcn(e) && vbn == de_get_vbn_le(e))
85738 +                       return n;
85740 +               if (de_is_last(e))
85741 +                       break;
85742 +       }
85744 +       /* Step 2: Do recursion */
85745 +       e = Add2Ptr(hdr, le32_to_cpu(hdr->de_off));
85746 +       for (;;) {
85747 +               if (de_has_vcn_ex(e)) {
85748 +                       err = indx_read(indx, ni, de_get_vbn(e), &n);
85749 +                       if (err)
85750 +                               return ERR_PTR(err);
85752 +                       r = indx_find_buffer(indx, ni, root, vbn, n);
85753 +                       if (r)
85754 +                               return r;
85755 +               }
85757 +               if (de_is_last(e))
85758 +                       break;
85760 +               e = Add2Ptr(e, le16_to_cpu(e->size));
85761 +       }
85763 +       return NULL;
85764 +}
85765 +
85766 +/*
85767 + * indx_shrink
85768 + *
85769 + * Deallocates unused tail indexes.
85770 + */
85771 +static int indx_shrink(struct ntfs_index *indx, struct ntfs_inode *ni,
85772 +                      size_t bit)
85773 +{
85774 +       int err = 0;
85775 +       u64 bpb, new_data;
85776 +       size_t nbits;
85777 +       struct ATTRIB *b;
85778 +       struct ATTR_LIST_ENTRY *le = NULL;
85779 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
85781 +       b = ni_find_attr(ni, NULL, &le, ATTR_BITMAP, in->name, in->name_len,
85782 +                        NULL, NULL);
85784 +       if (!b)
85785 +               return -ENOENT;
85787 +       if (!b->non_res) {
85788 +               unsigned long pos;
85789 +               const unsigned long *bm = resident_data(b);
85791 +               nbits = le32_to_cpu(b->res.data_size) * 8;
85793 +               if (bit >= nbits)
85794 +                       return 0;
85796 +               pos = find_next_bit(bm, nbits, bit);
85797 +               if (pos < nbits)
85798 +                       return 0;
85799 +       } else {
85800 +               size_t used = MINUS_ONE_T;
85802 +               nbits = le64_to_cpu(b->nres.data_size) * 8;
85804 +               if (bit >= nbits)
85805 +                       return 0;
85807 +               err = scan_nres_bitmap(ni, b, indx, bit, &scan_for_used, &used);
85808 +               if (err)
85809 +                       return err;
85811 +               if (used != MINUS_ONE_T)
85812 +                       return 0;
85813 +       }
85815 +       new_data = (u64)bit << indx->index_bits;
85817 +       err = attr_set_size(ni, ATTR_ALLOC, in->name, in->name_len,
85818 +                           &indx->alloc_run, new_data, &new_data, false, NULL);
85819 +       if (err)
85820 +               return err;
85822 +       bpb = bitmap_size(bit);
85823 +       if (bpb * 8 == nbits)
85824 +               return 0;
85826 +       err = attr_set_size(ni, ATTR_BITMAP, in->name, in->name_len,
85827 +                           &indx->bitmap_run, bpb, &bpb, false, NULL);
85829 +       return err;
85830 +}
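/*
 * The shrink arithmetic in one place: keeping 'bit' index blocks means
 * new_data = bit << index_bits bytes of $ALLOC and bitmap_size(bit)
 * bytes of $BITMAP; when the latter already matches (bpb * 8 == nbits
 * above), the second resize is skipped. A hypothetical restatement:
 */
static u64 example_shrunk_alloc_bytes(const struct ntfs_index *indx,
                                      size_t bit)
{
        return (u64)bit << indx->index_bits; /* bytes kept in $ALLOC */
}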
85832 +static int indx_free_children(struct ntfs_index *indx, struct ntfs_inode *ni,
85833 +                             const struct NTFS_DE *e, bool trim)
85834 +{
85835 +       int err;
85836 +       struct indx_node *n;
85837 +       struct INDEX_HDR *hdr;
85838 +       CLST vbn = de_get_vbn(e);
85839 +       size_t i;
85841 +       err = indx_read(indx, ni, vbn, &n);
85842 +       if (err)
85843 +               return err;
85845 +       hdr = &n->index->ihdr;
85846 +       /* First, recurse into the children, if any. */
85847 +       if (hdr_has_subnode(hdr)) {
85848 +               for (e = hdr_first_de(hdr); e; e = hdr_next_de(hdr, e)) {
85849 +                       indx_free_children(indx, ni, e, false);
85850 +                       if (de_is_last(e))
85851 +                               break;
85852 +               }
85853 +       }
85855 +       put_indx_node(n);
85857 +       i = vbn >> indx->idx2vbn_bits;
85858 +       /* We've gotten rid of the children; add this buffer to the free list. */
85859 +       indx_mark_free(indx, ni, i);
85861 +       if (!trim)
85862 +               return 0;
85864 +       /*
85865 +        * If there are no used indexes after the current free index,
85866 +        * then we can truncate the allocation and the bitmap.
85867 +        * Use the bitmap to check for this case.
85868 +        */
85869 +       indx_shrink(indx, ni, i + 1);
85870 +       return 0;
85871 +}
85872 +
85873 +/*
85874 + * indx_get_entry_to_replace
85875 + *
85876 + * Finds a replacement entry for a deleted entry.
85877 + * It always returns a node entry:
85878 + * NTFS_IE_HAS_SUBNODES is set in the flags and the size includes the sub_vcn.
85879 + */
85880 +static int indx_get_entry_to_replace(struct ntfs_index *indx,
85881 +                                    struct ntfs_inode *ni,
85882 +                                    const struct NTFS_DE *de_next,
85883 +                                    struct NTFS_DE **de_to_replace,
85884 +                                    struct ntfs_fnd *fnd)
85885 +{
85886 +       int err;
85887 +       int level = -1;
85888 +       CLST vbn;
85889 +       struct NTFS_DE *e, *te, *re;
85890 +       struct indx_node *n;
85891 +       struct INDEX_BUFFER *ib;
85893 +       *de_to_replace = NULL;
85895 +       /* Find first leaf entry down from de_next */
85896 +       vbn = de_get_vbn(de_next);
85897 +       for (;;) {
85898 +               n = NULL;
85899 +               err = indx_read(indx, ni, vbn, &n);
85900 +               if (err)
85901 +                       goto out;
85903 +               e = hdr_first_de(&n->index->ihdr);
85904 +               fnd_push(fnd, n, e);
85906 +               if (!de_is_last(e)) {
85907 +                       /*
85908 +                        * This buffer is non-empty, so its first entry could be used as the
85909 +                        * replacement entry.
85910 +                        */
85911 +                       level = fnd->level - 1;
85912 +               }
85914 +               if (!de_has_vcn(e))
85915 +                       break;
85917 +               /* This buffer is a node. Continue going down. */
85918 +               vbn = de_get_vbn(e);
85919 +       }
85921 +       if (level == -1)
85922 +               goto out;
85924 +       n = fnd->nodes[level];
85925 +       te = hdr_first_de(&n->index->ihdr);
85926 +       /* Copy the candidate entry into the replacement entry buffer. */
85927 +       re = ntfs_malloc(le16_to_cpu(te->size) + sizeof(u64));
85928 +       if (!re) {
85929 +               err = -ENOMEM;
85930 +               goto out;
85931 +       }
85933 +       *de_to_replace = re;
85934 +       memcpy(re, te, le16_to_cpu(te->size));
85936 +       if (!de_has_vcn(re)) {
85937 +               /*
85938 +                * The replacement entry we found doesn't have a sub_vcn.
85939 +                * Increase its size to hold one.
85940 +                */
85941 +               le16_add_cpu(&re->size, sizeof(u64));
85942 +               re->flags |= NTFS_IE_HAS_SUBNODES;
85943 +       } else {
85944 +               /*
85945 +                * The replacement entry we found was a node entry, which means that all
85946 +                * its child buffers are empty. Return them to the free pool.
85947 +                */
85948 +               indx_free_children(indx, ni, te, true);
85949 +       }
85951 +       /*
85952 +        * Expunge the replacement entry from its former location,
85953 +        * and then write that buffer.
85954 +        */
85955 +       ib = n->index;
85956 +       e = hdr_delete_de(&ib->ihdr, te);
85958 +       fnd->de[level] = e;
85959 +       indx_write(indx, ni, n, 0);
85961 +       /* Check to see if this action created an empty leaf. */
85962 +       if (ib_is_leaf(ib) && ib_is_empty(ib))
85963 +               return 0;
85965 +out:
85966 +       fnd_clear(fnd);
85967 +       return err;
85968 +}
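/*
 * Why the replacement buffer above is te->size + sizeof(u64): the
 * candidate may be a leaf entry without a sub_vcn slot, yet it is
 * promoted into a node position that needs one. A hypothetical
 * restatement of that size rule:
 */
static u16 example_replacement_size(u16 entry_size, bool has_sub_vcn)
{
        /* A node entry carries a trailing u64 with the child's vbn. */
        return has_sub_vcn ? entry_size : entry_size + (u16)sizeof(u64);
}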
85969 +
85970 +/*
85971 + * indx_delete_entry
85972 + *
85973 + * Deletes an entry from the index.
85974 + */
85975 +int indx_delete_entry(struct ntfs_index *indx, struct ntfs_inode *ni,
85976 +                     const void *key, u32 key_len, const void *ctx)
85977 +{
85978 +       int err, diff;
85979 +       struct INDEX_ROOT *root;
85980 +       struct INDEX_HDR *hdr;
85981 +       struct ntfs_fnd *fnd, *fnd2;
85982 +       struct INDEX_BUFFER *ib;
85983 +       struct NTFS_DE *e, *re, *next, *prev, *me;
85984 +       struct indx_node *n, *n2d = NULL;
85985 +       __le64 sub_vbn;
85986 +       int level, level2;
85987 +       struct ATTRIB *attr;
85988 +       struct mft_inode *mi;
85989 +       u32 e_size, root_size, new_root_size;
85990 +       size_t trim_bit;
85991 +       const struct INDEX_NAMES *in;
85993 +       fnd = fnd_get();
85994 +       if (!fnd) {
85995 +               err = -ENOMEM;
85996 +               goto out2;
85997 +       }
85999 +       fnd2 = fnd_get();
86000 +       if (!fnd2) {
86001 +               err = -ENOMEM;
86002 +               goto out1;
86003 +       }
86005 +       root = indx_get_root(indx, ni, &attr, &mi);
86006 +       if (!root) {
86007 +               err = -EINVAL;
86008 +               goto out;
86009 +       }
86011 +       /* Locate the entry to remove. */
86012 +       err = indx_find(indx, ni, root, key, key_len, ctx, &diff, &e, fnd);
86013 +       if (err)
86014 +               goto out;
86016 +       if (!e || diff) {
86017 +               err = -ENOENT;
86018 +               goto out;
86019 +       }
86021 +       level = fnd->level;
86023 +       if (level) {
86024 +               n = fnd->nodes[level - 1];
86025 +               e = fnd->de[level - 1];
86026 +               ib = n->index;
86027 +               hdr = &ib->ihdr;
86028 +       } else {
86029 +               hdr = &root->ihdr;
86030 +               e = fnd->root_de;
86031 +               n = NULL;
86032 +       }
86034 +       e_size = le16_to_cpu(e->size);
86036 +       if (!de_has_vcn_ex(e)) {
86037 +               /* The entry to delete is a leaf, so we can just rip it out */
86038 +               hdr_delete_de(hdr, e);
86040 +               if (!level) {
86041 +                       hdr->total = hdr->used;
86043 +                       /* Shrink resident root attribute */
86044 +                       mi_resize_attr(mi, attr, 0 - e_size);
86045 +                       goto out;
86046 +               }
86048 +               indx_write(indx, ni, n, 0);
86050 +               /*
86051 +                * Check to see if removing that entry made
86052 +                * the leaf empty.
86053 +                */
86054 +               if (ib_is_leaf(ib) && ib_is_empty(ib)) {
86055 +                       fnd_pop(fnd);
86056 +                       fnd_push(fnd2, n, e);
86057 +               }
86058 +       } else {
86059 +               /*
86060 +                * The entry we wish to delete is a node buffer, so we
86061 +                * have to find a replacement for it.
86062 +                */
86063 +               next = de_get_next(e);
86065 +               err = indx_get_entry_to_replace(indx, ni, next, &re, fnd2);
86066 +               if (err)
86067 +                       goto out;
86069 +               if (re) {
86070 +                       de_set_vbn_le(re, de_get_vbn_le(e));
86071 +                       hdr_delete_de(hdr, e);
86073 +                       err = level ? indx_insert_into_buffer(indx, ni, root,
86074 +                                                             re, ctx,
86075 +                                                             fnd->level - 1,
86076 +                                                             fnd)
86077 +                                   : indx_insert_into_root(indx, ni, re, e,
86078 +                                                           ctx, fnd);
86079 +                       ntfs_free(re);
86081 +                       if (err)
86082 +                               goto out;
86083 +               } else {
86084 +                       /*
86085 +                        * There is no replacement for the current entry.
86086 +                        * This means that the subtree rooted at its node is empty,
86087 +                        * and can be deleted, which in turn means that the node can
86088 +                        * just inherit the deleted entry's sub_vcn.
86089 +                        */
86090 +                       indx_free_children(indx, ni, next, true);
86092 +                       de_set_vbn_le(next, de_get_vbn_le(e));
86093 +                       hdr_delete_de(hdr, e);
86094 +                       if (level) {
86095 +                               indx_write(indx, ni, n, 0);
86096 +                       } else {
86097 +                               hdr->total = hdr->used;
86099 +                               /* Shrink resident root attribute */
86100 +                               mi_resize_attr(mi, attr, 0 - e_size);
86101 +                       }
86102 +               }
86103 +       }
86105 +       /* Delete a branch of the tree. */
86106 +       if (!fnd2 || !fnd2->level)
86107 +               goto out;
86109 +       /* Re-read the root, since it may have changed. */
86110 +       root = indx_get_root(indx, ni, &attr, &mi);
86111 +       if (!root) {
86112 +               err = -EINVAL;
86113 +               goto out;
86114 +       }
86116 +       n2d = NULL;
86117 +       sub_vbn = fnd2->nodes[0]->index->vbn;
86118 +       level2 = 0;
86119 +       level = fnd->level;
86121 +       hdr = level ? &fnd->nodes[level - 1]->index->ihdr : &root->ihdr;
86123 +       /* Scan current level */
86124 +       for (e = hdr_first_de(hdr);; e = hdr_next_de(hdr, e)) {
86125 +               if (!e) {
86126 +                       err = -EINVAL;
86127 +                       goto out;
86128 +               }
86130 +               if (de_has_vcn(e) && sub_vbn == de_get_vbn_le(e))
86131 +                       break;
86133 +               if (de_is_last(e)) {
86134 +                       e = NULL;
86135 +                       break;
86136 +               }
86137 +       }
86139 +       if (!e) {
86140 +               /* Do slow search from root */
86141 +               struct indx_node *in;
86143 +               fnd_clear(fnd);
86145 +               in = indx_find_buffer(indx, ni, root, sub_vbn, NULL);
86146 +               if (IS_ERR(in)) {
86147 +                       err = PTR_ERR(in);
86148 +                       goto out;
86149 +               }
86151 +               if (in)
86152 +                       fnd_push(fnd, in, NULL);
86153 +       }
86155 +       /* Merge fnd2 -> fnd */
86156 +       for (level = 0; level < fnd2->level; level++) {
86157 +               fnd_push(fnd, fnd2->nodes[level], fnd2->de[level]);
86158 +               fnd2->nodes[level] = NULL;
86159 +       }
86160 +       fnd2->level = 0;
86162 +       hdr = NULL;
86163 +       for (level = fnd->level; level; level--) {
86164 +               struct indx_node *in = fnd->nodes[level - 1];
86166 +               ib = in->index;
86167 +               if (ib_is_empty(ib)) {
86168 +                       sub_vbn = ib->vbn;
86169 +               } else {
86170 +                       hdr = &ib->ihdr;
86171 +                       n2d = in;
86172 +                       level2 = level;
86173 +                       break;
86174 +               }
86175 +       }
86177 +       if (!hdr)
86178 +               hdr = &root->ihdr;
86180 +       e = hdr_first_de(hdr);
86181 +       if (!e) {
86182 +               err = -EINVAL;
86183 +               goto out;
86184 +       }
86186 +       if (hdr != &root->ihdr || !de_is_last(e)) {
86187 +               prev = NULL;
86188 +               while (!de_is_last(e)) {
86189 +                       if (de_has_vcn(e) && sub_vbn == de_get_vbn_le(e))
86190 +                               break;
86191 +                       prev = e;
86192 +                       e = hdr_next_de(hdr, e);
86193 +                       if (!e) {
86194 +                               err = -EINVAL;
86195 +                               goto out;
86196 +                       }
86197 +               }
86199 +               if (sub_vbn != de_get_vbn_le(e)) {
86200 +                       /*
86201 +                        * Didn't find the parent entry, although this buffer is on the parent trail.
86202 +                        * Something is corrupt.
86203 +                        */
86204 +                       err = -EINVAL;
86205 +                       goto out;
86206 +               }
86208 +               if (de_is_last(e)) {
86209 +                       /*
86210 +                        * Since we can't remove the end entry, we'll remove its
86211 +                        * predecessor instead. This means we have to transfer the
86212 +                        * predecessor's sub_vcn to the end entry.
86213 +                        * Note that this index block is not empty, so the
86214 +                        * predecessor must exist.
86215 +                        */
86216 +                       if (!prev) {
86217 +                               err = -EINVAL;
86218 +                               goto out;
86219 +                       }
86221 +                       if (de_has_vcn(prev)) {
86222 +                               de_set_vbn_le(e, de_get_vbn_le(prev));
86223 +                       } else if (de_has_vcn(e)) {
86224 +                               le16_sub_cpu(&e->size, sizeof(u64));
86225 +                               e->flags &= ~NTFS_IE_HAS_SUBNODES;
86226 +                               le32_sub_cpu(&hdr->used, sizeof(u64));
86227 +                       }
86228 +                       e = prev;
86229 +               }
86231 +               /*
86232 +                * Copy the current entry into a temporary buffer (stripping off its
86233 +                * down-pointer, if any) and delete it from the current buffer or root,
86234 +                * as appropriate.
86235 +                */
86236 +               e_size = le16_to_cpu(e->size);
86237 +               me = ntfs_memdup(e, e_size);
86238 +               if (!me) {
86239 +                       err = -ENOMEM;
86240 +                       goto out;
86241 +               }
86243 +               if (de_has_vcn(me)) {
86244 +                       me->flags &= ~NTFS_IE_HAS_SUBNODES;
86245 +                       le16_sub_cpu(&me->size, sizeof(u64));
86246 +               }
86248 +               hdr_delete_de(hdr, e);
86250 +               if (hdr == &root->ihdr) {
86251 +                       level = 0;
86252 +                       hdr->total = hdr->used;
86254 +                       /* Shrink resident root attribute */
86255 +                       mi_resize_attr(mi, attr, 0 - e_size);
86256 +               } else {
86257 +                       indx_write(indx, ni, n2d, 0);
86258 +                       level = level2;
86259 +               }
86261 +               /* Mark unused buffers as free */
86262 +               trim_bit = -1;
86263 +               for (; level < fnd->level; level++) {
86264 +                       ib = fnd->nodes[level]->index;
86265 +                       if (ib_is_empty(ib)) {
86266 +                               size_t k = le64_to_cpu(ib->vbn) >>
86267 +                                          indx->idx2vbn_bits;
86269 +                               indx_mark_free(indx, ni, k);
86270 +                               if (k < trim_bit)
86271 +                                       trim_bit = k;
86272 +                       }
86273 +               }
86275 +               fnd_clear(fnd);
86276 +               /*fnd->root_de = NULL;*/
86278 +               /*
86279 +                * Re-insert the entry into the tree.
86280 +                * Find the spot in the tree where we want to insert the new entry.
86281 +                */
86282 +               err = indx_insert_entry(indx, ni, me, ctx, fnd);
86283 +               ntfs_free(me);
86284 +               if (err)
86285 +                       goto out;
86287 +               if (trim_bit != -1)
86288 +                       indx_shrink(indx, ni, trim_bit);
86289 +       } else {
86290 +               /*
86291 +                * This tree needs to be collapsed down to an empty root.
86292 +                * Recreate the index root as an empty leaf and free all the bits in
86293 +                * the index allocation bitmap.
86294 +                */
86295 +               fnd_clear(fnd);
86296 +               fnd_clear(fnd2);
86298 +               in = &s_index_names[indx->type];
86300 +               err = attr_set_size(ni, ATTR_ALLOC, in->name, in->name_len,
86301 +                                   &indx->alloc_run, 0, NULL, false, NULL);
86302 +               err = ni_remove_attr(ni, ATTR_ALLOC, in->name, in->name_len,
86303 +                                    false, NULL);
86304 +               run_close(&indx->alloc_run);
86306 +               err = attr_set_size(ni, ATTR_BITMAP, in->name, in->name_len,
86307 +                                   &indx->bitmap_run, 0, NULL, false, NULL);
86308 +               err = ni_remove_attr(ni, ATTR_BITMAP, in->name, in->name_len,
86309 +                                    false, NULL);
86310 +               run_close(&indx->bitmap_run);
86312 +               root = indx_get_root(indx, ni, &attr, &mi);
86313 +               if (!root) {
86314 +                       err = -EINVAL;
86315 +                       goto out;
86316 +               }
86318 +               root_size = le32_to_cpu(attr->res.data_size);
86319 +               new_root_size =
86320 +                       sizeof(struct INDEX_ROOT) + sizeof(struct NTFS_DE);
86322 +               if (new_root_size != root_size &&
86323 +                   !mi_resize_attr(mi, attr, new_root_size - root_size)) {
86324 +                       err = -EINVAL;
86325 +                       goto out;
86326 +               }
86328 +               /* Fill first entry */
86329 +               e = (struct NTFS_DE *)(root + 1);
86330 +               e->ref.low = 0;
86331 +               e->ref.high = 0;
86332 +               e->ref.seq = 0;
86333 +               e->size = cpu_to_le16(sizeof(struct NTFS_DE));
86334 +               e->flags = NTFS_IE_LAST; // 0x02
86335 +               e->key_size = 0;
86336 +               e->res = 0;
86338 +               hdr = &root->ihdr;
86339 +               hdr->flags = 0;
86340 +               hdr->used = hdr->total = cpu_to_le32(
86341 +                       new_root_size - offsetof(struct INDEX_ROOT, ihdr));
86342 +               mi->dirty = true;
86343 +       }
86345 +out:
86346 +       fnd_put(fnd2);
86347 +out1:
86348 +       fnd_put(fnd);
86349 +out2:
86350 +       return err;
86351 +}
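/*
 * A sketch of the delete entry point in use: callers pass the same
 * key/ctx pair they would give indx_find(), and -ENOENT reports a
 * missing key. example_delete and its "already gone" policy are
 * hypothetical, not taken from the driver.
 */
static int example_delete(struct ntfs_index *indx, struct ntfs_inode *ni,
                          const void *key, u32 key_len, const void *ctx)
{
        int err = indx_delete_entry(indx, ni, key, key_len, ctx);

        return err == -ENOENT ? 0 : err; /* treat a missing key as success */
}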
86353 +int indx_update_dup(struct ntfs_inode *ni, struct ntfs_sb_info *sbi,
86354 +                   const struct ATTR_FILE_NAME *fname,
86355 +                   const struct NTFS_DUP_INFO *dup, int sync)
86356 +{
86357 +       int err, diff;
86358 +       struct NTFS_DE *e = NULL;
86359 +       struct ATTR_FILE_NAME *e_fname;
86360 +       struct ntfs_fnd *fnd;
86361 +       struct INDEX_ROOT *root;
86362 +       struct mft_inode *mi;
86363 +       struct ntfs_index *indx = &ni->dir;
86365 +       fnd = fnd_get();
86366 +       if (!fnd) {
86367 +               err = -ENOMEM;
86368 +               goto out1;
86369 +       }
86371 +       root = indx_get_root(indx, ni, NULL, &mi);
86372 +       if (!root) {
86373 +               err = -EINVAL;
86374 +               goto out;
86375 +       }
86377 +       /* Find the entry in the tree and on disk. */
86378 +       err = indx_find(indx, ni, root, fname, fname_full_size(fname), sbi,
86379 +                       &diff, &e, fnd);
86380 +       if (err)
86381 +               goto out;
86383 +       if (!e) {
86384 +               err = -EINVAL;
86385 +               goto out;
86386 +       }
86388 +       if (diff) {
86389 +               err = -EINVAL;
86390 +               goto out;
86391 +       }
86393 +       e_fname = (struct ATTR_FILE_NAME *)(e + 1);
86395 +       if (!memcmp(&e_fname->dup, dup, sizeof(*dup))) {
86396 +               /* Nothing to update in the index! Try to avoid this call. */
86397 +               goto out;
86398 +       }
86400 +       memcpy(&e_fname->dup, dup, sizeof(*dup));
86402 +       if (fnd->level) {
86403 +               err = indx_write(indx, ni, fnd->nodes[fnd->level - 1], sync);
86404 +       } else if (sync) {
86405 +               mi->dirty = true;
86406 +               err = mi_write(mi, 1);
86407 +       } else {
86408 +               mi->dirty = true;
86409 +               mark_inode_dirty(&ni->vfs_inode);
86410 +       }
86412 +out:
86413 +       fnd_put(fnd);
86415 +out1:
86416 +       return err;
86417 +}
86418 diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
86419 new file mode 100644
86420 index 000000000000..9e836c192ddf
86421 --- /dev/null
86422 +++ b/fs/ntfs3/inode.c
86423 @@ -0,0 +1,2033 @@
86424 +// SPDX-License-Identifier: GPL-2.0
86425 +/*
86426 + *
86427 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
86428 + *
86429 + */
86431 +#include <linux/blkdev.h>
86432 +#include <linux/buffer_head.h>
86433 +#include <linux/fs.h>
86434 +#include <linux/iversion.h>
86435 +#include <linux/mpage.h>
86436 +#include <linux/namei.h>
86437 +#include <linux/nls.h>
86438 +#include <linux/uio.h>
86439 +#include <linux/version.h>
86440 +#include <linux/writeback.h>
86442 +#include "debug.h"
86443 +#include "ntfs.h"
86444 +#include "ntfs_fs.h"
86446 +/*
86447 + * ntfs_read_mft
86448 + *
86449 + * Reads the MFT record and parses its attributes.
86450 + */
86451 +static struct inode *ntfs_read_mft(struct inode *inode,
86452 +                                  const struct cpu_str *name,
86453 +                                  const struct MFT_REF *ref)
86454 +{
86455 +       int err = 0;
86456 +       struct ntfs_inode *ni = ntfs_i(inode);
86457 +       struct super_block *sb = inode->i_sb;
86458 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
86459 +       mode_t mode = 0;
86460 +       struct ATTR_STD_INFO5 *std5 = NULL;
86461 +       struct ATTR_LIST_ENTRY *le;
86462 +       struct ATTRIB *attr;
86463 +       bool is_match = false;
86464 +       bool is_root = false;
86465 +       bool is_dir;
86466 +       unsigned long ino = inode->i_ino;
86467 +       u32 rp_fa = 0, asize, t32;
86468 +       u16 roff, rsize, names = 0;
86469 +       const struct ATTR_FILE_NAME *fname = NULL;
86470 +       const struct INDEX_ROOT *root;
86471 +       struct REPARSE_DATA_BUFFER rp; // 0x18 bytes
86472 +       u64 t64;
86473 +       struct MFT_REC *rec;
86474 +       struct runs_tree *run;
86476 +       inode->i_op = NULL;
86478 +       err = mi_init(&ni->mi, sbi, ino);
86479 +       if (err)
86480 +               goto out;
86482 +       if (!sbi->mft.ni && ino == MFT_REC_MFT && !sb->s_root) {
86483 +               t64 = sbi->mft.lbo >> sbi->cluster_bits;
86484 +               t32 = bytes_to_cluster(sbi, MFT_REC_VOL * sbi->record_size);
86485 +               sbi->mft.ni = ni;
86486 +               init_rwsem(&ni->file.run_lock);
86488 +               if (!run_add_entry(&ni->file.run, 0, t64, t32, true)) {
86489 +                       err = -ENOMEM;
86490 +                       goto out;
86491 +               }
86492 +       }
86494 +       err = mi_read(&ni->mi, ino == MFT_REC_MFT);
86496 +       if (err)
86497 +               goto out;
86499 +       rec = ni->mi.mrec;
86501 +       if (sbi->flags & NTFS_FLAGS_LOG_REPLAYING) {
86502 +               ;
86503 +       } else if (ref->seq != rec->seq) {
86504 +               err = -EINVAL;
86505 +               ntfs_err(sb, "MFT: r=%lx, expect seq=%x instead of %x!", ino,
86506 +                        le16_to_cpu(ref->seq), le16_to_cpu(rec->seq));
86507 +               goto out;
86508 +       } else if (!is_rec_inuse(rec)) {
86509 +               err = -EINVAL;
86510 +               ntfs_err(sb, "Inode r=%x is not in use!", (u32)ino);
86511 +               goto out;
86512 +       }
86514 +       if (le32_to_cpu(rec->total) != sbi->record_size) {
86515 +               // bad inode?
86516 +               err = -EINVAL;
86517 +               goto out;
86518 +       }
86520 +       if (!is_rec_base(rec))
86521 +               goto Ok;
86523 +       /* record should contain $I30 root */
86524 +       is_dir = rec->flags & RECORD_FLAG_DIR;
86526 +       inode->i_generation = le16_to_cpu(rec->seq);
86528 +       /* Enumerate all struct Attributes MFT */
86529 +       le = NULL;
86530 +       attr = NULL;
86532 +       /*
86533 +        * To reduce indentation depth, use goto instead of
86534 +        * while ((attr = ni_enum_attr_ex(ni, attr, &le, NULL)))
86535 +        */
86536 +next_attr:
86537 +       run = NULL;
86538 +       err = -EINVAL;
86539 +       attr = ni_enum_attr_ex(ni, attr, &le, NULL);
86540 +       if (!attr)
86541 +               goto end_enum;
86543 +       if (le && le->vcn) {
86544 +               /* Non-primary attribute segment; ignore unless this is the MFT. */
86545 +               if (ino != MFT_REC_MFT || attr->type != ATTR_DATA)
86546 +                       goto next_attr;
86548 +               run = &ni->file.run;
86549 +               asize = le32_to_cpu(attr->size);
86550 +               goto attr_unpack_run;
86551 +       }
86553 +       roff = attr->non_res ? 0 : le16_to_cpu(attr->res.data_off);
86554 +       rsize = attr->non_res ? 0 : le32_to_cpu(attr->res.data_size);
86555 +       asize = le32_to_cpu(attr->size);
86557 +       switch (attr->type) {
86558 +       case ATTR_STD:
86559 +               if (attr->non_res ||
86560 +                   asize < sizeof(struct ATTR_STD_INFO) + roff ||
86561 +                   rsize < sizeof(struct ATTR_STD_INFO))
86562 +                       goto out;
86564 +               if (std5)
86565 +                       goto next_attr;
86567 +               std5 = Add2Ptr(attr, roff);
86569 +#ifdef STATX_BTIME
86570 +               nt2kernel(std5->cr_time, &ni->i_crtime);
86571 +#endif
86572 +               nt2kernel(std5->a_time, &inode->i_atime);
86573 +               nt2kernel(std5->c_time, &inode->i_ctime);
86574 +               nt2kernel(std5->m_time, &inode->i_mtime);
86576 +               ni->std_fa = std5->fa;
86578 +               if (asize >= sizeof(struct ATTR_STD_INFO5) + roff &&
86579 +                   rsize >= sizeof(struct ATTR_STD_INFO5))
86580 +                       ni->std_security_id = std5->security_id;
86581 +               goto next_attr;
86583 +       case ATTR_LIST:
86584 +               if (attr->name_len || le || ino == MFT_REC_LOG)
86585 +                       goto out;
86587 +               err = ntfs_load_attr_list(ni, attr);
86588 +               if (err)
86589 +                       goto out;
86591 +               le = NULL;
86592 +               attr = NULL;
86593 +               goto next_attr;
86595 +       case ATTR_NAME:
86596 +               if (attr->non_res || asize < SIZEOF_ATTRIBUTE_FILENAME + roff ||
86597 +                   rsize < SIZEOF_ATTRIBUTE_FILENAME)
86598 +                       goto out;
86600 +               fname = Add2Ptr(attr, roff);
86601 +               if (fname->type == FILE_NAME_DOS)
86602 +                       goto next_attr;
86604 +               names += 1;
86605 +               if (name && name->len == fname->name_len &&
86606 +                   !ntfs_cmp_names_cpu(name, (struct le_str *)&fname->name_len,
86607 +                                       NULL, false))
86608 +                       is_match = true;
86610 +               goto next_attr;
86612 +       case ATTR_DATA:
86613 +               if (is_dir) {
86614 +                       /* ignore data attribute in dir record */
86615 +                       goto next_attr;
86616 +               }
86618 +               if (ino == MFT_REC_BADCLUST && !attr->non_res)
86619 +                       goto next_attr;
86621 +               if (attr->name_len &&
86622 +                   ((ino != MFT_REC_BADCLUST || !attr->non_res ||
86623 +                     attr->name_len != ARRAY_SIZE(BAD_NAME) ||
86624 +                     memcmp(attr_name(attr), BAD_NAME, sizeof(BAD_NAME))) &&
86625 +                    (ino != MFT_REC_SECURE || !attr->non_res ||
86626 +                     attr->name_len != ARRAY_SIZE(SDS_NAME) ||
86627 +                     memcmp(attr_name(attr), SDS_NAME, sizeof(SDS_NAME))))) {
86628 +                       /* File contains a named stream attribute; ignore it. */
86629 +                       goto next_attr;
86630 +               }
86632 +               if (is_attr_sparsed(attr))
86633 +                       ni->std_fa |= FILE_ATTRIBUTE_SPARSE_FILE;
86634 +               else
86635 +                       ni->std_fa &= ~FILE_ATTRIBUTE_SPARSE_FILE;
86637 +               if (is_attr_compressed(attr))
86638 +                       ni->std_fa |= FILE_ATTRIBUTE_COMPRESSED;
86639 +               else
86640 +                       ni->std_fa &= ~FILE_ATTRIBUTE_COMPRESSED;
86642 +               if (is_attr_encrypted(attr))
86643 +                       ni->std_fa |= FILE_ATTRIBUTE_ENCRYPTED;
86644 +               else
86645 +                       ni->std_fa &= ~FILE_ATTRIBUTE_ENCRYPTED;
86647 +               if (!attr->non_res) {
86648 +                       ni->i_valid = inode->i_size = rsize;
86649 +                       inode_set_bytes(inode, rsize);
86650 +                       t32 = asize;
86651 +               } else {
86652 +                       t32 = le16_to_cpu(attr->nres.run_off);
86653 +               }
86655 +               mode = S_IFREG | (0777 & sbi->options.fs_fmask_inv);
86657 +               if (!attr->non_res) {
86658 +                       ni->ni_flags |= NI_FLAG_RESIDENT;
86659 +                       goto next_attr;
86660 +               }
86662 +               inode_set_bytes(inode, attr_ondisk_size(attr));
86664 +               ni->i_valid = le64_to_cpu(attr->nres.valid_size);
86665 +               inode->i_size = le64_to_cpu(attr->nres.data_size);
86666 +               if (!attr->nres.alloc_size)
86667 +                       goto next_attr;
86669 +               run = ino == MFT_REC_BITMAP ? &sbi->used.bitmap.run
86670 +                                           : &ni->file.run;
86671 +               break;
86673 +       case ATTR_ROOT:
86674 +               if (attr->non_res)
86675 +                       goto out;
86677 +               root = Add2Ptr(attr, roff);
86678 +               is_root = true;
86680 +               if (attr->name_len != ARRAY_SIZE(I30_NAME) ||
86681 +                   memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
86682 +                       goto next_attr;
86684 +               if (root->type != ATTR_NAME ||
86685 +                   root->rule != NTFS_COLLATION_TYPE_FILENAME)
86686 +                       goto out;
86688 +               if (!is_dir)
86689 +                       goto next_attr;
86691 +               ni->ni_flags |= NI_FLAG_DIR;
86693 +               err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
86694 +               if (err)
86695 +                       goto out;
86697 +               mode = sb->s_root
86698 +                              ? (S_IFDIR | (0777 & sbi->options.fs_dmask_inv))
86699 +                              : (S_IFDIR | 0777);
86700 +               goto next_attr;
86702 +       case ATTR_ALLOC:
86703 +               if (!is_root || attr->name_len != ARRAY_SIZE(I30_NAME) ||
86704 +                   memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
86705 +                       goto next_attr;
86707 +               inode->i_size = le64_to_cpu(attr->nres.data_size);
86708 +               ni->i_valid = le64_to_cpu(attr->nres.valid_size);
86709 +               inode_set_bytes(inode, le64_to_cpu(attr->nres.alloc_size));
86711 +               run = &ni->dir.alloc_run;
86712 +               break;
86714 +       case ATTR_BITMAP:
86715 +               if (ino == MFT_REC_MFT) {
86716 +                       if (!attr->non_res)
86717 +                               goto out;
86718 +#ifndef CONFIG_NTFS3_64BIT_CLUSTER
86719 +                       /* 0x20000000 = 2^32 / 8 */
86720 +                       if (le64_to_cpu(attr->nres.alloc_size) >= 0x20000000)
86721 +                               goto out;
86722 +#endif
86723 +                       run = &sbi->mft.bitmap.run;
86724 +                       break;
86725 +               } else if (is_dir && attr->name_len == ARRAY_SIZE(I30_NAME) &&
86726 +                          !memcmp(attr_name(attr), I30_NAME,
86727 +                                  sizeof(I30_NAME)) &&
86728 +                          attr->non_res) {
86729 +                       run = &ni->dir.bitmap_run;
86730 +                       break;
86731 +               }
86732 +               goto next_attr;
86734 +       case ATTR_REPARSE:
86735 +               if (attr->name_len)
86736 +                       goto next_attr;
86738 +               rp_fa = ni_parse_reparse(ni, attr, &rp);
86739 +               switch (rp_fa) {
86740 +               case REPARSE_LINK:
86741 +                       if (!attr->non_res) {
86742 +                               inode->i_size = rsize;
86743 +                               inode_set_bytes(inode, rsize);
86744 +                               t32 = asize;
86745 +                       } else {
86746 +                               inode->i_size =
86747 +                                       le64_to_cpu(attr->nres.data_size);
86748 +                               t32 = le16_to_cpu(attr->nres.run_off);
86749 +                       }
86751 +                       /* Looks like normal symlink */
86752 +                       ni->i_valid = inode->i_size;
86754 +                       /* Clear directory bit */
86755 +                       if (ni->ni_flags & NI_FLAG_DIR) {
86756 +                               indx_clear(&ni->dir);
86757 +                               memset(&ni->dir, 0, sizeof(ni->dir));
86758 +                               ni->ni_flags &= ~NI_FLAG_DIR;
86759 +                       } else {
86760 +                               run_close(&ni->file.run);
86761 +                       }
86762 +                       mode = S_IFLNK | 0777;
86763 +                       is_dir = false;
86764 +                       if (attr->non_res) {
86765 +                               run = &ni->file.run;
86766 +                               goto attr_unpack_run; // double break
86767 +                       }
86768 +                       break;
86770 +               case REPARSE_COMPRESSED:
86771 +                       break;
86773 +               case REPARSE_DEDUPLICATED:
86774 +                       break;
86775 +               }
86776 +               goto next_attr;
86778 +       case ATTR_EA_INFO:
86779 +               if (!attr->name_len &&
86780 +                   resident_data_ex(attr, sizeof(struct EA_INFO)))
86781 +                       ni->ni_flags |= NI_FLAG_EA;
86782 +               goto next_attr;
86784 +       default:
86785 +               goto next_attr;
86786 +       }
86788 +attr_unpack_run:
86789 +       roff = le16_to_cpu(attr->nres.run_off);
86791 +       t64 = le64_to_cpu(attr->nres.svcn);
86792 +       err = run_unpack_ex(run, sbi, ino, t64, le64_to_cpu(attr->nres.evcn),
86793 +                           t64, Add2Ptr(attr, roff), asize - roff);
86794 +       if (err < 0)
86795 +               goto out;
86796 +       err = 0;
86797 +       goto next_attr;
86799 +end_enum:
86801 +       if (!std5)
86802 +               goto out;
86804 +       if (!is_match && name) {
86805 +               /* reuse rec as buffer for ascii name */
86806 +               err = -ENOENT;
86807 +               goto out;
86808 +       }
86810 +       if (std5->fa & FILE_ATTRIBUTE_READONLY)
86811 +               mode &= ~0222;
86813 +       /* Setup 'uid' and 'gid' */
86814 +       inode->i_uid = sbi->options.fs_uid;
86815 +       inode->i_gid = sbi->options.fs_gid;
86817 +       if (!names) {
86818 +               err = -EINVAL;
86819 +               goto out;
86820 +       }
86822 +       if (S_ISDIR(mode)) {
86823 +               ni->std_fa |= FILE_ATTRIBUTE_DIRECTORY;
86825 +               /*
86826 +                * Dot and dot-dot should be included in the count but were not
86827 +                * included in the enumeration.
86828 +                * Hard links to directories are usually disabled.
86829 +                */
86830 +               set_nlink(inode, 1);
86831 +               inode->i_op = &ntfs_dir_inode_operations;
86832 +               inode->i_fop = &ntfs_dir_operations;
86833 +               ni->i_valid = 0;
86834 +       } else if (S_ISLNK(mode)) {
86835 +               ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
86836 +               inode->i_op = &ntfs_link_inode_operations;
86837 +               inode->i_fop = NULL;
86838 +               inode_nohighmem(inode); // ??
86839 +               set_nlink(inode, names);
86840 +       } else if (S_ISREG(mode)) {
86841 +               ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
86843 +               set_nlink(inode, names);
86845 +               inode->i_op = &ntfs_file_inode_operations;
86846 +               inode->i_fop = &ntfs_file_operations;
86847 +               inode->i_mapping->a_ops =
86848 +                       is_compressed(ni) ? &ntfs_aops_cmpr : &ntfs_aops;
86850 +               if (ino != MFT_REC_MFT)
86851 +                       init_rwsem(&ni->file.run_lock);
86852 +       } else if (fname && fname->home.low == cpu_to_le32(MFT_REC_EXTEND) &&
86853 +                  fname->home.seq == cpu_to_le16(MFT_REC_EXTEND)) {
86854 +               /* Records in $Extend are not files or general directories. */
86855 +       } else {
86856 +               err = -EINVAL;
86857 +               goto out;
86858 +       }
86860 +       if ((sbi->options.sys_immutable &&
86861 +            (std5->fa & FILE_ATTRIBUTE_SYSTEM)) &&
86862 +           !S_ISFIFO(mode) && !S_ISSOCK(mode) && !S_ISLNK(mode)) {
86863 +               inode->i_flags |= S_IMMUTABLE;
86864 +       } else {
86865 +               inode->i_flags &= ~S_IMMUTABLE;
86866 +       }
86868 +       inode->i_mode = mode;
86869 +       if (!(ni->ni_flags & NI_FLAG_EA)) {
86870 +               /* if no xattr then no security (stored in xattr) */
86871 +               inode->i_flags |= S_NOSEC;
86872 +       }
86874 +Ok:
86875 +       if (ino == MFT_REC_MFT && !sb->s_root)
86876 +               sbi->mft.ni = NULL;
86878 +       unlock_new_inode(inode);
86880 +       return inode;
86882 +out:
86883 +       if (ino == MFT_REC_MFT && !sb->s_root)
86884 +               sbi->mft.ni = NULL;
86886 +       iget_failed(inode);
86887 +       return ERR_PTR(err);
86888 +}
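The mode assembled during enumeration combines the POSIX type bits with the fmask/dmask mount options and is then stripped of write bits for FILE_ATTRIBUTE_READONLY. A stand-alone sketch of that selection, assuming fs_fmask_inv/fs_dmask_inv are the bitwise-inverted mount masks, as the field names suggest:

#include <stdio.h>
#include <sys/stat.h>

/* hypothetical helper mirroring the mode selection above */
static mode_t sketch_mode(int is_dir, int is_link, int read_only,
                          mode_t fmask_inv, mode_t dmask_inv)
{
        mode_t mode;

        if (is_link)
                mode = S_IFLNK | 0777;            /* symlinks are always 0777 */
        else if (is_dir)
                mode = S_IFDIR | (0777 & dmask_inv);
        else
                mode = S_IFREG | (0777 & fmask_inv);

        if (read_only)                            /* FILE_ATTRIBUTE_READONLY */
                mode &= ~0222;
        return mode;
}

int main(void)
{
        /* fmask=022 on the mount line would give fmask_inv=0755 */
        printf("%o\n", (unsigned)sketch_mode(0, 0, 1, 0755, 0777));
        return 0;
}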
86890 +/* returns 1 if match */
86891 +static int ntfs_test_inode(struct inode *inode, void *data)
86892 +{
86893 +       struct MFT_REF *ref = data;
86895 +       return ino_get(ref) == inode->i_ino;
86896 +}
86898 +static int ntfs_set_inode(struct inode *inode, void *data)
86899 +{
86900 +       const struct MFT_REF *ref = data;
86902 +       inode->i_ino = ino_get(ref);
86903 +       return 0;
86904 +}
86906 +struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
86907 +                        const struct cpu_str *name)
86908 +{
86909 +       struct inode *inode;
86911 +       inode = iget5_locked(sb, ino_get(ref), ntfs_test_inode, ntfs_set_inode,
86912 +                            (void *)ref);
86913 +       if (unlikely(!inode))
86914 +               return ERR_PTR(-ENOMEM);
86916 +       /* If this is a freshly allocated inode, need to read it now. */
86917 +       if (inode->i_state & I_NEW)
86918 +               inode = ntfs_read_mft(inode, name, ref);
86919 +       else if (ref->seq != ntfs_i(inode)->mi.mrec->seq) {
86920 +               /* inode overlaps? */
86921 +               make_bad_inode(inode);
86922 +       }
86924 +       return inode;
86925 +}
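ntfs_iget5 keys the inode cache on ino_get(ref) while the sequence number is checked separately: a reused MFT record keeps its record number but bumps seq, which is what the make_bad_inode branch above catches. A sketch of the presumed number/sequence split, using a simplified stand-in for struct MFT_REF:

#include <stdint.h>
#include <stdio.h>

struct mft_ref {              /* simplified stand-in for struct MFT_REF */
        uint32_t low;         /* record number, low 32 bits */
        uint16_t high;        /* record number, high 16 bits */
        uint16_t seq;         /* sequence number, bumped on reuse */
};

/* presumed shape of ino_get(): the 48-bit record number becomes the
 * inode number; seq is deliberately excluded and compared separately */
static uint64_t ref_to_ino(const struct mft_ref *ref)
{
        return ((uint64_t)ref->high << 32) | ref->low;
}

int main(void)
{
        struct mft_ref ref = { .low = 0x2c, .high = 0, .seq = 5 };

        printf("ino=%llu seq=%u\n",
               (unsigned long long)ref_to_ino(&ref), ref.seq);
        return 0;
}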
86927 +enum get_block_ctx {
86928 +       GET_BLOCK_GENERAL = 0,
86929 +       GET_BLOCK_WRITE_BEGIN = 1,
86930 +       GET_BLOCK_DIRECT_IO_R = 2,
86931 +       GET_BLOCK_DIRECT_IO_W = 3,
86932 +       GET_BLOCK_BMAP = 4,
86933 +};
86935 +static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
86936 +                                      struct buffer_head *bh, int create,
86937 +                                      enum get_block_ctx ctx)
86938 +{
86939 +       struct super_block *sb = inode->i_sb;
86940 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
86941 +       struct ntfs_inode *ni = ntfs_i(inode);
86942 +       struct page *page = bh->b_page;
86943 +       u8 cluster_bits = sbi->cluster_bits;
86944 +       u32 block_size = sb->s_blocksize;
86945 +       u64 bytes, lbo, valid;
86946 +       u32 off;
86947 +       int err;
86948 +       CLST vcn, lcn, len;
86949 +       bool new;
86951 +       /* Clear previous state. */
86952 +       clear_buffer_new(bh);
86953 +       clear_buffer_uptodate(bh);
86955 +       /* Direct write uses 'create=0'. */
86956 +       if (!create && vbo >= ni->i_valid) {
86957 +               /* out of valid */
86958 +               return 0;
86959 +       }
86961 +       if (vbo >= inode->i_size) {
86962 +               /* out of size */
86963 +               return 0;
86964 +       }
86966 +       if (is_resident(ni)) {
86967 +               ni_lock(ni);
86968 +               err = attr_data_read_resident(ni, page);
86969 +               ni_unlock(ni);
86971 +               if (!err)
86972 +                       set_buffer_uptodate(bh);
86973 +               bh->b_size = block_size;
86974 +               return err;
86975 +       }
86977 +       vcn = vbo >> cluster_bits;
86978 +       off = vbo & sbi->cluster_mask;
86979 +       new = false;
86981 +       err = attr_data_get_block(ni, vcn, 1, &lcn, &len, create ? &new : NULL);
86982 +       if (err)
86983 +               goto out;
86985 +       if (!len)
86986 +               return 0;
86988 +       bytes = ((u64)len << cluster_bits) - off;
86990 +       if (lcn == SPARSE_LCN) {
86991 +               if (!create) {
86992 +                       if (bh->b_size > bytes)
86993 +                               bh->b_size = bytes;
86995 +                       return 0;
86996 +               }
86997 +               WARN_ON(1);
86998 +       }
87000 +       if (new) {
87001 +               set_buffer_new(bh);
87002 +               if ((len << cluster_bits) > block_size)
87003 +                       ntfs_sparse_cluster(inode, page, vcn, len);
87004 +       }
87006 +       lbo = ((u64)lcn << cluster_bits) + off;
87008 +       set_buffer_mapped(bh);
87009 +       bh->b_bdev = sb->s_bdev;
87010 +       bh->b_blocknr = lbo >> sb->s_blocksize_bits;
87012 +       valid = ni->i_valid;
87014 +       if (ctx == GET_BLOCK_DIRECT_IO_W) {
87015 +               /* ntfs_direct_IO will update ni->i_valid. */
87016 +               if (vbo >= valid)
87017 +                       set_buffer_new(bh);
87018 +       } else if (create) {
87019 +               /*normal write*/
87020 +               if (vbo >= valid) {
87021 +                       set_buffer_new(bh);
87022 +                       if (bytes > bh->b_size)
87023 +                               bytes = bh->b_size;
87024 +                       ni->i_valid = vbo + bytes;
87025 +                       mark_inode_dirty(inode);
87026 +               }
87027 +       } else if (valid >= inode->i_size) {
87028 +               /* normal read of normal file*/
87029 +       } else if (vbo >= valid) {
87030 +               /* Read beyond valid data. */
87031 +               /* Should never get here because it was already checked. */
87032 +               clear_buffer_mapped(bh);
87033 +       } else if (vbo + bytes <= valid) {
87034 +               /* normal read */
87035 +       } else if (vbo + block_size <= valid) {
87036 +               /* normal short read */
87037 +               bytes = block_size;
87038 +       } else {
87039 +               /*
87040 +                * read across valid size: vbo < valid && valid < vbo + block_size
87041 +                */
87042 +               u32 voff = valid - vbo;
87044 +               bh->b_size = bytes = block_size;
87045 +               off = vbo & (PAGE_SIZE - 1);
87046 +               set_bh_page(bh, page, off);
87047 +               ll_rw_block(REQ_OP_READ, 0, 1, &bh);
87048 +               wait_on_buffer(bh);
87049 +               /* Uhhuh. Read error. Complain and punt. */
87050 +               if (!buffer_uptodate(bh)) {
87051 +                       err = -EIO;
87052 +                       goto out;
87053 +               }
87054 +               zero_user_segment(page, off + voff, off + block_size);
87055 +       }
87057 +       if (bh->b_size > bytes)
87058 +               bh->b_size = bytes;
87060 +#ifndef __LP64__
87061 +       if (ctx == GET_BLOCK_DIRECT_IO_W || ctx == GET_BLOCK_DIRECT_IO_R) {
87062 +               static_assert(sizeof(size_t) < sizeof(loff_t));
87063 +               if (bytes > 0x40000000u)
87064 +                       bh->b_size = 0x40000000u;
87065 +       }
87066 +#endif
87068 +       return 0;
87070 +out:
87071 +       return err;
87072 +}
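The address translation in ntfs_get_block_vbo is plain cluster arithmetic: split the virtual byte offset into a virtual cluster number plus an in-cluster offset, map VCN to LCN through the run list, then rebuild a logical byte offset. A stand-alone sketch with made-up geometry (4K clusters, an invented LCN):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* example geometry: 4K clusters => cluster_bits = 12 */
        unsigned cluster_bits = 12;
        uint64_t cluster_mask = (1u << cluster_bits) - 1;

        uint64_t vbo = 0x5123;                   /* virtual byte offset */
        uint64_t vcn = vbo >> cluster_bits;      /* virtual cluster 5 */
        uint64_t off = vbo & cluster_mask;       /* 0x123 into the cluster */

        uint64_t lcn = 1000;                     /* pretend the run map said so */
        uint64_t lbo = (lcn << cluster_bits) + off; /* logical byte offset */

        printf("vcn=%llu off=0x%llx lbo=0x%llx\n",
               (unsigned long long)vcn, (unsigned long long)off,
               (unsigned long long)lbo);
        return 0;
}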
87074 +int ntfs_get_block(struct inode *inode, sector_t vbn,
87075 +                  struct buffer_head *bh_result, int create)
87076 +{
87077 +       return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits,
87078 +                                 bh_result, create, GET_BLOCK_GENERAL);
87079 +}
87081 +static int ntfs_get_block_bmap(struct inode *inode, sector_t vsn,
87082 +                              struct buffer_head *bh_result, int create)
87083 +{
87084 +       return ntfs_get_block_vbo(inode,
87085 +                                 (u64)vsn << inode->i_sb->s_blocksize_bits,
87086 +                                 bh_result, create, GET_BLOCK_BMAP);
87087 +}
87089 +static sector_t ntfs_bmap(struct address_space *mapping, sector_t block)
87090 +{
87091 +       return generic_block_bmap(mapping, block, ntfs_get_block_bmap);
87092 +}
87094 +static int ntfs_readpage(struct file *file, struct page *page)
87095 +{
87096 +       int err;
87097 +       struct address_space *mapping = page->mapping;
87098 +       struct inode *inode = mapping->host;
87099 +       struct ntfs_inode *ni = ntfs_i(inode);
87101 +       if (is_resident(ni)) {
87102 +               ni_lock(ni);
87103 +               err = attr_data_read_resident(ni, page);
87104 +               ni_unlock(ni);
87105 +               if (err != E_NTFS_NONRESIDENT) {
87106 +                       unlock_page(page);
87107 +                       return err;
87108 +               }
87109 +       }
87111 +       if (is_compressed(ni)) {
87112 +               ni_lock(ni);
87113 +               err = ni_readpage_cmpr(ni, page);
87114 +               ni_unlock(ni);
87115 +               return err;
87116 +       }
87118 +       /* normal + sparse files */
87119 +       return mpage_readpage(page, ntfs_get_block);
87120 +}
87122 +static void ntfs_readahead(struct readahead_control *rac)
87123 +{
87124 +       struct address_space *mapping = rac->mapping;
87125 +       struct inode *inode = mapping->host;
87126 +       struct ntfs_inode *ni = ntfs_i(inode);
87127 +       u64 valid;
87128 +       loff_t pos;
87130 +       if (is_resident(ni)) {
87131 +               /* no readahead for resident */
87132 +               return;
87133 +       }
87135 +       if (is_compressed(ni)) {
87136 +               /* no readahead for compressed */
87137 +               return;
87138 +       }
87140 +       valid = ni->i_valid;
87141 +       pos = readahead_pos(rac);
87143 +       if (valid < i_size_read(inode) && pos <= valid &&
87144 +           valid < pos + readahead_length(rac)) {
87145 +               /* Range crosses 'valid'; read it page by page. */
87146 +               return;
87147 +       }
87149 +       mpage_readahead(rac, ntfs_get_block);
87150 +}
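The bail-out above falls back to page-by-page reads whenever the readahead window straddles the valid-data boundary, since pages past i_valid must be partially zeroed. The predicate on its own, as a small sketch with illustrative sizes:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* mirrors the test in ntfs_readahead: true when [pos, pos+len)
 * crosses the valid-data boundary of the file */
static bool crosses_valid(uint64_t valid, uint64_t isize,
                          uint64_t pos, uint64_t len)
{
        return valid < isize && pos <= valid && valid < pos + len;
}

int main(void)
{
        /* valid data ends at 8K inside a 64K file */
        printf("%d\n", crosses_valid(8192, 65536, 4096, 16384));  /* 1 */
        printf("%d\n", crosses_valid(8192, 65536, 16384, 16384)); /* 0 */
        return 0;
}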
87152 +static int ntfs_get_block_direct_IO_R(struct inode *inode, sector_t iblock,
87153 +                                     struct buffer_head *bh_result, int create)
87154 +{
87155 +       return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
87156 +                                 bh_result, create, GET_BLOCK_DIRECT_IO_R);
87157 +}
87159 +static int ntfs_get_block_direct_IO_W(struct inode *inode, sector_t iblock,
87160 +                                     struct buffer_head *bh_result, int create)
87161 +{
87162 +       return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
87163 +                                 bh_result, create, GET_BLOCK_DIRECT_IO_W);
87164 +}
87166 +static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
87167 +{
87168 +       struct file *file = iocb->ki_filp;
87169 +       struct address_space *mapping = file->f_mapping;
87170 +       struct inode *inode = mapping->host;
87171 +       struct ntfs_inode *ni = ntfs_i(inode);
87172 +       size_t count = iov_iter_count(iter);
87173 +       loff_t vbo = iocb->ki_pos;
87174 +       loff_t end = vbo + count;
87175 +       int wr = iov_iter_rw(iter) & WRITE;
87176 +       const struct iovec *iov = iter->iov;
87177 +       unsigned long nr_segs = iter->nr_segs;
87178 +       loff_t valid;
87179 +       ssize_t ret;
87181 +       if (is_resident(ni)) {
87182 +               /* Switch to buffered write. */
87183 +               ret = 0;
87184 +               goto out;
87185 +       }
87187 +       ret = blockdev_direct_IO(iocb, inode, iter,
87188 +                                wr ? ntfs_get_block_direct_IO_W
87189 +                                   : ntfs_get_block_direct_IO_R);
87190 +       valid = ni->i_valid;
87191 +       if (wr) {
87192 +               if (ret <= 0)
87193 +                       goto out;
87195 +               vbo += ret;
87196 +               if (vbo > valid && !S_ISBLK(inode->i_mode)) {
87197 +                       ni->i_valid = vbo;
87198 +                       mark_inode_dirty(inode);
87199 +               }
87200 +       } else if (vbo < valid && valid < end) {
87201 +               /* fix page */
87202 +               unsigned long uaddr = ~0ul;
87203 +               struct page *page;
87204 +               long i, npages;
87205 +               size_t dvbo = valid - vbo;
87206 +               size_t off = 0;
87208 +               /* Find the user address. */
87209 +               for (i = 0; i < nr_segs; i++) {
87210 +                       if (off <= dvbo && dvbo < off + iov[i].iov_len) {
87211 +                               uaddr = (unsigned long)iov[i].iov_base + dvbo -
87212 +                                       off;
87213 +                               break;
87214 +                       }
87215 +                       off += iov[i].iov_len;
87216 +               }
87218 +               if (uaddr == ~0ul)
87219 +                       goto fix_error;
87221 +               npages = get_user_pages_unlocked(uaddr, 1, &page, FOLL_WRITE);
87223 +               if (npages <= 0)
87224 +                       goto fix_error;
87226 +               zero_user_segment(page, valid & (PAGE_SIZE - 1), PAGE_SIZE);
87227 +               put_page(page);
87228 +       }
87230 +out:
87231 +       return ret;
87232 +fix_error:
87233 +       ntfs_inode_warn(inode, "file garbage at 0x%llx", valid);
87234 +       goto out;
87235 +}
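On the read side, the fixup above has to locate the user page containing the valid-size boundary so the bytes past it can be zeroed; it does that by walking the iovec array until the cumulative length covers the target offset. The same walk as a stand-alone sketch:

#include <stddef.h>
#include <stdio.h>
#include <sys/uio.h>

/* mirrors the segment walk above: find the user address that maps to
 * byte 'dvbo' of the transfer, or NULL if it falls outside the iovec */
static void *addr_at(const struct iovec *iov, unsigned long nr_segs,
                     size_t dvbo)
{
        size_t off = 0;
        unsigned long i;

        for (i = 0; i < nr_segs; i++) {
                if (off <= dvbo && dvbo < off + iov[i].iov_len)
                        return (char *)iov[i].iov_base + (dvbo - off);
                off += iov[i].iov_len;
        }
        return NULL;
}

int main(void)
{
        char a[100], b[100];
        struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };

        /* byte 150 of the transfer lives 50 bytes into the second buffer */
        printf("%td\n", (char *)addr_at(iov, 2, 150) - b); /* prints 50 */
        return 0;
}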
87237 +int ntfs_set_size(struct inode *inode, u64 new_size)
87238 +{
87239 +       struct super_block *sb = inode->i_sb;
87240 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
87241 +       struct ntfs_inode *ni = ntfs_i(inode);
87242 +       int err;
87244 +       /* Check for maximum file size */
87245 +       if (is_sparsed(ni) || is_compressed(ni)) {
87246 +               if (new_size > sbi->maxbytes_sparse) {
87247 +                       err = -EFBIG;
87248 +                       goto out;
87249 +               }
87250 +       } else if (new_size > sbi->maxbytes) {
87251 +               err = -EFBIG;
87252 +               goto out;
87253 +       }
87255 +       ni_lock(ni);
87256 +       down_write(&ni->file.run_lock);
87258 +       err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
87259 +                           &ni->i_valid, true, NULL);
87261 +       up_write(&ni->file.run_lock);
87262 +       ni_unlock(ni);
87264 +       mark_inode_dirty(inode);
87266 +out:
87267 +       return err;
87268 +}
87270 +static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
87271 +{
87272 +       struct address_space *mapping = page->mapping;
87273 +       struct inode *inode = mapping->host;
87274 +       struct ntfs_inode *ni = ntfs_i(inode);
87275 +       int err;
87277 +       if (is_resident(ni)) {
87278 +               ni_lock(ni);
87279 +               err = attr_data_write_resident(ni, page);
87280 +               ni_unlock(ni);
87281 +               if (err != E_NTFS_NONRESIDENT) {
87282 +                       unlock_page(page);
87283 +                       return err;
87284 +               }
87285 +       }
87287 +       return block_write_full_page(page, ntfs_get_block, wbc);
87288 +}
87290 +static int ntfs_writepages(struct address_space *mapping,
87291 +                          struct writeback_control *wbc)
87292 +{
87293 +       struct inode *inode = mapping->host;
87294 +       struct ntfs_inode *ni = ntfs_i(inode);
87295 +       /* Redirect the call to 'ntfs_writepage' for resident files. */
87296 +       get_block_t *get_block = is_resident(ni) ? NULL : &ntfs_get_block;
87298 +       return mpage_writepages(mapping, wbc, get_block);
87299 +}
87301 +static int ntfs_get_block_write_begin(struct inode *inode, sector_t vbn,
87302 +                                     struct buffer_head *bh_result, int create)
87303 +{
87304 +       return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits,
87305 +                                 bh_result, create, GET_BLOCK_WRITE_BEGIN);
87306 +}
87308 +static int ntfs_write_begin(struct file *file, struct address_space *mapping,
87309 +                           loff_t pos, u32 len, u32 flags, struct page **pagep,
87310 +                           void **fsdata)
87311 +{
87312 +       int err;
87313 +       struct inode *inode = mapping->host;
87314 +       struct ntfs_inode *ni = ntfs_i(inode);
87316 +       *pagep = NULL;
87317 +       if (is_resident(ni)) {
87318 +               struct page *page = grab_cache_page_write_begin(
87319 +                       mapping, pos >> PAGE_SHIFT, flags);
87321 +               if (!page) {
87322 +                       err = -ENOMEM;
87323 +                       goto out;
87324 +               }
87326 +               ni_lock(ni);
87327 +               err = attr_data_read_resident(ni, page);
87328 +               ni_unlock(ni);
87330 +               if (!err) {
87331 +                       *pagep = page;
87332 +                       goto out;
87333 +               }
87334 +               unlock_page(page);
87335 +               put_page(page);
87337 +               if (err != E_NTFS_NONRESIDENT)
87338 +                       goto out;
87339 +       }
87341 +       err = block_write_begin(mapping, pos, len, flags, pagep,
87342 +                               ntfs_get_block_write_begin);
87344 +out:
87345 +       return err;
87346 +}
87348 +/* address_space_operations::write_end */
87349 +static int ntfs_write_end(struct file *file, struct address_space *mapping,
87350 +                         loff_t pos, u32 len, u32 copied, struct page *page,
87351 +                         void *fsdata)
87352 +{
87354 +       struct inode *inode = mapping->host;
87355 +       struct ntfs_inode *ni = ntfs_i(inode);
87356 +       u64 valid = ni->i_valid;
87357 +       bool dirty = false;
87358 +       int err;
87360 +       if (is_resident(ni)) {
87361 +               ni_lock(ni);
87362 +               err = attr_data_write_resident(ni, page);
87363 +               ni_unlock(ni);
87364 +               if (!err) {
87365 +                       dirty = true;
87366 +                       /* Clear any buffers in the page. */
87367 +                       if (page_has_buffers(page)) {
87368 +                               struct buffer_head *head, *bh;
87370 +                               bh = head = page_buffers(page);
87371 +                               do {
87372 +                                       clear_buffer_dirty(bh);
87373 +                                       clear_buffer_mapped(bh);
87374 +                                       set_buffer_uptodate(bh);
87375 +                               } while (head != (bh = bh->b_this_page));
87376 +                       }
87377 +                       SetPageUptodate(page);
87378 +                       err = copied;
87379 +               }
87380 +               unlock_page(page);
87381 +               put_page(page);
87382 +       } else {
87383 +               err = generic_write_end(file, mapping, pos, len, copied, page,
87384 +                                       fsdata);
87385 +       }
87387 +       if (err >= 0) {
87388 +               if (!(ni->std_fa & FILE_ATTRIBUTE_ARCHIVE)) {
87389 +                       inode->i_ctime = inode->i_mtime = current_time(inode);
87390 +                       ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
87391 +                       dirty = true;
87392 +               }
87394 +               if (valid != ni->i_valid) {
87395 +                       /* ni->i_valid is changed in ntfs_get_block_vbo */
87396 +                       dirty = true;
87397 +               }
87399 +               if (dirty)
87400 +                       mark_inode_dirty(inode);
87401 +       }
87403 +       return err;
87404 +}
87406 +int reset_log_file(struct inode *inode)
87407 +{
87408 +       int err;
87409 +       loff_t pos = 0;
87410 +       u32 log_size = inode->i_size;
87411 +       struct address_space *mapping = inode->i_mapping;
87413 +       for (;;) {
87414 +               u32 len;
87415 +               void *kaddr;
87416 +               struct page *page;
87418 +               len = pos + PAGE_SIZE > log_size ? (log_size - pos) : PAGE_SIZE;
87420 +               err = block_write_begin(mapping, pos, len, 0, &page,
87421 +                                       ntfs_get_block_write_begin);
87422 +               if (err)
87423 +                       goto out;
87425 +               kaddr = kmap_atomic(page);
87426 +               memset(kaddr, -1, len);
87427 +               kunmap_atomic(kaddr);
87428 +               flush_dcache_page(page);
87430 +               err = block_write_end(NULL, mapping, pos, len, len, page, NULL);
87431 +               if (err < 0)
87432 +                       goto out;
87433 +               pos += len;
87435 +               if (pos >= log_size)
87436 +                       break;
87437 +               balance_dirty_pages_ratelimited(mapping);
87438 +       }
87439 +out:
87440 +       mark_inode_dirty_sync(inode);
87442 +       return err;
87443 +}
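reset_log_file rewrites the whole $LogFile with 0xff bytes in page-sized steps, taking a short final chunk when the size is not page aligned. The chunking on its own, sketched in user space (SKETCH_PAGE_SIZE stands in for PAGE_SIZE):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SKETCH_PAGE_SIZE 4096u

int main(void)
{
        uint32_t log_size = 10000;      /* not page aligned on purpose */
        uint32_t pos = 0, chunks = 0;
        static uint8_t page[SKETCH_PAGE_SIZE];

        for (;;) {
                uint32_t len = pos + SKETCH_PAGE_SIZE > log_size
                                       ? log_size - pos
                                       : SKETCH_PAGE_SIZE;

                memset(page, -1, len);  /* the log is reset to all-ones */
                pos += len;
                chunks++;
                if (pos >= log_size)
                        break;
        }
        printf("%u chunks, last ends at %u\n", chunks, pos);
        return 0;
}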
87445 +int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc)
87446 +{
87447 +       return _ni_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
87448 +}
87450 +int ntfs_sync_inode(struct inode *inode)
87451 +{
87452 +       return _ni_write_inode(inode, 1);
87453 +}
87455 +/*
87456 + * Helper function for ntfs_flush_inodes. This writes both the inode
87457 + * and the file data blocks, waiting for in-flight data blocks before
87458 + * the start of the call. It does not wait for any I/O started
87459 + * during the call.
87460 + */
87461 +static int writeback_inode(struct inode *inode)
87462 +{
87463 +       int ret = sync_inode_metadata(inode, 0);
87465 +       if (!ret)
87466 +               ret = filemap_fdatawrite(inode->i_mapping);
87467 +       return ret;
87468 +}
87470 +/*
87471 + * Write data and metadata corresponding to i1 and i2. The I/O is
87472 + * started but we do not wait for any of it to finish.
87473 + *
87474 + * filemap_flush is used for the block device, so if there is a dirty
87475 + * page for a block already in flight, we will not wait and start the
87476 + * I/O over again.
87477 + */
87478 +int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
87479 +                     struct inode *i2)
87480 +{
87481 +       int ret = 0;
87483 +       if (i1)
87484 +               ret = writeback_inode(i1);
87485 +       if (!ret && i2)
87486 +               ret = writeback_inode(i2);
87487 +       if (!ret)
87488 +               ret = filemap_flush(sb->s_bdev->bd_inode->i_mapping);
87489 +       return ret;
87490 +}
87492 +int inode_write_data(struct inode *inode, const void *data, size_t bytes)
87493 +{
87494 +       pgoff_t idx;
87496 +       /* Write non-resident data. */
87497 +       for (idx = 0; bytes; idx++) {
87498 +               size_t op = bytes > PAGE_SIZE ? PAGE_SIZE : bytes;
87499 +               struct page *page = ntfs_map_page(inode->i_mapping, idx);
87501 +               if (IS_ERR(page))
87502 +                       return PTR_ERR(page);
87504 +               lock_page(page);
87505 +               WARN_ON(!PageUptodate(page));
87506 +               ClearPageUptodate(page);
87508 +               memcpy(page_address(page), data, op);
87510 +               flush_dcache_page(page);
87511 +               SetPageUptodate(page);
87512 +               unlock_page(page);
87514 +               ntfs_unmap_page(page);
87516 +               bytes -= op;
87517 +               data = Add2Ptr(data, PAGE_SIZE);
87518 +       }
87519 +       return 0;
87520 +}
87522 +/*
87523 + * Number of bytes needed for REPARSE_DATA_BUFFER(IO_REPARSE_TAG_SYMLINK)
87524 + * for a Unicode string of 'uni_len' length.
87525 + */
87526 +static inline u32 ntfs_reparse_bytes(u32 uni_len)
87527 +{
87528 +       /* header + unicode string + decorated unicode string */
87529 +       return sizeof(short) * (2 * uni_len + 4) +
87530 +              offsetof(struct REPARSE_DATA_BUFFER,
87531 +                       SymbolicLinkReparseBuffer.PathBuffer);
87532 +}
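Worked through, the formula counts two UTF-16 copies of the name (PrintName plus a SubstituteName that gains a four-character prefix) on top of the fixed reparse header. A stand-alone sketch; REPARSE_HDR is an assumed stand-in for the offsetof() term above:

#include <stdint.h>
#include <stdio.h>

#define REPARSE_HDR 20u /* assumed header size, for illustration only */

/* mirrors ntfs_reparse_bytes above */
static uint32_t reparse_bytes(uint32_t uni_len)
{
        /* PrintName + SubstituteName share the text; the substitute copy
         * is "decorated" with a 4-char "\??\" prefix, hence the +4 */
        return sizeof(short) * (2 * uni_len + 4) + REPARSE_HDR;
}

int main(void)
{
        /* a 10-character link name: 2*(2*10+4) + 20 = 68 bytes */
        printf("%u\n", reparse_bytes(10));
        return 0;
}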
87534 +static struct REPARSE_DATA_BUFFER *
87535 +ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname,
87536 +                          u32 size, u16 *nsize)
87537 +{
87538 +       int i, err;
87539 +       struct REPARSE_DATA_BUFFER *rp;
87540 +       __le16 *rp_name;
87541 +       typeof(rp->SymbolicLinkReparseBuffer) *rs;
87543 +       rp = ntfs_zalloc(ntfs_reparse_bytes(2 * size + 2));
87544 +       if (!rp)
87545 +               return ERR_PTR(-ENOMEM);
87547 +       rs = &rp->SymbolicLinkReparseBuffer;
87548 +       rp_name = rs->PathBuffer;
87550 +       /* Convert link name to utf16 */
87551 +       err = ntfs_nls_to_utf16(sbi, symname, size,
87552 +                               (struct cpu_str *)(rp_name - 1), 2 * size,
87553 +                               UTF16_LITTLE_ENDIAN);
87554 +       if (err < 0)
87555 +               goto out;
87557 +       /* err = the length of the symlink's Unicode name. */
87558 +       *nsize = ntfs_reparse_bytes(err);
87560 +       if (*nsize > sbi->reparse.max_size) {
87561 +               err = -EFBIG;
87562 +               goto out;
87563 +       }
87565 +       /* translate linux '/' into windows '\' */
87566 +       for (i = 0; i < err; i++) {
87567 +               if (rp_name[i] == cpu_to_le16('/'))
87568 +                       rp_name[i] = cpu_to_le16('\\');
87569 +       }
87571 +       rp->ReparseTag = IO_REPARSE_TAG_SYMLINK;
87572 +       rp->ReparseDataLength =
87573 +               cpu_to_le16(*nsize - offsetof(struct REPARSE_DATA_BUFFER,
87574 +                                             SymbolicLinkReparseBuffer));
87576 +       /* PrintName + SubstituteName */
87577 +       rs->SubstituteNameOffset = cpu_to_le16(sizeof(short) * err);
87578 +       rs->SubstituteNameLength = cpu_to_le16(sizeof(short) * err + 8);
87579 +       rs->PrintNameLength = rs->SubstituteNameOffset;
87581 +       /*
87582 +        * TODO: use a relative path if possible, to allow Windows to parse it.
87583 +        * 0 - absolute path, 1 - relative path (SYMLINK_FLAG_RELATIVE).
87584 +        */
87585 +       rs->Flags = 0;
87587 +       memmove(rp_name + err + 4, rp_name, sizeof(short) * err);
87589 +       /* decorate SubstituteName */
87590 +       rp_name += err;
87591 +       rp_name[0] = cpu_to_le16('\\');
87592 +       rp_name[1] = cpu_to_le16('?');
87593 +       rp_name[2] = cpu_to_le16('?');
87594 +       rp_name[3] = cpu_to_le16('\\');
87596 +       return rp;
87597 +out:
87598 +       ntfs_free(rp);
87599 +       return ERR_PTR(err);
87600 +}
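Besides sizing, the construction above massages the name twice: every '/' becomes '\', and the SubstituteName copy is decorated with the NT-internal \??\ prefix while PrintName keeps the plain form. A narrow-string sketch of that massaging (the driver does it on UTF-16 in place):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char name[64] = "dir/target";
        char subst[64];
        size_t i, len = strlen(name);

        /* translate Linux '/' into Windows '\' */
        for (i = 0; i < len; i++)
                if (name[i] == '/')
                        name[i] = '\\';

        /* decorated SubstituteName; PrintName keeps the undecorated form */
        snprintf(subst, sizeof(subst), "\\??\\%s", name);
        printf("print=%s subst=%s\n", name, subst);
        return 0;
}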
87602 +struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
87603 +                               struct inode *dir, struct dentry *dentry,
87604 +                               const struct cpu_str *uni, umode_t mode,
87605 +                               dev_t dev, const char *symname, u32 size,
87606 +                               int excl, struct ntfs_fnd *fnd)
87607 +{
87608 +       int err;
87609 +       struct super_block *sb = dir->i_sb;
87610 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
87611 +       const struct qstr *name = &dentry->d_name;
87612 +       CLST ino = 0;
87613 +       struct ntfs_inode *dir_ni = ntfs_i(dir);
87614 +       struct ntfs_inode *ni = NULL;
87615 +       struct inode *inode = NULL;
87616 +       struct ATTRIB *attr;
87617 +       struct ATTR_STD_INFO5 *std5;
87618 +       struct ATTR_FILE_NAME *fname;
87619 +       struct MFT_REC *rec;
87620 +       u32 asize, dsize, sd_size;
87621 +       enum FILE_ATTRIBUTE fa;
87622 +       __le32 security_id = SECURITY_ID_INVALID;
87623 +       CLST vcn;
87624 +       const void *sd;
87625 +       u16 t16, nsize = 0, aid = 0;
87626 +       struct INDEX_ROOT *root, *dir_root;
87627 +       struct NTFS_DE *e, *new_de = NULL;
87628 +       struct REPARSE_DATA_BUFFER *rp = NULL;
87629 +       bool is_dir = S_ISDIR(mode);
87630 +       bool is_link = S_ISLNK(mode);
87631 +       bool rp_inserted = false;
87632 +       bool is_sp = S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode) ||
87633 +                    S_ISSOCK(mode);
87635 +       if (is_sp)
87636 +               return ERR_PTR(-EOPNOTSUPP);
87638 +       dir_root = indx_get_root(&dir_ni->dir, dir_ni, NULL, NULL);
87639 +       if (!dir_root)
87640 +               return ERR_PTR(-EINVAL);
87642 +       if (is_dir) {
87643 +               /* use parent's directory attributes */
87644 +               fa = dir_ni->std_fa | FILE_ATTRIBUTE_DIRECTORY |
87645 +                    FILE_ATTRIBUTE_ARCHIVE;
87646 +               /*
87647 +                * By default a child directory inherits its parent's attributes;
87648 +                * the root directory is hidden + system.
87649 +                * Make an exception for children of the root.
87650 +                */
87651 +               if (dir->i_ino == MFT_REC_ROOT)
87652 +                       fa &= ~(FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM);
87653 +       } else if (is_link) {
87654 +               /* Ideally a link should be the same type (file/dir) as its target. */
87655 +               fa = FILE_ATTRIBUTE_REPARSE_POINT;
87657 +               /*
87658 +                * Linux has dirs/files/symlinks and so on.
87659 +                * NTFS symlinks are "dir + reparse" or "file + reparse".
87660 +                * It would be good to create:
87661 +                * dir + reparse if 'symname' points to a directory,
87662 +                * or
87663 +                * file + reparse if 'symname' points to a file.
87664 +                * Unfortunately kern_path hangs if symname contains 'dir'.
87665 +                */
87667 +               /*
87668 +                *      struct path path;
87669 +                *
87670 +                *      if (!kern_path(symname, LOOKUP_FOLLOW, &path)){
87671 +                *              struct inode *target = d_inode(path.dentry);
87672 +                *
87673 +                *              if (S_ISDIR(target->i_mode))
87674 +                *                      fa |= FILE_ATTRIBUTE_DIRECTORY;
87675 +                *              // if ( target->i_sb == sb ){
87676 +                *              //      use relative path?
87677 +                *              // }
87678 +                *              path_put(&path);
87679 +                *      }
87680 +                */
87681 +       } else if (sbi->options.sparse) {
87682 +               /* Sparse regular file, because of the 'sparse' mount option. */
87683 +               fa = FILE_ATTRIBUTE_SPARSE_FILE | FILE_ATTRIBUTE_ARCHIVE;
87684 +       } else if (dir_ni->std_fa & FILE_ATTRIBUTE_COMPRESSED) {
87685 +               /* compressed regular file, if parent is compressed */
87686 +               fa = FILE_ATTRIBUTE_COMPRESSED | FILE_ATTRIBUTE_ARCHIVE;
87687 +       } else {
87688 +               /* regular file, default attributes */
87689 +               fa = FILE_ATTRIBUTE_ARCHIVE;
87690 +       }
87692 +       if (!(mode & 0222))
87693 +               fa |= FILE_ATTRIBUTE_READONLY;
87695 +       /* allocate PATH_MAX bytes */
87696 +       new_de = __getname();
87697 +       if (!new_de) {
87698 +               err = -ENOMEM;
87699 +               goto out1;
87700 +       }
87702 +       /* Mark rw ntfs as dirty. It will be cleared at umount. */
87703 +       ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
87705 +       /* Step 1: allocate and fill new mft record */
87706 +       err = ntfs_look_free_mft(sbi, &ino, false, NULL, NULL);
87707 +       if (err)
87708 +               goto out2;
87710 +       ni = ntfs_new_inode(sbi, ino, fa & FILE_ATTRIBUTE_DIRECTORY);
87711 +       if (IS_ERR(ni)) {
87712 +               err = PTR_ERR(ni);
87713 +               ni = NULL;
87714 +               goto out3;
87715 +       }
87716 +       inode = &ni->vfs_inode;
87718 +       inode->i_atime = inode->i_mtime = inode->i_ctime = ni->i_crtime =
87719 +               current_time(inode);
87721 +       rec = ni->mi.mrec;
87722 +       rec->hard_links = cpu_to_le16(1);
87723 +       attr = Add2Ptr(rec, le16_to_cpu(rec->attr_off));
87725 +       /* Get default security id */
87726 +       sd = s_default_security;
87727 +       sd_size = sizeof(s_default_security);
87729 +       if (is_ntfs3(sbi)) {
87730 +               security_id = dir_ni->std_security_id;
87731 +               if (le32_to_cpu(security_id) < SECURITY_ID_FIRST) {
87732 +                       security_id = sbi->security.def_security_id;
87734 +                       if (security_id == SECURITY_ID_INVALID &&
87735 +                           !ntfs_insert_security(sbi, sd, sd_size,
87736 +                                                 &security_id, NULL))
87737 +                               sbi->security.def_security_id = security_id;
87738 +               }
87739 +       }
87741 +       /* Insert standard info */
87742 +       std5 = Add2Ptr(attr, SIZEOF_RESIDENT);
87744 +       if (security_id == SECURITY_ID_INVALID) {
87745 +               dsize = sizeof(struct ATTR_STD_INFO);
87746 +       } else {
87747 +               dsize = sizeof(struct ATTR_STD_INFO5);
87748 +               std5->security_id = security_id;
87749 +               ni->std_security_id = security_id;
87750 +       }
87751 +       asize = SIZEOF_RESIDENT + dsize;
87753 +       attr->type = ATTR_STD;
87754 +       attr->size = cpu_to_le32(asize);
87755 +       attr->id = cpu_to_le16(aid++);
87756 +       attr->res.data_off = SIZEOF_RESIDENT_LE;
87757 +       attr->res.data_size = cpu_to_le32(dsize);
87759 +       std5->cr_time = std5->m_time = std5->c_time = std5->a_time =
87760 +               kernel2nt(&inode->i_atime);
87762 +       ni->std_fa = fa;
87763 +       std5->fa = fa;
87765 +       attr = Add2Ptr(attr, asize);
87767 +       /* Insert file name */
87768 +       err = fill_name_de(sbi, new_de, name, uni);
87769 +       if (err)
87770 +               goto out4;
87772 +       mi_get_ref(&ni->mi, &new_de->ref);
87774 +       fname = (struct ATTR_FILE_NAME *)(new_de + 1);
87775 +       mi_get_ref(&dir_ni->mi, &fname->home);
87776 +       fname->dup.cr_time = fname->dup.m_time = fname->dup.c_time =
87777 +               fname->dup.a_time = std5->cr_time;
87778 +       fname->dup.alloc_size = fname->dup.data_size = 0;
87779 +       fname->dup.fa = std5->fa;
87780 +       fname->dup.ea_size = fname->dup.reparse = 0;
87782 +       dsize = le16_to_cpu(new_de->key_size);
87783 +       asize = QuadAlign(SIZEOF_RESIDENT + dsize);
87785 +       attr->type = ATTR_NAME;
87786 +       attr->size = cpu_to_le32(asize);
87787 +       attr->res.data_off = SIZEOF_RESIDENT_LE;
87788 +       attr->res.flags = RESIDENT_FLAG_INDEXED;
87789 +       attr->id = cpu_to_le16(aid++);
87790 +       attr->res.data_size = cpu_to_le32(dsize);
87791 +       memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), fname, dsize);
87793 +       attr = Add2Ptr(attr, asize);
87795 +       if (security_id == SECURITY_ID_INVALID) {
87796 +               /* Insert security attribute */
87797 +               asize = SIZEOF_RESIDENT + QuadAlign(sd_size);
87799 +               attr->type = ATTR_SECURE;
87800 +               attr->size = cpu_to_le32(asize);
87801 +               attr->id = cpu_to_le16(aid++);
87802 +               attr->res.data_off = SIZEOF_RESIDENT_LE;
87803 +               attr->res.data_size = cpu_to_le32(sd_size);
87804 +               memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), sd, sd_size);
87806 +               attr = Add2Ptr(attr, asize);
87807 +       }
87809 +       if (fa & FILE_ATTRIBUTE_DIRECTORY) {
87810 +               /*
87811 +                * regular directory or symlink to directory
87812 +                * Create root attribute
87813 +                */
87814 +               dsize = sizeof(struct INDEX_ROOT) + sizeof(struct NTFS_DE);
87815 +               asize = sizeof(I30_NAME) + SIZEOF_RESIDENT + dsize;
87817 +               attr->type = ATTR_ROOT;
87818 +               attr->size = cpu_to_le32(asize);
87819 +               attr->id = cpu_to_le16(aid++);
87821 +               attr->name_len = ARRAY_SIZE(I30_NAME);
87822 +               attr->name_off = SIZEOF_RESIDENT_LE;
87823 +               attr->res.data_off =
87824 +                       cpu_to_le16(sizeof(I30_NAME) + SIZEOF_RESIDENT);
87825 +               attr->res.data_size = cpu_to_le32(dsize);
87826 +               memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), I30_NAME,
87827 +                      sizeof(I30_NAME));
87829 +               root = Add2Ptr(attr, sizeof(I30_NAME) + SIZEOF_RESIDENT);
87830 +               memcpy(root, dir_root, offsetof(struct INDEX_ROOT, ihdr));
87831 +               root->ihdr.de_off =
87832 +                       cpu_to_le32(sizeof(struct INDEX_HDR)); // 0x10
87833 +               root->ihdr.used = cpu_to_le32(sizeof(struct INDEX_HDR) +
87834 +                                             sizeof(struct NTFS_DE));
87835 +               root->ihdr.total = root->ihdr.used;
87837 +               e = Add2Ptr(root, sizeof(struct INDEX_ROOT));
87838 +               e->size = cpu_to_le16(sizeof(struct NTFS_DE));
87839 +               e->flags = NTFS_IE_LAST;
87840 +       } else if (is_link) {
87841 +               /*
87842 +                * symlink to file
87843 +                * Create empty resident data attribute
87844 +                */
87845 +               asize = SIZEOF_RESIDENT;
87847 +               /* insert empty ATTR_DATA */
87848 +               attr->type = ATTR_DATA;
87849 +               attr->size = cpu_to_le32(SIZEOF_RESIDENT);
87850 +               attr->id = cpu_to_le16(aid++);
87851 +               attr->name_off = SIZEOF_RESIDENT_LE;
87852 +               attr->res.data_off = SIZEOF_RESIDENT_LE;
87853 +       } else {
87854 +               /*
87855 +                * regular file
87856 +                */
87857 +               attr->type = ATTR_DATA;
87858 +               attr->id = cpu_to_le16(aid++);
87859 +               /* Create empty non resident data attribute */
87860 +               attr->non_res = 1;
87861 +               attr->nres.evcn = cpu_to_le64(-1ll);
87862 +               if (fa & FILE_ATTRIBUTE_SPARSE_FILE) {
87863 +                       attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
87864 +                       attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
87865 +                       attr->flags = ATTR_FLAG_SPARSED;
87866 +                       asize = SIZEOF_NONRESIDENT_EX + 8;
87867 +               } else if (fa & FILE_ATTRIBUTE_COMPRESSED) {
87868 +                       attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
87869 +                       attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
87870 +                       attr->flags = ATTR_FLAG_COMPRESSED;
87871 +                       attr->nres.c_unit = COMPRESSION_UNIT;
87872 +                       asize = SIZEOF_NONRESIDENT_EX + 8;
87873 +               } else {
87874 +                       attr->size = cpu_to_le32(SIZEOF_NONRESIDENT + 8);
87875 +                       attr->name_off = SIZEOF_NONRESIDENT_LE;
87876 +                       asize = SIZEOF_NONRESIDENT + 8;
87877 +               }
87878 +               attr->nres.run_off = attr->name_off;
87879 +       }
87881 +       if (is_dir) {
87882 +               ni->ni_flags |= NI_FLAG_DIR;
87883 +               err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
87884 +               if (err)
87885 +                       goto out4;
87886 +       } else if (is_link) {
87887 +               rp = ntfs_create_reparse_buffer(sbi, symname, size, &nsize);
87889 +               if (IS_ERR(rp)) {
87890 +                       err = PTR_ERR(rp);
87891 +                       rp = NULL;
87892 +                       goto out4;
87893 +               }
87895 +               /*
87896 +                * Insert ATTR_REPARSE
87897 +                */
87898 +               attr = Add2Ptr(attr, asize);
87899 +               attr->type = ATTR_REPARSE;
87900 +               attr->id = cpu_to_le16(aid++);
87902 +               /* resident or non resident? */
87903 +               asize = QuadAlign(SIZEOF_RESIDENT + nsize);
87904 +               t16 = PtrOffset(rec, attr);
87906 +               if (asize + t16 + 8 > sbi->record_size) {
87907 +                       CLST alen;
87908 +                       CLST clst = bytes_to_cluster(sbi, nsize);
87910 +                       /* Bytes available for packed runs. */
87911 +                       t16 = sbi->record_size - t16 - SIZEOF_NONRESIDENT;
87913 +                       attr->non_res = 1;
87914 +                       attr->nres.evcn = cpu_to_le64(clst - 1);
87915 +                       attr->name_off = SIZEOF_NONRESIDENT_LE;
87916 +                       attr->nres.run_off = attr->name_off;
87917 +                       attr->nres.data_size = cpu_to_le64(nsize);
87918 +                       attr->nres.valid_size = attr->nres.data_size;
87919 +                       attr->nres.alloc_size =
87920 +                               cpu_to_le64(ntfs_up_cluster(sbi, nsize));
87922 +                       err = attr_allocate_clusters(sbi, &ni->file.run, 0, 0,
87923 +                                                    clst, NULL, 0, &alen, 0,
87924 +                                                    NULL);
87925 +                       if (err)
87926 +                               goto out5;
87928 +                       err = run_pack(&ni->file.run, 0, clst,
87929 +                                      Add2Ptr(attr, SIZEOF_NONRESIDENT), t16,
87930 +                                      &vcn);
87931 +                       if (err < 0)
87932 +                               goto out5;
87934 +                       if (vcn != clst) {
87935 +                               err = -EINVAL;
87936 +                               goto out5;
87937 +                       }
87939 +                       asize = SIZEOF_NONRESIDENT + QuadAlign(err);
87940 +                       inode->i_size = nsize;
87941 +               } else {
87942 +                       attr->res.data_off = SIZEOF_RESIDENT_LE;
87943 +                       attr->res.data_size = cpu_to_le32(nsize);
87944 +                       memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), rp, nsize);
87945 +                       inode->i_size = nsize;
87946 +                       nsize = 0;
87947 +               }
87949 +               attr->size = cpu_to_le32(asize);
87951 +               err = ntfs_insert_reparse(sbi, IO_REPARSE_TAG_SYMLINK,
87952 +                                         &new_de->ref);
87953 +               if (err)
87954 +                       goto out5;
87956 +               rp_inserted = true;
87957 +       }
87959 +       attr = Add2Ptr(attr, asize);
87960 +       attr->type = ATTR_END;
87962 +       rec->used = cpu_to_le32(PtrOffset(rec, attr) + 8);
87963 +       rec->next_attr_id = cpu_to_le16(aid);
87965 +       /* Step 2: Add the new name to the directory index */
87966 +       err = indx_insert_entry(&dir_ni->dir, dir_ni, new_de, sbi, fnd);
87967 +       if (err)
87968 +               goto out6;
87970 +       /* Update current directory record */
87971 +       mark_inode_dirty(dir);
87973 +       /* Fill VFS inode fields */
87974 +       inode->i_uid = sbi->options.uid ? sbi->options.fs_uid : current_fsuid();
87975 +       inode->i_gid = sbi->options.gid          ? sbi->options.fs_gid
87976 +                      : (dir->i_mode & S_ISGID) ? dir->i_gid
87977 +                                                : current_fsgid();
87978 +       inode->i_generation = le16_to_cpu(rec->seq);
87980 +       dir->i_mtime = dir->i_ctime = inode->i_atime;
87982 +       if (is_dir) {
87983 +               if (dir->i_mode & S_ISGID)
87984 +                       mode |= S_ISGID;
87985 +               inode->i_op = &ntfs_dir_inode_operations;
87986 +               inode->i_fop = &ntfs_dir_operations;
87987 +       } else if (is_link) {
87988 +               inode->i_op = &ntfs_link_inode_operations;
87989 +               inode->i_fop = NULL;
87990 +               inode->i_mapping->a_ops = &ntfs_aops;
87991 +       } else {
87992 +               inode->i_op = &ntfs_file_inode_operations;
87993 +               inode->i_fop = &ntfs_file_operations;
87994 +               inode->i_mapping->a_ops =
87995 +                       is_compressed(ni) ? &ntfs_aops_cmpr : &ntfs_aops;
87996 +               init_rwsem(&ni->file.run_lock);
87997 +       }
87999 +       inode->i_mode = mode;
88001 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
88002 +       if (!is_link && (sb->s_flags & SB_POSIXACL)) {
88003 +               err = ntfs_init_acl(mnt_userns, inode, dir);
88004 +               if (err)
88005 +                       goto out6;
88006 +       } else
88007 +#endif
88008 +       {
88009 +               inode->i_flags |= S_NOSEC;
88010 +       }
88012 +       /* Write non-resident data */
88013 +       if (nsize) {
88014 +               err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rp, nsize);
88015 +               if (err)
88016 +                       goto out7;
88017 +       }
88019 +       /* call 'd_instantiate' after inode->i_op is set but before finish_open */
88020 +       d_instantiate(dentry, inode);
88022 +       mark_inode_dirty(inode);
88023 +       mark_inode_dirty(dir);
88025 +       /* normal exit */
88026 +       goto out2;
88028 +out7:
88030 +       /* undo 'indx_insert_entry' */
88031 +       indx_delete_entry(&dir_ni->dir, dir_ni, new_de + 1,
88032 +                         le16_to_cpu(new_de->key_size), sbi);
88033 +out6:
88034 +       if (rp_inserted)
88035 +               ntfs_remove_reparse(sbi, IO_REPARSE_TAG_SYMLINK, &new_de->ref);
88037 +out5:
88038 +       if (is_dir || run_is_empty(&ni->file.run))
88039 +               goto out4;
88041 +       run_deallocate(sbi, &ni->file.run, false);
88043 +out4:
88044 +       clear_rec_inuse(rec);
88045 +       clear_nlink(inode);
88046 +       ni->mi.dirty = false;
88047 +       discard_new_inode(inode);
88048 +out3:
88049 +       ntfs_mark_rec_free(sbi, ino);
88051 +out2:
88052 +       __putname(new_de);
88053 +       ntfs_free(rp);
88055 +out1:
88056 +       if (err)
88057 +               return ERR_PTR(err);
88059 +       unlock_new_inode(inode);
88061 +       return inode;
88062 +}
88064 +int ntfs_link_inode(struct inode *inode, struct dentry *dentry)
88065 +{
88066 +       int err;
88067 +       struct inode *dir = d_inode(dentry->d_parent);
88068 +       struct ntfs_inode *dir_ni = ntfs_i(dir);
88069 +       struct ntfs_inode *ni = ntfs_i(inode);
88070 +       struct super_block *sb = inode->i_sb;
88071 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
88072 +       const struct qstr *name = &dentry->d_name;
88073 +       struct NTFS_DE *new_de = NULL;
88074 +       struct ATTR_FILE_NAME *fname;
88075 +       struct ATTRIB *attr;
88076 +       u16 key_size;
88077 +       struct INDEX_ROOT *dir_root;
88079 +       dir_root = indx_get_root(&dir_ni->dir, dir_ni, NULL, NULL);
88080 +       if (!dir_root)
88081 +               return -EINVAL;
88083 +       /* allocate PATH_MAX bytes */
88084 +       new_de = __getname();
88085 +       if (!new_de)
88086 +               return -ENOMEM;
88088 +       /* Mark rw ntfs as dirty. It will be cleared at umount. */
88089 +       ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);
88091 +       /* Insert file name */
88092 +       err = fill_name_de(sbi, new_de, name, NULL);
88093 +       if (err)
88094 +               goto out;
88096 +       key_size = le16_to_cpu(new_de->key_size);
88097 +       err = ni_insert_resident(ni, key_size, ATTR_NAME, NULL, 0, &attr, NULL);
88098 +       if (err)
88099 +               goto out;
88101 +       mi_get_ref(&ni->mi, &new_de->ref);
88103 +       fname = (struct ATTR_FILE_NAME *)(new_de + 1);
88104 +       mi_get_ref(&dir_ni->mi, &fname->home);
88105 +       fname->dup.cr_time = fname->dup.m_time = fname->dup.c_time =
88106 +               fname->dup.a_time = kernel2nt(&inode->i_ctime);
88107 +       fname->dup.alloc_size = fname->dup.data_size = 0;
88108 +       fname->dup.fa = ni->std_fa;
88109 +       fname->dup.ea_size = fname->dup.reparse = 0;
88111 +       memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), fname, key_size);
88113 +       err = indx_insert_entry(&dir_ni->dir, dir_ni, new_de, sbi, NULL);
88114 +       if (err)
88115 +               goto out;
88117 +       le16_add_cpu(&ni->mi.mrec->hard_links, 1);
88118 +       ni->mi.dirty = true;
88120 +out:
88121 +       __putname(new_de);
88122 +       return err;
88123 +}
88125 +/*
88126 + * ntfs_unlink_inode
88127 + *
88128 + * inode_operations::unlink
88129 + * inode_operations::rmdir
88130 + */
88131 +int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry)
88132 +{
88133 +       int err;
88134 +       struct super_block *sb = dir->i_sb;
88135 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
88136 +       struct inode *inode = d_inode(dentry);
88137 +       struct ntfs_inode *ni = ntfs_i(inode);
88138 +       const struct qstr *name = &dentry->d_name;
88139 +       struct ntfs_inode *dir_ni = ntfs_i(dir);
88140 +       struct ntfs_index *indx = &dir_ni->dir;
88141 +       struct cpu_str *uni = NULL;
88142 +       struct ATTR_FILE_NAME *fname;
88143 +       u8 name_type;
88144 +       struct ATTR_LIST_ENTRY *le;
88145 +       struct MFT_REF ref;
88146 +       bool is_dir = S_ISDIR(inode->i_mode);
88147 +       struct INDEX_ROOT *dir_root;
88149 +       dir_root = indx_get_root(indx, dir_ni, NULL, NULL);
88150 +       if (!dir_root)
88151 +               return -EINVAL;
88153 +       ni_lock(ni);
88155 +       if (is_dir && !dir_is_empty(inode)) {
88156 +               err = -ENOTEMPTY;
88157 +               goto out1;
88158 +       }
88160 +       if (ntfs_is_meta_file(sbi, inode->i_ino)) {
88161 +               err = -EINVAL;
88162 +               goto out1;
88163 +       }
88165 +       /* allocate PATH_MAX bytes */
88166 +       uni = __getname();
88167 +       if (!uni) {
88168 +               err = -ENOMEM;
88169 +               goto out1;
88170 +       }
88172 +       /* Convert input string to unicode */
88173 +       err = ntfs_nls_to_utf16(sbi, name->name, name->len, uni, NTFS_NAME_LEN,
88174 +                               UTF16_HOST_ENDIAN);
88175 +       if (err < 0)
88176 +               goto out2;
88178 +       /* Mark rw ntfs as dirty. It will be cleared at umount. */
88179 +       ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
88181 +       /* find name in record */
88182 +       mi_get_ref(&dir_ni->mi, &ref);
88184 +       le = NULL;
88185 +       fname = ni_fname_name(ni, uni, &ref, &le);
88186 +       if (!fname) {
88187 +               err = -ENOENT;
88188 +               goto out3;
88189 +       }
88191 +       name_type = paired_name(fname->type);
88193 +       err = indx_delete_entry(indx, dir_ni, fname, fname_full_size(fname),
88194 +                               sbi);
88195 +       if (err)
88196 +               goto out3;
88198 +       /* Then remove the name from the MFT */
88199 +       ni_remove_attr_le(ni, attr_from_name(fname), le);
88201 +       le16_add_cpu(&ni->mi.mrec->hard_links, -1);
88202 +       ni->mi.dirty = true;
88204 +       if (name_type != FILE_NAME_POSIX) {
88205 +               /* Now delete the paired name by its type */
88206 +               fname = ni_fname_type(ni, name_type, &le);
88207 +               if (fname) {
88208 +                       err = indx_delete_entry(indx, dir_ni, fname,
88209 +                                               fname_full_size(fname), sbi);
88210 +                       if (err)
88211 +                               goto out3;
88213 +                       ni_remove_attr_le(ni, attr_from_name(fname), le);
88215 +                       le16_add_cpu(&ni->mi.mrec->hard_links, -1);
88216 +               }
88217 +       }
88218 +out3:
88219 +       switch (err) {
88220 +       case 0:
88221 +               drop_nlink(inode);
      +               fallthrough;
88222 +       case -ENOTEMPTY:
88223 +       case -ENOSPC:
88224 +       case -EROFS:
88225 +               break;
88226 +       default:
88227 +               make_bad_inode(inode);
88228 +       }
88230 +       dir->i_mtime = dir->i_ctime = current_time(dir);
88231 +       mark_inode_dirty(dir);
88232 +       inode->i_ctime = dir->i_ctime;
88233 +       if (inode->i_nlink)
88234 +               mark_inode_dirty(inode);
88236 +out2:
88237 +       __putname(uni);
88238 +out1:
88239 +       ni_unlock(ni);
88240 +       return err;
88241 +}
88243 +void ntfs_evict_inode(struct inode *inode)
88244 +{
88245 +       truncate_inode_pages_final(&inode->i_data);
88247 +       if (inode->i_nlink)
88248 +               _ni_write_inode(inode, inode_needs_sync(inode));
88250 +       invalidate_inode_buffers(inode);
88251 +       clear_inode(inode);
88253 +       ni_clear(ntfs_i(inode));
88254 +}
88256 +static noinline int ntfs_readlink_hlp(struct inode *inode, char *buffer,
88257 +                                     int buflen)
88258 +{
88259 +       int i, err = 0;
88260 +       struct ntfs_inode *ni = ntfs_i(inode);
88261 +       struct super_block *sb = inode->i_sb;
88262 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
88263 +       u64 i_size = inode->i_size;
88264 +       u16 nlen = 0;
88265 +       void *to_free = NULL;
88266 +       struct REPARSE_DATA_BUFFER *rp;
88267 +       struct le_str *uni;
88268 +       struct ATTRIB *attr;
88270 +       /* Reparse data present. Try to parse it */
88271 +       static_assert(!offsetof(struct REPARSE_DATA_BUFFER, ReparseTag));
88272 +       static_assert(sizeof(u32) == sizeof(rp->ReparseTag));
88274 +       *buffer = 0;
88276 +       /* Read into a temporary buffer */
88277 +       if (i_size > sbi->reparse.max_size || i_size <= sizeof(u32)) {
88278 +               err = -EINVAL;
88279 +               goto out;
88280 +       }
88282 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_REPARSE, NULL, 0, NULL, NULL);
88283 +       if (!attr) {
88284 +               err = -EINVAL;
88285 +               goto out;
88286 +       }
88288 +       if (!attr->non_res) {
88289 +               rp = resident_data_ex(attr, i_size);
88290 +               if (!rp) {
88291 +                       err = -EINVAL;
88292 +                       goto out;
88293 +               }
88294 +       } else {
88295 +               rp = ntfs_malloc(i_size);
88296 +               if (!rp) {
88297 +                       err = -ENOMEM;
88298 +                       goto out;
88299 +               }
88300 +               to_free = rp;
88301 +               err = ntfs_read_run_nb(sbi, &ni->file.run, 0, rp, i_size, NULL);
88302 +               if (err)
88303 +                       goto out;
88304 +       }
88306 +       err = -EINVAL;
88308 +       /* Microsoft Tag */
88309 +       switch (rp->ReparseTag) {
88310 +       case IO_REPARSE_TAG_MOUNT_POINT:
88311 +               /* Mount points and junctions */
88312 +               /* Can we use 'Rp->MountPointReparseBuffer.PrintNameLength'? */
88313 +               if (i_size <= offsetof(struct REPARSE_DATA_BUFFER,
88314 +                                      MountPointReparseBuffer.PathBuffer))
88315 +                       goto out;
88316 +               uni = Add2Ptr(rp,
88317 +                             offsetof(struct REPARSE_DATA_BUFFER,
88318 +                                      MountPointReparseBuffer.PathBuffer) +
88319 +                                     le16_to_cpu(rp->MountPointReparseBuffer
88320 +                                                         .PrintNameOffset) -
88321 +                                     2);
88322 +               nlen = le16_to_cpu(rp->MountPointReparseBuffer.PrintNameLength);
88323 +               break;
88325 +       case IO_REPARSE_TAG_SYMLINK:
88326 +               /* FolderSymbolicLink */
88327 +               /* Can we use 'Rp->SymbolicLinkReparseBuffer.PrintNameLength'? */
88328 +               if (i_size <= offsetof(struct REPARSE_DATA_BUFFER,
88329 +                                      SymbolicLinkReparseBuffer.PathBuffer))
88330 +                       goto out;
88331 +               uni = Add2Ptr(rp,
88332 +                             offsetof(struct REPARSE_DATA_BUFFER,
88333 +                                      SymbolicLinkReparseBuffer.PathBuffer) +
88334 +                                     le16_to_cpu(rp->SymbolicLinkReparseBuffer
88335 +                                                         .PrintNameOffset) -
88336 +                                     2);
88337 +               nlen = le16_to_cpu(
88338 +                       rp->SymbolicLinkReparseBuffer.PrintNameLength);
88339 +               break;
88341 +       case IO_REPARSE_TAG_CLOUD:
88342 +       case IO_REPARSE_TAG_CLOUD_1:
88343 +       case IO_REPARSE_TAG_CLOUD_2:
88344 +       case IO_REPARSE_TAG_CLOUD_3:
88345 +       case IO_REPARSE_TAG_CLOUD_4:
88346 +       case IO_REPARSE_TAG_CLOUD_5:
88347 +       case IO_REPARSE_TAG_CLOUD_6:
88348 +       case IO_REPARSE_TAG_CLOUD_7:
88349 +       case IO_REPARSE_TAG_CLOUD_8:
88350 +       case IO_REPARSE_TAG_CLOUD_9:
88351 +       case IO_REPARSE_TAG_CLOUD_A:
88352 +       case IO_REPARSE_TAG_CLOUD_B:
88353 +       case IO_REPARSE_TAG_CLOUD_C:
88354 +       case IO_REPARSE_TAG_CLOUD_D:
88355 +       case IO_REPARSE_TAG_CLOUD_E:
88356 +       case IO_REPARSE_TAG_CLOUD_F:
88357 +               err = sizeof("OneDrive") - 1;
88358 +               if (err > buflen)
88359 +                       err = buflen;
88360 +               memcpy(buffer, "OneDrive", err);
88361 +               goto out;
88363 +       default:
88364 +               if (IsReparseTagMicrosoft(rp->ReparseTag)) {
88365 +                       /* Unknown Microsoft tag */
88366 +                       goto out;
88367 +               }
88368 +               if (!IsReparseTagNameSurrogate(rp->ReparseTag) ||
88369 +                   i_size <= sizeof(struct REPARSE_POINT)) {
88370 +                       goto out;
88371 +               }
88373 +               /* User's tag */
88374 +               uni = Add2Ptr(rp, sizeof(struct REPARSE_POINT) - 2);
88375 +               nlen = le16_to_cpu(rp->ReparseDataLength) -
88376 +                      sizeof(struct REPARSE_POINT);
88377 +       }
88379 +       /* Convert nlen from bytes to UTF-16 characters */
88380 +       nlen >>= 1;
88382 +       /* Check that name is available */
88383 +       if (!nlen || &uni->name[nlen] > (__le16 *)Add2Ptr(rp, i_size))
88384 +               goto out;
88386 +       /* If the name is already zero-terminated, truncate it now */
88387 +       if (!uni->name[nlen - 1])
88388 +               nlen -= 1;
88389 +       uni->len = nlen;
88391 +       err = ntfs_utf16_to_nls(sbi, uni, buffer, buflen);
88393 +       if (err < 0)
88394 +               goto out;
88396 +       /* Translate Windows '\' into Linux '/' */
88397 +       for (i = 0; i < err; i++) {
88398 +               if (buffer[i] == '\\')
88399 +                       buffer[i] = '/';
88400 +       }
88402 +       /* Always zero-terminate the result */
88403 +       buffer[err] = 0;
88404 +out:
88405 +       ntfs_free(to_free);
88406 +       return err;
88407 +}
88409 +static const char *ntfs_get_link(struct dentry *de, struct inode *inode,
88410 +                                struct delayed_call *done)
88411 +{
88412 +       int err;
88413 +       char *ret;
88415 +       if (!de)
88416 +               return ERR_PTR(-ECHILD);
88418 +       ret = kmalloc(PAGE_SIZE, GFP_NOFS);
88419 +       if (!ret)
88420 +               return ERR_PTR(-ENOMEM);
88422 +       err = ntfs_readlink_hlp(inode, ret, PAGE_SIZE);
88423 +       if (err < 0) {
88424 +               kfree(ret);
88425 +               return ERR_PTR(err);
88426 +       }
88428 +       set_delayed_call(done, kfree_link, ret);
88430 +       return ret;
88431 +}
88433 +const struct inode_operations ntfs_link_inode_operations = {
88434 +       .get_link = ntfs_get_link,
88435 +       .setattr = ntfs3_setattr,
88436 +       .listxattr = ntfs_listxattr,
88437 +       .permission = ntfs_permission,
88438 +       .get_acl = ntfs_get_acl,
88439 +       .set_acl = ntfs_set_acl,
88440 +};
88442 +const struct address_space_operations ntfs_aops = {
88443 +       .readpage = ntfs_readpage,
88444 +       .readahead = ntfs_readahead,
88445 +       .writepage = ntfs_writepage,
88446 +       .writepages = ntfs_writepages,
88447 +       .write_begin = ntfs_write_begin,
88448 +       .write_end = ntfs_write_end,
88449 +       .direct_IO = ntfs_direct_IO,
88450 +       .bmap = ntfs_bmap,
88451 +};
88453 +const struct address_space_operations ntfs_aops_cmpr = {
88454 +       .readpage = ntfs_readpage,
88455 +       .readahead = ntfs_readahead,
88456 +};
88457 diff --git a/fs/ntfs3/lib/decompress_common.c b/fs/ntfs3/lib/decompress_common.c
88458 new file mode 100644
88459 index 000000000000..83c9e93aea77
88460 --- /dev/null
88461 +++ b/fs/ntfs3/lib/decompress_common.c
88462 @@ -0,0 +1,332 @@
88463 +// SPDX-License-Identifier: GPL-2.0-or-later
88464 +/*
88465 + * decompress_common.c - Code shared by the XPRESS and LZX decompressors
88466 + *
88467 + * Copyright (C) 2015 Eric Biggers
88468 + *
88469 + * This program is free software: you can redistribute it and/or modify it under
88470 + * the terms of the GNU General Public License as published by the Free Software
88471 + * Foundation, either version 2 of the License, or (at your option) any later
88472 + * version.
88473 + *
88474 + * This program is distributed in the hope that it will be useful, but WITHOUT
88475 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
88476 + * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
88477 + * details.
88478 + *
88479 + * You should have received a copy of the GNU General Public License along with
88480 + * this program.  If not, see <http://www.gnu.org/licenses/>.
88481 + */
88483 +#include "decompress_common.h"
88485 +/*
88486 + * make_huffman_decode_table() -
88487 + *
88488 + * Build a decoding table for a canonical prefix code, or "Huffman code".
88489 + *
88490 + * This is an internal function, not part of the library API!
88491 + *
88492 + * This takes as input the length of the codeword for each symbol in the
88493 + * alphabet and produces as output a table that can be used for fast
88494 + * decoding of prefix-encoded symbols using read_huffsym().
88495 + *
88496 + * Strictly speaking, a canonical prefix code might not be a Huffman
88497 + * code.  But this algorithm will work either way; and in fact, since
88498 + * Huffman codes are defined in terms of symbol frequencies, there is no
88499 + * way for the decompressor to know whether the code is a true Huffman
88500 + * code or not until all symbols have been decoded.
88501 + *
88502 + * Because the prefix code is assumed to be "canonical", it can be
88503 + * reconstructed directly from the codeword lengths.  A prefix code is
88504 + * canonical if and only if a longer codeword never lexicographically
88505 + * precedes a shorter codeword, and the lexicographic ordering of
88506 + * codewords of the same length is the same as the lexicographic ordering
88507 + * of the corresponding symbols.  Consequently, we can sort the symbols
88508 + * primarily by codeword length and secondarily by symbol value, then
88509 + * reconstruct the prefix code by generating codewords lexicographically
88510 + * in that order.
88511 + *
88512 + * This function does not, however, generate the prefix code explicitly.
88513 + * Instead, it directly builds a table for decoding symbols using the
88514 + * code.  The basic idea is this: given the next 'max_codeword_len' bits
88515 + * in the input, we can look up the decoded symbol by indexing a table
88516 + * containing 2**max_codeword_len entries.  A codeword with length
88517 + * 'max_codeword_len' will have exactly one entry in this table, whereas
88518 + * a codeword shorter than 'max_codeword_len' will have multiple entries
88519 + * in this table.  Precisely, a codeword of length n will be represented
88520 + * by 2**(max_codeword_len - n) entries in this table.  The 0-based index
88521 + * of each such entry will contain the corresponding codeword as a prefix
88522 + * when zero-padded on the left to 'max_codeword_len' binary digits.
88523 + *
88524 + * That's the basic idea, but we implement two optimizations regarding
88525 + * the format of the decode table itself:
88526 + *
88527 + * - For many compression formats, the maximum codeword length is too
88528 + *   long for it to be efficient to build the full decoding table
88529 + *   whenever a new prefix code is used.  Instead, we can build the table
88530 + *   using only 2**table_bits entries, where 'table_bits' is some number
88531 + *   less than or equal to 'max_codeword_len'.  Then, only codewords of
88532 + *   length 'table_bits' and shorter can be directly looked up.  For
88533 + *   longer codewords, the direct lookup instead produces the root of a
88534 + *   binary tree.  Using this tree, the decoder can do traditional
88535 + *   bit-by-bit decoding of the remainder of the codeword.  Child nodes
88536 + *   are allocated in extra entries at the end of the table; leaf nodes
88537 + *   contain symbols.  Note that the long-codeword case is, in general,
88538 + *   not performance critical, since in Huffman codes the most frequently
88539 + *   used symbols are assigned the shortest codeword lengths.
88540 + *
88541 + * - When we decode a symbol using a direct lookup of the table, we still
88542 + *   need to know its length so that the bitstream can be advanced by the
88543 + *   appropriate number of bits.  The simple solution is to simply retain
88544 + *   the 'lens' array and use the decoded symbol as an index into it.
88545 + *   However, this requires two separate array accesses in the fast path.
88546 + *   The optimization is to store the length directly in the decode
88547 + *   table.  We use the bottom 11 bits for the symbol and the top 5 bits
88548 + *   for the length.  In addition, to combine this optimization with the
88549 + *   previous one, we introduce a special case where the top 2 bits of
88550 + *   the length are both set if the entry is actually the root of a
88551 + *   binary tree.
88552 + *
88553 + * @decode_table:
88554 + *     The array in which to create the decoding table.  This must have
88555 + *     a length of at least ((2**table_bits) + 2 * num_syms) entries.
88556 + *
88557 + * @num_syms:
88558 + *     The number of symbols in the alphabet; also, the length of the
88559 + *     'lens' array.  Must be less than or equal to 2048.
88560 + *
88561 + * @table_bits:
88562 + *     The order of the decode table size, as explained above.  Must be
88563 + *     less than or equal to 13.
88564 + *
88565 + * @lens:
88566 + *     An array of length @num_syms, indexable by symbol, that gives the
88567 + *     length of the codeword, in bits, for that symbol.  The length can
88568 + *     be 0, which means that the symbol does not have a codeword
88569 + *     assigned.
88570 + *
88571 + * @max_codeword_len:
88572 + *     The longest codeword length allowed in the compression format.
88573 + *     All entries in 'lens' must be less than or equal to this value.
88574 + *     This must be less than or equal to 23.
88575 + *
88576 + * @working_space:
88577 + *     A temporary array of length '2 * (max_codeword_len + 1) +
88578 + *     num_syms'.
88579 + *
88580 + * Returns 0 on success, or -1 if the lengths do not form a valid prefix
88581 + * code.
88582 + */
88583 +int make_huffman_decode_table(u16 decode_table[], const u32 num_syms,
88584 +                             const u32 table_bits, const u8 lens[],
88585 +                             const u32 max_codeword_len,
88586 +                             u16 working_space[])
88587 +{
88588 +       const u32 table_num_entries = 1 << table_bits;
88589 +       u16 * const len_counts = &working_space[0];
88590 +       u16 * const offsets = &working_space[1 * (max_codeword_len + 1)];
88591 +       u16 * const sorted_syms = &working_space[2 * (max_codeword_len + 1)];
88592 +       int left;
88593 +       void *decode_table_ptr;
88594 +       u32 sym_idx;
88595 +       u32 codeword_len;
88596 +       u32 stores_per_loop;
88597 +       u32 decode_table_pos;
88598 +       u32 len;
88599 +       u32 sym;
88601 +       /* Count how many symbols have each possible codeword length.
88602 +        * Note that a length of 0 indicates the corresponding symbol is not
88603 +        * used in the code and therefore does not have a codeword.
88604 +        */
88605 +       for (len = 0; len <= max_codeword_len; len++)
88606 +               len_counts[len] = 0;
88607 +       for (sym = 0; sym < num_syms; sym++)
88608 +               len_counts[lens[sym]]++;
88610 +       /* We can assume all lengths are <= max_codeword_len, but we
88611 +        * cannot assume they form a valid prefix code.  A codeword of
88612 +        * length n should require a proportion of the codespace equaling
88613 +        * (1/2)^n.  The code is valid if and only if the codespace is
88614 +        * exactly filled by the lengths, by this measure.
88615 +        */
88616 +       left = 1;
88617 +       for (len = 1; len <= max_codeword_len; len++) {
88618 +               left <<= 1;
88619 +               left -= len_counts[len];
88620 +               if (left < 0) {
88621 +                       /* The lengths overflow the codespace; that is, the code
88622 +                        * is over-subscribed.
88623 +                        */
88624 +                       return -1;
88625 +               }
88626 +       }
88628 +       if (left) {
88629 +               /* The lengths do not fill the codespace; that is, they form an
88630 +                * incomplete set.
88631 +                */
88632 +               if (left == (1 << max_codeword_len)) {
88633 +                       /* The code is completely empty.  This is arguably
88634 +                        * invalid, but in fact it is valid in LZX and XPRESS,
88635 +                        * so we must allow it.  By definition, no symbols can
88636 +                        * be decoded with an empty code.  Consequently, we
88637 +                        * technically don't even need to fill in the decode
88638 +                        * table.  However, to avoid accessing uninitialized
88639 +                        * memory if the algorithm nevertheless attempts to
88640 +                        * decode symbols using such a code, we zero out the
88641 +                        * decode table.
88642 +                        */
88643 +                       memset(decode_table, 0,
88644 +                              table_num_entries * sizeof(decode_table[0]));
88645 +                       return 0;
88646 +               }
88647 +               return -1;
88648 +       }
88650 +       /* Sort the symbols primarily by length and secondarily by symbol order.
88651 +        */
88653 +       /* Initialize 'offsets' so that offsets[len] for 1 <= len <=
88654 +        * max_codeword_len is the number of codewords shorter than 'len' bits.
88655 +        */
88656 +       offsets[1] = 0;
88657 +       for (len = 1; len < max_codeword_len; len++)
88658 +               offsets[len + 1] = offsets[len] + len_counts[len];
88660 +       /* Use the 'offsets' array to sort the symbols.  Note that we do not
88661 +        * include symbols that are not used in the code.  Consequently, fewer
88662 +        * than 'num_syms' entries in 'sorted_syms' may be filled.
88663 +        */
88664 +       for (sym = 0; sym < num_syms; sym++)
88665 +               if (lens[sym])
88666 +                       sorted_syms[offsets[lens[sym]]++] = sym;
88668 +       /* Fill entries for codewords with length <= table_bits
88669 +        * --- that is, those short enough for a direct mapping.
88670 +        *
88671 +        * The table will start with entries for the shortest codeword(s), which
88672 +        * have the most entries.  From there, the number of entries per
88673 +        * codeword will decrease.
88674 +        */
88675 +       decode_table_ptr = decode_table;
88676 +       sym_idx = 0;
88677 +       codeword_len = 1;
88678 +       stores_per_loop = (1 << (table_bits - codeword_len));
88679 +       for (; stores_per_loop != 0; codeword_len++, stores_per_loop >>= 1) {
88680 +               u32 end_sym_idx = sym_idx + len_counts[codeword_len];
88682 +               for (; sym_idx < end_sym_idx; sym_idx++) {
88683 +                       u16 entry;
88684 +                       u16 *p;
88685 +                       u32 n;
88687 +                       entry = ((u32)codeword_len << 11) | sorted_syms[sym_idx];
88688 +                       p = (u16 *)decode_table_ptr;
88689 +                       n = stores_per_loop;
88691 +                       do {
88692 +                               *p++ = entry;
88693 +                       } while (--n);
88695 +                       decode_table_ptr = p;
88696 +               }
88697 +       }
88699 +       /* If we've filled in the entire table, we are done.  Otherwise,
88700 +        * there are codewords longer than table_bits for which we must
88701 +        * generate binary trees.
88702 +        */
88703 +       decode_table_pos = (u16 *)decode_table_ptr - decode_table;
88704 +       if (decode_table_pos != table_num_entries) {
88705 +               u32 j;
88706 +               u32 next_free_tree_slot;
88707 +               u32 cur_codeword;
88709 +               /* First, zero out the remaining entries.  This is
88710 +                * necessary so that these entries appear as
88711 +                * "unallocated" in the next part.  Each of these entries
88712 +                * will eventually be filled with the representation of
88713 +                * the root node of a binary tree.
88714 +                */
88715 +               j = decode_table_pos;
88716 +               do {
88717 +                       decode_table[j] = 0;
88718 +               } while (++j != table_num_entries);
88720 +               /* We allocate child nodes starting at the end of the
88721 +                * direct lookup table.  Note that there should be
88722 +                * 2*num_syms extra entries for this purpose, although
88723 +                * fewer than this may actually be needed.
88724 +                */
88725 +               next_free_tree_slot = table_num_entries;
88727 +               /* Iterate through each codeword with length greater than
88728 +                * 'table_bits', primarily in order of codeword length
88729 +                * and secondarily in order of symbol.
88730 +                */
88731 +               for (cur_codeword = decode_table_pos << 1;
88732 +                    codeword_len <= max_codeword_len;
88733 +                    codeword_len++, cur_codeword <<= 1) {
88734 +                       u32 end_sym_idx = sym_idx + len_counts[codeword_len];
88736 +                       for (; sym_idx < end_sym_idx; sym_idx++, cur_codeword++) {
88737 +                               /* 'sorted_sym' is the symbol represented by the
88738 +                                * codeword.
88739 +                                */
88740 +                               u32 sorted_sym = sorted_syms[sym_idx];
88741 +                               u32 extra_bits = codeword_len - table_bits;
88742 +                               u32 node_idx = cur_codeword >> extra_bits;
88744 +                               /* Go through each bit of the current codeword
88745 +                                * beyond the prefix of length @table_bits and
88746 +                                * walk the appropriate binary tree, allocating
88747 +                                * any slots that have not yet been allocated.
88748 +                                *
88749 +                                * Note that the 'pointer' entry to the binary
88750 +                                * tree, which is stored in the direct lookup
88751 +                                * portion of the table, is represented
88752 +                                * identically to other internal (non-leaf)
88753 +                                * nodes of the binary tree; it can be thought
88754 +                                * of as simply the root of the tree.  The
88755 +                                * representation of these internal nodes is
88756 +                                * simply the index of the left child combined
88757 +                                * with the special bits 0xC000 to distinguish
88758 +                                * the entry from direct mapping and leaf node
88759 +                                * entries.
88760 +                                */
88761 +                               do {
88762 +                                       /* At least one bit remains in the
88763 +                                        * codeword, but the current node is an
88764 +                                        * unallocated leaf.  Change it to an
88765 +                                        * internal node.
88766 +                                        */
88767 +                                       if (decode_table[node_idx] == 0) {
88768 +                                               decode_table[node_idx] =
88769 +                                                       next_free_tree_slot | 0xC000;
88770 +                                               decode_table[next_free_tree_slot++] = 0;
88771 +                                               decode_table[next_free_tree_slot++] = 0;
88772 +                                       }
88774 +                                       /* Go to the left child if the next bit
88775 +                                        * in the codeword is 0; otherwise go to
88776 +                                        * the right child.
88777 +                                        */
88778 +                                       node_idx = decode_table[node_idx] & 0x3FFF;
88779 +                                       --extra_bits;
88780 +                                       node_idx += (cur_codeword >> extra_bits) & 1;
88781 +                               } while (extra_bits != 0);
88783 +                               /* We've traversed the tree using the entire
88784 +                                * codeword, and we're now at the entry where
88785 +                                * the actual symbol will be stored.  This is
88786 +                                * distinguished from internal nodes by not
88787 +                                * having its high two bits set.
88788 +                                */
88789 +                               decode_table[node_idx] = sorted_sym;
88790 +                       }
88791 +               }
88792 +       }
88793 +       return 0;
88794 +}
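A worked instance of the table format described above, as an illustrative sketch (not part of the patch):

/*
 * lens = {1, 2, 3, 3}, table_bits = 3 -> canonical codewords 0, 10, 110, 111.
 * Each direct-lookup entry is (codeword_len << 11) | symbol:
 *
 *   sym 0, len 1, codeword 0   -> entry 0x0800 at indices 0..3
 *   sym 1, len 2, codeword 10  -> entry 0x1001 at indices 4..5
 *   sym 2, len 3, codeword 110 -> entry 0x1802 at index  6
 *   sym 3, len 3, codeword 111 -> entry 0x1803 at index  7
 *
 * A codeword of length n fills 2**(table_bits - n) consecutive slots, and
 * entries >= 0xC000 would instead mark the root of a binary tree for
 * codewords longer than table_bits.
 */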
88795 diff --git a/fs/ntfs3/lib/decompress_common.h b/fs/ntfs3/lib/decompress_common.h
88796 new file mode 100644
88797 index 000000000000..66297f398403
88798 --- /dev/null
88799 +++ b/fs/ntfs3/lib/decompress_common.h
88800 @@ -0,0 +1,352 @@
88801 +/* SPDX-License-Identifier: GPL-2.0-or-later */
88803 +/*
88804 + * decompress_common.h - Code shared by the XPRESS and LZX decompressors
88805 + *
88806 + * Copyright (C) 2015 Eric Biggers
88807 + *
88808 + * This program is free software: you can redistribute it and/or modify it under
88809 + * the terms of the GNU General Public License as published by the Free Software
88810 + * Foundation, either version 2 of the License, or (at your option) any later
88811 + * version.
88812 + *
88813 + * This program is distributed in the hope that it will be useful, but WITHOUT
88814 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
88815 + * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
88816 + * details.
88817 + *
88818 + * You should have received a copy of the GNU General Public License along with
88819 + * this program.  If not, see <http://www.gnu.org/licenses/>.
88820 + */
88822 +#include <linux/string.h>
88823 +#include <linux/compiler.h>
88824 +#include <linux/types.h>
88825 +#include <linux/slab.h>
88826 +#include <asm/unaligned.h>
88829 +/* "Force inline" macro (not required, but helpful for performance)  */
88830 +#define forceinline __always_inline
88832 +/* Enable whole-word match copying on selected architectures  */
88833 +#if defined(__i386__) || defined(__x86_64__) || defined(__ARM_FEATURE_UNALIGNED)
88834 +#  define FAST_UNALIGNED_ACCESS
88835 +#endif
88837 +/* Size of a machine word  */
88838 +#define WORDBYTES (sizeof(size_t))
88840 +static forceinline void
88841 +copy_unaligned_word(const void *src, void *dst)
88842 +{
88843 +       put_unaligned(get_unaligned((const size_t *)src), (size_t *)dst);
88844 +}
88847 +/* Generate a "word" with platform-dependent size whose bytes all contain the
88848 + * value 'b'.
88849 + */
88850 +static forceinline size_t repeat_byte(u8 b)
88851 +{
88852 +       size_t v;
88854 +       v = b;
88855 +       v |= v << 8;
88856 +       v |= v << 16;
88857 +       v |= v << ((WORDBYTES == 8) ? 32 : 0);
88858 +       return v;
88859 +}
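For instance, on a 64-bit machine repeat_byte(0xAB) evaluates to 0xABABABABABABABAB, while on 32-bit the final shift is by 0 and the result stops at 0xABABABAB.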
88861 +/* Structure that encapsulates a block of in-memory data being interpreted as a
88862 + * stream of bits, optionally with interwoven literal bytes.  Bits are assumed
88863 + * to be stored in little endian 16-bit coding units, with the bits ordered high
88864 + * to low.
88865 + */
88866 +struct input_bitstream {
88868 +       /* Bits that have been read from the input buffer.  The bits are
88869 +        * left-justified; the next bit is always bit 31.
88870 +        */
88871 +       u32 bitbuf;
88873 +       /* Number of bits currently held in @bitbuf.  */
88874 +       u32 bitsleft;
88876 +       /* Pointer to the next byte to be retrieved from the input buffer.  */
88877 +       const u8 *next;
88879 +       /* Pointer to just past the end of the input buffer.  */
88880 +       const u8 *end;
88881 +};
88883 +/* Initialize a bitstream to read from the specified input buffer.  */
88884 +static forceinline void init_input_bitstream(struct input_bitstream *is,
88885 +                                            const void *buffer, u32 size)
88886 +{
88887 +       is->bitbuf = 0;
88888 +       is->bitsleft = 0;
88889 +       is->next = buffer;
88890 +       is->end = is->next + size;
88891 +}
88893 +/* Ensure the bit buffer variable for the bitstream contains at least @num_bits
88894 + * bits.  Following this, bitstream_peek_bits() and/or bitstream_remove_bits()
88895 + * may be called on the bitstream to peek or remove up to @num_bits bits.  Note
88896 + * that @num_bits must be <= 16.
88897 + */
88898 +static forceinline void bitstream_ensure_bits(struct input_bitstream *is,
88899 +                                             u32 num_bits)
88900 +{
88901 +       if (is->bitsleft < num_bits) {
88902 +               if (is->end - is->next >= 2) {
88903 +                       is->bitbuf |= (u32)get_unaligned_le16(is->next)
88904 +                                       << (16 - is->bitsleft);
88905 +                       is->next += 2;
88906 +               }
88907 +               is->bitsleft += 16;
88908 +       }
88909 +}
88911 +/* Return the next @num_bits bits from the bitstream, without removing them.
88912 + * There must be at least @num_bits remaining in the buffer variable, from a
88913 + * previous call to bitstream_ensure_bits().
88914 + */
88915 +static forceinline u32
88916 +bitstream_peek_bits(const struct input_bitstream *is, const u32 num_bits)
88917 +{
88918 +       return (is->bitbuf >> 1) >> (sizeof(is->bitbuf) * 8 - num_bits - 1);
88919 +}
88921 +/* Remove @num_bits from the bitstream.  There must be at least @num_bits
88922 + * remaining in the buffer variable, from a previous call to
88923 + * bitstream_ensure_bits().
88924 + */
88925 +static forceinline void
88926 +bitstream_remove_bits(struct input_bitstream *is, u32 num_bits)
88927 +{
88928 +       is->bitbuf <<= num_bits;
88929 +       is->bitsleft -= num_bits;
88930 +}
88932 +/* Remove and return @num_bits bits from the bitstream.  There must be at least
88933 + * @num_bits remaining in the buffer variable, from a previous call to
88934 + * bitstream_ensure_bits().
88935 + */
88936 +static forceinline u32
88937 +bitstream_pop_bits(struct input_bitstream *is, u32 num_bits)
88938 +{
88939 +       u32 bits = bitstream_peek_bits(is, num_bits);
88941 +       bitstream_remove_bits(is, num_bits);
88942 +       return bits;
88943 +}
88945 +/* Read and return the next @num_bits bits from the bitstream.  */
88946 +static forceinline u32
88947 +bitstream_read_bits(struct input_bitstream *is, u32 num_bits)
88948 +{
88949 +       bitstream_ensure_bits(is, num_bits);
88950 +       return bitstream_pop_bits(is, num_bits);
88951 +}
88953 +/* Read and return the next literal byte embedded in the bitstream.  */
88954 +static forceinline u8
88955 +bitstream_read_byte(struct input_bitstream *is)
88956 +{
88957 +       if (unlikely(is->end == is->next))
88958 +               return 0;
88959 +       return *is->next++;
88960 +}
88962 +/* Read and return the next 16-bit integer embedded in the bitstream.  */
88963 +static forceinline u16
88964 +bitstream_read_u16(struct input_bitstream *is)
88965 +{
88966 +       u16 v;
88968 +       if (unlikely(is->end - is->next < 2))
88969 +               return 0;
88970 +       v = get_unaligned_le16(is->next);
88971 +       is->next += 2;
88972 +       return v;
88973 +}
88975 +/* Read and return the next 32-bit integer embedded in the bitstream.  */
88976 +static forceinline u32
88977 +bitstream_read_u32(struct input_bitstream *is)
88978 +{
88979 +       u32 v;
88981 +       if (unlikely(is->end - is->next < 4))
88982 +               return 0;
88983 +       v = get_unaligned_le32(is->next);
88984 +       is->next += 4;
88985 +       return v;
88986 +}
88988 +/* Read into @dst_buffer an array of literal bytes embedded in the bitstream.
88989 + * Return either a pointer to the byte past the last written, or NULL if the
88990 + * read overflows the input buffer.
88991 + */
88992 +static forceinline void *bitstream_read_bytes(struct input_bitstream *is,
88993 +                                             void *dst_buffer, size_t count)
88994 +{
88995 +       if ((size_t)(is->end - is->next) < count)
88996 +               return NULL;
88997 +       memcpy(dst_buffer, is->next, count);
88998 +       is->next += count;
88999 +       return (u8 *)dst_buffer + count;
89000 +}
89002 +/* Align the input bitstream on a coding-unit boundary.  */
89003 +static forceinline void bitstream_align(struct input_bitstream *is)
89004 +{
89005 +       is->bitsleft = 0;
89006 +       is->bitbuf = 0;
89007 +}
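A minimal usage sketch of this bitstream API (demo_read_header is a hypothetical name, not part of the patch):

static u32 demo_read_header(const u8 *buf, u32 size)
{
        struct input_bitstream is;
        u32 block_type;

        init_input_bitstream(&is, buf, size);
        bitstream_ensure_bits(&is, 4);            /* refill; num_bits <= 16 */
        block_type = bitstream_peek_bits(&is, 3); /* inspect without consuming */
        bitstream_remove_bits(&is, 3);            /* now consume those 3 bits */
        return (block_type << 1) | bitstream_read_bits(&is, 1);
}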
89009 +extern int make_huffman_decode_table(u16 decode_table[], const u32 num_syms,
89010 +                                    const u32 num_bits, const u8 lens[],
89011 +                                    const u32 max_codeword_len,
89012 +                                    u16 working_space[]);
89015 +/* Reads and returns the next Huffman-encoded symbol from a bitstream.  If the
89016 + * input data is exhausted, the Huffman symbol is decoded as if the missing bits
89017 + * are all zeroes.
89018 + */
89019 +static forceinline u32 read_huffsym(struct input_bitstream *istream,
89020 +                                        const u16 decode_table[],
89021 +                                        u32 table_bits,
89022 +                                        u32 max_codeword_len)
89023 +{
89024 +       u32 entry;
89025 +       u32 key_bits;
89027 +       bitstream_ensure_bits(istream, max_codeword_len);
89029 +       /* Index the decode table by the next table_bits bits of the input.  */
89030 +       key_bits = bitstream_peek_bits(istream, table_bits);
89031 +       entry = decode_table[key_bits];
89032 +       if (entry < 0xC000) {
89033 +               /* Fast case: The decode table directly provided the
89034 +                * symbol and codeword length.  The low 11 bits are the
89035 +                * symbol, and the high 5 bits are the codeword length.
89036 +                */
89037 +               bitstream_remove_bits(istream, entry >> 11);
89038 +               return entry & 0x7FF;
89039 +       }
89040 +       /* Slow case: The codeword for the symbol is longer than
89041 +        * table_bits, so the symbol does not have an entry
89042 +        * directly in the first (1 << table_bits) entries of the
89043 +        * decode table.  Traverse the appropriate binary tree
89044 +        * bit-by-bit to decode the symbol.
89045 +        */
89046 +       bitstream_remove_bits(istream, table_bits);
89047 +       do {
89048 +               key_bits = (entry & 0x3FFF) + bitstream_pop_bits(istream, 1);
89049 +       } while ((entry = decode_table[key_bits]) >= 0xC000);
89050 +       return entry;
89051 +}
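Putting the pieces together, a sketch of building a table and decoding with it (demo_decode and the 4-symbol code are illustrative assumptions, not part of the patch):

static int demo_decode(const u8 *in, size_t in_size, u16 *out, u32 n)
{
        static const u8 lens[4] = { 1, 2, 3, 3 };
        u16 table[(1 << 3) + 2 * 4]; /* (2**table_bits) + 2 * num_syms */
        u16 work[2 * (3 + 1) + 4];   /* 2 * (max_codeword_len + 1) + num_syms */
        struct input_bitstream is;
        u32 i;

        if (make_huffman_decode_table(table, 4, 3, lens, 3, work))
                return -1;

        init_input_bitstream(&is, in, in_size);
        for (i = 0; i < n; i++)
                out[i] = read_huffsym(&is, table, 3, 3);
        return 0;
}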
89053 +/*
89054 + * Copy an LZ77 match at (dst - offset) to dst.
89055 + *
89056 + * The length and offset must be already validated --- that is, (dst - offset)
89057 + * can't underrun the output buffer, and (dst + length) can't overrun the output
89058 + * buffer.  Also, the length cannot be 0.
89059 + *
89060 + * @bufend points to the byte past the end of the output buffer.  This function
89061 + * won't write any data beyond this position.
89062 + *
89063 + * Returns dst + length.
89064 + */
89065 +static forceinline u8 *lz_copy(u8 *dst, u32 length, u32 offset, const u8 *bufend,
89066 +                              u32 min_length)
89067 +{
89068 +       const u8 *src = dst - offset;
89070 +       /*
89071 +        * Try to copy one machine word at a time.  On i386 and x86_64 this is
89072 +        * faster than copying one byte at a time, unless the data is
89073 +        * near-random and all the matches have very short lengths.  Note that
89074 +        * since this requires unaligned memory accesses, it won't necessarily
89075 +        * be faster on every architecture.
89076 +        *
89077 +        * Also note that we might copy more than the length of the match.  For
89078 +        * example, if a word is 8 bytes and the match is of length 5, then
89079 +        * we'll simply copy 8 bytes.  This is okay as long as we don't write
89080 +        * beyond the end of the output buffer, hence the check for (bufend -
89081 +        * end >= WORDBYTES - 1).
89082 +        */
89083 +#ifdef FAST_UNALIGNED_ACCESS
89084 +       u8 * const end = dst + length;
89086 +       if (bufend - end >= (ptrdiff_t)(WORDBYTES - 1)) {
89088 +               if (offset >= WORDBYTES) {
89089 +                       /* The source and destination words don't overlap.  */
89091 +                       /* To improve branch prediction, one iteration of this
89092 +                        * loop is unrolled.  Most matches are short and will
89093 +                        * fail the first check.  But if that check passes, then
89094 +                        * it becomes increasing likely that the match is long
89095 +                        * it becomes increasingly likely that the match is long
89096 +                        */
89098 +                       copy_unaligned_word(src, dst);
89099 +                       src += WORDBYTES;
89100 +                       dst += WORDBYTES;
89102 +                       if (dst < end) {
89103 +                               do {
89104 +                                       copy_unaligned_word(src, dst);
89105 +                                       src += WORDBYTES;
89106 +                                       dst += WORDBYTES;
89107 +                               } while (dst < end);
89108 +                       }
89109 +                       return end;
89110 +               } else if (offset == 1) {
89112 +                       /* Offset 1 matches are equivalent to run-length
89113 +                        * encoding of the previous byte.  This case is common
89114 +                        * if the data contains many repeated bytes.
89115 +                        */
89116 +                       size_t v = repeat_byte(*(dst - 1));
89118 +                       do {
89119 +                               put_unaligned(v, (size_t *)dst);
89120 +                               src += WORDBYTES;
89121 +                               dst += WORDBYTES;
89122 +                       } while (dst < end);
89123 +                       return end;
89124 +               }
89125 +               /*
89126 +                * We don't bother with special cases for other 'offset <
89127 +                * WORDBYTES', which are usually rarer than 'offset == 1'.  Extra
89128 +                * checks will just slow things down.  Actually, it's possible
89129 +                * to handle all the 'offset < WORDBYTES' cases using the same
89130 +                * code, but it still becomes more complicated doesn't seem any
89131 +                * faster overall; it definitely slows down the more common
89132 +                * 'offset == 1' case.
89133 +                */
89134 +       }
89135 +#endif /* FAST_UNALIGNED_ACCESS */
89137 +       /* Fall back to a bytewise copy.  */
89139 +       if (min_length >= 2) {
89140 +               *dst++ = *src++;
89141 +               length--;
89142 +       }
89143 +       if (min_length >= 3) {
89144 +               *dst++ = *src++;
89145 +               length--;
89146 +       }
89147 +       do {
89148 +               *dst++ = *src++;
89149 +       } while (--length);
89151 +       return dst;
89152 +}
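A worked example of the offset-1 path (demo_rle is hypothetical, not part of the patch):

static void demo_rle(void)
{
        u8 buf[16] = { 'a', 'b' };

        /*
         * Copy 3 bytes starting at (dst - 1): every byte read is the one
         * just written, so this repeats 'b' and leaves "abbbb" in buf.
         * The word-at-a-time path may store past dst + length, but never
         * past bufend.
         */
        lz_copy(&buf[2], 3, 1, buf + sizeof(buf), 2);
}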
89153 diff --git a/fs/ntfs3/lib/lib.h b/fs/ntfs3/lib/lib.h
89154 new file mode 100644
89155 index 000000000000..f508fbad2e71
89156 --- /dev/null
89157 +++ b/fs/ntfs3/lib/lib.h
89158 @@ -0,0 +1,26 @@
89159 +/* SPDX-License-Identifier: GPL-2.0-or-later */
89160 +/*
89161 + * Adapted for linux kernel by Alexander Mamaev:
89162 + * - remove implementations of get_unaligned_
89163 + * - assume GCC is always defined
89164 + * - ISO C90
89165 + * - linux kernel code style
89166 + */
89169 +/* globals from xpress_decompress.c */
89170 +struct xpress_decompressor *xpress_allocate_decompressor(void);
89171 +void xpress_free_decompressor(struct xpress_decompressor *d);
89172 +int xpress_decompress(struct xpress_decompressor *__restrict d,
89173 +                     const void *__restrict compressed_data,
89174 +                     size_t compressed_size,
89175 +                     void *__restrict uncompressed_data,
89176 +                     size_t uncompressed_size);
89178 +/* globals from lzx_decompress.c */
89179 +struct lzx_decompressor *lzx_allocate_decompressor(void);
89180 +void lzx_free_decompressor(struct lzx_decompressor *d);
89181 +int lzx_decompress(struct lzx_decompressor *__restrict d,
89182 +                  const void *__restrict compressed_data,
89183 +                  size_t compressed_size, void *__restrict uncompressed_data,
89184 +                  size_t uncompressed_size);
89185 diff --git a/fs/ntfs3/lib/lzx_decompress.c b/fs/ntfs3/lib/lzx_decompress.c
89186 new file mode 100644
89187 index 000000000000..77a381a693d1
89188 --- /dev/null
89189 +++ b/fs/ntfs3/lib/lzx_decompress.c
89190 @@ -0,0 +1,683 @@
89191 +// SPDX-License-Identifier: GPL-2.0-or-later
89192 +/*
89193 + * lzx_decompress.c - A decompressor for the LZX compression format, which can
89194 + * be used in "System Compressed" files.  This is based on the code from wimlib.
89195 + * This code only supports a window size (dictionary size) of 32768 bytes, since
89196 + * this is the only size used in System Compression.
89197 + *
89198 + * Copyright (C) 2015 Eric Biggers
89199 + *
89200 + * This program is free software: you can redistribute it and/or modify it under
89201 + * the terms of the GNU General Public License as published by the Free Software
89202 + * Foundation, either version 2 of the License, or (at your option) any later
89203 + * version.
89204 + *
89205 + * This program is distributed in the hope that it will be useful, but WITHOUT
89206 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
89207 + * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
89208 + * details.
89209 + *
89210 + * You should have received a copy of the GNU General Public License along with
89211 + * this program.  If not, see <http://www.gnu.org/licenses/>.
89212 + */
89214 +#include "decompress_common.h"
89215 +#include "lib.h"
89217 +/* Number of literal byte values  */
89218 +#define LZX_NUM_CHARS                  256
89220 +/* The smallest and largest allowed match lengths  */
89221 +#define LZX_MIN_MATCH_LEN              2
89222 +#define LZX_MAX_MATCH_LEN              257
89224 +/* Number of distinct match lengths that can be represented  */
89225 +#define LZX_NUM_LENS                   (LZX_MAX_MATCH_LEN - LZX_MIN_MATCH_LEN + 1)
89227 +/* Number of match lengths for which no length symbol is required  */
89228 +#define LZX_NUM_PRIMARY_LENS           7
89229 +#define LZX_NUM_LEN_HEADERS            (LZX_NUM_PRIMARY_LENS + 1)
89231 +/* Valid values of the 3-bit block type field  */
89232 +#define LZX_BLOCKTYPE_VERBATIM         1
89233 +#define LZX_BLOCKTYPE_ALIGNED          2
89234 +#define LZX_BLOCKTYPE_UNCOMPRESSED     3
89236 +/* Number of offset slots for a window size of 32768  */
89237 +#define LZX_NUM_OFFSET_SLOTS           30
89239 +/* Number of symbols in the main code for a window size of 32768  */
89240 +#define LZX_MAINCODE_NUM_SYMBOLS       \
89241 +       (LZX_NUM_CHARS + (LZX_NUM_OFFSET_SLOTS * LZX_NUM_LEN_HEADERS))
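With the values above, the main alphabet therefore has 256 + 30 * 8 = 496 symbols.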
89243 +/* Number of symbols in the length code  */
89244 +#define LZX_LENCODE_NUM_SYMBOLS                (LZX_NUM_LENS - LZX_NUM_PRIMARY_LENS)
89246 +/* Number of symbols in the precode  */
89247 +#define LZX_PRECODE_NUM_SYMBOLS                20
89249 +/* Number of bits in which each precode codeword length is represented  */
89250 +#define LZX_PRECODE_ELEMENT_SIZE       4
89252 +/* Number of low-order bits of each match offset that are entropy-encoded in
89253 + * aligned offset blocks
89254 + */
89255 +#define LZX_NUM_ALIGNED_OFFSET_BITS    3
89257 +/* Number of symbols in the aligned offset code  */
89258 +#define LZX_ALIGNEDCODE_NUM_SYMBOLS    (1 << LZX_NUM_ALIGNED_OFFSET_BITS)
89260 +/* Mask for the match offset bits that are entropy-encoded in aligned offset
89261 + * blocks
89262 + */
89263 +#define LZX_ALIGNED_OFFSET_BITMASK     ((1 << LZX_NUM_ALIGNED_OFFSET_BITS) - 1)
89265 +/* Number of bits in which each aligned offset codeword length is represented  */
89266 +#define LZX_ALIGNEDCODE_ELEMENT_SIZE   3
89268 +/* Maximum lengths (in bits) of the codewords in each Huffman code  */
89269 +#define LZX_MAX_MAIN_CODEWORD_LEN      16
89270 +#define LZX_MAX_LEN_CODEWORD_LEN       16
89271 +#define LZX_MAX_PRE_CODEWORD_LEN       ((1 << LZX_PRECODE_ELEMENT_SIZE) - 1)
89272 +#define LZX_MAX_ALIGNED_CODEWORD_LEN   ((1 << LZX_ALIGNEDCODE_ELEMENT_SIZE) - 1)
89274 +/* The default "filesize" value used in pre/post-processing.  In the LZX format
89275 + * used in cabinet files this value must be given to the decompressor, whereas
89276 + * in the LZX format used in WIM files and system-compressed files this value is
89277 + * fixed at 12000000.
89278 + */
89279 +#define LZX_DEFAULT_FILESIZE           12000000
89281 +/* Assumed block size when the encoded block size begins with a 1 bit.  */
89282 +#define LZX_DEFAULT_BLOCK_SIZE         32768
89284 +/* Number of offsets in the recent (or "repeat") offsets queue.  */
89285 +#define LZX_NUM_RECENT_OFFSETS         3
89287 +/* These values are chosen for fast decompression.  */
89288 +#define LZX_MAINCODE_TABLEBITS         11
89289 +#define LZX_LENCODE_TABLEBITS          10
89290 +#define LZX_PRECODE_TABLEBITS          6
89291 +#define LZX_ALIGNEDCODE_TABLEBITS      7
89293 +#define LZX_READ_LENS_MAX_OVERRUN      50
89295 +/* Mapping: offset slot => first match offset that uses that offset slot.
89296 + */
89297 +static const u32 lzx_offset_slot_base[LZX_NUM_OFFSET_SLOTS + 1] = {
89298 +       0,      1,      2,      3,      4,      /* 0  --- 4  */
89299 +       6,      8,      12,     16,     24,     /* 5  --- 9  */
89300 +       32,     48,     64,     96,     128,    /* 10 --- 14 */
89301 +       192,    256,    384,    512,    768,    /* 15 --- 19 */
89302 +       1024,   1536,   2048,   3072,   4096,   /* 20 --- 24 */
89303 +       6144,   8192,   12288,  16384,  24576,  /* 25 --- 29 */
89304 +       32768,                                  /* extra     */
89307 +/* Mapping: offset slot => how many extra bits must be read and added to the
89308 + * corresponding offset slot base to decode the match offset.
89309 + */
89310 +static const u8 lzx_extra_offset_bits[LZX_NUM_OFFSET_SLOTS] = {
89311 +       0,      0,      0,      0,      1,
89312 +       1,      2,      2,      3,      3,
89313 +       4,      4,      5,      5,      6,
89314 +       6,      7,      7,      8,      8,
89315 +       9,      9,      10,     10,     11,
89316 +       11,     12,     12,     13,     13,
89319 +/* Reusable heap-allocated memory for LZX decompression  */
89320 +struct lzx_decompressor {
89322 +       /* Huffman decoding tables, and arrays that map symbols to codeword
89323 +        * lengths
89324 +        */
89326 +       u16 maincode_decode_table[(1 << LZX_MAINCODE_TABLEBITS) +
89327 +                                       (LZX_MAINCODE_NUM_SYMBOLS * 2)];
89328 +       u8 maincode_lens[LZX_MAINCODE_NUM_SYMBOLS + LZX_READ_LENS_MAX_OVERRUN];
89331 +       u16 lencode_decode_table[(1 << LZX_LENCODE_TABLEBITS) +
89332 +                                       (LZX_LENCODE_NUM_SYMBOLS * 2)];
89333 +       u8 lencode_lens[LZX_LENCODE_NUM_SYMBOLS + LZX_READ_LENS_MAX_OVERRUN];
89336 +       u16 alignedcode_decode_table[(1 << LZX_ALIGNEDCODE_TABLEBITS) +
89337 +                                       (LZX_ALIGNEDCODE_NUM_SYMBOLS * 2)];
89338 +       u8 alignedcode_lens[LZX_ALIGNEDCODE_NUM_SYMBOLS];
89340 +       u16 precode_decode_table[(1 << LZX_PRECODE_TABLEBITS) +
89341 +                                (LZX_PRECODE_NUM_SYMBOLS * 2)];
89342 +       u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
89344 +       /* Temporary space for make_huffman_decode_table()  */
89345 +       u16 working_space[2 * (1 + LZX_MAX_MAIN_CODEWORD_LEN) +
89346 +                         LZX_MAINCODE_NUM_SYMBOLS];
89349 +static void undo_e8_translation(void *target, s32 input_pos)
89351 +       s32 abs_offset, rel_offset;
89353 +       abs_offset = get_unaligned_le32(target);
89354 +       if (abs_offset >= 0) {
89355 +               if (abs_offset < LZX_DEFAULT_FILESIZE) {
89356 +                       /* "good translation" */
89357 +                       rel_offset = abs_offset - input_pos;
89358 +                       put_unaligned_le32(rel_offset, target);
89359 +               }
89360 +       } else {
89361 +               if (abs_offset >= -input_pos) {
89362 +                       /* "compensating translation" */
89363 +                       rel_offset = abs_offset + LZX_DEFAULT_FILESIZE;
89364 +                       put_unaligned_le32(rel_offset, target);
89365 +               }
89366 +       }
89370 + * Undo the 'E8' preprocessing used in LZX.  Before compression, the
89371 + * uncompressed data was preprocessed by changing the targets of suspected x86
89372 + * CALL instructions from relative offsets to absolute offsets.  After
89373 + * match/literal decoding, the decompressor must undo the translation.
89374 + */
89375 +static void lzx_postprocess(u8 *data, u32 size)
89377 +       /*
89378 +        * A worthwhile optimization is to push the end-of-buffer check into the
89379 +        * relatively rare E8 case.  This is possible if we replace the last six
89380 +        * bytes of data with E8 bytes; then we are guaranteed to hit an E8 byte
89381 +        * before reaching end-of-buffer.  In addition, this scheme guarantees
89382 +        * that no translation can begin following an E8 byte in the last 10
89383 +        * bytes because a 4-byte offset containing E8 as its high byte is a
89384 +        * large negative number that is not valid for translation.  That is
89385 +        * exactly what we need.
89386 +        */
89387 +       u8 *tail;
89388 +       u8 saved_bytes[6];
89389 +       u8 *p;
89391 +       if (size <= 10)
89392 +               return;
89394 +       tail = &data[size - 6];
89395 +       memcpy(saved_bytes, tail, 6);
89396 +       memset(tail, 0xE8, 6);
89397 +       p = data;
89398 +       for (;;) {
89399 +               while (*p != 0xE8)
89400 +                       p++;
89401 +               if (p >= tail)
89402 +                       break;
89403 +               undo_e8_translation(p + 1, p - data);
89404 +               p += 5;
89405 +       }
89406 +       memcpy(tail, saved_bytes, 6);
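+/* A worked example with made-up positions: if an 0xE8 byte sits at input
+ * position 100 and the next four bytes decode to the absolute offset 1000,
+ * undo_e8_translation() rewrites them with the relative offset
+ * 1000 - 100 = 900.  An absolute offset of -50 at the same position lies in
+ * [-input_pos, 0), so it is rewritten as -50 + LZX_DEFAULT_FILESIZE instead.
+ */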
89409 +/* Read a Huffman-encoded symbol using the precode.  */
89410 +static forceinline u32 read_presym(const struct lzx_decompressor *d,
89411 +                                       struct input_bitstream *is)
89413 +       return read_huffsym(is, d->precode_decode_table,
89414 +                           LZX_PRECODE_TABLEBITS, LZX_MAX_PRE_CODEWORD_LEN);
89417 +/* Read a Huffman-encoded symbol using the main code.  */
89418 +static forceinline u32 read_mainsym(const struct lzx_decompressor *d,
89419 +                                        struct input_bitstream *is)
89421 +       return read_huffsym(is, d->maincode_decode_table,
89422 +                           LZX_MAINCODE_TABLEBITS, LZX_MAX_MAIN_CODEWORD_LEN);
89425 +/* Read a Huffman-encoded symbol using the length code.  */
89426 +static forceinline u32 read_lensym(const struct lzx_decompressor *d,
89427 +                                       struct input_bitstream *is)
89429 +       return read_huffsym(is, d->lencode_decode_table,
89430 +                           LZX_LENCODE_TABLEBITS, LZX_MAX_LEN_CODEWORD_LEN);
89433 +/* Read a Huffman-encoded symbol using the aligned offset code.  */
89434 +static forceinline u32 read_alignedsym(const struct lzx_decompressor *d,
89435 +                                           struct input_bitstream *is)
89437 +       return read_huffsym(is, d->alignedcode_decode_table,
89438 +                           LZX_ALIGNEDCODE_TABLEBITS,
89439 +                           LZX_MAX_ALIGNED_CODEWORD_LEN);
89443 + * Read the precode from the compressed input bitstream, then use it to decode
89444 + * @num_lens codeword length values.
89445 + *
89446 + * @is:                The input bitstream.
89447 + *
89448 + * @lens:      An array that contains the length values from the previous time
89449 + *             the codeword lengths for this Huffman code were read, or all 0's
89450 + *             if this is the first time.  This array must have at least
89451 + *             (@num_lens + LZX_READ_LENS_MAX_OVERRUN) entries.
89452 + *
89453 + * @num_lens:  Number of length values to decode.
89454 + *
89455 + * Returns 0 on success, or -1 if the data was invalid.
89456 + */
89457 +static int lzx_read_codeword_lens(struct lzx_decompressor *d,
89458 +                                 struct input_bitstream *is,
89459 +                                 u8 *lens, u32 num_lens)
89461 +       u8 *len_ptr = lens;
89462 +       u8 *lens_end = lens + num_lens;
89463 +       int i;
89465 +       /* Read the lengths of the precode codewords.  These are given
89466 +        * explicitly.
89467 +        */
89468 +       for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++) {
89469 +               d->precode_lens[i] =
89470 +                       bitstream_read_bits(is, LZX_PRECODE_ELEMENT_SIZE);
89471 +       }
89473 +       /* Make the decoding table for the precode.  */
89474 +       if (make_huffman_decode_table(d->precode_decode_table,
89475 +                                     LZX_PRECODE_NUM_SYMBOLS,
89476 +                                     LZX_PRECODE_TABLEBITS,
89477 +                                     d->precode_lens,
89478 +                                     LZX_MAX_PRE_CODEWORD_LEN,
89479 +                                     d->working_space))
89480 +               return -1;
89482 +       /* Decode the codeword lengths.  */
89483 +       do {
89484 +               u32 presym;
89485 +               u8 len;
89487 +               /* Read the next precode symbol.  */
89488 +               presym = read_presym(d, is);
89489 +               if (presym < 17) {
89490 +                       /* Difference from old length  */
89491 +                       len = *len_ptr - presym;
89492 +                       if ((s8)len < 0)
89493 +                               len += 17;
89494 +                       *len_ptr++ = len;
89495 +               } else {
89496 +                       /* Special RLE values  */
89498 +                       u32 run_len;
89500 +                       if (presym == 17) {
89501 +                               /* Run of 0's  */
89502 +                               run_len = 4 + bitstream_read_bits(is, 4);
89503 +                               len = 0;
89504 +                       } else if (presym == 18) {
89505 +                               /* Longer run of 0's  */
89506 +                               run_len = 20 + bitstream_read_bits(is, 5);
89507 +                               len = 0;
89508 +                       } else {
89509 +                               /* Run of identical lengths  */
89510 +                               run_len = 4 + bitstream_read_bits(is, 1);
89511 +                               presym = read_presym(d, is);
89512 +                               if (presym > 17)
89513 +                                       return -1;
89514 +                               len = *len_ptr - presym;
89515 +                               if ((s8)len < 0)
89516 +                                       len += 17;
89517 +                       }
89519 +                       do {
89520 +                               *len_ptr++ = len;
89521 +                       } while (--run_len);
89522 +                       /* Worst case overrun is when presym == 18,
89523 +                        * run_len == 20 + 31, and only 1 length was remaining.
89524 +                        * So LZX_READ_LENS_MAX_OVERRUN == 50.
89525 +                        *
89526 +                        * Overrun while reading the first half of maincode_lens
89527 +                        * can corrupt the previous values in the second half.
89528 +                        * This doesn't really matter because the resulting
89529 +                        * lengths will still be in range, and data that
89530 +                        * generates overruns is invalid anyway.
89531 +                        */
89532 +               }
89533 +       } while (len_ptr < lens_end);
89535 +       return 0;
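+/* To recap the precode symbol ranges decoded above: 0..16 give the new
+ * length as (previous length - presym) mod 17; 17 encodes a run of 4..19
+ * zero lengths; 18 a run of 20..51 zero lengths; and 19 a run of 4..5
+ * copies of one length that is itself delta-decoded from a second presym.
+ */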
89539 + * Read the header of an LZX block and save the block type and (uncompressed)
89540 + * size in *block_type_ret and *block_size_ret, respectively.
89541 + *
89542 + * If the block is compressed, also update the Huffman decode tables in @d
89543 + * with the new Huffman codes.  If the block is uncompressed, also update
89544 + * the @recent_offsets queue with the new match offsets.
89545 + *
89546 + * Return 0 on success, or -1 if the data was invalid.
89547 + */
89548 +static int lzx_read_block_header(struct lzx_decompressor *d,
89549 +                                struct input_bitstream *is,
89550 +                                int *block_type_ret,
89551 +                                u32 *block_size_ret,
89552 +                                u32 recent_offsets[])
89554 +       int block_type;
89555 +       u32 block_size;
89556 +       int i;
89558 +       bitstream_ensure_bits(is, 4);
89560 +       /* The first three bits tell us what kind of block it is, and should be
89561 +        * one of the LZX_BLOCKTYPE_* values.
89562 +        */
89563 +       block_type = bitstream_pop_bits(is, 3);
89565 +       /* Read the block size.  */
89566 +       if (bitstream_pop_bits(is, 1)) {
89567 +               block_size = LZX_DEFAULT_BLOCK_SIZE;
89568 +       } else {
89569 +               block_size = 0;
89570 +               block_size |= bitstream_read_bits(is, 8);
89571 +               block_size <<= 8;
89572 +               block_size |= bitstream_read_bits(is, 8);
89573 +       }
89575 +       switch (block_type) {
89577 +       case LZX_BLOCKTYPE_ALIGNED:
89579 +               /* Read the aligned offset code and prepare its decode table.
89580 +                */
89582 +               for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
89583 +                       d->alignedcode_lens[i] =
89584 +                               bitstream_read_bits(is,
89585 +                                                   LZX_ALIGNEDCODE_ELEMENT_SIZE);
89586 +               }
89588 +               if (make_huffman_decode_table(d->alignedcode_decode_table,
89589 +                                             LZX_ALIGNEDCODE_NUM_SYMBOLS,
89590 +                                             LZX_ALIGNEDCODE_TABLEBITS,
89591 +                                             d->alignedcode_lens,
89592 +                                             LZX_MAX_ALIGNED_CODEWORD_LEN,
89593 +                                             d->working_space))
89594 +                       return -1;
89596 +               /* Fall through, since the rest of the header for aligned offset
89597 +                * blocks is the same as that for verbatim blocks.
89598 +                */
89599 +               fallthrough;
89601 +       case LZX_BLOCKTYPE_VERBATIM:
89603 +               /* Read the main code and prepare its decode table.
89604 +                *
89605 +                * Note that the codeword lengths in the main code are encoded
89606 +                * in two parts: one part for literal symbols, and one part for
89607 +                * match symbols.
89608 +                */
89610 +               if (lzx_read_codeword_lens(d, is, d->maincode_lens,
89611 +                                          LZX_NUM_CHARS))
89612 +                       return -1;
89614 +               if (lzx_read_codeword_lens(d, is,
89615 +                                          d->maincode_lens + LZX_NUM_CHARS,
89616 +                                          LZX_MAINCODE_NUM_SYMBOLS - LZX_NUM_CHARS))
89617 +                       return -1;
89619 +               if (make_huffman_decode_table(d->maincode_decode_table,
89620 +                                             LZX_MAINCODE_NUM_SYMBOLS,
89621 +                                             LZX_MAINCODE_TABLEBITS,
89622 +                                             d->maincode_lens,
89623 +                                             LZX_MAX_MAIN_CODEWORD_LEN,
89624 +                                             d->working_space))
89625 +                       return -1;
89627 +               /* Read the length code and prepare its decode table.  */
89629 +               if (lzx_read_codeword_lens(d, is, d->lencode_lens,
89630 +                                          LZX_LENCODE_NUM_SYMBOLS))
89631 +                       return -1;
89633 +               if (make_huffman_decode_table(d->lencode_decode_table,
89634 +                                             LZX_LENCODE_NUM_SYMBOLS,
89635 +                                             LZX_LENCODE_TABLEBITS,
89636 +                                             d->lencode_lens,
89637 +                                             LZX_MAX_LEN_CODEWORD_LEN,
89638 +                                             d->working_space))
89639 +                       return -1;
89641 +               break;
89643 +       case LZX_BLOCKTYPE_UNCOMPRESSED:
89645 +               /* Before reading the three recent offsets from the uncompressed
89646 +                * block header, the stream must be aligned on a 16-bit
89647 +                * boundary.  But if the stream is *already* aligned, then the
89648 +                * next 16 bits must be discarded.
89649 +                */
89650 +               bitstream_ensure_bits(is, 1);
89651 +               bitstream_align(is);
89653 +               recent_offsets[0] = bitstream_read_u32(is);
89654 +               recent_offsets[1] = bitstream_read_u32(is);
89655 +               recent_offsets[2] = bitstream_read_u32(is);
89657 +               /* Offsets of 0 are invalid.  */
89658 +               if (recent_offsets[0] == 0 || recent_offsets[1] == 0 ||
89659 +                   recent_offsets[2] == 0)
89660 +                       return -1;
89661 +               break;
89663 +       default:
89664 +               /* Unrecognized block type.  */
89665 +               return -1;
89666 +       }
89668 +       *block_type_ret = block_type;
89669 +       *block_size_ret = block_size;
89670 +       return 0;
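+/* Block-size encoding example (illustrative field values): a set flag bit
+ * implies the default 32768-byte block; a clear flag bit followed by the
+ * 8-bit fields 0x12 and 0x34 yields a block size of (0x12 << 8) | 0x34 =
+ * 0x1234 bytes.
+ */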
89673 +/* Decompress a block of LZX-compressed data.  */
89674 +static int lzx_decompress_block(const struct lzx_decompressor *d,
89675 +                               struct input_bitstream *is,
89676 +                               int block_type, u32 block_size,
89677 +                               u8 * const out_begin, u8 *out_next,
89678 +                               u32 recent_offsets[])
89680 +       u8 * const block_end = out_next + block_size;
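+	/* All-ones mask when this is an aligned offset block, else zero; it
+	 * folds the block-type test below into a bitwise AND.
+	 */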
89681 +       u32 ones_if_aligned = 0U - (block_type == LZX_BLOCKTYPE_ALIGNED);
89683 +       do {
89684 +               u32 mainsym;
89685 +               u32 match_len;
89686 +               u32 match_offset;
89687 +               u32 offset_slot;
89688 +               u32 num_extra_bits;
89690 +               mainsym = read_mainsym(d, is);
89691 +               if (mainsym < LZX_NUM_CHARS) {
89692 +                       /* Literal  */
89693 +                       *out_next++ = mainsym;
89694 +                       continue;
89695 +               }
89697 +               /* Match  */
89699 +               /* Decode the length header and offset slot.  */
89700 +               mainsym -= LZX_NUM_CHARS;
89701 +               match_len = mainsym % LZX_NUM_LEN_HEADERS;
89702 +               offset_slot = mainsym / LZX_NUM_LEN_HEADERS;
89704 +               /* If needed, read a length symbol to decode the full length. */
89705 +               if (match_len == LZX_NUM_PRIMARY_LENS)
89706 +                       match_len += read_lensym(d, is);
89707 +               match_len += LZX_MIN_MATCH_LEN;
89709 +               if (offset_slot < LZX_NUM_RECENT_OFFSETS) {
89710 +                       /* Repeat offset  */
89712 +                       /* Note: This isn't a real LRU queue, since using the R2
89713 +                        * offset doesn't bump the R1 offset down to R2.  This
89714 +                        * quirk allows all 3 recent offsets to be handled by
89715 +                        * the same code.  (For R0, the swap is a no-op.)
89716 +                        */
89717 +                       match_offset = recent_offsets[offset_slot];
89718 +                       recent_offsets[offset_slot] = recent_offsets[0];
89719 +                       recent_offsets[0] = match_offset;
89720 +               } else {
89721 +                       /* Explicit offset  */
89723 +                       /* Look up the number of extra bits that need to be read
89724 +                        * to decode offsets with this offset slot.
89725 +                        */
89726 +                       num_extra_bits = lzx_extra_offset_bits[offset_slot];
89728 +                       /* Start with the offset slot base value.  */
89729 +                       match_offset = lzx_offset_slot_base[offset_slot];
89731 +                       /* In aligned offset blocks, the low-order 3 bits of
89732 +                        * each offset are encoded using the aligned offset
89733 +                        * code.  Otherwise, all the extra bits are literal.
89734 +                        */
89736 +                       if ((num_extra_bits & ones_if_aligned) >= LZX_NUM_ALIGNED_OFFSET_BITS) {
89737 +                               match_offset +=
89738 +                                       bitstream_read_bits(is, num_extra_bits -
89739 +                                                               LZX_NUM_ALIGNED_OFFSET_BITS)
89740 +                                                       << LZX_NUM_ALIGNED_OFFSET_BITS;
89741 +                               match_offset += read_alignedsym(d, is);
89742 +                       } else {
89743 +                               match_offset += bitstream_read_bits(is, num_extra_bits);
89744 +                       }
89746 +                       /* Adjust the offset.  */
89747 +                       match_offset -= (LZX_NUM_RECENT_OFFSETS - 1);
89749 +                       /* Update the recent offsets.  */
89750 +                       recent_offsets[2] = recent_offsets[1];
89751 +                       recent_offsets[1] = recent_offsets[0];
89752 +                       recent_offsets[0] = match_offset;
89753 +               }
89755 +               /* Validate the match, then copy it to the current position.  */
89757 +               if (match_len > (size_t)(block_end - out_next))
89758 +                       return -1;
89760 +               if (match_offset > (size_t)(out_next - out_begin))
89761 +                       return -1;
89763 +               out_next = lz_copy(out_next, match_len, match_offset,
89764 +                                  block_end, LZX_MIN_MATCH_LEN);
89766 +       } while (out_next != block_end);
89768 +       return 0;
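+/* Match decoding recap with an illustrative symbol: mainsym 300 is a match
+ * (>= LZX_NUM_CHARS); 300 - 256 = 44 splits into length header 44 % 8 = 4
+ * (match length 2 + 4 = 6, no length symbol needed) and offset slot
+ * 44 / 8 = 5, i.e. an explicit offset with base 6 and one extra bit.
+ */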
89772 + * lzx_allocate_decompressor - Allocate an LZX decompressor
89773 + *
89774 + * Return a pointer to the decompressor on success, or NULL on allocation
89775 + * failure.
89776 + */
89777 +struct lzx_decompressor *lzx_allocate_decompressor(void)
89779 +       return kmalloc(sizeof(struct lzx_decompressor), GFP_NOFS);
89783 + * lzx_decompress - Decompress a buffer of LZX-compressed data
89784 + *
89785 + * @decompressor:      A decompressor allocated with lzx_allocate_decompressor()
89786 + * @compressed_data:   The buffer of data to decompress
89787 + * @compressed_size:   Number of bytes of compressed data
89788 + * @uncompressed_data: The buffer in which to store the decompressed data
89789 + * @uncompressed_size: The number of bytes the data decompresses into
89790 + *
89791 + * Return 0 on success, or -1 if the compressed data is invalid.
89792 + */
89793 +int lzx_decompress(struct lzx_decompressor *decompressor,
89794 +                  const void *compressed_data, size_t compressed_size,
89795 +                  void *uncompressed_data, size_t uncompressed_size)
89797 +       struct lzx_decompressor *d = decompressor;
89798 +       u8 * const out_begin = uncompressed_data;
89799 +       u8 *out_next = out_begin;
89800 +       u8 * const out_end = out_begin + uncompressed_size;
89801 +       struct input_bitstream is;
89802 +       u32 recent_offsets[LZX_NUM_RECENT_OFFSETS] = {1, 1, 1};
89803 +       int e8_status = 0;
89805 +       init_input_bitstream(&is, compressed_data, compressed_size);
89807 +       /* Codeword lengths begin as all 0's for delta encoding purposes.  */
89808 +       memset(d->maincode_lens, 0, LZX_MAINCODE_NUM_SYMBOLS);
89809 +       memset(d->lencode_lens, 0, LZX_LENCODE_NUM_SYMBOLS);
89811 +       /* Decompress blocks until we have all the uncompressed data.  */
89813 +       while (out_next != out_end) {
89814 +               int block_type;
89815 +               u32 block_size;
89817 +               if (lzx_read_block_header(d, &is, &block_type, &block_size,
89818 +                                         recent_offsets))
89819 +                       goto invalid;
89821 +               if (block_size < 1 || block_size > (size_t)(out_end - out_next))
89822 +                       goto invalid;
89824 +               if (block_type != LZX_BLOCKTYPE_UNCOMPRESSED) {
89826 +                       /* Compressed block  */
89828 +                       if (lzx_decompress_block(d,
89829 +                                                &is,
89830 +                                                block_type,
89831 +                                                block_size,
89832 +                                                out_begin,
89833 +                                                out_next,
89834 +                                                recent_offsets))
89835 +                               goto invalid;
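+                       /* A nonzero codeword length for the 0xE8 literal
+                        * means this block may emit 0xE8 bytes, so the E8
+                        * postprocessing below will be needed.
+                        */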
89837 +                       e8_status |= d->maincode_lens[0xe8];
89838 +                       out_next += block_size;
89839 +               } else {
89840 +                       /* Uncompressed block  */
89842 +                       out_next = bitstream_read_bytes(&is, out_next,
89843 +                                                       block_size);
89844 +                       if (!out_next)
89845 +                               goto invalid;
89847 +                       if (block_size & 1)
89848 +                               bitstream_read_byte(&is);
89850 +                       e8_status = 1;
89851 +               }
89852 +       }
89854 +       /* Postprocess the data unless it cannot possibly contain 0xe8 bytes. */
89855 +       if (e8_status)
89856 +               lzx_postprocess(uncompressed_data, uncompressed_size);
89858 +       return 0;
89860 +invalid:
89861 +       return -1;
89865 + * lzx_free_decompressor - Free an LZX decompressor
89866 + *
89867 + * @decompressor:       A decompressor that was allocated with
89868 + *                     lzx_allocate_decompressor(), or NULL.
89869 + */
89870 +void lzx_free_decompressor(struct lzx_decompressor *decompressor)
89872 +       kfree(decompressor);
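+/* A minimal usage sketch of this API; the caller shown here, its buffer
+ * names, and its error codes are illustrative assumptions, not part of this
+ * patch:
+ *
+ *	struct lzx_decompressor *d = lzx_allocate_decompressor();
+ *
+ *	if (!d)
+ *		return -ENOMEM;
+ *	err = lzx_decompress(d, cbuf, csize, ubuf, usize) ? -EINVAL : 0;
+ *	lzx_free_decompressor(d);
+ */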
89874 diff --git a/fs/ntfs3/lib/xpress_decompress.c b/fs/ntfs3/lib/xpress_decompress.c
89875 new file mode 100644
89876 index 000000000000..3d98f36a981e
89877 --- /dev/null
89878 +++ b/fs/ntfs3/lib/xpress_decompress.c
89879 @@ -0,0 +1,155 @@
89880 +// SPDX-License-Identifier: GPL-2.0-or-later
89882 + * xpress_decompress.c - A decompressor for the XPRESS compression format
89883 + * (Huffman variant), which can be used in "System Compressed" files.  This is
89884 + * based on the code from wimlib.
89885 + *
89886 + * Copyright (C) 2015 Eric Biggers
89887 + *
89888 + * This program is free software: you can redistribute it and/or modify it under
89889 + * the terms of the GNU General Public License as published by the Free Software
89890 + * Foundation, either version 2 of the License, or (at your option) any later
89891 + * version.
89892 + *
89893 + * This program is distributed in the hope that it will be useful, but WITHOUT
89894 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
89895 + * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
89896 + * details.
89897 + *
89898 + * You should have received a copy of the GNU General Public License along with
89899 + * this program.  If not, see <http://www.gnu.org/licenses/>.
89900 + */
89902 +#include "decompress_common.h"
89903 +#include "lib.h"
89905 +#define XPRESS_NUM_SYMBOLS     512
89906 +#define XPRESS_MAX_CODEWORD_LEN        15
89907 +#define XPRESS_MIN_MATCH_LEN   3
89909 +/* This value is chosen for fast decompression.  */
89910 +#define XPRESS_TABLEBITS 12
89912 +/* Reusable heap-allocated memory for XPRESS decompression  */
89913 +struct xpress_decompressor {
89915 +       /* The Huffman decoding table  */
89916 +       u16 decode_table[(1 << XPRESS_TABLEBITS) + 2 * XPRESS_NUM_SYMBOLS];
89918 +       /* An array that maps symbols to codeword lengths  */
89919 +       u8 lens[XPRESS_NUM_SYMBOLS];
89921 +       /* Temporary space for make_huffman_decode_table()  */
89922 +       u16 working_space[2 * (1 + XPRESS_MAX_CODEWORD_LEN) +
89923 +                         XPRESS_NUM_SYMBOLS];
89927 + * xpress_allocate_decompressor - Allocate an XPRESS decompressor
89928 + *
89929 + * Return a pointer to the decompressor on success, or NULL on allocation
89930 + * failure.
89931 + */
89932 +struct xpress_decompressor *xpress_allocate_decompressor(void)
89934 +       return kmalloc(sizeof(struct xpress_decompressor), GFP_NOFS);
89938 + * xpress_decompress - Decompress a buffer of XPRESS-compressed data
89939 + *
89940 + * @decompressor:       A decompressor that was allocated with
89941 + *                     xpress_allocate_decompressor()
89942 + * @compressed_data:   The buffer of data to decompress
89943 + * @compressed_size:   Number of bytes of compressed data
89944 + * @uncompressed_data: The buffer in which to store the decompressed data
89945 + * @uncompressed_size: The number of bytes the data decompresses into
89946 + *
89947 + * Return 0 on success, or -1 if the compressed data is invalid.
89948 + */
89949 +int xpress_decompress(struct xpress_decompressor *decompressor,
89950 +                     const void *compressed_data, size_t compressed_size,
89951 +                     void *uncompressed_data, size_t uncompressed_size)
89953 +       struct xpress_decompressor *d = decompressor;
89954 +       const u8 * const in_begin = compressed_data;
89955 +       u8 * const out_begin = uncompressed_data;
89956 +       u8 *out_next = out_begin;
89957 +       u8 * const out_end = out_begin + uncompressed_size;
89958 +       struct input_bitstream is;
89959 +       u32 i;
89961 +       /* Read the Huffman codeword lengths (two 4-bit lengths per byte).  */
89962 +       if (compressed_size < XPRESS_NUM_SYMBOLS / 2)
89963 +               goto invalid;
89964 +       for (i = 0; i < XPRESS_NUM_SYMBOLS / 2; i++) {
89965 +               d->lens[i*2 + 0] = in_begin[i] & 0xF;
89966 +               d->lens[i*2 + 1] = in_begin[i] >> 4;
89967 +       }
89969 +       /* Build a decoding table for the Huffman code.  */
89970 +       if (make_huffman_decode_table(d->decode_table, XPRESS_NUM_SYMBOLS,
89971 +                                     XPRESS_TABLEBITS, d->lens,
89972 +                                     XPRESS_MAX_CODEWORD_LEN,
89973 +                                     d->working_space))
89974 +               goto invalid;
89976 +       /* Decode the matches and literals.  */
89978 +       init_input_bitstream(&is, in_begin + XPRESS_NUM_SYMBOLS / 2,
89979 +                            compressed_size - XPRESS_NUM_SYMBOLS / 2);
89981 +       while (out_next != out_end) {
89982 +               u32 sym;
89983 +               u32 log2_offset;
89984 +               u32 length;
89985 +               u32 offset;
89987 +               sym = read_huffsym(&is, d->decode_table,
89988 +                                  XPRESS_TABLEBITS, XPRESS_MAX_CODEWORD_LEN);
89989 +               if (sym < 256) {
89990 +                       /* Literal  */
89991 +                       *out_next++ = sym;
89992 +               } else {
89993 +                       /* Match  */
89994 +                       length = sym & 0xf;
89995 +                       log2_offset = (sym >> 4) & 0xf;
89997 +                       bitstream_ensure_bits(&is, 16);
89999 +                       offset = ((u32)1 << log2_offset) |
90000 +                                bitstream_pop_bits(&is, log2_offset);
90002 +                       if (length == 0xf) {
90003 +                               length += bitstream_read_byte(&is);
90004 +                               if (length == 0xf + 0xff)
90005 +                                       length = bitstream_read_u16(&is);
90006 +                       }
90007 +                       length += XPRESS_MIN_MATCH_LEN;
90009 +                       if (offset > (size_t)(out_next - out_begin))
90010 +                               goto invalid;
90012 +                       if (length > (size_t)(out_end - out_next))
90013 +                               goto invalid;
90015 +                       out_next = lz_copy(out_next, length, offset, out_end,
90016 +                                          XPRESS_MIN_MATCH_LEN);
90017 +               }
90018 +       }
90019 +       return 0;
90021 +invalid:
90022 +       return -1;
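+/* Match-symbol layout, with an illustrative value: sym = 0x1A5 (>= 256)
+ * carries length nibble 0x5 and log2_offset nibble 0xA, so the offset is
+ * (1 << 10) plus ten more bits from the stream and the length is
+ * 5 + XPRESS_MIN_MATCH_LEN = 8; length nibbles of 0xf spill into the extra
+ * byte/u16 encodings handled above.
+ */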
90026 + * xpress_free_decompressor - Free an XPRESS decompressor
90027 + *
90028 + * @decompressor:       A decompressor that was allocated with
90029 + *                     xpress_allocate_decompressor(), or NULL.
90030 + */
90031 +void xpress_free_decompressor(struct xpress_decompressor *decompressor)
90033 +       kfree(decompressor);
90035 diff --git a/fs/ntfs3/lznt.c b/fs/ntfs3/lznt.c
90036 new file mode 100644
90037 index 000000000000..ead9ab7d69b3
90038 --- /dev/null
90039 +++ b/fs/ntfs3/lznt.c
90040 @@ -0,0 +1,452 @@
90041 +// SPDX-License-Identifier: GPL-2.0
90043 + *
90044 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
90045 + *
90046 + */
90047 +#include <linux/blkdev.h>
90048 +#include <linux/buffer_head.h>
90049 +#include <linux/fs.h>
90050 +#include <linux/nls.h>
90052 +#include "debug.h"
90053 +#include "ntfs.h"
90054 +#include "ntfs_fs.h"
90056 +// clang-format off
90057 +/* The source buffer is all zeros. */
90058 +#define LZNT_ERROR_ALL_ZEROS   1
90059 +#define LZNT_CHUNK_SIZE                0x1000
90060 +// clang-format on
90062 +struct lznt_hash {
90063 +       const u8 *p1;
90064 +       const u8 *p2;
90067 +struct lznt {
90068 +       const u8 *unc;
90069 +       const u8 *unc_end;
90070 +       const u8 *best_match;
90071 +       size_t max_len;
90072 +       bool std;
90074 +       struct lznt_hash hash[LZNT_CHUNK_SIZE];
90077 +static inline size_t get_match_len(const u8 *ptr, const u8 *end, const u8 *prev,
90078 +                                  size_t max_len)
90080 +       size_t len = 0;
90082 +       while (ptr + len < end && ptr[len] == prev[len] && ++len < max_len)
90083 +               ;
90084 +       return len;
90087 +static size_t longest_match_std(const u8 *src, struct lznt *ctx)
90089 +       size_t hash_index;
90090 +       size_t len1 = 0, len2 = 0;
90091 +       const u8 **hash;
90093 +       hash_index =
90094 +               ((40543U * ((((src[0] << 4) ^ src[1]) << 4) ^ src[2])) >> 4) &
90095 +               (LZNT_CHUNK_SIZE - 1);
90097 +       hash = &(ctx->hash[hash_index].p1);
90099 +       if (hash[0] >= ctx->unc && hash[0] < src && hash[0][0] == src[0] &&
90100 +           hash[0][1] == src[1] && hash[0][2] == src[2]) {
90101 +               len1 = 3;
90102 +               if (ctx->max_len > 3)
90103 +                       len1 += get_match_len(src + 3, ctx->unc_end,
90104 +                                             hash[0] + 3, ctx->max_len - 3);
90105 +       }
90107 +       if (hash[1] >= ctx->unc && hash[1] < src && hash[1][0] == src[0] &&
90108 +           hash[1][1] == src[1] && hash[1][2] == src[2]) {
90109 +               len2 = 3;
90110 +               if (ctx->max_len > 3)
90111 +                       len2 += get_match_len(src + 3, ctx->unc_end,
90112 +                                             hash[1] + 3, ctx->max_len - 3);
90113 +       }
90115 +       /* Compare two matches and select the best one */
90116 +       if (len1 < len2) {
90117 +               ctx->best_match = hash[1];
90118 +               len1 = len2;
90119 +       } else {
90120 +               ctx->best_match = hash[0];
90121 +       }
90123 +       hash[1] = hash[0];
90124 +       hash[0] = src;
90125 +       return len1;
90128 +static size_t longest_match_best(const u8 *src, struct lznt *ctx)
90130 +       size_t max_len;
90131 +       const u8 *ptr;
90133 +       if (ctx->unc >= src || !ctx->max_len)
90134 +               return 0;
90136 +       max_len = 0;
90137 +       for (ptr = ctx->unc; ptr < src; ++ptr) {
90138 +               size_t len =
90139 +                       get_match_len(src, ctx->unc_end, ptr, ctx->max_len);
90140 +               if (len >= max_len) {
90141 +                       max_len = len;
90142 +                       ctx->best_match = ptr;
90143 +               }
90144 +       }
90146 +       return max_len >= 3 ? max_len : 0;
90149 +static const size_t s_max_len[] = {
90150 +       0x1002, 0x802, 0x402, 0x202, 0x102, 0x82, 0x42, 0x22, 0x12,
90153 +static const size_t s_max_off[] = {
90154 +       0x10, 0x20, 0x40, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
90157 +static inline u16 make_pair(size_t offset, size_t len, size_t index)
90159 +       return ((offset - 1) << (12 - index)) |
90160 +              ((len - 3) & (((1 << (12 - index)) - 1)));
90163 +static inline size_t parse_pair(u16 pair, size_t *offset, size_t index)
90165 +       *offset = 1 + (pair >> (12 - index));
90166 +       return 3 + (pair & ((1 << (12 - index)) - 1));
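+/* A worked pair with made-up values: at index 0 the 16-bit pair splits into
+ * 4 offset bits and 12 length bits, so 0x1005 decodes to offset 1 + 0x1 = 2
+ * and length 3 + 0x005 = 8.  Each increment of 'index' moves one bit from
+ * the length field to the offset field, matching s_max_len[]/s_max_off[].
+ */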
90170 + * compress_chunk
90171 + *
90172 + * Returns one of three values:
90173 + * 0  - ok, 'cmpr' contains 'cmpr_chunk_size' bytes of compressed data
90174 + * 1  - the input buffer is all zeros (LZNT_ERROR_ALL_ZEROS)
90175 + * -2 - the compressed buffer is too small to hold the compressed data
90176 + */
90177 +static inline int compress_chunk(size_t (*match)(const u8 *, struct lznt *),
90178 +                                const u8 *unc, const u8 *unc_end, u8 *cmpr,
90179 +                                u8 *cmpr_end, size_t *cmpr_chunk_size,
90180 +                                struct lznt *ctx)
90182 +       size_t cnt = 0;
90183 +       size_t idx = 0;
90184 +       const u8 *up = unc;
90185 +       u8 *cp = cmpr + 3;
90186 +       u8 *cp2 = cmpr + 2;
90187 +       u8 not_zero = 0;
90188 +       /* Control byte holding 8 flag bits: 0 - literal byte as is, 1 - (offset, length) pair */
90189 +       u8 ohdr = 0;
90190 +       u8 *last;
90191 +       u16 t16;
90193 +       if (unc + LZNT_CHUNK_SIZE < unc_end)
90194 +               unc_end = unc + LZNT_CHUNK_SIZE;
90196 +       last = min(cmpr + LZNT_CHUNK_SIZE + sizeof(short), cmpr_end);
90198 +       ctx->unc = unc;
90199 +       ctx->unc_end = unc_end;
90200 +       ctx->max_len = s_max_len[0];
90202 +       while (up < unc_end) {
90203 +               size_t max_len;
90205 +               while (unc + s_max_off[idx] < up)
90206 +                       ctx->max_len = s_max_len[++idx];
90208 +               // Find match
90209 +               max_len = up + 3 <= unc_end ? (*match)(up, ctx) : 0;
90211 +               if (!max_len) {
90212 +                       if (cp >= last)
90213 +                               goto NotCompressed;
90214 +                       not_zero |= *cp++ = *up++;
90215 +               } else if (cp + 1 >= last) {
90216 +                       goto NotCompressed;
90217 +               } else {
90218 +                       t16 = make_pair(up - ctx->best_match, max_len, idx);
90219 +                       *cp++ = t16;
90220 +                       *cp++ = t16 >> 8;
90222 +                       ohdr |= 1 << cnt;
90223 +                       up += max_len;
90224 +               }
90226 +               cnt = (cnt + 1) & 7;
90227 +               if (!cnt) {
90228 +                       *cp2 = ohdr;
90229 +                       ohdr = 0;
90230 +                       cp2 = cp;
90231 +                       cp += 1;
90232 +               }
90233 +       }
90235 +       if (cp2 < last)
90236 +               *cp2 = ohdr;
90237 +       else
90238 +               cp -= 1;
90240 +       *cmpr_chunk_size = cp - cmpr;
90242 +       t16 = (*cmpr_chunk_size - 3) | 0xB000;
90243 +       cmpr[0] = t16;
90244 +       cmpr[1] = t16 >> 8;
90246 +       return not_zero ? 0 : LZNT_ERROR_ALL_ZEROS;
90248 +NotCompressed:
90250 +       if ((cmpr + LZNT_CHUNK_SIZE + sizeof(short)) > last)
90251 +               return -2;
90253 +       /*
90254 +        * Copy the data uncompressed
90255 +        * 0x3FFF == ((LZNT_CHUNK_SIZE + 2 - 3) | 0x3000)
90256 +        */
90257 +       cmpr[0] = 0xff;
90258 +       cmpr[1] = 0x3f;
90260 +       memcpy(cmpr + sizeof(short), unc, LZNT_CHUNK_SIZE);
90261 +       *cmpr_chunk_size = LZNT_CHUNK_SIZE + sizeof(short);
90263 +       return 0;
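+/* The 16-bit chunk header, as this code writes and reads it: the low 12 bits
+ * hold (total chunk size, header included) - 3, bit 15 (0x8000) marks a
+ * compressed chunk, and 0x3000 are signature bits.  Hence 0xB000 | (size - 3)
+ * marks a compressed chunk and 0x3FFF a stored 0x1000-byte chunk.
+ */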
90266 +static inline ssize_t decompress_chunk(u8 *unc, u8 *unc_end, const u8 *cmpr,
90267 +                                      const u8 *cmpr_end)
90269 +       u8 *up = unc;
90270 +       u8 ch = *cmpr++;
90271 +       size_t bit = 0;
90272 +       size_t index = 0;
90273 +       u16 pair;
90274 +       size_t offset, length;
90276 +       /* Decompress while both pointers remain inside their buffers */
90277 +       while (up < unc_end && cmpr < cmpr_end) {
90278 +               /* Correct index */
90279 +               while (unc + s_max_off[index] < up)
90280 +                       index += 1;
90282 +               /* Check the current flag for zero */
90283 +               if (!(ch & (1 << bit))) {
90284 +                       /* Just copy byte */
90285 +                       *up++ = *cmpr++;
90286 +                       goto next;
90287 +               }
90289 +               /* Check for boundary */
90290 +               if (cmpr + 1 >= cmpr_end)
90291 +                       return -EINVAL;
90293 +               /* Read a short from little endian stream */
90294 +               pair = cmpr[1];
90295 +               pair <<= 8;
90296 +               pair |= cmpr[0];
90298 +               cmpr += 2;
90300 +               /* Translate packed information into offset and length */
90301 +               length = parse_pair(pair, &offset, index);
90303 +               /* Check offset for boundary */
90304 +               if (unc + offset > up)
90305 +                       return -EINVAL;
90307 +               /* Truncate the length if necessary */
90308 +               if (up + length >= unc_end)
90309 +                       length = unc_end - up;
90311 +               /* Now we copy bytes. This is the heart of the LZ algorithm. */
90312 +               for (; length > 0; length--, up++)
90313 +                       *up = *(up - offset);
90315 +next:
90316 +               /* Advance flag bit value */
90317 +               bit = (bit + 1) & 7;
90319 +               if (!bit) {
90320 +                       if (cmpr >= cmpr_end)
90321 +                               break;
90323 +                       ch = *cmpr++;
90324 +               }
90325 +       }
90327 +       /* Return the size of the uncompressed data */
90328 +       return up - unc;
90332 + * level 0  - standard compression
90333 + * level !0 - best compression; requires much more CPU time
90334 + */
90335 +struct lznt *get_lznt_ctx(int level)
90337 +       struct lznt *r = ntfs_zalloc(level ? offsetof(struct lznt, hash)
90338 +                                          : sizeof(struct lznt));
90340 +       if (r)
90341 +               r->std = !level;
90342 +       return r;
90346 + * compress_lznt
90347 + *
90348 + * Compresses 'unc' into 'cmpr'.  Returns:
90349 + * +x - ok, 'cmpr' contains x bytes of compressed data
90350 + * 0  - the input buffer is all zeros
90351 + */
90352 +size_t compress_lznt(const void *unc, size_t unc_size, void *cmpr,
90353 +                    size_t cmpr_size, struct lznt *ctx)
90355 +       int err;
90356 +       size_t (*match)(const u8 *src, struct lznt *ctx);
90357 +       u8 *p = cmpr;
90358 +       u8 *end = p + cmpr_size;
90359 +       const u8 *unc_chunk = unc;
90360 +       const u8 *unc_end = unc_chunk + unc_size;
90361 +       bool is_zero = true;
90363 +       if (ctx->std) {
90364 +               match = &longest_match_std;
90365 +               memset(ctx->hash, 0, sizeof(ctx->hash));
90366 +       } else {
90367 +               match = &longest_match_best;
90368 +       }
90370 +       /* compression cycle */
90371 +       for (; unc_chunk < unc_end; unc_chunk += LZNT_CHUNK_SIZE) {
90372 +               cmpr_size = 0;
90373 +               err = compress_chunk(match, unc_chunk, unc_end, p, end,
90374 +                                    &cmpr_size, ctx);
90375 +               if (err < 0)
90376 +                       return unc_size;
90378 +               if (is_zero && err != LZNT_ERROR_ALL_ZEROS)
90379 +                       is_zero = false;
90381 +               p += cmpr_size;
90382 +       }
90384 +       if (p <= end - 2)
90385 +               p[0] = p[1] = 0;
90387 +       return is_zero ? 0 : PtrOffset(cmpr, p);
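+/* A minimal round-trip sketch; the buffers, sizes, and error handling here
+ * are illustrative assumptions, not part of this patch (freeing the context
+ * uses the driver's matching free helper, not shown):
+ *
+ *	struct lznt *ctx = get_lznt_ctx(0);
+ *	size_t c_size;
+ *	ssize_t u_size;
+ *
+ *	if (!ctx)
+ *		return -ENOMEM;
+ *	c_size = compress_lznt(unc, unc_size, cmpr, cmpr_size, ctx);
+ *	u_size = decompress_lznt(cmpr, c_size, out, unc_size);
+ */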
90391 + * decompress_lznt
90392 + *
90393 + * Decompresses 'cmpr' into 'unc'
90394 + */
90395 +ssize_t decompress_lznt(const void *cmpr, size_t cmpr_size, void *unc,
90396 +                       size_t unc_size)
90398 +       const u8 *cmpr_chunk = cmpr;
90399 +       const u8 *cmpr_end = cmpr_chunk + cmpr_size;
90400 +       u8 *unc_chunk = unc;
90401 +       u8 *unc_end = unc_chunk + unc_size;
90402 +       u16 chunk_hdr;
90404 +       if (cmpr_size < sizeof(short))
90405 +               return -EINVAL;
90407 +       /* read chunk header */
90408 +       chunk_hdr = cmpr_chunk[1];
90409 +       chunk_hdr <<= 8;
90410 +       chunk_hdr |= cmpr_chunk[0];
90412 +       /* loop through decompressing chunks */
90413 +       for (;;) {
90414 +               size_t chunk_size_saved;
90415 +               size_t unc_use;
90416 +               size_t cmpr_use = 3 + (chunk_hdr & (LZNT_CHUNK_SIZE - 1));
90418 +               /* Check that the chunk actually fits the supplied buffer */
90419 +               if (cmpr_chunk + cmpr_use > cmpr_end)
90420 +                       return -EINVAL;
90422 +               /* Check whether the chunk contains compressed data */
90423 +               if (chunk_hdr & 0x8000) {
90424 +                       /* Decompress a chunk and return if we get an error */
90425 +                       ssize_t err =
90426 +                               decompress_chunk(unc_chunk, unc_end,
90427 +                                                cmpr_chunk + sizeof(chunk_hdr),
90428 +                                                cmpr_chunk + cmpr_use);
90429 +                       if (err < 0)
90430 +                               return err;
90431 +                       unc_use = err;
90432 +               } else {
90433 +                       /* This chunk does not contain compressed data */
90434 +                       unc_use = unc_chunk + LZNT_CHUNK_SIZE > unc_end
90435 +                                         ? unc_end - unc_chunk
90436 +                                         : LZNT_CHUNK_SIZE;
90438 +                       if (cmpr_chunk + sizeof(chunk_hdr) + unc_use >
90439 +                           cmpr_end) {
90440 +                               return -EINVAL;
90441 +                       }
90443 +                       memcpy(unc_chunk, cmpr_chunk + sizeof(chunk_hdr),
90444 +                              unc_use);
90445 +               }
90447 +               /* Advance pointers */
90448 +               cmpr_chunk += cmpr_use;
90449 +               unc_chunk += unc_use;
90451 +               /* Check for the end of unc buffer */
90452 +               if (unc_chunk >= unc_end)
90453 +                       break;
90455 +               /* Proceed to the next chunk */
90456 +               if (cmpr_chunk > cmpr_end - 2)
90457 +                       break;
90459 +               chunk_size_saved = LZNT_CHUNK_SIZE;
90461 +               /* read chunk header */
90462 +               chunk_hdr = cmpr_chunk[1];
90463 +               chunk_hdr <<= 8;
90464 +               chunk_hdr |= cmpr_chunk[0];
90466 +               if (!chunk_hdr)
90467 +                       break;
90469 +               /* Check the size of unc buffer */
90470 +               if (unc_use < chunk_size_saved) {
90471 +                       size_t t1 = chunk_size_saved - unc_use;
90472 +                       u8 *t2 = unc_chunk + t1;
90474 +                       /* 'Zero' memory */
90475 +                       if (t2 >= unc_end)
90476 +                               break;
90478 +                       memset(unc_chunk, 0, t1);
90479 +                       unc_chunk = t2;
90480 +               }
90481 +       }
90483 +       /* Check compression boundary */
90484 +       if (cmpr_chunk > cmpr_end)
90485 +               return -EINVAL;
90487 +       /*
90488 +        * The unc size is just a difference between current
90489 +        * pointer and original one
90490 +        */
90491 +       return PtrOffset(unc, unc_chunk);
90493 diff --git a/fs/ntfs3/namei.c b/fs/ntfs3/namei.c
90494 new file mode 100644
90495 index 000000000000..f5db12cd3b20
90496 --- /dev/null
90497 +++ b/fs/ntfs3/namei.c
90498 @@ -0,0 +1,578 @@
90499 +// SPDX-License-Identifier: GPL-2.0
90501 + *
90502 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
90503 + *
90504 + */
90506 +#include <linux/blkdev.h>
90507 +#include <linux/buffer_head.h>
90508 +#include <linux/fs.h>
90509 +#include <linux/iversion.h>
90510 +#include <linux/namei.h>
90511 +#include <linux/nls.h>
90513 +#include "debug.h"
90514 +#include "ntfs.h"
90515 +#include "ntfs_fs.h"
90518 + * fill_name_de
90519 + *
90520 + * formats NTFS_DE in 'buf'
90521 + */
90522 +int fill_name_de(struct ntfs_sb_info *sbi, void *buf, const struct qstr *name,
90523 +                const struct cpu_str *uni)
90525 +       int err;
90526 +       struct NTFS_DE *e = buf;
90527 +       u16 data_size;
90528 +       struct ATTR_FILE_NAME *fname = (struct ATTR_FILE_NAME *)(e + 1);
90530 +#ifndef CONFIG_NTFS3_64BIT_CLUSTER
90531 +       e->ref.high = fname->home.high = 0;
90532 +#endif
90533 +       if (uni) {
90534 +#ifdef __BIG_ENDIAN
90535 +               int ulen = uni->len;
90536 +               __le16 *uname = fname->name;
90537 +               const u16 *name_cpu = uni->name;
90539 +               while (ulen--)
90540 +                       *uname++ = cpu_to_le16(*name_cpu++);
90541 +#else
90542 +               memcpy(fname->name, uni->name, uni->len * sizeof(u16));
90543 +#endif
90544 +               fname->name_len = uni->len;
90546 +       } else {
90547 +               /* Convert the input string to Unicode */
90548 +               err = ntfs_nls_to_utf16(sbi, name->name, name->len,
90549 +                                       (struct cpu_str *)&fname->name_len,
90550 +                                       NTFS_NAME_LEN, UTF16_LITTLE_ENDIAN);
90551 +               if (err < 0)
90552 +                       return err;
90553 +       }
90555 +       fname->type = FILE_NAME_POSIX;
90556 +       data_size = fname_full_size(fname);
90558 +       e->size = cpu_to_le16(QuadAlign(data_size) + sizeof(struct NTFS_DE));
90559 +       e->key_size = cpu_to_le16(data_size);
90560 +       e->flags = 0;
90561 +       e->res = 0;
90563 +       return 0;
90567 + * ntfs_lookup
90568 + *
90569 + * inode_operations::lookup
90570 + */
90571 +static struct dentry *ntfs_lookup(struct inode *dir, struct dentry *dentry,
90572 +                                 u32 flags)
90574 +       struct ntfs_inode *ni = ntfs_i(dir);
90575 +       struct cpu_str *uni = __getname();
90576 +       struct inode *inode;
90577 +       int err;
90579 +       if (!uni)
90580 +               inode = ERR_PTR(-ENOMEM);
90581 +       else {
90582 +               err = ntfs_nls_to_utf16(ni->mi.sbi, dentry->d_name.name,
90583 +                                       dentry->d_name.len, uni, NTFS_NAME_LEN,
90584 +                                       UTF16_HOST_ENDIAN);
90585 +               if (err < 0)
90586 +                       inode = ERR_PTR(err);
90587 +               else {
90588 +                       ni_lock(ni);
90589 +                       inode = dir_search_u(dir, uni, NULL);
90590 +                       ni_unlock(ni);
90591 +               }
90592 +               __putname(uni);
90593 +       }
90595 +       return d_splice_alias(inode, dentry);
90599 + * ntfs_create
90600 + *
90601 + * inode_operations::create
90602 + */
90603 +static int ntfs_create(struct user_namespace *mnt_userns, struct inode *dir,
90604 +                      struct dentry *dentry, umode_t mode, bool excl)
90606 +       struct ntfs_inode *ni = ntfs_i(dir);
90607 +       struct inode *inode;
90609 +       ni_lock_dir(ni);
90611 +       inode = ntfs_create_inode(mnt_userns, dir, dentry, NULL, S_IFREG | mode,
90612 +                                 0, NULL, 0, excl, NULL);
90614 +       ni_unlock(ni);
90616 +       return IS_ERR(inode) ? PTR_ERR(inode) : 0;
90620 + * ntfs_link
90621 + *
90622 + * inode_operations::link
90623 + */
90624 +static int ntfs_link(struct dentry *ode, struct inode *dir, struct dentry *de)
90626 +       int err;
90627 +       struct inode *inode = d_inode(ode);
90628 +       struct ntfs_inode *ni = ntfs_i(inode);
90630 +       if (S_ISDIR(inode->i_mode))
90631 +               return -EPERM;
90633 +       if (inode->i_nlink >= NTFS_LINK_MAX)
90634 +               return -EMLINK;
90636 +       ni_lock_dir(ntfs_i(dir));
90637 +       if (inode != dir)
90638 +               ni_lock(ni);
90640 +       dir->i_ctime = dir->i_mtime = inode->i_ctime = current_time(inode);
90641 +       inc_nlink(inode);
90642 +       ihold(inode);
90644 +       err = ntfs_link_inode(inode, de);
90645 +       if (!err) {
90646 +               mark_inode_dirty(inode);
90647 +               mark_inode_dirty(dir);
90648 +               d_instantiate(de, inode);
90649 +       } else {
90650 +               drop_nlink(inode);
90651 +               iput(inode);
90652 +       }
90654 +       if (inode != dir)
90655 +               ni_unlock(ni);
90656 +       ni_unlock(ntfs_i(dir));
90658 +       return err;
90662 + * ntfs_unlink
90663 + *
90664 + * inode_operations::unlink
90665 + */
90666 +static int ntfs_unlink(struct inode *dir, struct dentry *dentry)
90668 +       struct ntfs_inode *ni = ntfs_i(dir);
90669 +       int err;
90671 +       ni_lock_dir(ni);
90673 +       err = ntfs_unlink_inode(dir, dentry);
90675 +       ni_unlock(ni);
90677 +       return err;
90681 + * ntfs_symlink
90682 + *
90683 + * inode_operations::symlink
90684 + */
90685 +static int ntfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
90686 +                       struct dentry *dentry, const char *symname)
90688 +       u32 size = strlen(symname);
90689 +       struct inode *inode;
90690 +       struct ntfs_inode *ni = ntfs_i(dir);
90692 +       ni_lock_dir(ni);
90694 +       inode = ntfs_create_inode(mnt_userns, dir, dentry, NULL, S_IFLNK | 0777,
90695 +                                 0, symname, size, 0, NULL);
90697 +       ni_unlock(ni);
90699 +       return IS_ERR(inode) ? PTR_ERR(inode) : 0;
90703 + * ntfs_mkdir
90704 + *
90705 + * inode_operations::mkdir
90706 + */
90707 +static int ntfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
90708 +                     struct dentry *dentry, umode_t mode)
90710 +       struct inode *inode;
90711 +       struct ntfs_inode *ni = ntfs_i(dir);
90713 +       ni_lock_dir(ni);
90715 +       inode = ntfs_create_inode(mnt_userns, dir, dentry, NULL, S_IFDIR | mode,
90716 +                                 0, NULL, -1, 0, NULL);
90718 +       ni_unlock(ni);
90720 +       return IS_ERR(inode) ? PTR_ERR(inode) : 0;
90724 + * ntfs_rmdir
90725 + *
90726 + * inode_operations::rmdir
90727 + */
90728 +static int ntfs_rmdir(struct inode *dir, struct dentry *dentry)
90730 +       struct ntfs_inode *ni = ntfs_i(dir);
90731 +       int err;
90733 +       ni_lock_dir(ni);
90735 +       err = ntfs_unlink_inode(dir, dentry);
90737 +       ni_unlock(ni);
90739 +       return err;
90743 + * ntfs_rename
90744 + *
90745 + * inode_operations::rename
90746 + */
90747 +static int ntfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
90748 +                      struct dentry *old_dentry, struct inode *new_dir,
90749 +                      struct dentry *new_dentry, u32 flags)
90751 +       int err;
90752 +       struct super_block *sb = old_dir->i_sb;
90753 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
90754 +       struct ntfs_inode *old_dir_ni = ntfs_i(old_dir);
90755 +       struct ntfs_inode *new_dir_ni = ntfs_i(new_dir);
90756 +       struct ntfs_inode *old_ni;
90757 +       struct ATTR_FILE_NAME *old_name, *new_name, *fname;
90758 +       u8 name_type;
90759 +       bool is_same;
90760 +       struct inode *old_inode, *new_inode;
90761 +       struct NTFS_DE *old_de, *new_de;
90762 +       struct ATTRIB *attr;
90763 +       struct ATTR_LIST_ENTRY *le;
90764 +       u16 new_de_key_size;
90766 +       static_assert(SIZEOF_ATTRIBUTE_FILENAME_MAX + SIZEOF_RESIDENT < 1024);
90767 +       static_assert(SIZEOF_ATTRIBUTE_FILENAME_MAX + sizeof(struct NTFS_DE) <
90768 +                     1024);
90769 +       static_assert(PATH_MAX >= 4 * 1024);
90771 +       if (flags & ~RENAME_NOREPLACE)
90772 +               return -EINVAL;
90774 +       old_inode = d_inode(old_dentry);
90775 +       new_inode = d_inode(new_dentry);
90777 +       old_ni = ntfs_i(old_inode);
90779 +       is_same = old_dentry->d_name.len == new_dentry->d_name.len &&
90780 +                 !memcmp(old_dentry->d_name.name, new_dentry->d_name.name,
90781 +                         old_dentry->d_name.len);
90783 +       if (is_same && old_dir == new_dir) {
90784 +               /* Nothing to do */
90785 +               err = 0;
90786 +               goto out;
90787 +       }
90789 +       if (ntfs_is_meta_file(sbi, old_inode->i_ino)) {
90790 +               err = -EINVAL;
90791 +               goto out;
90792 +       }
90794 +       if (new_inode) {
90795 +               /* Target name exists; unlink it. */
90796 +               dget(new_dentry);
90797 +               ni_lock_dir(new_dir_ni);
90798 +               err = ntfs_unlink_inode(new_dir, new_dentry);
90799 +               ni_unlock(new_dir_ni);
90800 +               dput(new_dentry);
90801 +               if (err)
90802 +                       goto out;
90803 +       }
90805 +       /* allocate PATH_MAX bytes */
90806 +       old_de = __getname();
90807 +       if (!old_de) {
90808 +               err = -ENOMEM;
90809 +               goto out;
90810 +       }
90812 +       err = fill_name_de(sbi, old_de, &old_dentry->d_name, NULL);
90813 +       if (err < 0)
90814 +               goto out1;
90816 +       old_name = (struct ATTR_FILE_NAME *)(old_de + 1);
90818 +       if (is_same) {
90819 +               new_de = old_de;
90820 +       } else {
90821 +               new_de = Add2Ptr(old_de, 1024);
90822 +               err = fill_name_de(sbi, new_de, &new_dentry->d_name, NULL);
90823 +               if (err < 0)
90824 +                       goto out1;
90825 +       }
90827 +       ni_lock_dir(old_dir_ni);
90828 +       ni_lock(old_ni);
90830 +       mi_get_ref(&old_dir_ni->mi, &old_name->home);
90832 +       /* Get a pointer to the file_name attribute in the MFT record. */
90833 +       fname = ni_fname_name(old_ni, (struct cpu_str *)&old_name->name_len,
90834 +                             &old_name->home, &le);
90835 +       if (!fname) {
90836 +               err = -EINVAL;
90837 +               goto out2;
90838 +       }
90840 +       /* Copy fname info from record into new fname */
90841 +       new_name = (struct ATTR_FILE_NAME *)(new_de + 1);
90842 +       memcpy(&new_name->dup, &fname->dup, sizeof(fname->dup));
90844 +       name_type = paired_name(fname->type);
90846 +       /* remove first name from directory */
90847 +       err = indx_delete_entry(&old_dir_ni->dir, old_dir_ni, old_de + 1,
90848 +                               le16_to_cpu(old_de->key_size), sbi);
90849 +       if (err)
90850 +               goto out3;
90852 +       /* remove first name from mft */
90853 +       err = ni_remove_attr_le(old_ni, attr_from_name(fname), le);
90854 +       if (err)
90855 +               goto out4;
90857 +       le16_add_cpu(&old_ni->mi.mrec->hard_links, -1);
90858 +       old_ni->mi.dirty = true;
90860 +       if (name_type != FILE_NAME_POSIX) {
90861 +               /* get paired name */
90862 +               fname = ni_fname_type(old_ni, name_type, &le);
90863 +               if (fname) {
90864 +                       /* remove second name from directory */
90865 +                       err = indx_delete_entry(&old_dir_ni->dir, old_dir_ni,
90866 +                                               fname, fname_full_size(fname),
90867 +                                               sbi);
90868 +                       if (err)
90869 +                               goto out5;
90871 +                       /* remove second name from mft */
90872 +                       err = ni_remove_attr_le(old_ni, attr_from_name(fname),
90873 +                                               le);
90874 +                       if (err)
90875 +                               goto out6;
90877 +                       le16_add_cpu(&old_ni->mi.mrec->hard_links, -1);
90878 +                       old_ni->mi.dirty = true;
90879 +               }
90880 +       }
90882 +       /* Add new name */
90883 +       mi_get_ref(&old_ni->mi, &new_de->ref);
90884 +       mi_get_ref(&ntfs_i(new_dir)->mi, &new_name->home);
90886 +       new_de_key_size = le16_to_cpu(new_de->key_size);
90888 +       /* insert new name in mft */
90889 +       err = ni_insert_resident(old_ni, new_de_key_size, ATTR_NAME, NULL, 0,
90890 +                                &attr, NULL);
90891 +       if (err)
90892 +               goto out7;
90894 +       attr->res.flags = RESIDENT_FLAG_INDEXED;
90896 +       memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), new_name, new_de_key_size);
90898 +       le16_add_cpu(&old_ni->mi.mrec->hard_links, 1);
90899 +       old_ni->mi.dirty = true;
90901 +       /* insert new name in directory */
90902 +       err = indx_insert_entry(&new_dir_ni->dir, new_dir_ni, new_de, sbi,
90903 +                               NULL);
90904 +       if (err)
90905 +               goto out8;
90907 +       if (IS_DIRSYNC(new_dir))
90908 +               err = ntfs_sync_inode(old_inode);
90909 +       else
90910 +               mark_inode_dirty(old_inode);
90912 +       old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
90913 +       if (IS_DIRSYNC(old_dir))
90914 +               (void)ntfs_sync_inode(old_dir);
90915 +       else
90916 +               mark_inode_dirty(old_dir);
90918 +       if (old_dir != new_dir) {
90919 +               new_dir->i_mtime = new_dir->i_ctime = old_dir->i_ctime;
90920 +               mark_inode_dirty(new_dir);
90921 +       }
90923 +       if (old_inode) {
90924 +               old_inode->i_ctime = old_dir->i_ctime;
90925 +               mark_inode_dirty(old_inode);
90926 +       }
90928 +       err = 0;
90929 +       /* normal way */
90930 +       goto out2;
90932 +out8:
90933 +       /* undo
90934 +        * ni_insert_resident(old_ni, new_de_key_size, ATTR_NAME, NULL, 0,
90935 +        *                       &attr, NULL);
90936 +        */
90937 +       mi_remove_attr(&old_ni->mi, attr);
90938 +out7:
90939 +       /* undo
90940 +        * ni_remove_attr_le(old_ni, attr_from_name(fname), le);
90941 +        */
90942 +out6:
90943 +       /* undo
90944 +        * indx_delete_entry(&old_dir_ni->dir, old_dir_ni,
90945 +        *                                      fname, fname_full_size(fname),
90946 +        *                                      sbi);
90947 +        */
90948 +out5:
90949 +       /* undo
90950 +        * ni_remove_attr_le(old_ni, attr_from_name(fname), le);
90951 +        */
90952 +out4:
90953 +       /* undo:
90954 +        * indx_delete_entry(&old_dir_ni->dir, old_dir_ni, old_de + 1,
90955 +        *                      old_de->key_size, NULL);
90956 +        */
90957 +out3:
90958 +out2:
90959 +       ni_unlock(old_ni);
90960 +       ni_unlock(old_dir_ni);
90961 +out1:
90962 +       __putname(old_de);
90963 +out:
90964 +       return err;
90968 + * ntfs_atomic_open
90969 + *
90970 + * inode_operations::atomic_open
90971 + */
90972 +static int ntfs_atomic_open(struct inode *dir, struct dentry *dentry,
90973 +                           struct file *file, u32 flags, umode_t mode)
90975 +       int err;
90976 +       bool excl = !!(flags & O_EXCL);
90977 +       struct inode *inode;
90978 +       struct ntfs_fnd *fnd = NULL;
90979 +       struct ntfs_inode *ni = ntfs_i(dir);
90980 +       struct dentry *d = NULL;
90981 +       struct cpu_str *uni = __getname();
90983 +       if (!uni)
90984 +               return -ENOMEM;
90986 +       err = ntfs_nls_to_utf16(ni->mi.sbi, dentry->d_name.name,
90987 +                               dentry->d_name.len, uni, NTFS_NAME_LEN,
90988 +                               UTF16_HOST_ENDIAN);
90989 +       if (err < 0)
90990 +               goto out;
90992 +       ni_lock_dir(ni);
90994 +       if (d_in_lookup(dentry)) {
90995 +               fnd = fnd_get();
90996 +               if (!fnd) {
90997 +                       err = -ENOMEM;
90998 +                       goto out1;
90999 +               }
91001 +               d = d_splice_alias(dir_search_u(dir, uni, fnd), dentry);
91002 +               if (IS_ERR(d)) {
91003 +                       err = PTR_ERR(d);
91004 +                       d = NULL;
91005 +                       goto out2;
91006 +               }
91008 +               if (d)
91009 +                       dentry = d;
91010 +       }
91012 +       if (!(flags & O_CREAT) || d_really_is_positive(dentry)) {
91013 +               err = finish_no_open(file, d);
91014 +               goto out2;
91015 +       }
91017 +       file->f_mode |= FMODE_CREATED;
91019 +       /* fnd contains the index tree path at which to insert. */
91020 +       /* TODO: init_user_ns? */
91021 +       inode = ntfs_create_inode(&init_user_ns, dir, dentry, uni, mode, 0,
91022 +                                 NULL, 0, excl, fnd);
91023 +       err = IS_ERR(inode) ? PTR_ERR(inode)
91024 +                           : finish_open(file, dentry, ntfs_file_open);
91025 +       dput(d);
91027 +out2:
91028 +       fnd_put(fnd);
91029 +out1:
91030 +       ni_unlock(ni);
91031 +out:
91032 +       __putname(uni);
91034 +       return err;
91037 +struct dentry *ntfs3_get_parent(struct dentry *child)
91039 +       struct inode *inode = d_inode(child);
91040 +       struct ntfs_inode *ni = ntfs_i(inode);
91042 +       struct ATTR_LIST_ENTRY *le = NULL;
91043 +       struct ATTRIB *attr = NULL;
91044 +       struct ATTR_FILE_NAME *fname;
91046 +       while ((attr = ni_find_attr(ni, attr, &le, ATTR_NAME, NULL, 0, NULL,
91047 +                                   NULL))) {
91048 +               fname = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
91049 +               if (!fname)
91050 +                       continue;
91052 +               return d_obtain_alias(
91053 +                       ntfs_iget5(inode->i_sb, &fname->home, NULL));
91054 +       }
91056 +       return ERR_PTR(-ENOENT);
91059 +const struct inode_operations ntfs_dir_inode_operations = {
91060 +       .lookup = ntfs_lookup,
91061 +       .create = ntfs_create,
91062 +       .link = ntfs_link,
91063 +       .unlink = ntfs_unlink,
91064 +       .symlink = ntfs_symlink,
91065 +       .mkdir = ntfs_mkdir,
91066 +       .rmdir = ntfs_rmdir,
91067 +       .rename = ntfs_rename,
91068 +       .permission = ntfs_permission,
91069 +       .get_acl = ntfs_get_acl,
91070 +       .set_acl = ntfs_set_acl,
91071 +       .setattr = ntfs3_setattr,
91072 +       .getattr = ntfs_getattr,
91073 +       .listxattr = ntfs_listxattr,
91074 +       .atomic_open = ntfs_atomic_open,
91075 +       .fiemap = ntfs_fiemap,
91077 diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h
91078 new file mode 100644
91079 index 000000000000..40398e6c39c9
91080 --- /dev/null
91081 +++ b/fs/ntfs3/ntfs.h
91082 @@ -0,0 +1,1238 @@
91083 +/* SPDX-License-Identifier: GPL-2.0 */
91085 + *
91086 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
91087 + *
91088 + * on-disk ntfs structs
91089 + */
91091 +// clang-format off
91093 +/* TODO:
91094 + * - Check 4K mft record and 512 bytes cluster
91095 + */
91098 + * Activate this define to use binary search in indexes
91099 + */
91100 +#define NTFS3_INDEX_BINARY_SEARCH
91103 + * Check each run for marked clusters
91104 + */
91105 +#define NTFS3_CHECK_FREE_CLST
91107 +#define NTFS_NAME_LEN 255
91110 + * ntfs.sys used a maximum of 500 links;
91111 + * the on-disk struct allows up to 0xffff.
91112 + */
91113 +#define NTFS_LINK_MAX 0x400
91114 +//#define NTFS_LINK_MAX 0xffff
91117 + * Activate to use 64-bit clusters instead of the 32-bit ones used by ntfs.sys.
91118 + * Logical and virtual cluster numbers;
91119 + * if needed, they may be redefined to use a 64-bit value.
91120 + */
91121 +//#define CONFIG_NTFS3_64BIT_CLUSTER
91123 +#define NTFS_LZNT_MAX_CLUSTER  4096
91124 +#define NTFS_LZNT_CUNIT                4
91125 +#define NTFS_LZNT_CLUSTERS     (1u<<NTFS_LZNT_CUNIT)
91127 +struct GUID {
91128 +       __le32 Data1;
91129 +       __le16 Data2;
91130 +       __le16 Data3;
91131 +       u8 Data4[8];
91135 + * This struct repeats the layout of ATTR_FILE_NAME
91136 + * at offset 0x40.
91137 + * It is used to store the global constants NAME_MFT/NAME_MIRROR...
91138 + * Most constant names are shorter than 10 characters.
91139 + */
91140 +struct cpu_str {
91141 +       u8 len;
91142 +       u8 unused;
91143 +       u16 name[10];
91146 +struct le_str {
91147 +       u8 len;
91148 +       u8 unused;
91149 +       __le16 name[];
91150 +};
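As an illustrative sketch (an editorial addition, not part of the patch), a global name constant such as NAME_MFT, declared further down in this header, would fit the cpu_str layout roughly like this; the initializer below is hypothetical:

/* Hypothetical example: how a short constant name fits struct cpu_str.
 * 'len' counts u16 characters, not bytes; name[] holds host-order u16.
 */
static const struct cpu_str example_name_mft = {
	.len = 4,
	.unused = 0,
	.name = { '$', 'M', 'F', 'T' },
};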
91152 +static_assert(SECTOR_SHIFT == 9);
91154 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
91155 +typedef u64 CLST;
91156 +static_assert(sizeof(size_t) == 8);
91157 +#else
91158 +typedef u32 CLST;
91159 +#endif
91161 +#define SPARSE_LCN64   ((u64)-1)
91162 +#define SPARSE_LCN     ((CLST)-1)
91163 +#define RESIDENT_LCN   ((CLST)-2)
91164 +#define COMPRESSED_LCN ((CLST)-3)
91166 +#define COMPRESSION_UNIT     4
91167 +#define COMPRESS_MAX_CLUSTER 0x1000
91168 +#define MFT_INCREASE_CHUNK   1024
91170 +enum RECORD_NUM {
91171 +       MFT_REC_MFT             = 0,
91172 +       MFT_REC_MIRR            = 1,
91173 +       MFT_REC_LOG             = 2,
91174 +       MFT_REC_VOL             = 3,
91175 +       MFT_REC_ATTR            = 4,
91176 +       MFT_REC_ROOT            = 5,
91177 +       MFT_REC_BITMAP          = 6,
91178 +       MFT_REC_BOOT            = 7,
91179 +       MFT_REC_BADCLUST        = 8,
91180 +       //MFT_REC_QUOTA         = 9,
91181 +       MFT_REC_SECURE          = 9, // NTFS 3.0
91182 +       MFT_REC_UPCASE          = 10,
91183 +       MFT_REC_EXTEND          = 11, // NTFS 3.0
91184 +       MFT_REC_RESERVED        = 11,
91185 +       MFT_REC_FREE            = 16,
91186 +       MFT_REC_USER            = 24,
91189 +enum ATTR_TYPE {
91190 +       ATTR_ZERO               = cpu_to_le32(0x00),
91191 +       ATTR_STD                = cpu_to_le32(0x10),
91192 +       ATTR_LIST               = cpu_to_le32(0x20),
91193 +       ATTR_NAME               = cpu_to_le32(0x30),
91194 +       // ATTR_VOLUME_VERSION on Nt4
91195 +       ATTR_ID                 = cpu_to_le32(0x40),
91196 +       ATTR_SECURE             = cpu_to_le32(0x50),
91197 +       ATTR_LABEL              = cpu_to_le32(0x60),
91198 +       ATTR_VOL_INFO           = cpu_to_le32(0x70),
91199 +       ATTR_DATA               = cpu_to_le32(0x80),
91200 +       ATTR_ROOT               = cpu_to_le32(0x90),
91201 +       ATTR_ALLOC              = cpu_to_le32(0xA0),
91202 +       ATTR_BITMAP             = cpu_to_le32(0xB0),
91203 +       // ATTR_SYMLINK on Nt4
91204 +       ATTR_REPARSE            = cpu_to_le32(0xC0),
91205 +       ATTR_EA_INFO            = cpu_to_le32(0xD0),
91206 +       ATTR_EA                 = cpu_to_le32(0xE0),
91207 +       ATTR_PROPERTYSET        = cpu_to_le32(0xF0),
91208 +       ATTR_LOGGED_UTILITY_STREAM = cpu_to_le32(0x100),
91209 +       ATTR_END                = cpu_to_le32(0xFFFFFFFF)
91212 +static_assert(sizeof(enum ATTR_TYPE) == 4);
91214 +enum FILE_ATTRIBUTE {
91215 +       FILE_ATTRIBUTE_READONLY         = cpu_to_le32(0x00000001),
91216 +       FILE_ATTRIBUTE_HIDDEN           = cpu_to_le32(0x00000002),
91217 +       FILE_ATTRIBUTE_SYSTEM           = cpu_to_le32(0x00000004),
91218 +       FILE_ATTRIBUTE_ARCHIVE          = cpu_to_le32(0x00000020),
91219 +       FILE_ATTRIBUTE_DEVICE           = cpu_to_le32(0x00000040),
91220 +       FILE_ATTRIBUTE_TEMPORARY        = cpu_to_le32(0x00000100),
91221 +       FILE_ATTRIBUTE_SPARSE_FILE      = cpu_to_le32(0x00000200),
91222 +       FILE_ATTRIBUTE_REPARSE_POINT    = cpu_to_le32(0x00000400),
91223 +       FILE_ATTRIBUTE_COMPRESSED       = cpu_to_le32(0x00000800),
91224 +       FILE_ATTRIBUTE_OFFLINE          = cpu_to_le32(0x00001000),
91225 +       FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = cpu_to_le32(0x00002000),
91226 +       FILE_ATTRIBUTE_ENCRYPTED        = cpu_to_le32(0x00004000),
91227 +       FILE_ATTRIBUTE_VALID_FLAGS      = cpu_to_le32(0x00007fb7),
91228 +       FILE_ATTRIBUTE_DIRECTORY        = cpu_to_le32(0x10000000),
91231 +static_assert(sizeof(enum FILE_ATTRIBUTE) == 4);
91233 +extern const struct cpu_str NAME_MFT;
91234 +extern const struct cpu_str NAME_MIRROR;
91235 +extern const struct cpu_str NAME_LOGFILE;
91236 +extern const struct cpu_str NAME_VOLUME;
91237 +extern const struct cpu_str NAME_ATTRDEF;
91238 +extern const struct cpu_str NAME_ROOT;
91239 +extern const struct cpu_str NAME_BITMAP;
91240 +extern const struct cpu_str NAME_BOOT;
91241 +extern const struct cpu_str NAME_BADCLUS;
91242 +extern const struct cpu_str NAME_QUOTA;
91243 +extern const struct cpu_str NAME_SECURE;
91244 +extern const struct cpu_str NAME_UPCASE;
91245 +extern const struct cpu_str NAME_EXTEND;
91246 +extern const struct cpu_str NAME_OBJID;
91247 +extern const struct cpu_str NAME_REPARSE;
91248 +extern const struct cpu_str NAME_USNJRNL;
91250 +extern const __le16 I30_NAME[4];
91251 +extern const __le16 SII_NAME[4];
91252 +extern const __le16 SDH_NAME[4];
91253 +extern const __le16 SO_NAME[2];
91254 +extern const __le16 SQ_NAME[2];
91255 +extern const __le16 SR_NAME[2];
91257 +extern const __le16 BAD_NAME[4];
91258 +extern const __le16 SDS_NAME[4];
91259 +extern const __le16 WOF_NAME[17];      /* WofCompressedData */
91261 +/* MFT record number structure */
91262 +struct MFT_REF {
91263 +       __le32 low;     // The low part of the number
91264 +       __le16 high;    // The high part of the number
91265 +       __le16 seq;     // The sequence number of MFT record
91268 +static_assert(sizeof(__le64) == sizeof(struct MFT_REF));
91270 +static inline CLST ino_get(const struct MFT_REF *ref)
91272 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
91273 +       return le32_to_cpu(ref->low) | ((u64)le16_to_cpu(ref->high) << 32);
91274 +#else
91275 +       return le32_to_cpu(ref->low);
91276 +#endif
91279 +struct NTFS_BOOT {
91280 +       u8 jump_code[3];        // 0x00: Jump to boot code
91281 +       u8 system_id[8];        // 0x03: System ID, equals "NTFS    "
91283 +       // NOTE: this member is not aligned(!)
91284 +       // The value is little-endian: bytes_per_sector[0] is the low byte
91285 +       // (must be 0) and bytes_per_sector[1] is multiplied by 256.
91286 +       u8 bytes_per_sector[2]; // 0x0B: Bytes per sector
91288 +       u8 sectors_per_clusters;// 0x0D: Sectors per cluster
91289 +       u8 unused1[7];
91290 +       u8 media_type;          // 0x15: Media type (0xF8 - harddisk)
91291 +       u8 unused2[2];
91292 +       __le16 sct_per_track;   // 0x18: number of sectors per track
91293 +       __le16 heads;           // 0x1A: number of heads per cylinder
91294 +       __le32 hidden_sectors;  // 0x1C: number of 'hidden' sectors
91295 +       u8 unused3[4];
91296 +       u8 bios_drive_num;      // 0x24: BIOS drive number =0x80
91297 +       u8 unused4;
91298 +       u8 signature_ex;        // 0x26: Extended BOOT signature =0x80
91299 +       u8 unused5;
91300 +       __le64 sectors_per_volume;// 0x28: size of volume in sectors
91301 +       __le64 mft_clst;        // 0x30: first cluster of $MFT
91302 +       __le64 mft2_clst;       // 0x38: first cluster of $MFTMirr
91303 +       s8 record_size;         // 0x40: size of MFT record in clusters(sectors)
91304 +       u8 unused6[3];
91305 +       s8 index_size;          // 0x44: size of INDX record in clusters(sectors)
91306 +       u8 unused7[3];
91307 +       __le64 serial_num;      // 0x48: Volume serial number
91308 +       __le32 check_sum;       // 0x50: Simple additive checksum of all
91309 +                               // of the u32's which precede the 'check_sum'
91311 +       u8 boot_code[0x200 - 0x50 - 2 - 4]; // 0x54:
91312 +       u8 boot_magic[2];       // 0x1FE: Boot signature =0x55 + 0xAA
91315 +static_assert(sizeof(struct NTFS_BOOT) == 0x200);
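Two minimal helper sketches (editorial additions, not part of the patch) showing how these fields are meant to be read; the example_ names and the bytes_per_cluster parameter are hypothetical. The first assembles the unaligned little-endian bytes_per_sector; the second decodes record_size using the common NTFS convention that a negative value encodes a power of two in bytes:

static inline u32 example_boot_bytes_per_sector(const struct NTFS_BOOT *boot)
{
	/* Unaligned little-endian u16: [0] is the low byte (expected 0). */
	return boot->bytes_per_sector[0] + 0x100u * boot->bytes_per_sector[1];
}

static inline u32 example_mft_record_size(const struct NTFS_BOOT *boot,
					  u32 bytes_per_cluster)
{
	s8 size = boot->record_size;

	/* Positive: a count of clusters. Negative: 2^(-size) bytes
	 * (e.g. -10 means a 1K record), per the usual NTFS convention.
	 */
	return size > 0 ? size * bytes_per_cluster : 1u << (-size);
}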
91317 +enum NTFS_SIGNATURE {
91318 +       NTFS_FILE_SIGNATURE = cpu_to_le32(0x454C4946), // 'FILE'
91319 +       NTFS_INDX_SIGNATURE = cpu_to_le32(0x58444E49), // 'INDX'
91320 +       NTFS_CHKD_SIGNATURE = cpu_to_le32(0x444B4843), // 'CHKD'
91321 +       NTFS_RSTR_SIGNATURE = cpu_to_le32(0x52545352), // 'RSTR'
91322 +       NTFS_RCRD_SIGNATURE = cpu_to_le32(0x44524352), // 'RCRD'
91323 +       NTFS_BAAD_SIGNATURE = cpu_to_le32(0x44414142), // 'BAAD'
91324 +       NTFS_HOLE_SIGNATURE = cpu_to_le32(0x454C4F48), // 'HOLE'
91325 +       NTFS_FFFF_SIGNATURE = cpu_to_le32(0xffffffff),
91328 +static_assert(sizeof(enum NTFS_SIGNATURE) == 4);
91330 +/* MFT Record header structure */
91331 +struct NTFS_RECORD_HEADER {
91332 +       /* Record magic number, equals 'FILE'/'INDX'/'RSTR'/'RCRD' */
91333 +       enum NTFS_SIGNATURE sign; // 0x00:
91334 +       __le16 fix_off;         // 0x04:
91335 +       __le16 fix_num;         // 0x06:
91336 +       __le64 lsn;             // 0x08: Log file sequence number
91339 +static_assert(sizeof(struct NTFS_RECORD_HEADER) == 0x10);
91341 +static inline int is_baad(const struct NTFS_RECORD_HEADER *hdr)
91343 +       return hdr->sign == NTFS_BAAD_SIGNATURE;
91346 +/* Possible bits in struct MFT_REC.flags */
91347 +enum RECORD_FLAG {
91348 +       RECORD_FLAG_IN_USE      = cpu_to_le16(0x0001),
91349 +       RECORD_FLAG_DIR         = cpu_to_le16(0x0002),
91350 +       RECORD_FLAG_SYSTEM      = cpu_to_le16(0x0004),
91351 +       RECORD_FLAG_UNKNOWN     = cpu_to_le16(0x0008),
91354 +/* MFT Record structure */
91355 +struct MFT_REC {
91356 +       struct NTFS_RECORD_HEADER rhdr; // 'FILE'
91358 +       __le16 seq;             // 0x10: Sequence number for this record
91359 +       __le16 hard_links;      // 0x12: The number of hard links to record
91360 +       __le16 attr_off;        // 0x14: Offset to attributes
91361 +       __le16 flags;           // 0x16: See RECORD_FLAG
91362 +       __le32 used;            // 0x18: The size of used part
91363 +       __le32 total;           // 0x1C: Total record size
91365 +       struct MFT_REF parent_ref; // 0x20: Parent MFT record
91366 +       __le16 next_attr_id;    // 0x28: The next attribute Id
91368 +       __le16 res;             // 0x2A: High part of mft record?
91369 +       __le32 mft_record;      // 0x2C: Current mft record number
91370 +       __le16 fixups[];        // 0x30:
91373 +#define MFTRECORD_FIXUP_OFFSET_1 offsetof(struct MFT_REC, res)
91374 +#define MFTRECORD_FIXUP_OFFSET_3 offsetof(struct MFT_REC, fixups)
91376 +static_assert(MFTRECORD_FIXUP_OFFSET_1 == 0x2A);
91377 +static_assert(MFTRECORD_FIXUP_OFFSET_3 == 0x30);
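A hedged sketch (editorial addition, not part of the patch) of undoing the update-sequence fixups on a just-read record, per the usual NTFS convention: fixups[0] holds the update sequence number and each following entry holds the original last two bytes of the corresponding 512-byte sector; a mismatched sector tail indicates a torn write. The function name is hypothetical, and real code must additionally validate fix_off against the record size:

static inline bool example_undo_fixups(struct NTFS_RECORD_HEADER *rhdr,
				       u32 record_bytes)
{
	u16 num = le16_to_cpu(rhdr->fix_num); /* entries, incl. the USN */
	__le16 *fixup = Add2Ptr(rhdr, le16_to_cpu(rhdr->fix_off));
	__le16 usn = fixup[0];
	u16 i;

	if (!num || (u32)(num - 1) * 512 > record_bytes)
		return false;

	for (i = 1; i < num; i++) {
		/* The last __le16 of sector i-1. */
		__le16 *tail = Add2Ptr(rhdr, i * 512 - sizeof(__le16));

		if (*tail != usn)
			return false; /* torn write detected */
		*tail = fixup[i]; /* restore the original bytes */
	}
	return true;
}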
91379 +static inline bool is_rec_base(const struct MFT_REC *rec)
91381 +       const struct MFT_REF *r = &rec->parent_ref;
91383 +       return !r->low && !r->high && !r->seq;
91386 +static inline bool is_mft_rec5(const struct MFT_REC *rec)
91388 +       return le16_to_cpu(rec->rhdr.fix_off) >=
91389 +              offsetof(struct MFT_REC, fixups);
91392 +static inline bool is_rec_inuse(const struct MFT_REC *rec)
91394 +       return rec->flags & RECORD_FLAG_IN_USE;
91397 +static inline bool clear_rec_inuse(struct MFT_REC *rec)
91399 +       return rec->flags &= ~RECORD_FLAG_IN_USE;
91402 +/* Possible values of ATTR_RESIDENT.flags */
91403 +#define RESIDENT_FLAG_INDEXED 0x01
91405 +struct ATTR_RESIDENT {
91406 +       __le32 data_size;       // 0x10: The size of data
91407 +       __le16 data_off;        // 0x14: Offset to data
91408 +       u8 flags;               // 0x16: resident flags ( 1 - indexed )
91409 +       u8 res;                 // 0x17:
91410 +}; // sizeof() = 0x18
91412 +struct ATTR_NONRESIDENT {
91413 +       __le64 svcn;            // 0x10: Starting VCN of this segment
91414 +       __le64 evcn;            // 0x18: End VCN of this segment
91415 +       __le16 run_off;         // 0x20: Offset to packed runs
91416 +       //  Unit of Compression size for this stream, expressed
91417 +       //  as a log of the cluster size.
91418 +       //
91419 +       //      0 means file is not compressed
91420 +       //      1, 2, 3, and 4 are potentially legal values if the
91421 +       //          stream is compressed, however the implementation
91422 +       //          may only choose to use 4, or possibly 3.  Note
91423 +       //          that 4 means cluster size times 16.  If convenient
91424 +       //          the implementation may wish to accept a
91425 +       //          reasonable range of legal values here (1-5?),
91426 +       //          even if the implementation only generates
91427 +       //          a smaller set of values itself.
91428 +       u8 c_unit;              // 0x22
91429 +       u8 res1[5];             // 0x23:
91430 +       __le64 alloc_size;      // 0x28: The allocated size of attribute in bytes
91431 +                               // (multiple of cluster size)
91432 +       __le64 data_size;       // 0x30: The size of attribute in bytes <= alloc_size
91433 +       __le64 valid_size;      // 0x38: The size of valid part in bytes <= data_size
91434 +       __le64 total_size;      // 0x40: The sum of the allocated clusters for a file
91435 +                               // (present only for the first segment (0 == vcn)
91436 +                               // of compressed attribute)
91438 +}; // sizeof()=0x40 or 0x48 (if compressed)
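A small illustrative helper (editorial addition, not part of the patch): the compression unit spans (1 << c_unit) clusters, so the typical c_unit == 4 with 4K clusters gives 64K units. The function name and the cluster_bits parameter are hypothetical:

static inline u32 example_compression_unit_bytes(
			const struct ATTR_NONRESIDENT *nres, u8 cluster_bits)
{
	/* c_unit == 0 means the stream is not compressed. */
	return nres->c_unit ? 1u << (nres->c_unit + cluster_bits) : 0;
}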
91440 +/* Possible values of ATTRIB.flags: */
91441 +#define ATTR_FLAG_COMPRESSED     cpu_to_le16(0x0001)
91442 +#define ATTR_FLAG_COMPRESSED_MASK cpu_to_le16(0x00FF)
91443 +#define ATTR_FLAG_ENCRYPTED      cpu_to_le16(0x4000)
91444 +#define ATTR_FLAG_SPARSED        cpu_to_le16(0x8000)
91446 +struct ATTRIB {
91447 +       enum ATTR_TYPE type;    // 0x00: The type of this attribute
91448 +       __le32 size;            // 0x04: The size of this attribute
91449 +       u8 non_res;             // 0x08: Is this attribute non-resident ?
91450 +       u8 name_len;            // 0x09: This attribute name length
91451 +       __le16 name_off;        // 0x0A: Offset to the attribute name
91452 +       __le16 flags;           // 0x0C: See ATTR_FLAG_XXX
91453 +       __le16 id;              // 0x0E: unique id (per record)
91455 +       union {
91456 +               struct ATTR_RESIDENT res;     // 0x10
91457 +               struct ATTR_NONRESIDENT nres; // 0x10
91458 +       };
91461 +/* Define attribute sizes */
91462 +#define SIZEOF_RESIDENT                        0x18
91463 +#define SIZEOF_NONRESIDENT_EX          0x48
91464 +#define SIZEOF_NONRESIDENT             0x40
91466 +#define SIZEOF_RESIDENT_LE             cpu_to_le16(0x18)
91467 +#define SIZEOF_NONRESIDENT_EX_LE       cpu_to_le16(0x48)
91468 +#define SIZEOF_NONRESIDENT_LE          cpu_to_le16(0x40)
91470 +static inline u64 attr_ondisk_size(const struct ATTRIB *attr)
91472 +       return attr->non_res ? ((attr->flags &
91473 +                                (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED)) ?
91474 +                                       le64_to_cpu(attr->nres.total_size) :
91475 +                                       le64_to_cpu(attr->nres.alloc_size)) :
91476 +                              QuadAlign(le32_to_cpu(attr->res.data_size));
91479 +static inline u64 attr_size(const struct ATTRIB *attr)
91481 +       return attr->non_res ? le64_to_cpu(attr->nres.data_size) :
91482 +                              le32_to_cpu(attr->res.data_size);
91485 +static inline bool is_attr_encrypted(const struct ATTRIB *attr)
91487 +       return attr->flags & ATTR_FLAG_ENCRYPTED;
91490 +static inline bool is_attr_sparsed(const struct ATTRIB *attr)
91492 +       return attr->flags & ATTR_FLAG_SPARSED;
91495 +static inline bool is_attr_compressed(const struct ATTRIB *attr)
91497 +       return attr->flags & ATTR_FLAG_COMPRESSED;
91500 +static inline bool is_attr_ext(const struct ATTRIB *attr)
91502 +       return attr->flags & (ATTR_FLAG_SPARSED | ATTR_FLAG_COMPRESSED);
91505 +static inline bool is_attr_indexed(const struct ATTRIB *attr)
91507 +       return !attr->non_res && (attr->res.flags & RESIDENT_FLAG_INDEXED);
91510 +static inline __le16 const *attr_name(const struct ATTRIB *attr)
91512 +       return Add2Ptr(attr, le16_to_cpu(attr->name_off));
91515 +static inline u64 attr_svcn(const struct ATTRIB *attr)
91517 +       return attr->non_res ? le64_to_cpu(attr->nres.svcn) : 0;
91520 +/* The full size of a resident attribute, given its data size. */
91521 +#define BYTES_PER_RESIDENT(b) (0x18 + (b))
91523 +static_assert(sizeof(struct ATTRIB) == 0x48);
91524 +static_assert(sizeof(((struct ATTRIB *)NULL)->res) == 0x08);
91525 +static_assert(sizeof(((struct ATTRIB *)NULL)->nres) == 0x38);
91527 +static inline void *resident_data_ex(const struct ATTRIB *attr, u32 datasize)
91529 +       u32 asize, rsize;
91530 +       u16 off;
91532 +       if (attr->non_res)
91533 +               return NULL;
91535 +       asize = le32_to_cpu(attr->size);
91536 +       off = le16_to_cpu(attr->res.data_off);
91538 +       if (asize < datasize + off)
91539 +               return NULL;
91541 +       rsize = le32_to_cpu(attr->res.data_size);
91542 +       if (rsize < datasize)
91543 +               return NULL;
91545 +       return Add2Ptr(attr, off);
91548 +static inline void *resident_data(const struct ATTRIB *attr)
91550 +       return Add2Ptr(attr, le16_to_cpu(attr->res.data_off));
91553 +static inline void *attr_run(const struct ATTRIB *attr)
91555 +       return Add2Ptr(attr, le16_to_cpu(attr->nres.run_off));
91558 +/* Standard information attribute (0x10) */
91559 +struct ATTR_STD_INFO {
91560 +       __le64 cr_time;         // 0x00: File creation time
91561 +       __le64 m_time;          // 0x08: File modification time
91562 +       __le64 c_time;          // 0x10: Last time any attribute was modified
91563 +       __le64 a_time;          // 0x18: File last access time
91564 +       enum FILE_ATTRIBUTE fa; // 0x20: Standard DOS attributes & more
91565 +       __le32 max_ver_num;     // 0x24: Maximum Number of Versions
91566 +       __le32 ver_num;         // 0x28: Version Number
91567 +       __le32 class_id;        // 0x2C: Class Id from bidirectional Class Id index
91570 +static_assert(sizeof(struct ATTR_STD_INFO) == 0x30);
91572 +#define SECURITY_ID_INVALID 0x00000000
91573 +#define SECURITY_ID_FIRST 0x00000100
91575 +struct ATTR_STD_INFO5 {
91576 +       __le64 cr_time;         // 0x00: File creation time
91577 +       __le64 m_time;          // 0x08: File modification time
91578 +       __le64 c_time;          // 0x10: Last time any attribute was modified
91579 +       __le64 a_time;          // 0x18: File last access time
91580 +       enum FILE_ATTRIBUTE fa; // 0x20: Standard DOS attributes & more
91581 +       __le32 max_ver_num;     // 0x24: Maximum Number of Versions
91582 +       __le32 ver_num;         // 0x28: Version Number
91583 +       __le32 class_id;        // 0x2C: Class Id from bidirectional Class Id index
91585 +       __le32 owner_id;        // 0x30: Owner Id of the user owning the file.
91586 +       __le32 security_id;     // 0x34: The Security Id is a key in the $SII Index and $SDS
91587 +       __le64 quota_charge;    // 0x38:
91588 +       __le64 usn;             // 0x40: Last Update Sequence Number of the file. This is a direct
91589 +                               // index into the file $UsnJrnl. If zero, the USN Journal is
91590 +                               // disabled.
91593 +static_assert(sizeof(struct ATTR_STD_INFO5) == 0x48);
91595 +/* attribute list entry structure (0x20) */
91596 +struct ATTR_LIST_ENTRY {
91597 +       enum ATTR_TYPE type;    // 0x00: The type of attribute
91598 +       __le16 size;            // 0x04: The size of this record
91599 +       u8 name_len;            // 0x06: The length of attribute name
91600 +       u8 name_off;            // 0x07: The offset to attribute name
91601 +       __le64 vcn;             // 0x08: Starting VCN of this attribute
91602 +       struct MFT_REF ref;     // 0x10: MFT record number with attribute
91603 +       __le16 id;              // 0x18: struct ATTRIB ID
91604 +       __le16 name[3];         // 0x1A: Just to align. To get the real name, use name_off.
91606 +}; // sizeof(0x20)
91608 +static_assert(sizeof(struct ATTR_LIST_ENTRY) == 0x20);
91610 +static inline u32 le_size(u8 name_len)
91612 +       return QuadAlign(offsetof(struct ATTR_LIST_ENTRY, name) +
91613 +                        name_len * sizeof(short));
91616 +/* returns 0 if 'attr' has the same type and name */
91617 +static inline int le_cmp(const struct ATTR_LIST_ENTRY *le,
91618 +                        const struct ATTRIB *attr)
91620 +       return le->type != attr->type || le->name_len != attr->name_len ||
91621 +              (!le->name_len &&
91622 +               memcmp(Add2Ptr(le, le->name_off),
91623 +                      Add2Ptr(attr, le16_to_cpu(attr->name_off)),
91624 +                      le->name_len * sizeof(short)));
91627 +static inline __le16 const *le_name(const struct ATTR_LIST_ENTRY *le)
91629 +       return Add2Ptr(le, le->name_off);
91632 +/* File name types (the field type in struct ATTR_FILE_NAME ) */
91633 +#define FILE_NAME_POSIX   0
91634 +#define FILE_NAME_UNICODE 1
91635 +#define FILE_NAME_DOS    2
91636 +#define FILE_NAME_UNICODE_AND_DOS (FILE_NAME_DOS | FILE_NAME_UNICODE)
91638 +/* Filename attribute structure (0x30) */
91639 +struct NTFS_DUP_INFO {
91640 +       __le64 cr_time;         // 0x00: File creation time
91641 +       __le64 m_time;          // 0x08: File modification time
91642 +       __le64 c_time;          // 0x10: Last time any attribute was modified
91643 +       __le64 a_time;          // 0x18: File last access time
91644 +       __le64 alloc_size;      // 0x20: Data attribute allocated size, multiple of cluster size
91645 +       __le64 data_size;       // 0x28: Data attribute size <= alloc_size
91646 +       enum FILE_ATTRIBUTE fa; // 0x30: Standard DOS attributes & more
91647 +       __le16 ea_size;         // 0x34: Packed EAs
91648 +       __le16 reparse;         // 0x36: Used by Reparse
91650 +}; // 0x38
91652 +struct ATTR_FILE_NAME {
91653 +       struct MFT_REF home;    // 0x00: MFT record for directory
91654 +       struct NTFS_DUP_INFO dup;// 0x08
91655 +       u8 name_len;            // 0x40: File name length in words
91656 +       u8 type;                // 0x41: File name type
91657 +       __le16 name[];          // 0x42: File name
91660 +static_assert(sizeof(((struct ATTR_FILE_NAME *)NULL)->dup) == 0x38);
91661 +static_assert(offsetof(struct ATTR_FILE_NAME, name) == 0x42);
91662 +#define SIZEOF_ATTRIBUTE_FILENAME     0x44
91663 +#define SIZEOF_ATTRIBUTE_FILENAME_MAX (0x42 + 255 * 2)
91665 +static inline struct ATTRIB *attr_from_name(struct ATTR_FILE_NAME *fname)
91667 +       return (struct ATTRIB *)((char *)fname - SIZEOF_RESIDENT);
91670 +static inline u16 fname_full_size(const struct ATTR_FILE_NAME *fname)
91672 +       // don't return struct_size(fname, name, fname->name_len);
91673 +       return offsetof(struct ATTR_FILE_NAME, name) +
91674 +              fname->name_len * sizeof(short);
91677 +static inline u8 paired_name(u8 type)
91679 +       if (type == FILE_NAME_UNICODE)
91680 +               return FILE_NAME_DOS;
91681 +       if (type == FILE_NAME_DOS)
91682 +               return FILE_NAME_UNICODE;
91683 +       return FILE_NAME_POSIX;
91686 +/* Index entry defines (the field flags in struct NTFS_DE) */
91687 +#define NTFS_IE_HAS_SUBNODES   cpu_to_le16(1)
91688 +#define NTFS_IE_LAST           cpu_to_le16(2)
91690 +/* Directory entry structure */
91691 +struct NTFS_DE {
91692 +       union {
91693 +               struct MFT_REF ref; // 0x00: MFT record number with this file
91694 +               struct {
91695 +                       __le16 data_off;  // 0x00:
91696 +                       __le16 data_size; // 0x02:
91697 +                       __le32 res;       // 0x04: must be 0
91698 +               } view;
91699 +       };
91700 +       __le16 size;            // 0x08: The size of this entry
91701 +       __le16 key_size;        // 0x0A: The size of the key: file name length in bytes + 0x42
91702 +       __le16 flags;           // 0x0C: Entry flags: NTFS_IE_XXX
91703 +       __le16 res;             // 0x0E:
91705 +       // Here any indexed attribute can be placed
91706 +       // One of them is:
91707 +       // struct ATTR_FILE_NAME AttrFileName;
91708 +       //
91710 +       // The last 8 bytes of this structure contain
91711 +       // the VBN of the subnode.
91712 +       // !!! Note !!!
91713 +       // This field is present only if (flags & NTFS_IE_HAS_SUBNODES)
91714 +       // __le64 vbn;
91717 +static_assert(sizeof(struct NTFS_DE) == 0x10);
91719 +static inline void de_set_vbn_le(struct NTFS_DE *e, __le64 vcn)
91721 +       __le64 *v = Add2Ptr(e, le16_to_cpu(e->size) - sizeof(__le64));
91723 +       *v = vcn;
91726 +static inline void de_set_vbn(struct NTFS_DE *e, CLST vcn)
91728 +       __le64 *v = Add2Ptr(e, le16_to_cpu(e->size) - sizeof(__le64));
91730 +       *v = cpu_to_le64(vcn);
91733 +static inline __le64 de_get_vbn_le(const struct NTFS_DE *e)
91735 +       return *(__le64 *)Add2Ptr(e, le16_to_cpu(e->size) - sizeof(__le64));
91738 +static inline CLST de_get_vbn(const struct NTFS_DE *e)
91740 +       __le64 *v = Add2Ptr(e, le16_to_cpu(e->size) - sizeof(__le64));
91742 +       return le64_to_cpu(*v);
91745 +static inline struct NTFS_DE *de_get_next(const struct NTFS_DE *e)
91747 +       return Add2Ptr(e, le16_to_cpu(e->size));
91750 +static inline struct ATTR_FILE_NAME *de_get_fname(const struct NTFS_DE *e)
91752 +       return le16_to_cpu(e->key_size) >= SIZEOF_ATTRIBUTE_FILENAME ?
91753 +                      Add2Ptr(e, sizeof(struct NTFS_DE)) :
91754 +                      NULL;
91757 +static inline bool de_is_last(const struct NTFS_DE *e)
91759 +       return e->flags & NTFS_IE_LAST;
91762 +static inline bool de_has_vcn(const struct NTFS_DE *e)
91764 +       return e->flags & NTFS_IE_HAS_SUBNODES;
91767 +static inline bool de_has_vcn_ex(const struct NTFS_DE *e)
91769 +       return (e->flags & NTFS_IE_HAS_SUBNODES) &&
91770 +              (u64)(-1) != *((u64 *)Add2Ptr(e, le16_to_cpu(e->size) -
91771 +                                                       sizeof(__le64)));
91774 +#define MAX_BYTES_PER_NAME_ENTRY                                              \
91775 +       QuadAlign(sizeof(struct NTFS_DE) +                                     \
91776 +                 offsetof(struct ATTR_FILE_NAME, name) +                      \
91777 +                 NTFS_NAME_LEN * sizeof(short))
91779 +struct INDEX_HDR {
91780 +       __le32 de_off;  // 0x00: The offset from the start of this structure
91781 +                       // to the first NTFS_DE
91782 +       __le32 used;    // 0x04: The size of this structure plus all
91783 +                       // entries (quad-word aligned)
91784 +       __le32 total;   // 0x08: The allocated size for this structure plus all entries
91785 +       u8 flags;       // 0x0C: 0x00 = Small directory, 0x01 = Large directory
91786 +       u8 res[3];
91788 +       //
91789 +       // de_off + used <= total
91790 +       //
91793 +static_assert(sizeof(struct INDEX_HDR) == 0x10);
91795 +static inline struct NTFS_DE *hdr_first_de(const struct INDEX_HDR *hdr)
91797 +       u32 de_off = le32_to_cpu(hdr->de_off);
91798 +       u32 used = le32_to_cpu(hdr->used);
91799 +       struct NTFS_DE *e = Add2Ptr(hdr, de_off);
91800 +       u16 esize;
91802 +       if (de_off >= used || de_off >= le32_to_cpu(hdr->total))
91803 +               return NULL;
91805 +       esize = le16_to_cpu(e->size);
91806 +       if (esize < sizeof(struct NTFS_DE) || de_off + esize > used)
91807 +               return NULL;
91809 +       return e;
91812 +static inline struct NTFS_DE *hdr_next_de(const struct INDEX_HDR *hdr,
91813 +                                         const struct NTFS_DE *e)
91815 +       size_t off = PtrOffset(hdr, e);
91816 +       u32 used = le32_to_cpu(hdr->used);
91817 +       u16 esize;
91819 +       if (off >= used)
91820 +               return NULL;
91822 +       esize = le16_to_cpu(e->size);
91824 +       if (esize < sizeof(struct NTFS_DE) ||
91825 +           off + esize + sizeof(struct NTFS_DE) > used)
91826 +               return NULL;
91828 +       return Add2Ptr(e, esize);
91831 +static inline bool hdr_has_subnode(const struct INDEX_HDR *hdr)
91833 +       return hdr->flags & 1;
91834 +}
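A minimal usage sketch (editorial addition, not part of the patch) tying the helpers above together: entries are walked from hdr_first_de() via hdr_next_de() until the NTFS_IE_LAST terminator. example_count_entries is a hypothetical name:

static inline u32 example_count_entries(const struct INDEX_HDR *hdr)
{
	const struct NTFS_DE *e;
	u32 n = 0;

	for (e = hdr_first_de(hdr); e && !de_is_last(e);
	     e = hdr_next_de(hdr, e))
		n += 1;

	return n;
}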
91836 +struct INDEX_BUFFER {
91837 +       struct NTFS_RECORD_HEADER rhdr; // 'INDX'
91838 +       __le64 vbn; // 0x10: vcn if index >= cluster, or vsn if index < cluster
91839 +       struct INDEX_HDR ihdr; // 0x18:
91842 +static_assert(sizeof(struct INDEX_BUFFER) == 0x28);
91844 +static inline bool ib_is_empty(const struct INDEX_BUFFER *ib)
91846 +       const struct NTFS_DE *first = hdr_first_de(&ib->ihdr);
91848 +       return !first || de_is_last(first);
91851 +static inline bool ib_is_leaf(const struct INDEX_BUFFER *ib)
91853 +       return !(ib->ihdr.flags & 1);
91856 +/* Index root structure ( 0x90 ) */
91857 +enum COLLATION_RULE {
91858 +       NTFS_COLLATION_TYPE_BINARY      = cpu_to_le32(0),
91859 +       // $I30
91860 +       NTFS_COLLATION_TYPE_FILENAME    = cpu_to_le32(0x01),
91861 +       // $SII of $Secure and $Q of Quota
91862 +       NTFS_COLLATION_TYPE_UINT        = cpu_to_le32(0x10),
91863 +       // $O of Quota
91864 +       NTFS_COLLATION_TYPE_SID         = cpu_to_le32(0x11),
91865 +       // $SDH of $Secure
91866 +       NTFS_COLLATION_TYPE_SECURITY_HASH = cpu_to_le32(0x12),
91867 +       // $O of ObjId and "$R" for Reparse
91868 +       NTFS_COLLATION_TYPE_UINTS       = cpu_to_le32(0x13)
91871 +static_assert(sizeof(enum COLLATION_RULE) == 4);
91874 +struct INDEX_ROOT {
91875 +       enum ATTR_TYPE type;    // 0x00: The type of attribute to index on
91876 +       enum COLLATION_RULE rule; // 0x04: The rule
91877 +       __le32 index_block_size;// 0x08: The size of index record
91878 +       u8 index_block_clst;    // 0x0C: The number of clusters or sectors per index
91879 +       u8 res[3];
91880 +       struct INDEX_HDR ihdr;  // 0x10:
91883 +static_assert(sizeof(struct INDEX_ROOT) == 0x20);
91884 +static_assert(offsetof(struct INDEX_ROOT, ihdr) == 0x10);
91886 +#define VOLUME_FLAG_DIRTY          cpu_to_le16(0x0001)
91887 +#define VOLUME_FLAG_RESIZE_LOG_FILE cpu_to_le16(0x0002)
91889 +struct VOLUME_INFO {
91890 +       __le64 res1;    // 0x00
91891 +       u8 major_ver;   // 0x08: NTFS major version number (before .)
91892 +       u8 minor_ver;   // 0x09: NTFS minor version number (after .)
91893 +       __le16 flags;   // 0x0A: Volume flags, see VOLUME_FLAG_XXX
91895 +}; // sizeof=0xC
91897 +#define SIZEOF_ATTRIBUTE_VOLUME_INFO 0xc
91899 +#define NTFS_LABEL_MAX_LENGTH          (0x100 / sizeof(short))
91900 +#define NTFS_ATTR_INDEXABLE            cpu_to_le32(0x00000002)
91901 +#define NTFS_ATTR_DUPALLOWED           cpu_to_le32(0x00000004)
91902 +#define NTFS_ATTR_MUST_BE_INDEXED      cpu_to_le32(0x00000010)
91903 +#define NTFS_ATTR_MUST_BE_NAMED                cpu_to_le32(0x00000020)
91904 +#define NTFS_ATTR_MUST_BE_RESIDENT     cpu_to_le32(0x00000040)
91905 +#define NTFS_ATTR_LOG_ALWAYS           cpu_to_le32(0x00000080)
91907 +/* $AttrDef file entry */
91908 +struct ATTR_DEF_ENTRY {
91909 +       __le16 name[0x40];      // 0x00: Attr name
91910 +       enum ATTR_TYPE type;    // 0x80: struct ATTRIB type
91911 +       __le32 res;             // 0x84:
91912 +       enum COLLATION_RULE rule; // 0x88:
91913 +       __le32 flags;           // 0x8C: NTFS_ATTR_XXX (see above)
91914 +       __le64 min_sz;          // 0x90: Minimum attribute data size
91915 +       __le64 max_sz;          // 0x98: Maximum attribute data size
91918 +static_assert(sizeof(struct ATTR_DEF_ENTRY) == 0xa0);
91920 +/* Object ID (0x40) */
91921 +struct OBJECT_ID {
91922 +       struct GUID ObjId;      // 0x00: Unique Id assigned to file
91923 +       struct GUID BirthVolumeId;// 0x10: Birth Volume Id is the Object Id of the Volume on
91924 +                               // which the Object Id was allocated. It never changes
91925 +       struct GUID BirthObjectId; // 0x20: Birth Object Id is the first Object Id that was
91926 +                               // ever assigned to this MFT Record. I.e. If the Object Id
91927 +                               // is changed for some reason, this field will reflect the
91928 +                               // original value of the Object Id.
91929 +       struct GUID DomainId;   // 0x30: Domain Id is currently unused but it is intended to be
91930 +                               // used in a network environment where the local machine is
91931 +                               // part of a Windows 2000 Domain. This may be used in a Windows
91932 +                               // 2000 Advanced Server managed domain.
91935 +static_assert(sizeof(struct OBJECT_ID) == 0x40);
91937 +/* O Directory entry structure ( rule = 0x13 ) */
91938 +struct NTFS_DE_O {
91939 +       struct NTFS_DE de;
91940 +       struct GUID ObjId;      // 0x10: Unique Id assigned to file
91941 +       struct MFT_REF ref;     // 0x20: MFT record number with this file
91942 +       struct GUID BirthVolumeId; // 0x28: Birth Volume Id is the Object Id of the Volume on
91943 +                               // which the Object Id was allocated. It never changes
91944 +       struct GUID BirthObjectId; // 0x38: Birth Object Id is the first Object Id that was
91945 +                               // ever assigned to this MFT Record. I.e. If the Object Id
91946 +                               // is changed for some reason, this field will reflect the
91947 +                               // original value of the Object Id.
91948 +                               // This field is valid if data_size == 0x48
91949 +       struct GUID BirthDomainId; // 0x48: Domain Id is currently unused but it is intended
91950 +                               // to be used in a network environment where the local
91951 +                               // machine is part of a Windows 2000 Domain. This may be
91952 +                               // used in a Windows 2000 Advanced Server managed domain.
91955 +static_assert(sizeof(struct NTFS_DE_O) == 0x58);
91957 +#define NTFS_OBJECT_ENTRY_DATA_SIZE1                                          \
91958 +       0x38 // struct NTFS_DE_O.BirthDomainId is not used
91959 +#define NTFS_OBJECT_ENTRY_DATA_SIZE2                                          \
91960 +       0x48 // struct NTFS_DE_O.BirthDomainId is used
91962 +/* Q Directory entry structure ( rule = 0x11 ) */
91963 +struct NTFS_DE_Q {
91964 +       struct NTFS_DE de;
91965 +       __le32 owner_id;        // 0x10: Unique Id assigned to file
91966 +       __le32 Version;         // 0x14: 0x02
91967 +       __le32 flags2;          // 0x18: Quota flags, see above
91968 +       __le64 BytesUsed;       // 0x1C:
91969 +       __le64 ChangeTime;      // 0x24:
91970 +       __le64 WarningLimit;    // 0x28:
91971 +       __le64 HardLimit;       // 0x34:
91972 +       __le64 ExceededTime;    // 0x3C:
91974 +       // SID is placed here
91975 +}; // sizeof() = 0x44
91977 +#define SIZEOF_NTFS_DE_Q 0x44
91979 +#define SecurityDescriptorsBlockSize 0x40000 // 256K
91980 +#define SecurityDescriptorMaxSize    0x20000 // 128K
91981 +#define Log2OfSecurityDescriptorsBlockSize 18
91983 +struct SECURITY_KEY {
91984 +       __le32 hash; //  Hash value for descriptor
91985 +       __le32 sec_id; //  Security Id (guaranteed unique)
91988 +/* Security descriptors (the content of $Secure::SDS data stream) */
91989 +struct SECURITY_HDR {
91990 +       struct SECURITY_KEY key;        // 0x00: Security Key
91991 +       __le64 off;                     // 0x08: Offset of this entry in the file
91992 +       __le32 size;                    // 0x10: Size of this entry, 8 byte aligned
91993 +       //
91994 +       // Security descriptor itself is placed here
91995 +       // Total size is 16 byte aligned
91996 +       //
91997 +} __packed;
91999 +#define SIZEOF_SECURITY_HDR 0x14
92001 +/* SII Directory entry structure */
92002 +struct NTFS_DE_SII {
92003 +       struct NTFS_DE de;
92004 +       __le32 sec_id;                  // 0x10: Key: sizeof(security_id) = wKeySize
92005 +       struct SECURITY_HDR sec_hdr;    // 0x14:
92006 +} __packed;
92008 +#define SIZEOF_SII_DIRENTRY 0x28
92010 +/* SDH Directory entry structure */
92011 +struct NTFS_DE_SDH {
92012 +       struct NTFS_DE de;
92013 +       struct SECURITY_KEY key;        // 0x10: Key
92014 +       struct SECURITY_HDR sec_hdr;    // 0x18: Data
92015 +       __le16 magic[2];                // 0x2C: 0x00490049 "I I"
92018 +#define SIZEOF_SDH_DIRENTRY 0x30
92020 +struct REPARSE_KEY {
92021 +       __le32 ReparseTag;              // 0x00: Reparse Tag
92022 +       struct MFT_REF ref;             // 0x04: MFT record number with this file
92023 +}; // sizeof() = 0x0C
92025 +static_assert(offsetof(struct REPARSE_KEY, ref) == 0x04);
92026 +#define SIZEOF_REPARSE_KEY 0x0C
92028 +/* Reparse Directory entry structure */
92029 +struct NTFS_DE_R {
92030 +       struct NTFS_DE de;
92031 +       struct REPARSE_KEY key;         // 0x10: Reparse Key
92032 +       u32 zero;                       // 0x1c
92033 +}; // sizeof() = 0x20
92035 +static_assert(sizeof(struct NTFS_DE_R) == 0x20);
92037 +/* CompressReparseBuffer.WofVersion */
92038 +#define WOF_CURRENT_VERSION            cpu_to_le32(1)
92039 +/* CompressReparseBuffer.WofProvider */
92040 +#define WOF_PROVIDER_WIM               cpu_to_le32(1)
92041 +/* CompressReparseBuffer.WofProvider */
92042 +#define WOF_PROVIDER_SYSTEM            cpu_to_le32(2)
92043 +/* CompressReparseBuffer.ProviderVer */
92044 +#define WOF_PROVIDER_CURRENT_VERSION   cpu_to_le32(1)
92046 +#define WOF_COMPRESSION_XPRESS4K       cpu_to_le32(0) // 4k
92047 +#define WOF_COMPRESSION_LZX32K         cpu_to_le32(1) // 32k
92048 +#define WOF_COMPRESSION_XPRESS8K       cpu_to_le32(2) // 8k
92049 +#define WOF_COMPRESSION_XPRESS16K      cpu_to_le32(3) // 16k
92052 + * ATTR_REPARSE (0xC0)
92053 + *
92054 + * The reparse struct GUID structure is used by all 3rd party layered drivers to
92055 + * store data in a reparse point. For non-Microsoft tags, the struct GUID field
92056 + * cannot be GUID_NULL.
92057 + * The constraints on reparse tags are defined below.
92058 + * Microsoft tags can also be used with this format of the reparse point buffer.
92059 + */
92060 +struct REPARSE_POINT {
92061 +       __le32 ReparseTag;      // 0x00:
92062 +       __le16 ReparseDataLength;// 0x04:
92063 +       __le16 Reserved;
92065 +       struct GUID Guid;       // 0x08:
92067 +       //
92068 +       // Here GenericReparseBuffer is placed
92069 +       //
92072 +static_assert(sizeof(struct REPARSE_POINT) == 0x18);
92075 +// Maximum allowed size of the reparse data.
92077 +#define MAXIMUM_REPARSE_DATA_BUFFER_SIZE       (16 * 1024)
92080 +// The value of the following constant needs to satisfy the following
92081 +// conditions:
92082 +//  (1) Be at least as large as the largest of the reserved tags.
92083 +//  (2) Be strictly smaller than all the tags in use.
92085 +#define IO_REPARSE_TAG_RESERVED_RANGE          1
92088 +// The reparse tags are a ULONG. The 32 bits are laid out as follows:
92090 +//   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
92091 +//   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
92092 +//  +-+-+-+-+-----------------------+-------------------------------+
92093 +//  |M|R|N|R|    Reserved bits     |       Reparse Tag Value       |
92094 +//  +-+-+-+-+-----------------------+-------------------------------+
92096 +// M is the Microsoft bit. When set to 1, it denotes a tag owned by Microsoft.
92097 +//   All ISVs must use a tag with a 0 in this position.
92098 +//   Note: If a Microsoft tag is used by non-Microsoft software, the
92099 +//   behavior is not defined.
92101 +// R is reserved.  Must be zero for non-Microsoft tags.
92103 +// N is name surrogate. When set to 1, the file represents another named
92104 +//   entity in the system.
92106 +// The M and N bits are OR-able.
92107 +// The following macros check for the M and N bit values:
92111 +// Macro to determine whether a reparse point tag corresponds to a tag
92112 +// owned by Microsoft.
92114 +#define IsReparseTagMicrosoft(_tag)    (((_tag)&IO_REPARSE_TAG_MICROSOFT))
92117 +// Macro to determine whether a reparse point tag is a name surrogate
92119 +#define IsReparseTagNameSurrogate(_tag)        (((_tag)&IO_REPARSE_TAG_NAME_SURROGATE))
92122 +// The following constant represents the bits that are valid to use in
92123 +// reparse tags.
92125 +#define IO_REPARSE_TAG_VALID_VALUES    0xF000FFFF
92128 +// Macro to determine whether a reparse tag is a valid tag.
92130 +#define IsReparseTagValid(_tag)                                                       \
92131 +       (!((_tag) & ~IO_REPARSE_TAG_VALID_VALUES) &&                           \
92132 +        ((_tag) > IO_REPARSE_TAG_RESERVED_RANGE))
92135 +// Microsoft tags for reparse points.
92138 +enum IO_REPARSE_TAG {
92139 +       IO_REPARSE_TAG_SYMBOLIC_LINK    = cpu_to_le32(0),
92140 +       IO_REPARSE_TAG_NAME_SURROGATE   = cpu_to_le32(0x20000000),
92141 +       IO_REPARSE_TAG_MICROSOFT        = cpu_to_le32(0x80000000),
92142 +       IO_REPARSE_TAG_MOUNT_POINT      = cpu_to_le32(0xA0000003),
92143 +       IO_REPARSE_TAG_SYMLINK          = cpu_to_le32(0xA000000C),
92144 +       IO_REPARSE_TAG_HSM              = cpu_to_le32(0xC0000004),
92145 +       IO_REPARSE_TAG_SIS              = cpu_to_le32(0x80000007),
92146 +       IO_REPARSE_TAG_DEDUP            = cpu_to_le32(0x80000013),
92147 +       IO_REPARSE_TAG_COMPRESS         = cpu_to_le32(0x80000017),
92149 +       //
92150 +       // The reparse tag 0x80000008 is reserved for Microsoft internal use
92151 +       // (may be published in the future)
92152 +       //
92154 +       //
92155 +       // Microsoft reparse tag reserved for DFS
92156 +       //
92157 +       IO_REPARSE_TAG_DFS              = cpu_to_le32(0x8000000A),
92159 +       //
92160 +       // Microsoft reparse tag reserved for the file system filter manager
92161 +       //
92162 +       IO_REPARSE_TAG_FILTER_MANAGER   = cpu_to_le32(0x8000000B),
92164 +       //
92165 +       // Non-Microsoft tags for reparse points
92166 +       //
92168 +       //
92169 +       // Tag allocated to CONGRUENT, May 2000. Used by IFSTEST
92170 +       //
92171 +       IO_REPARSE_TAG_IFSTEST_CONGRUENT = cpu_to_le32(0x00000009),
92173 +       //
92174 +       // Tag allocated to ARKIVIO
92175 +       //
92176 +       IO_REPARSE_TAG_ARKIVIO          = cpu_to_le32(0x0000000C),
92178 +       //
92179 +       //  Tag allocated to SOLUTIONSOFT
92180 +       //
92181 +       IO_REPARSE_TAG_SOLUTIONSOFT     = cpu_to_le32(0x2000000D),
92183 +       //
92184 +       //  Tag allocated to COMMVAULT
92185 +       //
92186 +       IO_REPARSE_TAG_COMMVAULT        = cpu_to_le32(0x0000000E),
92188 +       // OneDrive??
92189 +       IO_REPARSE_TAG_CLOUD            = cpu_to_le32(0x9000001A),
92190 +       IO_REPARSE_TAG_CLOUD_1          = cpu_to_le32(0x9000101A),
92191 +       IO_REPARSE_TAG_CLOUD_2          = cpu_to_le32(0x9000201A),
92192 +       IO_REPARSE_TAG_CLOUD_3          = cpu_to_le32(0x9000301A),
92193 +       IO_REPARSE_TAG_CLOUD_4          = cpu_to_le32(0x9000401A),
92194 +       IO_REPARSE_TAG_CLOUD_5          = cpu_to_le32(0x9000501A),
92195 +       IO_REPARSE_TAG_CLOUD_6          = cpu_to_le32(0x9000601A),
92196 +       IO_REPARSE_TAG_CLOUD_7          = cpu_to_le32(0x9000701A),
92197 +       IO_REPARSE_TAG_CLOUD_8          = cpu_to_le32(0x9000801A),
92198 +       IO_REPARSE_TAG_CLOUD_9          = cpu_to_le32(0x9000901A),
92199 +       IO_REPARSE_TAG_CLOUD_A          = cpu_to_le32(0x9000A01A),
92200 +       IO_REPARSE_TAG_CLOUD_B          = cpu_to_le32(0x9000B01A),
92201 +       IO_REPARSE_TAG_CLOUD_C          = cpu_to_le32(0x9000C01A),
92202 +       IO_REPARSE_TAG_CLOUD_D          = cpu_to_le32(0x9000D01A),
92203 +       IO_REPARSE_TAG_CLOUD_E          = cpu_to_le32(0x9000E01A),
92204 +       IO_REPARSE_TAG_CLOUD_F          = cpu_to_le32(0x9000F01A),
92205 +};
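A usage sketch (editorial addition, not part of the patch) of the tag macros defined earlier: IO_REPARSE_TAG_SYMLINK (0xA000000C) sets both the Microsoft and the name-surrogate bits, so it satisfies this hypothetical predicate:

static inline bool example_is_ms_name_surrogate(__le32 tag)
{
	return IsReparseTagMicrosoft(tag) && IsReparseTagNameSurrogate(tag);
}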
92208 +#define SYMLINK_FLAG_RELATIVE          1
92210 +/* Microsoft reparse buffer. (see DDK for details) */
92211 +struct REPARSE_DATA_BUFFER {
92212 +       __le32 ReparseTag;              // 0x00:
92213 +       __le16 ReparseDataLength;       // 0x04:
92214 +       __le16 Reserved;
92216 +       union {
92217 +               // If ReparseTag == 0xA0000003 (IO_REPARSE_TAG_MOUNT_POINT)
92218 +               struct {
92219 +                       __le16 SubstituteNameOffset; // 0x08
92220 +                       __le16 SubstituteNameLength; // 0x0A
92221 +                       __le16 PrintNameOffset;      // 0x0C
92222 +                       __le16 PrintNameLength;      // 0x0E
92223 +                       __le16 PathBuffer[];         // 0x10
92224 +               } MountPointReparseBuffer;
92226 +               // If ReparseTag == 0xA000000C (IO_REPARSE_TAG_SYMLINK)
92227 +               // https://msdn.microsoft.com/en-us/library/cc232006.aspx
92228 +               struct {
92229 +                       __le16 SubstituteNameOffset; // 0x08
92230 +                       __le16 SubstituteNameLength; // 0x0A
92231 +                       __le16 PrintNameOffset;      // 0x0C
92232 +                       __le16 PrintNameLength;      // 0x0E
92233 +                       // 0 - absolute path, 1 - relative path (SYMLINK_FLAG_RELATIVE)
92234 +                       __le32 Flags;                // 0x10
92235 +                       __le16 PathBuffer[];         // 0x14
92236 +               } SymbolicLinkReparseBuffer;
92238 +               // If ReparseTag == 0x80000017U
92239 +               struct {
92240 +                       __le32 WofVersion;  // 0x08 == 1
92241 +                       /* 1 - WIM backing provider ("WIMBoot"),
92242 +                        * 2 - System compressed file provider
92243 +                        */
92244 +                       __le32 WofProvider; // 0x0C
92245 +                       __le32 ProviderVer; // 0x10: WOF_FILE_PROVIDER_CURRENT_VERSION == 1
92246 +                       __le32 CompressionFormat; // 0x14: 0, 1, 2, 3. See WOF_COMPRESSION_XXX
92247 +               } CompressReparseBuffer;
92249 +               struct {
92250 +                       u8 DataBuffer[1];   // 0x08
92251 +               } GenericReparseBuffer;
92252 +       };
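For reference, a kernel-context sketch (not part of this patch) of how a symlink target would be located inside this buffer: the name offsets are byte offsets into PathBuffer, the names are UTF-16LE and not NUL-terminated, and Add2Ptr is the driver's pointer-arithmetic helper. 'rp' is assumed to be an already validated buffer with ReparseTag == IO_REPARSE_TAG_SYMLINK.

        u16 off = le16_to_cpu(rp->SymbolicLinkReparseBuffer.SubstituteNameOffset);
        u16 len = le16_to_cpu(rp->SymbolicLinkReparseBuffer.SubstituteNameLength);
        /* 'len' is in bytes: the name has len / sizeof(__le16) UTF-16 code units. */
        const __le16 *name = Add2Ptr(rp->SymbolicLinkReparseBuffer.PathBuffer, off);
        bool relative = le32_to_cpu(rp->SymbolicLinkReparseBuffer.Flags) &
                        SYMLINK_FLAG_RELATIVE;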
92255 +/* ATTR_EA_INFO (0xD0) */
92257 +#define FILE_NEED_EA 0x80 // See ntifs.h
92258 +/* FILE_NEED_EA, indicates that the file to which the EA belongs cannot be
92259 + * interpreted without understanding the associated extended attributes.
92260 + */
92261 +struct EA_INFO {
92262 +       __le16 size_pack;       // 0x00: Size of buffer to hold in packed form
92263 +       __le16 count;           // 0x02: Count of EA's with FILE_NEED_EA bit set
92264 +       __le32 size;            // 0x04: Size of buffer to hold in unpacked form
92267 +static_assert(sizeof(struct EA_INFO) == 8);
92269 +/* ATTR_EA (0xE0) */
92270 +struct EA_FULL {
92271 +       __le32 size;            // 0x00: (not in packed)
92272 +       u8 flags;               // 0x04
92273 +       u8 name_len;            // 0x05
92274 +       __le16 elength;         // 0x06
92275 +       u8 name[];              // 0x08
92278 +static_assert(offsetof(struct EA_FULL, name) == 8);
92280 +#define ACL_REVISION   2
92281 +#define ACL_REVISION_DS 4
92283 +#define SE_SELF_RELATIVE cpu_to_le16(0x8000)
92285 +struct SECURITY_DESCRIPTOR_RELATIVE {
92286 +       u8 Revision;
92287 +       u8 Sbz1;
92288 +       __le16 Control;
92289 +       __le32 Owner;
92290 +       __le32 Group;
92291 +       __le32 Sacl;
92292 +       __le32 Dacl;
92294 +static_assert(sizeof(struct SECURITY_DESCRIPTOR_RELATIVE) == 0x14);
92296 +struct ACE_HEADER {
92297 +       u8 AceType;
92298 +       u8 AceFlags;
92299 +       __le16 AceSize;
92301 +static_assert(sizeof(struct ACE_HEADER) == 4);
92303 +struct ACL {
92304 +       u8 AclRevision;
92305 +       u8 Sbz1;
92306 +       __le16 AclSize;
92307 +       __le16 AceCount;
92308 +       __le16 Sbz2;
92310 +static_assert(sizeof(struct ACL) == 8);
92312 +struct SID {
92313 +       u8 Revision;
92314 +       u8 SubAuthorityCount;
92315 +       u8 IdentifierAuthority[6];
92316 +       __le32 SubAuthority[];
92318 +static_assert(offsetof(struct SID, SubAuthority) == 8);
92320 +// clang-format on
92321 diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
92322 new file mode 100644
92323 index 000000000000..5e1dd628d3cc
92324 --- /dev/null
92325 +++ b/fs/ntfs3/ntfs_fs.h
92326 @@ -0,0 +1,1085 @@
92327 +/* SPDX-License-Identifier: GPL-2.0 */
92329 + *
92330 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
92331 + *
92332 + */
92334 +// clang-format off
92335 +#define MINUS_ONE_T                    ((size_t)(-1))
92336 +/* Biggest MFT / smallest cluster */
92337 +#define MAXIMUM_BYTES_PER_MFT          4096
92338 +#define NTFS_BLOCKS_PER_MFT_RECORD     (MAXIMUM_BYTES_PER_MFT / 512)
92340 +#define MAXIMUM_BYTES_PER_INDEX                4096
92341 +#define NTFS_BLOCKS_PER_INODE          (MAXIMUM_BYTES_PER_INDEX / 512)
92343 +/* NTFS-specific error code when fixup failed */
92344 +#define E_NTFS_FIXUP                   555
92345 +/* NTFS-specific error code for the resident->nonresident transition */
92346 +#define E_NTFS_NONRESIDENT             556
92348 +/* sbi->flags */
92349 +#define NTFS_FLAGS_NODISCARD           0x00000001
92350 +/* Set while the LogFile is being replayed */
92351 +#define NTFS_FLAGS_LOG_REPLAYING       0x00000008
92352 +/* Set when the first MFT records have changed and their copy must be updated in $MftMirr */
92353 +#define NTFS_FLAGS_MFTMIRR             0x00001000
92354 +#define NTFS_FLAGS_NEED_REPLAY         0x04000000
92357 +/* ni->ni_flags */
92359 + * Data attribute is externally compressed (lzx/xpress)
92360 + * 1 - WOF_COMPRESSION_XPRESS4K
92361 + * 2 - WOF_COMPRESSION_XPRESS8K
92362 + * 3 - WOF_COMPRESSION_XPRESS16K
92363 + * 4 - WOF_COMPRESSION_LZX32K
92364 + */
92365 +#define NI_FLAG_COMPRESSED_MASK                0x0000000f
92366 +/* Data attribute is deduplicated */
92367 +#define NI_FLAG_DEDUPLICATED           0x00000010
92368 +#define NI_FLAG_EA                     0x00000020
92369 +#define NI_FLAG_DIR                    0x00000040
92370 +#define NI_FLAG_RESIDENT               0x00000080
92371 +#define NI_FLAG_UPDATE_PARENT          0x00000100
92372 +// clang-format on
92374 +struct ntfs_mount_options {
92375 +       struct nls_table *nls;
92377 +       kuid_t fs_uid;
92378 +       kgid_t fs_gid;
92379 +       u16 fs_fmask_inv;
92380 +       u16 fs_dmask_inv;
92382 +       unsigned uid : 1, /* uid was set */
92383 +               gid : 1, /* gid was set */
92384 +               fmask : 1, /* fmask was set */
92385 +               dmask : 1, /* dmask was set */
92386 +               sys_immutable : 1, /* immutable system files */
92387 +               discard : 1, /* issue discard requests on deletions */
92388 +               sparse : 1, /* create sparse files */
92389 +               showmeta : 1, /* show meta files */
92390 +               nohidden : 1, /* do not show hidden files */
92391 +               force : 1, /* allow rw mount of a dirty volume */
92392 +               no_acs_rules : 1, /* exclude acs rules */
92393 +               prealloc : 1 /* preallocate space when a file is growing */
92394 +               ;
92397 +/* Special value to unpack and deallocate */
92398 +#define RUN_DEALLOCATE ((struct runs_tree *)(size_t)1)
92400 +/* TODO: use rb tree instead of array */
92401 +struct runs_tree {
92402 +       struct ntfs_run *runs;
92403 +       size_t count; // Currently used size of the ntfs_run storage.
92404 +       size_t allocated; // Currently allocated ntfs_run storage size.
92407 +struct ntfs_buffers {
92408 +       /* Biggest MFT / smallest cluster = 4096 / 512 = 8 */
92409 +       /* Biggest index / smallest cluster = 4096 / 512 = 8 */
92410 +       struct buffer_head *bh[PAGE_SIZE >> SECTOR_SHIFT];
92411 +       u32 bytes;
92412 +       u32 nbufs;
92413 +       u32 off;
92416 +enum ALLOCATE_OPT {
92417 +       ALLOCATE_DEF = 0, // Allocate all clusters
92418 +       ALLOCATE_MFT = 1, // Allocate for MFT
92421 +enum bitmap_mutex_classes {
92422 +       BITMAP_MUTEX_CLUSTERS = 0,
92423 +       BITMAP_MUTEX_MFT = 1,
92426 +struct wnd_bitmap {
92427 +       struct super_block *sb;
92428 +       struct rw_semaphore rw_lock;
92430 +       struct runs_tree run;
92431 +       size_t nbits;
92433 +       size_t total_zeroes; // total number of free bits
92434 +       u16 *free_bits; // free bits in each window
92435 +       size_t nwnd;
92436 +       u32 bits_last; // bits in last window
92438 +       struct rb_root start_tree; // extents, sorted by 'start'
92439 +       struct rb_root count_tree; // extents, sorted by 'count + start'
92440 +       size_t count; // extents count
92442 +       /*
92443 +        * -1 - Tree is activated but not updated (too many fragments)
92444 +        *  0 - Tree is not activated
92445 +        *  1 - Tree is activated and updated
92446 +        */
92447 +       int uptodated;
92448 +       size_t extent_min; // Minimal extent used while building
92449 +       size_t extent_max; // Upper estimate of biggest free block
92451 +       /* Zone [bit, end) */
92452 +       size_t zone_bit;
92453 +       size_t zone_end;
92455 +       bool set_tail; // not necessary in driver
92456 +       bool inited;
92459 +typedef int (*NTFS_CMP_FUNC)(const void *key1, size_t len1, const void *key2,
92460 +                            size_t len2, const void *param);
92462 +enum index_mutex_classed {
92463 +       INDEX_MUTEX_I30 = 0,
92464 +       INDEX_MUTEX_SII = 1,
92465 +       INDEX_MUTEX_SDH = 2,
92466 +       INDEX_MUTEX_SO = 3,
92467 +       INDEX_MUTEX_SQ = 4,
92468 +       INDEX_MUTEX_SR = 5,
92469 +       INDEX_MUTEX_TOTAL
92472 +/* ntfs_index - allocation unit inside directory */
92473 +struct ntfs_index {
92474 +       struct runs_tree bitmap_run;
92475 +       struct runs_tree alloc_run;
92476 +       /* read/write access to 'bitmap_run'/'alloc_run' while ntfs_readdir */
92477 +       struct rw_semaphore run_lock;
92479 +       /* TODO: remove 'cmp' */
92480 +       NTFS_CMP_FUNC cmp;
92482 +       u8 index_bits; // log2(root->index_block_size)
92483 +       u8 idx2vbn_bits; // log2(root->index_block_clst)
92484 +       u8 vbn2vbo_bits; // index_block_size < cluster? 9 : cluster_bits
92485 +       u8 type; // index_mutex_classed
92488 +/* Minimum mft zone */
92489 +#define NTFS_MIN_MFT_ZONE 100
92491 +/* ntfs file system in-core superblock data */
92492 +struct ntfs_sb_info {
92493 +       struct super_block *sb;
92495 +       u32 discard_granularity;
92496 +       u64 discard_granularity_mask_inv; // ~(discard_granularity - 1)
92498 +       u32 cluster_size; // bytes per cluster
92499 +       u32 cluster_mask; // == cluster_size - 1
92500 +       u64 cluster_mask_inv; // ~(cluster_size - 1)
92501 +       u32 block_mask; // sb->s_blocksize - 1
92502 +       u32 blocks_per_cluster; // cluster_size / sb->s_blocksize
92504 +       u32 record_size;
92505 +       u32 sector_size;
92506 +       u32 index_size;
92508 +       u8 sector_bits;
92509 +       u8 cluster_bits;
92510 +       u8 record_bits;
92512 +       u64 maxbytes; // Maximum size for normal files
92513 +       u64 maxbytes_sparse; // Maximum size for sparse file
92515 +       u32 flags; // See NTFS_FLAGS_XXX
92517 +       CLST bad_clusters; // The count of marked bad clusters
92519 +       u16 max_bytes_per_attr; // maximum attribute size in record
92520 +       u16 attr_size_tr; // attribute size threshold (320 bytes)
92522 +       /* Records in $Extend */
92523 +       CLST objid_no;
92524 +       CLST quota_no;
92525 +       CLST reparse_no;
92526 +       CLST usn_jrnl_no;
92528 +       struct ATTR_DEF_ENTRY *def_table; // attribute definition table
92529 +       u32 def_entries;
92530 +       u32 ea_max_size;
92532 +       struct MFT_REC *new_rec;
92534 +       u16 *upcase;
92536 +       struct {
92537 +               u64 lbo, lbo2;
92538 +               struct ntfs_inode *ni;
92539 +               struct wnd_bitmap bitmap; // $MFT::Bitmap
92540 +               /*
92541 +                * MFT records [11-24) are used to expand the MFT itself.
92542 +                * They are always marked as used in $MFT::Bitmap.
92543 +                * 'reserved_bitmap' contains the real bitmap of these records.
92544 +                */
92545 +               ulong reserved_bitmap; // bitmap of used records [11 - 24)
92546 +               size_t next_free; // The next record to allocate from
92547 +               size_t used; // mft valid size in records
92548 +               u32 recs_mirr; // Number of records in MFTMirr
92549 +               u8 next_reserved;
92550 +               u8 reserved_bitmap_inited;
92551 +       } mft;
92553 +       struct {
92554 +               struct wnd_bitmap bitmap; // $Bitmap::Data
92555 +               CLST next_free_lcn;
92556 +       } used;
92558 +       struct {
92559 +               u64 size; // in bytes
92560 +               u64 blocks; // in blocks
92561 +               u64 ser_num;
92562 +               struct ntfs_inode *ni;
92563 +               __le16 flags; // cached current VOLUME_INFO::flags, VOLUME_FLAG_DIRTY
92564 +               u8 major_ver;
92565 +               u8 minor_ver;
92566 +               char label[65];
92567 +               bool real_dirty; /* real fs state */
92568 +       } volume;
92570 +       struct {
92571 +               struct ntfs_index index_sii;
92572 +               struct ntfs_index index_sdh;
92573 +               struct ntfs_inode *ni;
92574 +               u32 next_id;
92575 +               u64 next_off;
92577 +               __le32 def_security_id;
92578 +       } security;
92580 +       struct {
92581 +               struct ntfs_index index_r;
92582 +               struct ntfs_inode *ni;
92583 +               u64 max_size; // 16K
92584 +       } reparse;
92586 +       struct {
92587 +               struct ntfs_index index_o;
92588 +               struct ntfs_inode *ni;
92589 +       } objid;
92591 +       struct {
92592 +               struct mutex mtx_lznt;
92593 +               struct lznt *lznt;
92594 +#ifdef CONFIG_NTFS3_LZX_XPRESS
92595 +               struct mutex mtx_xpress;
92596 +               struct xpress_decompressor *xpress;
92597 +               struct mutex mtx_lzx;
92598 +               struct lzx_decompressor *lzx;
92599 +#endif
92600 +       } compress;
92602 +       struct ntfs_mount_options options;
92603 +       struct ratelimit_state msg_ratelimit;
92607 + * One MFT record (usually 1024 bytes); consists of attributes.
92608 + */
92609 +struct mft_inode {
92610 +       struct rb_node node;
92611 +       struct ntfs_sb_info *sbi;
92613 +       struct MFT_REC *mrec;
92614 +       struct ntfs_buffers nb;
92616 +       CLST rno;
92617 +       bool dirty;
92620 +/* nested class for ntfs_inode::ni_lock */
92621 +enum ntfs_inode_mutex_lock_class {
92622 +       NTFS_INODE_MUTEX_DIRTY,
92623 +       NTFS_INODE_MUTEX_SECURITY,
92624 +       NTFS_INODE_MUTEX_OBJID,
92625 +       NTFS_INODE_MUTEX_REPARSE,
92626 +       NTFS_INODE_MUTEX_NORMAL,
92627 +       NTFS_INODE_MUTEX_PARENT,
92631 + * ntfs inode - extends the Linux inode; consists of one or more mft inodes.
92632 + */
92633 +struct ntfs_inode {
92634 +       struct mft_inode mi; // base record
92636 +       /*
92637 +        * Valid size: [0 - i_valid) - this range of the file contains valid data.
92638 +        * Range [i_valid - inode->i_size) - contains zeros.
92639 +        * Usually i_valid <= inode->i_size.
92640 +        */
92641 +       u64 i_valid;
92642 +       struct timespec64 i_crtime;
92644 +       struct mutex ni_lock;
92646 +       /* file attributes from std */
92647 +       enum FILE_ATTRIBUTE std_fa;
92648 +       __le32 std_security_id;
92650 +       /*
92651 +        * Tree of mft_inode.
92652 +        * Not empty when the primary MFT record (usually 1024 bytes) can't store all attributes,
92653 +        * e.g. when the file becomes too fragmented or contains a lot of names.
92654 +        */
92655 +       struct rb_root mi_tree;
92657 +       /*
92658 +        * This member is used in ntfs_readdir to ensure that all subrecords are loaded
92659 +        */
92660 +       u8 mi_loaded;
92662 +       union {
92663 +               struct ntfs_index dir;
92664 +               struct {
92665 +                       struct rw_semaphore run_lock;
92666 +                       struct runs_tree run;
92667 +#ifdef CONFIG_NTFS3_LZX_XPRESS
92668 +                       struct page *offs_page;
92669 +#endif
92670 +               } file;
92671 +       };
92673 +       struct {
92674 +               struct runs_tree run;
92675 +               struct ATTR_LIST_ENTRY *le; // 1K aligned memory
92676 +               size_t size;
92677 +               bool dirty;
92678 +       } attr_list;
92680 +       size_t ni_flags; // NI_FLAG_XXX
92682 +       struct inode vfs_inode;
92685 +struct indx_node {
92686 +       struct ntfs_buffers nb;
92687 +       struct INDEX_BUFFER *index;
92690 +struct ntfs_fnd {
92691 +       int level;
92692 +       struct indx_node *nodes[20];
92693 +       struct NTFS_DE *de[20];
92694 +       struct NTFS_DE *root_de;
92697 +enum REPARSE_SIGN {
92698 +       REPARSE_NONE = 0,
92699 +       REPARSE_COMPRESSED = 1,
92700 +       REPARSE_DEDUPLICATED = 2,
92701 +       REPARSE_LINK = 3
92704 +/* functions from attrib.c*/
92705 +int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
92706 +                  struct runs_tree *run, const CLST *vcn);
92707 +int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
92708 +                          CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
92709 +                          enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
92710 +                          CLST *new_lcn);
92711 +int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
92712 +                         struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
92713 +                         u64 new_size, struct runs_tree *run,
92714 +                         struct ATTRIB **ins_attr, struct page *page);
92715 +int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
92716 +                 const __le16 *name, u8 name_len, struct runs_tree *run,
92717 +                 u64 new_size, const u64 *new_valid, bool keep_prealloc,
92718 +                 struct ATTRIB **ret);
92719 +int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
92720 +                       CLST *len, bool *new);
92721 +int attr_data_read_resident(struct ntfs_inode *ni, struct page *page);
92722 +int attr_data_write_resident(struct ntfs_inode *ni, struct page *page);
92723 +int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
92724 +                      const __le16 *name, u8 name_len, struct runs_tree *run,
92725 +                      CLST vcn);
92726 +int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
92727 +                        const __le16 *name, u8 name_len, struct runs_tree *run,
92728 +                        u64 from, u64 to);
92729 +int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
92730 +                       struct runs_tree *run, u64 frame, u64 frames,
92731 +                       u8 frame_bits, u32 *ondisk_size, u64 *vbo_data);
92732 +int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
92733 +                            CLST frame, CLST *clst_data);
92734 +int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
92735 +                       u64 new_valid);
92736 +int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
92737 +int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes);
92739 +/* functions from attrlist.c*/
92740 +void al_destroy(struct ntfs_inode *ni);
92741 +bool al_verify(struct ntfs_inode *ni);
92742 +int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr);
92743 +struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
92744 +                                    struct ATTR_LIST_ENTRY *le);
92745 +struct ATTR_LIST_ENTRY *al_find_le(struct ntfs_inode *ni,
92746 +                                  struct ATTR_LIST_ENTRY *le,
92747 +                                  const struct ATTRIB *attr);
92748 +struct ATTR_LIST_ENTRY *al_find_ex(struct ntfs_inode *ni,
92749 +                                  struct ATTR_LIST_ENTRY *le,
92750 +                                  enum ATTR_TYPE type, const __le16 *name,
92751 +                                  u8 name_len, const CLST *vcn);
92752 +int al_add_le(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name,
92753 +             u8 name_len, CLST svcn, __le16 id, const struct MFT_REF *ref,
92754 +             struct ATTR_LIST_ENTRY **new_le);
92755 +bool al_remove_le(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le);
92756 +bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,
92757 +                 const __le16 *name, size_t name_len,
92758 +                 const struct MFT_REF *ref);
92759 +int al_update(struct ntfs_inode *ni);
92760 +static inline size_t al_aligned(size_t size)
92762 +       return (size + 1023) & ~(size_t)1023;
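al_aligned rounds a byte count up to the next 1K boundary, matching the 1K-aligned attribute-list buffer noted above. A quick standalone check of the arithmetic:

        #include <assert.h>
        #include <stddef.h>

        static size_t al_aligned(size_t size)
        {
                return (size + 1023) & ~(size_t)1023;
        }

        int main(void)
        {
                assert(al_aligned(1) == 1024);
                assert(al_aligned(1024) == 1024); /* already aligned */
                assert(al_aligned(1025) == 2048);
                return 0;
        }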
92765 +/* globals from bitfunc.c */
92766 +bool are_bits_clear(const ulong *map, size_t bit, size_t nbits);
92767 +bool are_bits_set(const ulong *map, size_t bit, size_t nbits);
92768 +size_t get_set_bits_ex(const ulong *map, size_t bit, size_t nbits);
92770 +/* globals from dir.c */
92771 +int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const struct le_str *uni,
92772 +                     u8 *buf, int buf_len);
92773 +int ntfs_nls_to_utf16(struct ntfs_sb_info *sbi, const u8 *name, u32 name_len,
92774 +                     struct cpu_str *uni, u32 max_ulen,
92775 +                     enum utf16_endian endian);
92776 +struct inode *dir_search_u(struct inode *dir, const struct cpu_str *uni,
92777 +                          struct ntfs_fnd *fnd);
92778 +bool dir_is_empty(struct inode *dir);
92779 +extern const struct file_operations ntfs_dir_operations;
92781 +/* globals from file.c*/
92782 +int ntfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
92783 +                struct kstat *stat, u32 request_mask, u32 flags);
92784 +void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn,
92785 +                        CLST len);
92786 +int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
92787 +                 struct iattr *attr);
92788 +int ntfs_file_open(struct inode *inode, struct file *file);
92789 +int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
92790 +               __u64 start, __u64 len);
92791 +extern const struct inode_operations ntfs_special_inode_operations;
92792 +extern const struct inode_operations ntfs_file_inode_operations;
92793 +extern const struct file_operations ntfs_file_operations;
92795 +/* globals from frecord.c */
92796 +void ni_remove_mi(struct ntfs_inode *ni, struct mft_inode *mi);
92797 +struct ATTR_STD_INFO *ni_std(struct ntfs_inode *ni);
92798 +struct ATTR_STD_INFO5 *ni_std5(struct ntfs_inode *ni);
92799 +void ni_clear(struct ntfs_inode *ni);
92800 +int ni_load_mi_ex(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi);
92801 +int ni_load_mi(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
92802 +              struct mft_inode **mi);
92803 +struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr,
92804 +                           struct ATTR_LIST_ENTRY **entry_o,
92805 +                           enum ATTR_TYPE type, const __le16 *name,
92806 +                           u8 name_len, const CLST *vcn,
92807 +                           struct mft_inode **mi);
92808 +struct ATTRIB *ni_enum_attr_ex(struct ntfs_inode *ni, struct ATTRIB *attr,
92809 +                              struct ATTR_LIST_ENTRY **le,
92810 +                              struct mft_inode **mi);
92811 +struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
92812 +                           const __le16 *name, u8 name_len, CLST vcn,
92813 +                           struct mft_inode **pmi);
92814 +int ni_load_all_mi(struct ntfs_inode *ni);
92815 +bool ni_add_subrecord(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi);
92816 +int ni_remove_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
92817 +                  const __le16 *name, size_t name_len, bool base_only,
92818 +                  const __le16 *id);
92819 +int ni_create_attr_list(struct ntfs_inode *ni);
92820 +int ni_expand_list(struct ntfs_inode *ni);
92821 +int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
92822 +                         const __le16 *name, u8 name_len,
92823 +                         const struct runs_tree *run, CLST svcn, CLST len,
92824 +                         __le16 flags, struct ATTRIB **new_attr,
92825 +                         struct mft_inode **mi);
92826 +int ni_insert_resident(struct ntfs_inode *ni, u32 data_size,
92827 +                      enum ATTR_TYPE type, const __le16 *name, u8 name_len,
92828 +                      struct ATTRIB **new_attr, struct mft_inode **mi);
92829 +int ni_remove_attr_le(struct ntfs_inode *ni, struct ATTRIB *attr,
92830 +                     struct ATTR_LIST_ENTRY *le);
92831 +int ni_delete_all(struct ntfs_inode *ni);
92832 +struct ATTR_FILE_NAME *ni_fname_name(struct ntfs_inode *ni,
92833 +                                    const struct cpu_str *uni,
92834 +                                    const struct MFT_REF *home,
92835 +                                    struct ATTR_LIST_ENTRY **entry);
92836 +struct ATTR_FILE_NAME *ni_fname_type(struct ntfs_inode *ni, u8 name_type,
92837 +                                    struct ATTR_LIST_ENTRY **entry);
92838 +int ni_new_attr_flags(struct ntfs_inode *ni, enum FILE_ATTRIBUTE new_fa);
92839 +enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr,
92840 +                                  void *buffer);
92841 +int ni_write_inode(struct inode *inode, int sync, const char *hint);
92842 +#define _ni_write_inode(i, w) ni_write_inode(i, w, __func__)
92843 +int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
92844 +             __u64 vbo, __u64 len);
92845 +int ni_readpage_cmpr(struct ntfs_inode *ni, struct page *page);
92846 +int ni_decompress_file(struct ntfs_inode *ni);
92847 +int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
92848 +                 u32 pages_per_frame);
92849 +int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
92850 +                  u32 pages_per_frame);
92852 +/* globals from fslog.c */
92853 +int log_replay(struct ntfs_inode *ni, bool *initialized);
92855 +/* globals from fsntfs.c */
92856 +bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes);
92857 +int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
92858 +                      bool simple);
92859 +int ntfs_extend_init(struct ntfs_sb_info *sbi);
92860 +int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi);
92861 +const struct ATTR_DEF_ENTRY *ntfs_query_def(struct ntfs_sb_info *sbi,
92862 +                                           enum ATTR_TYPE Type);
92863 +int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
92864 +                            CLST *new_lcn, CLST *new_len,
92865 +                            enum ALLOCATE_OPT opt);
92866 +int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
92867 +                      struct ntfs_inode *ni, struct mft_inode **mi);
92868 +void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno);
92869 +int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to);
92870 +int ntfs_refresh_zone(struct ntfs_sb_info *sbi);
92871 +int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait);
92872 +enum NTFS_DIRTY_FLAGS {
92873 +       NTFS_DIRTY_CLEAR = 0,
92874 +       NTFS_DIRTY_DIRTY = 1,
92875 +       NTFS_DIRTY_ERROR = 2,
92877 +int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty);
92878 +int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer);
92879 +int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
92880 +                 const void *buffer, int wait);
92881 +int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
92882 +                     u64 vbo, const void *buf, size_t bytes);
92883 +struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
92884 +                                  const struct runs_tree *run, u64 vbo);
92885 +int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
92886 +                    u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb);
92887 +int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
92888 +                struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
92889 +                struct ntfs_buffers *nb);
92890 +int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
92891 +               u32 bytes, struct ntfs_buffers *nb);
92892 +int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
92893 +                 struct ntfs_buffers *nb, int sync);
92894 +int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
92895 +                  struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
92896 +                  u32 op);
92897 +int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run);
92898 +int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
92899 +                   u64 vbo, u64 *lbo, u64 *bytes);
92900 +struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST nRec,
92901 +                                 bool dir);
92902 +extern const u8 s_default_security[0x50];
92903 +bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len);
92904 +int ntfs_security_init(struct ntfs_sb_info *sbi);
92905 +int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
92906 +                           struct SECURITY_DESCRIPTOR_RELATIVE **sd,
92907 +                           size_t *size);
92908 +int ntfs_insert_security(struct ntfs_sb_info *sbi,
92909 +                        const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
92910 +                        u32 size, __le32 *security_id, bool *inserted);
92911 +int ntfs_reparse_init(struct ntfs_sb_info *sbi);
92912 +int ntfs_objid_init(struct ntfs_sb_info *sbi);
92913 +int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid);
92914 +int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
92915 +                       const struct MFT_REF *ref);
92916 +int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
92917 +                       const struct MFT_REF *ref);
92918 +void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim);
92919 +int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim);
92921 +/* globals from index.c */
92922 +int indx_used_bit(struct ntfs_index *indx, struct ntfs_inode *ni, size_t *bit);
92923 +void fnd_clear(struct ntfs_fnd *fnd);
92924 +static inline struct ntfs_fnd *fnd_get(void)
92926 +       return ntfs_zalloc(sizeof(struct ntfs_fnd));
92928 +static inline void fnd_put(struct ntfs_fnd *fnd)
92930 +       if (fnd) {
92931 +               fnd_clear(fnd);
92932 +               ntfs_free(fnd);
92933 +       }
92935 +void indx_clear(struct ntfs_index *idx);
92936 +int indx_init(struct ntfs_index *indx, struct ntfs_sb_info *sbi,
92937 +             const struct ATTRIB *attr, enum index_mutex_classed type);
92938 +struct INDEX_ROOT *indx_get_root(struct ntfs_index *indx, struct ntfs_inode *ni,
92939 +                                struct ATTRIB **attr, struct mft_inode **mi);
92940 +int indx_read(struct ntfs_index *idx, struct ntfs_inode *ni, CLST vbn,
92941 +             struct indx_node **node);
92942 +int indx_find(struct ntfs_index *indx, struct ntfs_inode *dir,
92943 +             const struct INDEX_ROOT *root, const void *Key, size_t KeyLen,
92944 +             const void *param, int *diff, struct NTFS_DE **entry,
92945 +             struct ntfs_fnd *fnd);
92946 +int indx_find_sort(struct ntfs_index *indx, struct ntfs_inode *ni,
92947 +                  const struct INDEX_ROOT *root, struct NTFS_DE **entry,
92948 +                  struct ntfs_fnd *fnd);
92949 +int indx_find_raw(struct ntfs_index *indx, struct ntfs_inode *ni,
92950 +                 const struct INDEX_ROOT *root, struct NTFS_DE **entry,
92951 +                 size_t *off, struct ntfs_fnd *fnd);
92952 +int indx_insert_entry(struct ntfs_index *indx, struct ntfs_inode *ni,
92953 +                     const struct NTFS_DE *new_de, const void *param,
92954 +                     struct ntfs_fnd *fnd);
92955 +int indx_delete_entry(struct ntfs_index *indx, struct ntfs_inode *ni,
92956 +                     const void *key, u32 key_len, const void *param);
92957 +int indx_update_dup(struct ntfs_inode *ni, struct ntfs_sb_info *sbi,
92958 +                   const struct ATTR_FILE_NAME *fname,
92959 +                   const struct NTFS_DUP_INFO *dup, int sync);
92961 +/* globals from inode.c */
92962 +struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
92963 +                        const struct cpu_str *name);
92964 +int ntfs_set_size(struct inode *inode, u64 new_size);
92965 +int reset_log_file(struct inode *inode);
92966 +int ntfs_get_block(struct inode *inode, sector_t vbn,
92967 +                  struct buffer_head *bh_result, int create);
92968 +int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc);
92969 +int ntfs_sync_inode(struct inode *inode);
92970 +int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
92971 +                     struct inode *i2);
92972 +int inode_write_data(struct inode *inode, const void *data, size_t bytes);
92973 +struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
92974 +                               struct inode *dir, struct dentry *dentry,
92975 +                               const struct cpu_str *uni, umode_t mode,
92976 +                               dev_t dev, const char *symname, u32 size,
92977 +                               int excl, struct ntfs_fnd *fnd);
92978 +int ntfs_link_inode(struct inode *inode, struct dentry *dentry);
92979 +int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry);
92980 +void ntfs_evict_inode(struct inode *inode);
92981 +extern const struct inode_operations ntfs_link_inode_operations;
92982 +extern const struct address_space_operations ntfs_aops;
92983 +extern const struct address_space_operations ntfs_aops_cmpr;
92985 +/* globals from name_i.c*/
92986 +int fill_name_de(struct ntfs_sb_info *sbi, void *buf, const struct qstr *name,
92987 +                const struct cpu_str *uni);
92988 +struct dentry *ntfs3_get_parent(struct dentry *child);
92990 +extern const struct inode_operations ntfs_dir_inode_operations;
92992 +/* globals from record.c */
92993 +int mi_get(struct ntfs_sb_info *sbi, CLST rno, struct mft_inode **mi);
92994 +void mi_put(struct mft_inode *mi);
92995 +int mi_init(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno);
92996 +int mi_read(struct mft_inode *mi, bool is_mft);
92997 +struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr);
92998 +// TODO: id?
92999 +struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr,
93000 +                           enum ATTR_TYPE type, const __le16 *name,
93001 +                           size_t name_len, const __le16 *id);
93002 +static inline struct ATTRIB *rec_find_attr_le(struct mft_inode *rec,
93003 +                                             struct ATTR_LIST_ENTRY *le)
93005 +       return mi_find_attr(rec, NULL, le->type, le_name(le), le->name_len,
93006 +                           &le->id);
93008 +int mi_write(struct mft_inode *mi, int wait);
93009 +int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
93010 +                 __le16 flags, bool is_mft);
93011 +void mi_mark_free(struct mft_inode *mi);
93012 +struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
93013 +                             const __le16 *name, u8 name_len, u32 asize,
93014 +                             u16 name_off);
93016 +bool mi_remove_attr(struct mft_inode *mi, struct ATTRIB *attr);
93017 +bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes);
93018 +int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
93019 +                struct runs_tree *run, CLST len);
93020 +static inline bool mi_is_ref(const struct mft_inode *mi,
93021 +                            const struct MFT_REF *ref)
93023 +       if (le32_to_cpu(ref->low) != mi->rno)
93024 +               return false;
93025 +       if (ref->seq != mi->mrec->seq)
93026 +               return false;
93028 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
93029 +       return le16_to_cpu(ref->high) == (mi->rno >> 32);
93030 +#else
93031 +       return !ref->high;
93032 +#endif
93035 +static inline void mi_get_ref(const struct mft_inode *mi, struct MFT_REF *ref)
93037 +       ref->low = cpu_to_le32(mi->rno);
93038 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
93039 +       ref->high = cpu_to_le16(mi->rno >> 32);
93040 +#else
93041 +       ref->high = 0;
93042 +#endif
93043 +       ref->seq = mi->mrec->seq;
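mi_is_ref/mi_get_ref treat a MFT_REF as a 48-bit record number split into a 32-bit 'low' part and a 16-bit 'high' part (the latter only meaningful with CONFIG_NTFS3_64BIT_CLUSTER), plus the record sequence number that detects stale references. A standalone sketch of the packing arithmetic:

        #include <assert.h>
        #include <stdint.h>

        int main(void)
        {
                uint64_t rno = 0x123456789ULL;       /* hypothetical record number */
                uint32_t low = (uint32_t)rno;        /* as in mi_get_ref */
                uint16_t high = (uint16_t)(rno >> 32);

                assert(low == 0x23456789u && high == 0x0001u);
                /* Round-trip recovers the full 48-bit record number: */
                assert((((uint64_t)high << 32) | low) == rno);
                return 0;
        }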
93046 +/* globals from run.c */
93047 +bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
93048 +                     CLST *len, size_t *index);
93049 +void run_truncate(struct runs_tree *run, CLST vcn);
93050 +void run_truncate_head(struct runs_tree *run, CLST vcn);
93051 +void run_truncate_around(struct runs_tree *run, CLST vcn);
93052 +bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *Index);
93053 +bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
93054 +                  bool is_mft);
93055 +bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len);
93056 +bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
93057 +                  CLST *lcn, CLST *len);
93058 +bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn);
93060 +int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
93061 +            u32 run_buf_size, CLST *packed_vcns);
93062 +int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
93063 +              CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
93064 +              u32 run_buf_size);
93066 +#ifdef NTFS3_CHECK_FREE_CLST
93067 +int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
93068 +                 CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
93069 +                 u32 run_buf_size);
93070 +#else
93071 +#define run_unpack_ex run_unpack
93072 +#endif
93073 +int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn);
93075 +/* globals from super.c */
93076 +void *ntfs_set_shared(void *ptr, u32 bytes);
93077 +void *ntfs_put_shared(void *ptr);
93078 +void ntfs_unmap_meta(struct super_block *sb, CLST lcn, CLST len);
93079 +int ntfs_discard(struct ntfs_sb_info *sbi, CLST Lcn, CLST Len);
93081 +/* globals from bitmap.c*/
93082 +int __init ntfs3_init_bitmap(void);
93083 +void ntfs3_exit_bitmap(void);
93084 +void wnd_close(struct wnd_bitmap *wnd);
93085 +static inline size_t wnd_zeroes(const struct wnd_bitmap *wnd)
93087 +       return wnd->total_zeroes;
93089 +int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits);
93090 +int wnd_set_free(struct wnd_bitmap *wnd, size_t bit, size_t bits);
93091 +int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits);
93092 +bool wnd_is_free(struct wnd_bitmap *wnd, size_t bit, size_t bits);
93093 +bool wnd_is_used(struct wnd_bitmap *wnd, size_t bit, size_t bits);
93095 +/* Possible values for 'flags' of wnd_find() */
93096 +#define BITMAP_FIND_MARK_AS_USED 0x01
93097 +#define BITMAP_FIND_FULL 0x02
93098 +size_t wnd_find(struct wnd_bitmap *wnd, size_t to_alloc, size_t hint,
93099 +               size_t flags, size_t *allocated);
93100 +int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits);
93101 +void wnd_zone_set(struct wnd_bitmap *wnd, size_t Lcn, size_t Len);
93102 +int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range);
93104 +/* globals from upcase.c */
93105 +int ntfs_cmp_names(const __le16 *s1, size_t l1, const __le16 *s2, size_t l2,
93106 +                  const u16 *upcase, bool bothcase);
93107 +int ntfs_cmp_names_cpu(const struct cpu_str *uni1, const struct le_str *uni2,
93108 +                      const u16 *upcase, bool bothcase);
93110 +/* globals from xattr.c */
93111 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
93112 +struct posix_acl *ntfs_get_acl(struct inode *inode, int type);
93113 +int ntfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
93114 +                struct posix_acl *acl, int type);
93115 +int ntfs_init_acl(struct user_namespace *mnt_userns, struct inode *inode,
93116 +                 struct inode *dir);
93117 +#else
93118 +#define ntfs_get_acl NULL
93119 +#define ntfs_set_acl NULL
93120 +#endif
93122 +int ntfs_acl_chmod(struct user_namespace *mnt_userns, struct inode *inode);
93123 +int ntfs_permission(struct user_namespace *mnt_userns, struct inode *inode,
93124 +                   int mask);
93125 +ssize_t ntfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
93126 +extern const struct xattr_handler *ntfs_xattr_handlers[];
93128 +/* globals from lznt.c */
93129 +struct lznt *get_lznt_ctx(int level);
93130 +size_t compress_lznt(const void *uncompressed, size_t uncompressed_size,
93131 +                    void *compressed, size_t compressed_size,
93132 +                    struct lznt *ctx);
93133 +ssize_t decompress_lznt(const void *compressed, size_t compressed_size,
93134 +                       void *uncompressed, size_t uncompressed_size);
93136 +static inline bool is_ntfs3(struct ntfs_sb_info *sbi)
93138 +       return sbi->volume.major_ver >= 3;
93141 +/* (sb->s_flags & SB_ACTIVE) */
93142 +static inline bool is_mounted(struct ntfs_sb_info *sbi)
93144 +       return !!sbi->sb->s_root;
93147 +static inline bool ntfs_is_meta_file(struct ntfs_sb_info *sbi, CLST rno)
93149 +       return rno < MFT_REC_FREE || rno == sbi->objid_no ||
93150 +              rno == sbi->quota_no || rno == sbi->reparse_no ||
93151 +              rno == sbi->usn_jrnl_no;
93154 +static inline void ntfs_unmap_page(struct page *page)
93156 +       kunmap(page);
93157 +       put_page(page);
93160 +static inline struct page *ntfs_map_page(struct address_space *mapping,
93161 +                                        unsigned long index)
93163 +       struct page *page = read_mapping_page(mapping, index, NULL);
93165 +       if (!IS_ERR(page)) {
93166 +               kmap(page);
93167 +               if (!PageError(page))
93168 +                       return page;
93169 +               ntfs_unmap_page(page);
93170 +               return ERR_PTR(-EIO);
93171 +       }
93172 +       return page;
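ntfs_map_page pairs with ntfs_unmap_page: it reads the page through the page cache, kmaps it, and fails with -EIO if the read left the page in an error state. A hypothetical caller (kernel-context sketch, not from the patch; 'mapping' and 'idx' are assumed inputs) would use it like this:

        struct page *page = ntfs_map_page(mapping, idx);
        void *data;

        if (IS_ERR(page))
                return PTR_ERR(page);
        data = page_address(page); /* valid while the kmap is held */
        /* ... read metadata from 'data' ... */
        ntfs_unmap_page(page);     /* kunmap + put_page */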
93175 +static inline size_t wnd_zone_bit(const struct wnd_bitmap *wnd)
93177 +       return wnd->zone_bit;
93180 +static inline size_t wnd_zone_len(const struct wnd_bitmap *wnd)
93182 +       return wnd->zone_end - wnd->zone_bit;
93185 +static inline void run_init(struct runs_tree *run)
93187 +       run->runs = NULL;
93188 +       run->count = 0;
93189 +       run->allocated = 0;
93192 +static inline struct runs_tree *run_alloc(void)
93194 +       return ntfs_zalloc(sizeof(struct runs_tree));
93197 +static inline void run_close(struct runs_tree *run)
93199 +       ntfs_vfree(run->runs);
93200 +       memset(run, 0, sizeof(*run));
93203 +static inline void run_free(struct runs_tree *run)
93205 +       if (run) {
93206 +               ntfs_vfree(run->runs);
93207 +               ntfs_free(run);
93208 +       }
93211 +static inline bool run_is_empty(struct runs_tree *run)
93213 +       return !run->count;
93216 +/* NTFS uses quad aligned bitmaps */
93217 +static inline size_t bitmap_size(size_t bits)
93219 +       return QuadAlign((bits + 7) >> 3);
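bitmap_size converts a bit count to bytes and rounds up to an 8-byte ('quad') boundary. A standalone sketch, assuming QuadAlign(n) == (n + 7) & ~7:

        #include <assert.h>
        #include <stddef.h>

        static size_t quad_align(size_t n) { return (n + 7) & ~(size_t)7; }
        static size_t bitmap_size(size_t bits) { return quad_align((bits + 7) >> 3); }

        int main(void)
        {
                assert(bitmap_size(1) == 8);   /* 1 bit -> 1 byte -> 8 bytes */
                assert(bitmap_size(64) == 8);  /* 64 bits fit exactly        */
                assert(bitmap_size(65) == 16); /* 9 bytes round up to 16     */
                return 0;
        }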
93222 +#define _100ns2seconds 10000000
93223 +#define SecondsToStartOf1970 0x00000002B6109100
93225 +#define NTFS_TIME_GRAN 100
93228 + * kernel2nt
93229 + *
93230 + * converts in-memory kernel timestamp into nt time
93231 + */
93232 +static inline __le64 kernel2nt(const struct timespec64 *ts)
93234 +       // 10^7 units of 100 nanoseconds = one second
93235 +       return cpu_to_le64(_100ns2seconds *
93236 +                                  (ts->tv_sec + SecondsToStartOf1970) +
93237 +                          ts->tv_nsec / NTFS_TIME_GRAN);
93241 + * nt2kernel
93242 + *
93243 + * converts on-disk nt time into kernel timestamp
93244 + */
93245 +static inline void nt2kernel(const __le64 tm, struct timespec64 *ts)
93247 +       u64 t = le64_to_cpu(tm) - _100ns2seconds * SecondsToStartOf1970;
93249 +       // WARNING: do_div changes its first argument(!)
93250 +       ts->tv_nsec = do_div(t, _100ns2seconds) * 100;
93251 +       ts->tv_sec = t;
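NT timestamps count 100 ns units since 1601-01-01, so the two helpers above only shift by SecondsToStartOf1970 (0x00000002B6109100 == 11644473600 seconds between 1601 and the Unix epoch) and rescale. A standalone sketch of the arithmetic, without the endian conversions:

        #include <assert.h>
        #include <stdint.h>

        #define HUNDRED_NS_PER_SEC 10000000ULL
        #define SECS_1601_TO_1970  11644473600ULL /* == 0x2B6109100 */

        int main(void)
        {
                /* The Unix epoch, 1970-01-01T00:00:00Z, as an NT timestamp: */
                uint64_t nt = HUNDRED_NS_PER_SEC * (0 + SECS_1601_TO_1970) + 0 / 100;
                assert(nt == 116444736000000000ULL);

                /* And back, mirroring nt2kernel: */
                uint64_t t = nt - HUNDRED_NS_PER_SEC * SECS_1601_TO_1970;
                assert(t / HUNDRED_NS_PER_SEC == 0);          /* tv_sec  */
                assert((t % HUNDRED_NS_PER_SEC) * 100 == 0);  /* tv_nsec */
                return 0;
        }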
93254 +static inline struct ntfs_sb_info *ntfs_sb(struct super_block *sb)
93256 +       return sb->s_fs_info;
93259 +/* Align up on cluster boundary */
93260 +static inline u64 ntfs_up_cluster(const struct ntfs_sb_info *sbi, u64 size)
93262 +       return (size + sbi->cluster_mask) & sbi->cluster_mask_inv;
93265 +/* Align up on cluster boundary */
93266 +static inline u64 ntfs_up_block(const struct super_block *sb, u64 size)
93268 +       return (size + sb->s_blocksize - 1) & ~(u64)(sb->s_blocksize - 1);
93271 +static inline CLST bytes_to_cluster(const struct ntfs_sb_info *sbi, u64 size)
93273 +       return (size + sbi->cluster_mask) >> sbi->cluster_bits;
93276 +static inline u64 bytes_to_block(const struct super_block *sb, u64 size)
93278 +       return (size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
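The four helpers above are all the same round-up pattern with different granularity. A standalone sketch with a hypothetical 4K cluster (cluster_mask == 0xFFF, cluster_bits == 12):

        #include <assert.h>
        #include <stdint.h>

        int main(void)
        {
                uint64_t cluster_size = 4096, mask = cluster_size - 1;
                unsigned cluster_bits = 12;

                /* ntfs_up_cluster: 5000 bytes round up to two clusters */
                assert(((5000 + mask) & ~mask) == 8192);
                /* bytes_to_cluster: same rounding, expressed as a count */
                assert(((5000 + mask) >> cluster_bits) == 2);
                return 0;
        }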
93281 +static inline struct buffer_head *ntfs_bread(struct super_block *sb,
93282 +                                            sector_t block)
93284 +       struct buffer_head *bh = sb_bread(sb, block);
93286 +       if (bh)
93287 +               return bh;
93289 +       ntfs_err(sb, "failed to read volume at offset 0x%llx",
93290 +                (u64)block << sb->s_blocksize_bits);
93291 +       return NULL;
93294 +static inline bool is_power_of2(size_t v)
93296 +       return v && !(v & (v - 1));
93299 +static inline struct ntfs_inode *ntfs_i(struct inode *inode)
93301 +       return container_of(inode, struct ntfs_inode, vfs_inode);
93304 +static inline bool is_compressed(const struct ntfs_inode *ni)
93306 +       return (ni->std_fa & FILE_ATTRIBUTE_COMPRESSED) ||
93307 +              (ni->ni_flags & NI_FLAG_COMPRESSED_MASK);
93310 +static inline int ni_ext_compress_bits(const struct ntfs_inode *ni)
93312 +       return 0xb + (ni->ni_flags & NI_FLAG_COMPRESSED_MASK);
93315 +/* bits - 0xc, 0xd, 0xe, 0xf, 0x10 */
93316 +static inline void ni_set_ext_compress_bits(struct ntfs_inode *ni, u8 bits)
93318 +       ni->ni_flags |= (bits - 0xb) & NI_FLAG_COMPRESSED_MASK;
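Per the NI_FLAG_COMPRESSED_MASK comment earlier in this header, flag values 1-4 select XPRESS4K/8K/16K and LZX32K, so 1 << ni_ext_compress_bits(ni) recovers the compression chunk size. A standalone check of that mapping:

        #include <assert.h>

        int main(void)
        {
                assert((1u << (0xb + 1)) == 4096);  /* WOF_COMPRESSION_XPRESS4K  */
                assert((1u << (0xb + 2)) == 8192);  /* WOF_COMPRESSION_XPRESS8K  */
                assert((1u << (0xb + 3)) == 16384); /* WOF_COMPRESSION_XPRESS16K */
                assert((1u << (0xb + 4)) == 32768); /* WOF_COMPRESSION_LZX32K    */
                return 0;
        }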
93321 +static inline bool is_dedup(const struct ntfs_inode *ni)
93323 +       return ni->ni_flags & NI_FLAG_DEDUPLICATED;
93326 +static inline bool is_encrypted(const struct ntfs_inode *ni)
93328 +       return ni->std_fa & FILE_ATTRIBUTE_ENCRYPTED;
93331 +static inline bool is_sparsed(const struct ntfs_inode *ni)
93333 +       return ni->std_fa & FILE_ATTRIBUTE_SPARSE_FILE;
93336 +static inline int is_resident(struct ntfs_inode *ni)
93338 +       return ni->ni_flags & NI_FLAG_RESIDENT;
93341 +static inline void le16_sub_cpu(__le16 *var, u16 val)
93343 +       *var = cpu_to_le16(le16_to_cpu(*var) - val);
93346 +static inline void le32_sub_cpu(__le32 *var, u32 val)
93348 +       *var = cpu_to_le32(le32_to_cpu(*var) - val);
93351 +static inline void nb_put(struct ntfs_buffers *nb)
93353 +       u32 i, nbufs = nb->nbufs;
93355 +       if (!nbufs)
93356 +               return;
93358 +       for (i = 0; i < nbufs; i++)
93359 +               put_bh(nb->bh[i]);
93360 +       nb->nbufs = 0;
93363 +static inline void put_indx_node(struct indx_node *in)
93365 +       if (!in)
93366 +               return;
93368 +       ntfs_free(in->index);
93369 +       nb_put(&in->nb);
93370 +       ntfs_free(in);
93373 +static inline void mi_clear(struct mft_inode *mi)
93375 +       nb_put(&mi->nb);
93376 +       ntfs_free(mi->mrec);
93377 +       mi->mrec = NULL;
93380 +static inline void ni_lock(struct ntfs_inode *ni)
93382 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_NORMAL);
93385 +static inline void ni_lock_dir(struct ntfs_inode *ni)
93387 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_PARENT);
93390 +static inline void ni_unlock(struct ntfs_inode *ni)
93392 +       mutex_unlock(&ni->ni_lock);
93395 +static inline int ni_trylock(struct ntfs_inode *ni)
93397 +       return mutex_trylock(&ni->ni_lock);
93400 +static inline int attr_load_runs_attr(struct ntfs_inode *ni,
93401 +                                     struct ATTRIB *attr,
93402 +                                     struct runs_tree *run, CLST vcn)
93404 +       return attr_load_runs_vcn(ni, attr->type, attr_name(attr),
93405 +                                 attr->name_len, run, vcn);
93408 +static inline void le64_sub_cpu(__le64 *var, u64 val)
93410 +       *var = cpu_to_le64(le64_to_cpu(*var) - val);
93412 diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
93413 new file mode 100644
93414 index 000000000000..0d4a6251bddc
93415 --- /dev/null
93416 +++ b/fs/ntfs3/record.c
93417 @@ -0,0 +1,609 @@
93418 +// SPDX-License-Identifier: GPL-2.0
93420 + *
93421 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
93422 + *
93423 + */
93425 +#include <linux/blkdev.h>
93426 +#include <linux/buffer_head.h>
93427 +#include <linux/fs.h>
93428 +#include <linux/nls.h>
93430 +#include "debug.h"
93431 +#include "ntfs.h"
93432 +#include "ntfs_fs.h"
93434 +static inline int compare_attr(const struct ATTRIB *left, enum ATTR_TYPE type,
93435 +                              const __le16 *name, u8 name_len,
93436 +                              const u16 *upcase)
93438 +       /* First, compare the type codes: */
93439 +       int diff = le32_to_cpu(left->type) - le32_to_cpu(type);
93441 +       if (diff)
93442 +               return diff;
93444 +       /*
93445 +        * They have the same type code, so we have to compare the names.
93446 +        */
93447 +       return ntfs_cmp_names(attr_name(left), left->name_len, name, name_len,
93448 +                             upcase, true);
93452 + * mi_new_attt_id
93453 + *
93454 + * returns unused attribute id that is less than mrec->next_attr_id
93455 + */
93456 +static __le16 mi_new_attt_id(struct mft_inode *mi)
93458 +       u16 free_id, max_id, t16;
93459 +       struct MFT_REC *rec = mi->mrec;
93460 +       struct ATTRIB *attr;
93461 +       __le16 id;
93463 +       id = rec->next_attr_id;
93464 +       free_id = le16_to_cpu(id);
93465 +       if (free_id < 0x7FFF) {
93466 +               rec->next_attr_id = cpu_to_le16(free_id + 1);
93467 +               return id;
93468 +       }
93470 +       /* One record can store up to 1024/24 ~= 42 attributes */
93471 +       free_id = 0;
93472 +       max_id = 0;
93474 +       attr = NULL;
93476 +       for (;;) {
93477 +               attr = mi_enum_attr(mi, attr);
93478 +               if (!attr) {
93479 +                       rec->next_attr_id = cpu_to_le16(max_id + 1);
93480 +                       mi->dirty = true;
93481 +                       return cpu_to_le16(free_id);
93482 +               }
93484 +               t16 = le16_to_cpu(attr->id);
93485 +               if (t16 == free_id) {
93486 +                       free_id += 1;
93487 +                       attr = NULL;
93488 +               } else if (max_id < t16)
93489 +                       max_id = t16;
93490 +       }
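When next_attr_id saturates, the loop above rescans the record: free_id is bumped and the enumeration restarts on every collision, which terminates quickly because one record holds at most ~42 attributes. A standalone sketch of the same scan over a plain array:

        #include <assert.h>
        #include <stdint.h>

        /* Find the smallest id not present in 'used', restarting the walk
         * on every collision, like the attr = NULL reset above. */
        static uint16_t smallest_free_id(const uint16_t *used, int n)
        {
                uint16_t free_id = 0;
                int i = 0;

                while (i < n) {
                        if (used[i] == free_id) {
                                free_id++;
                                i = 0;
                        } else {
                                i++;
                        }
                }
                return free_id;
        }

        int main(void)
        {
                uint16_t used[] = { 0, 1, 3, 4 };
                assert(smallest_free_id(used, 4) == 2);
                return 0;
        }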
93493 +int mi_get(struct ntfs_sb_info *sbi, CLST rno, struct mft_inode **mi)
93495 +       int err;
93496 +       struct mft_inode *m = ntfs_zalloc(sizeof(struct mft_inode));
93498 +       if (!m)
93499 +               return -ENOMEM;
93501 +       err = mi_init(m, sbi, rno);
93502 +       if (err) {
93503 +               ntfs_free(m);
93504 +               return err;
93505 +       }
93507 +       err = mi_read(m, false);
93508 +       if (err) {
93509 +               mi_put(m);
93510 +               return err;
93511 +       }
93513 +       *mi = m;
93514 +       return 0;
93517 +void mi_put(struct mft_inode *mi)
93519 +       mi_clear(mi);
93520 +       ntfs_free(mi);
93523 +int mi_init(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno)
93525 +       mi->sbi = sbi;
93526 +       mi->rno = rno;
93527 +       mi->mrec = ntfs_malloc(sbi->record_size);
93528 +       if (!mi->mrec)
93529 +               return -ENOMEM;
93531 +       return 0;
93535 + * mi_read
93536 + *
93537 + * reads MFT data
93538 + */
93539 +int mi_read(struct mft_inode *mi, bool is_mft)
93541 +       int err;
93542 +       struct MFT_REC *rec = mi->mrec;
93543 +       struct ntfs_sb_info *sbi = mi->sbi;
93544 +       u32 bpr = sbi->record_size;
93545 +       u64 vbo = (u64)mi->rno << sbi->record_bits;
93546 +       struct ntfs_inode *mft_ni = sbi->mft.ni;
93547 +       struct runs_tree *run = mft_ni ? &mft_ni->file.run : NULL;
93548 +       struct rw_semaphore *rw_lock = NULL;
93550 +       if (is_mounted(sbi)) {
93551 +               if (!is_mft) {
93552 +                       rw_lock = &mft_ni->file.run_lock;
93553 +                       down_read(rw_lock);
93554 +               }
93555 +       }
93557 +       err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
93558 +       if (rw_lock)
93559 +               up_read(rw_lock);
93560 +       if (!err)
93561 +               goto ok;
93563 +       if (err == -E_NTFS_FIXUP) {
93564 +               mi->dirty = true;
93565 +               goto ok;
93566 +       }
93568 +       if (err != -ENOENT)
93569 +               goto out;
93571 +       if (rw_lock) {
93572 +               ni_lock(mft_ni);
93573 +               down_write(rw_lock);
93574 +       }
93575 +       err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, &mft_ni->file.run,
93576 +                                vbo >> sbi->cluster_bits);
93577 +       if (rw_lock) {
93578 +               up_write(rw_lock);
93579 +               ni_unlock(mft_ni);
93580 +       }
93581 +       if (err)
93582 +               goto out;
93584 +       if (rw_lock)
93585 +               down_read(rw_lock);
93586 +       err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
93587 +       if (rw_lock)
93588 +               up_read(rw_lock);
93590 +       if (err == -E_NTFS_FIXUP) {
93591 +               mi->dirty = true;
93592 +               goto ok;
93593 +       }
93594 +       if (err)
93595 +               goto out;
93597 +ok:
93598 +       /* check field 'total' only here */
93599 +       if (le32_to_cpu(rec->total) != bpr) {
93600 +               err = -EINVAL;
93601 +               goto out;
93602 +       }
93604 +       return 0;
93606 +out:
93607 +       return err;
93610 +struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
93612 +       const struct MFT_REC *rec = mi->mrec;
93613 +       u32 used = le32_to_cpu(rec->used);
93614 +       u32 t32, off, asize;
93615 +       u16 t16;
93617 +       if (!attr) {
93618 +               u32 total = le32_to_cpu(rec->total);
93620 +               off = le16_to_cpu(rec->attr_off);
93622 +               if (used > total)
93623 +                       return NULL;
93625 +               if (off >= used || off < MFTRECORD_FIXUP_OFFSET_1 ||
93626 +                   !IsDwordAligned(off)) {
93627 +                       return NULL;
93628 +               }
93630 +               /* Skip records that are not in use */
93631 +               if (!is_rec_inuse(rec))
93632 +                       return NULL;
93634 +               attr = Add2Ptr(rec, off);
93635 +       } else {
93636 +               /* Check if input attr inside record */
93637 +               off = PtrOffset(rec, attr);
93638 +               if (off >= used)
93639 +                       return NULL;
93641 +               asize = le32_to_cpu(attr->size);
93642 +               if (asize < SIZEOF_RESIDENT) {
93643 +                       /* Impossible, because we should not return such an attribute */
93644 +                       return NULL;
93645 +               }
93647 +               attr = Add2Ptr(attr, asize);
93648 +               off += asize;
93649 +       }
93651 +       asize = le32_to_cpu(attr->size);
93653 +       /* Can we use the first field (attr->type) */
93654 +       if (off + 8 > used) {
93655 +               static_assert(QuadAlign(sizeof(enum ATTR_TYPE)) == 8);
93656 +               return NULL;
93657 +       }
93659 +       if (attr->type == ATTR_END) {
93660 +               /* end of enumeration */
93661 +               return NULL;
93662 +       }
93664 +       /* 0x100 is the last known attribute type for now. */
93665 +       t32 = le32_to_cpu(attr->type);
93666 +       if ((t32 & 0xf) || (t32 > 0x100))
93667 +               return NULL;
93669 +       /* Check boundary */
93670 +       if (off + asize > used)
93671 +               return NULL;
93673 +       /* Check size of attribute */
93674 +       if (!attr->non_res) {
93675 +               if (asize < SIZEOF_RESIDENT)
93676 +                       return NULL;
93678 +               t16 = le16_to_cpu(attr->res.data_off);
93680 +               if (t16 > asize)
93681 +                       return NULL;
93683 +               t32 = le32_to_cpu(attr->res.data_size);
93684 +               if (t16 + t32 > asize)
93685 +                       return NULL;
93687 +               return attr;
93688 +       }
93690 +       /* Check some nonresident fields */
93691 +       if (attr->name_len &&
93692 +           le16_to_cpu(attr->name_off) + sizeof(short) * attr->name_len >
93693 +                   le16_to_cpu(attr->nres.run_off)) {
93694 +               return NULL;
93695 +       }
93697 +       if (attr->nres.svcn || !is_attr_ext(attr)) {
93698 +               if (asize + 8 < SIZEOF_NONRESIDENT)
93699 +                       return NULL;
93701 +               if (attr->nres.c_unit)
93702 +                       return NULL;
93703 +       } else if (asize + 8 < SIZEOF_NONRESIDENT_EX)
93704 +               return NULL;
93706 +       return attr;
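+/*
+ * Editor's sketch, not part of the original patch: mi_enum_attr() is a
+ * cursor. Passing NULL starts the walk; each call returns the next
+ * validated attribute, or NULL at the end. A minimal, hypothetical
+ * debugging helper built on it could look like this:
+ */
+static void sketch_dump_attrs(struct mft_inode *mi)
+{
+       struct ATTRIB *attr = NULL;
+       while ((attr = mi_enum_attr(mi, attr))) {
+               /* 'attr' points inside mi->mrec and was bounds-checked above. */
+               pr_debug("attr: type %x, size %u\n", le32_to_cpu(attr->type),
+                        le32_to_cpu(attr->size));
+       }
+}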
93710 + * mi_find_attr
93711 + *
93712 + * finds the attribute by type, name and id
93713 + */
93714 +struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr,
93715 +                           enum ATTR_TYPE type, const __le16 *name,
93716 +                           size_t name_len, const __le16 *id)
93718 +       u32 type_in = le32_to_cpu(type);
93719 +       u32 atype;
93721 +next_attr:
93722 +       attr = mi_enum_attr(mi, attr);
93723 +       if (!attr)
93724 +               return NULL;
93726 +       atype = le32_to_cpu(attr->type);
93727 +       if (atype > type_in)
93728 +               return NULL;
93730 +       if (atype < type_in)
93731 +               goto next_attr;
93733 +       if (attr->name_len != name_len)
93734 +               goto next_attr;
93736 +       if (name_len && memcmp(attr_name(attr), name, name_len * sizeof(short)))
93737 +               goto next_attr;
93739 +       if (id && *id != attr->id)
93740 +               goto next_attr;
93742 +       return attr;
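+/*
+ * Editor's sketch (hypothetical helper): the most common lookup is the
+ * unnamed DATA attribute. A NULL 'attr' starts from the first attribute,
+ * a NULL name with name_len 0 selects the unnamed stream, and a NULL id
+ * matches any attribute id.
+ */
+static struct ATTRIB *sketch_find_unnamed_data(struct mft_inode *mi)
+{
+       return mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, NULL);
+}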
93745 +int mi_write(struct mft_inode *mi, int wait)
93747 +       struct MFT_REC *rec;
93748 +       int err;
93749 +       struct ntfs_sb_info *sbi;
93751 +       if (!mi->dirty)
93752 +               return 0;
93754 +       sbi = mi->sbi;
93755 +       rec = mi->mrec;
93757 +       err = ntfs_write_bh(sbi, &rec->rhdr, &mi->nb, wait);
93758 +       if (err)
93759 +               return err;
93761 +       if (mi->rno < sbi->mft.recs_mirr)
93762 +               sbi->flags |= NTFS_FLAGS_MFTMIRR;
93764 +       mi->dirty = false;
93766 +       return 0;
93769 +int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
93770 +                 __le16 flags, bool is_mft)
93772 +       int err;
93773 +       u16 seq = 1;
93774 +       struct MFT_REC *rec;
93775 +       u64 vbo = (u64)rno << sbi->record_bits;
93777 +       err = mi_init(mi, sbi, rno);
93778 +       if (err)
93779 +               return err;
93781 +       rec = mi->mrec;
93783 +       if (rno == MFT_REC_MFT) {
93784 +               ;
93785 +       } else if (rno < MFT_REC_FREE) {
93786 +               seq = rno;
93787 +       } else if (rno >= sbi->mft.used) {
93788 +               ;
93789 +       } else if (mi_read(mi, is_mft)) {
93790 +               ;
93791 +       } else if (rec->rhdr.sign == NTFS_FILE_SIGNATURE) {
93792 +               /* Record is reused. Update its sequence number */
93793 +               seq = le16_to_cpu(rec->seq) + 1;
93794 +               if (!seq)
93795 +                       seq = 1;
93796 +       }
93798 +       memcpy(rec, sbi->new_rec, sbi->record_size);
93800 +       rec->seq = cpu_to_le16(seq);
93801 +       rec->flags = RECORD_FLAG_IN_USE | flags;
93803 +       mi->dirty = true;
93805 +       if (!mi->nb.nbufs) {
93806 +               struct ntfs_inode *ni = sbi->mft.ni;
93807 +               bool lock = false;
93809 +               if (is_mounted(sbi) && !is_mft) {
93810 +                       down_read(&ni->file.run_lock);
93811 +                       lock = true;
93812 +               }
93814 +               err = ntfs_get_bh(sbi, &ni->file.run, vbo, sbi->record_size,
93815 +                                 &mi->nb);
93816 +               if (lock)
93817 +                       up_read(&ni->file.run_lock);
93818 +       }
93820 +       return err;
93824 + * mi_mark_free
93825 + *
93826 + * marks the record as unused and marks it as free in the bitmap
93827 + */
93828 +void mi_mark_free(struct mft_inode *mi)
93830 +       CLST rno = mi->rno;
93831 +       struct ntfs_sb_info *sbi = mi->sbi;
93833 +       if (rno >= MFT_REC_RESERVED && rno < MFT_REC_FREE) {
93834 +               ntfs_clear_mft_tail(sbi, rno, rno + 1);
93835 +               mi->dirty = false;
93836 +               return;
93837 +       }
93839 +       if (mi->mrec) {
93840 +               clear_rec_inuse(mi->mrec);
93841 +               mi->dirty = true;
93842 +               mi_write(mi, 0);
93843 +       }
93844 +       ntfs_mark_rec_free(sbi, rno);
93848 + * mi_insert_attr
93849 + *
93850 + * reserves space for a new attribute
93851 + * returns a not fully constructed attribute, or NULL if it cannot be created
93852 + */
93853 +struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
93854 +                             const __le16 *name, u8 name_len, u32 asize,
93855 +                             u16 name_off)
93857 +       size_t tail;
93858 +       struct ATTRIB *attr;
93859 +       __le16 id;
93860 +       struct MFT_REC *rec = mi->mrec;
93861 +       struct ntfs_sb_info *sbi = mi->sbi;
93862 +       u32 used = le32_to_cpu(rec->used);
93863 +       const u16 *upcase = sbi->upcase;
93864 +       int diff;
93866 +       /* Can this attribute fit into the record? */
93867 +       if (used + asize > mi->sbi->record_size)
93868 +               return NULL;
93870 +       /*
93871 +        * Scan through the list of attributes to find the point
93872 +        * at which we should insert it.
93873 +        */
93874 +       attr = NULL;
93875 +       while ((attr = mi_enum_attr(mi, attr))) {
93876 +               diff = compare_attr(attr, type, name, name_len, upcase);
93877 +               if (diff > 0)
93878 +                       break;
93879 +               if (diff < 0)
93880 +                       continue;
93882 +               if (!is_attr_indexed(attr))
93883 +                       return NULL;
93884 +               break;
93885 +       }
93887 +       if (!attr) {
93888 +               tail = 8; /* the 8-byte end-of-attributes marker is moved along with the tail */
93889 +               attr = Add2Ptr(rec, used - 8);
93890 +       } else {
93891 +               tail = used - PtrOffset(rec, attr);
93892 +       }
93894 +       id = mi_new_attt_id(mi);
93896 +       memmove(Add2Ptr(attr, asize), attr, tail);
93897 +       memset(attr, 0, asize);
93899 +       attr->type = type;
93900 +       attr->size = cpu_to_le32(asize);
93901 +       attr->name_len = name_len;
93902 +       attr->name_off = cpu_to_le16(name_off);
93903 +       attr->id = id;
93905 +       memmove(Add2Ptr(attr, name_off), name, name_len * sizeof(short));
93906 +       rec->used = cpu_to_le32(used + asize);
93908 +       mi->dirty = true;
93910 +       return attr;
93914 + * mi_remove_attr
93915 + *
93916 + * removes the attribute from the record
93917 + * NOTE: on return, the source attr points to the next attribute
93918 + */
93919 +bool mi_remove_attr(struct mft_inode *mi, struct ATTRIB *attr)
93921 +       struct MFT_REC *rec = mi->mrec;
93922 +       u32 aoff = PtrOffset(rec, attr);
93923 +       u32 used = le32_to_cpu(rec->used);
93924 +       u32 asize = le32_to_cpu(attr->size);
93926 +       if (aoff + asize > used)
93927 +               return false;
93929 +       used -= asize;
93930 +       memmove(attr, Add2Ptr(attr, asize), used - aoff);
93931 +       rec->used = cpu_to_le32(used);
93932 +       mi->dirty = true;
93934 +       return true;
93937 +/* bytes = "new attribute size" - "old attribute size" */
93938 +bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes)
93940 +       struct MFT_REC *rec = mi->mrec;
93941 +       u32 aoff = PtrOffset(rec, attr);
93942 +       u32 total, used = le32_to_cpu(rec->used);
93943 +       u32 nsize, asize = le32_to_cpu(attr->size);
93944 +       u32 rsize = le32_to_cpu(attr->res.data_size);
93945 +       int tail = (int)(used - aoff - asize);
93946 +       int dsize;
93947 +       char *next;
93949 +       if (tail < 0 || aoff >= used)
93950 +               return false;
93952 +       if (!bytes)
93953 +               return true;
93955 +       total = le32_to_cpu(rec->total);
93956 +       next = Add2Ptr(attr, asize);
93958 +       if (bytes > 0) {
93959 +               dsize = QuadAlign(bytes);
93960 +               if (used + dsize > total)
93961 +                       return false;
93962 +               nsize = asize + dsize;
93963 +               // move tail
93964 +               memmove(next + dsize, next, tail);
93965 +               memset(next, 0, dsize);
93966 +               used += dsize;
93967 +               rsize += dsize;
93968 +       } else {
93969 +               dsize = QuadAlign(-bytes);
93970 +               if (dsize > asize)
93971 +                       return false;
93972 +               nsize = asize - dsize;
93973 +               memmove(next - dsize, next, tail);
93974 +               used -= dsize;
93975 +               rsize -= dsize;
93976 +       }
93978 +       rec->used = cpu_to_le32(used);
93979 +       attr->size = cpu_to_le32(nsize);
93980 +       if (!attr->non_res)
93981 +               attr->res.data_size = cpu_to_le32(rsize);
93982 +       mi->dirty = true;
93984 +       return true;
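+/*
+ * Editor's worked example for mi_resize_attr() above, with assumed
+ * numbers: growing a resident attribute by bytes == 5 reserves
+ * dsize = QuadAlign(5) == 8 bytes, so 'used', attr->size and
+ * res.data_size all grow by 8, and the newly opened gap is zeroed
+ * before the caller sees it.
+ */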
93987 +int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
93988 +                struct runs_tree *run, CLST len)
93990 +       int err = 0;
93991 +       struct ntfs_sb_info *sbi = mi->sbi;
93992 +       u32 new_run_size;
93993 +       CLST plen;
93994 +       struct MFT_REC *rec = mi->mrec;
93995 +       CLST svcn = le64_to_cpu(attr->nres.svcn);
93996 +       u32 used = le32_to_cpu(rec->used);
93997 +       u32 aoff = PtrOffset(rec, attr);
93998 +       u32 asize = le32_to_cpu(attr->size);
93999 +       char *next = Add2Ptr(attr, asize);
94000 +       u16 run_off = le16_to_cpu(attr->nres.run_off);
94001 +       u32 run_size = asize - run_off;
94002 +       u32 tail = used - aoff - asize;
94003 +       u32 dsize = sbi->record_size - used;
94005 +       /* Open the maximum possible gap in the current record. */
94006 +       memmove(next + dsize, next, tail);
94008 +       /* Pack as much as possible */
94009 +       err = run_pack(run, svcn, len, Add2Ptr(attr, run_off), run_size + dsize,
94010 +                      &plen);
94011 +       if (err < 0) {
94012 +               memmove(next, next + dsize, tail);
94013 +               return err;
94014 +       }
94016 +       new_run_size = QuadAlign(err);
94018 +       memmove(next + new_run_size - run_size, next + dsize, tail);
94020 +       attr->size = cpu_to_le32(asize + new_run_size - run_size);
94021 +       attr->nres.evcn = cpu_to_le64(svcn + plen - 1);
94022 +       rec->used = cpu_to_le32(used + new_run_size - run_size);
94023 +       mi->dirty = true;
94025 +       return 0;
94027 diff --git a/fs/ntfs3/run.c b/fs/ntfs3/run.c
94028 new file mode 100644
94029 index 000000000000..5cdf6efe67e0
94030 --- /dev/null
94031 +++ b/fs/ntfs3/run.c
94032 @@ -0,0 +1,1111 @@
94033 +// SPDX-License-Identifier: GPL-2.0
94035 + *
94036 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
94037 + *
94038 + * TODO: try to use extents tree (instead of array)
94039 + */
94041 +#include <linux/blkdev.h>
94042 +#include <linux/buffer_head.h>
94043 +#include <linux/fs.h>
94044 +#include <linux/nls.h>
94046 +#include "debug.h"
94047 +#include "ntfs.h"
94048 +#include "ntfs_fs.h"
94050 +/* runs_tree is a contiguous memory area. Try to avoid large sizes. */
94051 +#define NTFS3_RUN_MAX_BYTES 0x10000
94053 +struct ntfs_run {
94054 +       CLST vcn; /* virtual cluster number */
94055 +       CLST len; /* length in clusters */
94056 +       CLST lcn; /* logical cluster number */
94060 + * run_lookup
94061 + *
94062 + * Looks up the index of the first MCB entry that is <= vcn.
94063 + * On success it returns a non-zero value and sets the
94064 + * 'index' parameter to the index of the entry found.
94065 + * If the entry is missing from the list, 'index' is set to
94066 + * the insertion position for the entry in question.
94067 + */
94068 +bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
94070 +       size_t min_idx, max_idx, mid_idx;
94071 +       struct ntfs_run *r;
94073 +       if (!run->count) {
94074 +               *index = 0;
94075 +               return false;
94076 +       }
94078 +       min_idx = 0;
94079 +       max_idx = run->count - 1;
94081 +       /* Check the boundary cases first, since they cover the most frequent requests. */
94082 +       r = run->runs;
94083 +       if (vcn < r->vcn) {
94084 +               *index = 0;
94085 +               return false;
94086 +       }
94088 +       if (vcn < r->vcn + r->len) {
94089 +               *index = 0;
94090 +               return true;
94091 +       }
94093 +       r += max_idx;
94094 +       if (vcn >= r->vcn + r->len) {
94095 +               *index = run->count;
94096 +               return false;
94097 +       }
94099 +       if (vcn >= r->vcn) {
94100 +               *index = max_idx;
94101 +               return true;
94102 +       }
94104 +       do {
94105 +               mid_idx = min_idx + ((max_idx - min_idx) >> 1);
94106 +               r = run->runs + mid_idx;
94108 +               if (vcn < r->vcn) {
94109 +                       max_idx = mid_idx - 1;
94110 +                       if (!mid_idx)
94111 +                               break;
94112 +               } else if (vcn >= r->vcn + r->len) {
94113 +                       min_idx = mid_idx + 1;
94114 +               } else {
94115 +                       *index = mid_idx;
94116 +                       return true;
94117 +               }
94118 +       } while (min_idx <= max_idx);
94120 +       *index = max_idx + 1;
94121 +       return false;
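+/*
+ * Editor's illustration of the semantics above, with assumed runs: given
+ * the two runs [vcn=0, len=4] and [vcn=8, len=4], run_lookup(run, 2, &i)
+ * returns true with i == 0, while run_lookup(run, 5, &i) returns false
+ * with i == 1, the position where a run starting at vcn 5 would be
+ * inserted.
+ */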
94125 + * run_consolidate
94126 + *
94127 + * consolidate runs starting from a given one.
94128 + */
94129 +static void run_consolidate(struct runs_tree *run, size_t index)
94131 +       size_t i;
94132 +       struct ntfs_run *r = run->runs + index;
94134 +       while (index + 1 < run->count) {
94135 +               /*
94136 +                * Merge the current run with the next one
94137 +                * if the start of the next run lies inside the one being tested.
94138 +                */
94139 +               struct ntfs_run *n = r + 1;
94140 +               CLST end = r->vcn + r->len;
94141 +               CLST dl;
94143 +               /* Stop if runs are not aligned one to another. */
94144 +               if (n->vcn > end)
94145 +                       break;
94147 +               dl = end - n->vcn;
94149 +               /*
94150 +                * If the range at 'index' overlaps with the next one,
94151 +                * then either adjust its start position
94152 +                * or (if it matches completely) just remove it from the list.
94153 +                */
94154 +               if (dl > 0) {
94155 +                       if (n->len <= dl)
94156 +                               goto remove_next_range;
94158 +                       n->len -= dl;
94159 +                       n->vcn += dl;
94160 +                       if (n->lcn != SPARSE_LCN)
94161 +                               n->lcn += dl;
94162 +                       dl = 0;
94163 +               }
94165 +               /*
94166 +                * Skip this pair if the sparse mode of the current
94167 +                * and next runs does not match.
94168 +                */
94169 +               if ((n->lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) {
94170 +                       index += 1;
94171 +                       r = n;
94172 +                       continue;
94173 +               }
94175 +               /*
94176 +                * Stop if the first volume block (lcn)
94177 +                * of the next run does not directly follow
94178 +                * the last volume block of the current run.
94179 +                */
94180 +               if (n->lcn != SPARSE_LCN && n->lcn != r->lcn + r->len)
94181 +                       break;
94183 +               /*
94184 +                * Next and current are siblings.
94185 +                * Eat/join.
94186 +                */
94187 +               r->len += n->len - dl;
94189 +remove_next_range:
94190 +               i = run->count - (index + 1);
94191 +               if (i > 1)
94192 +                       memmove(n, n + 1, sizeof(*n) * (i - 1));
94194 +               run->count -= 1;
94195 +       }
94198 +/* Returns true if the range [svcn, evcn] is fully mapped. */
94199 +bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn)
94201 +       size_t i;
94202 +       const struct ntfs_run *r, *end;
94203 +       CLST next_vcn;
94205 +       if (!run_lookup(run, svcn, &i))
94206 +               return false;
94208 +       end = run->runs + run->count;
94209 +       r = run->runs + i;
94211 +       for (;;) {
94212 +               next_vcn = r->vcn + r->len;
94213 +               if (next_vcn > evcn)
94214 +                       return true;
94216 +               if (++r >= end)
94217 +                       return false;
94219 +               if (r->vcn != next_vcn)
94220 +                       return false;
94221 +       }
94224 +bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
94225 +                     CLST *len, size_t *index)
94227 +       size_t idx;
94228 +       CLST gap;
94229 +       struct ntfs_run *r;
94231 +       /* Fail immediately if the run was not touched yet. */
94232 +       if (!run->runs)
94233 +               return false;
94235 +       if (!run_lookup(run, vcn, &idx))
94236 +               return false;
94238 +       r = run->runs + idx;
94240 +       if (vcn >= r->vcn + r->len)
94241 +               return false;
94243 +       gap = vcn - r->vcn;
94244 +       if (r->len <= gap)
94245 +               return false;
94247 +       *lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + gap);
94249 +       if (len)
94250 +               *len = r->len - gap;
94251 +       if (index)
94252 +               *index = idx;
94254 +       return true;
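+/*
+ * Editor's sketch (hypothetical helper): translating a virtual byte
+ * offset (vbo) into a logical byte offset (lbo) with run_lookup_entry().
+ * 'cluster_bits' is assumed to be log2 of the cluster size, as used
+ * elsewhere in the driver.
+ */
+static bool sketch_vbo_to_lbo(const struct runs_tree *run, u8 cluster_bits,
+                             u64 vbo, u64 *lbo)
+{
+       CLST lcn, len;
+       if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
+               return false;
+       if (lcn == SPARSE_LCN)
+               return false; /* A hole: no physical cluster is assigned. */
+       *lbo = ((u64)lcn << cluster_bits) + (vbo & ((1ull << cluster_bits) - 1));
+       return true;
+}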
94258 + * run_truncate_head
94259 + *
94260 + * decommit the range before vcn
94261 + */
94262 +void run_truncate_head(struct runs_tree *run, CLST vcn)
94264 +       size_t index;
94265 +       struct ntfs_run *r;
94267 +       if (run_lookup(run, vcn, &index)) {
94268 +               r = run->runs + index;
94270 +               if (vcn > r->vcn) {
94271 +                       CLST dlen = vcn - r->vcn;
94273 +                       r->vcn = vcn;
94274 +                       r->len -= dlen;
94275 +                       if (r->lcn != SPARSE_LCN)
94276 +                               r->lcn += dlen;
94277 +               }
94279 +               if (!index)
94280 +                       return;
94281 +       }
94282 +       r = run->runs;
94283 +       memmove(r, r + index, sizeof(*r) * (run->count - index));
94285 +       run->count -= index;
94287 +       if (!run->count) {
94288 +               ntfs_vfree(run->runs);
94289 +               run->runs = NULL;
94290 +               run->allocated = 0;
94291 +       }
94295 + * run_truncate
94296 + *
94297 + * decommit the range after vcn
94298 + */
94299 +void run_truncate(struct runs_tree *run, CLST vcn)
94301 +       size_t index;
94303 +       /*
94304 +        * If we hit a range, then
94305 +        * it has to be truncated.
94306 +        * If the range being truncated becomes empty,
94307 +        * then it is removed entirely.
94308 +        */
94309 +       if (run_lookup(run, vcn, &index)) {
94310 +               struct ntfs_run *r = run->runs + index;
94312 +               r->len = vcn - r->vcn;
94314 +               if (r->len > 0)
94315 +                       index += 1;
94316 +       }
94318 +       /*
94319 +        * At this point 'index' is set to the first
94320 +        * position that should be thrown away (including 'index' itself).
94321 +        * Simple: just set the new limit.
94322 +        */
94323 +       run->count = index;
94325 +       /* Do not reallocate array 'runs'. Only free if possible */
94326 +       if (!index) {
94327 +               ntfs_vfree(run->runs);
94328 +               run->runs = NULL;
94329 +               run->allocated = 0;
94330 +       }
94333 +/* Trim the head and tail if necessary. */
94334 +void run_truncate_around(struct runs_tree *run, CLST vcn)
94336 +       run_truncate_head(run, vcn);
94338 +       if (run->count >= NTFS3_RUN_MAX_BYTES / sizeof(struct ntfs_run) / 2)
94339 +               run_truncate(run, (run->runs + (run->count >> 1))->vcn);
94343 + * run_add_entry
94344 + *
94345 + * sets the location to a known state.
94346 + * The run being added may overlap with an existing location.
94347 + * returns false if out of memory
94348 + */
94349 +bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
94350 +                  bool is_mft)
94352 +       size_t used, index;
94353 +       struct ntfs_run *r;
94354 +       bool inrange;
94355 +       CLST tail_vcn = 0, tail_len = 0, tail_lcn = 0;
94356 +       bool should_add_tail = false;
94358 +       /*
94359 +        * Look up the insertion point.
94360 +        *
94361 +        * Execute a binary search for the entry containing
94362 +        * the start position in question.
94363 +        */
94364 +       inrange = run_lookup(run, vcn, &index);
94366 +       /*
94367 +        * A shortcut here is the case where the
94368 +        * range was not found, but the one being added
94369 +        * continues the previous run.
94370 +        * In this case we can directly use the
94371 +        * existing range as the start point.
94372 +        */
94373 +       if (!inrange && index > 0) {
94374 +               struct ntfs_run *t = run->runs + index - 1;
94376 +               if (t->vcn + t->len == vcn &&
94377 +                   (t->lcn == SPARSE_LCN) == (lcn == SPARSE_LCN) &&
94378 +                   (lcn == SPARSE_LCN || lcn == t->lcn + t->len)) {
94379 +                       inrange = true;
94380 +                       index -= 1;
94381 +               }
94382 +       }
94384 +       /*
94385 +        * At this point 'index' either points to the range
94386 +        * containing the start position or to the insertion position
94387 +        * for a new range.
94388 +        * So first check whether the range being probed is already here.
94389 +        */
94390 +       if (!inrange) {
94391 +requires_new_range:
94392 +               /*
94393 +                * Range was not found.
94394 +                * Insert at position 'index'
94395 +                */
94396 +               used = run->count * sizeof(struct ntfs_run);
94398 +               /*
94399 +                * Check the allocated space.
94400 +                * If it is not enough for one more entry,
94401 +                * then it will be reallocated.
94402 +                */
94403 +               if (run->allocated < used + sizeof(struct ntfs_run)) {
94404 +                       size_t bytes;
94405 +                       struct ntfs_run *new_ptr;
94407 +                       /* Use a power of 2 for 'bytes'. */
94408 +                       if (!used) {
94409 +                               bytes = 64;
94410 +                       } else if (used <= 16 * PAGE_SIZE) {
94411 +                               if (is_power_of2(run->allocated))
94412 +                                       bytes = run->allocated << 1;
94413 +                               else
94414 +                                       bytes = (size_t)1
94415 +                                               << (2 + blksize_bits(used));
94416 +                       } else {
94417 +                               bytes = run->allocated + (16 * PAGE_SIZE);
94418 +                       }
94420 +                       WARN_ON(!is_mft && bytes > NTFS3_RUN_MAX_BYTES);
94422 +                       new_ptr = ntfs_vmalloc(bytes);
94424 +                       if (!new_ptr)
94425 +                               return false;
94427 +                       r = new_ptr + index;
94428 +                       memcpy(new_ptr, run->runs,
94429 +                              index * sizeof(struct ntfs_run));
94430 +                       memcpy(r + 1, run->runs + index,
94431 +                              sizeof(struct ntfs_run) * (run->count - index));
94433 +                       ntfs_vfree(run->runs);
94434 +                       run->runs = new_ptr;
94435 +                       run->allocated = bytes;
94437 +               } else {
94438 +                       size_t i = run->count - index;
94440 +                       r = run->runs + index;
94442 +                       /* memmove appears to be a bottleneck here... */
94443 +                       if (i > 0)
94444 +                               memmove(r + 1, r, sizeof(struct ntfs_run) * i);
94445 +               }
94447 +               r->vcn = vcn;
94448 +               r->lcn = lcn;
94449 +               r->len = len;
94450 +               run->count += 1;
94451 +       } else {
94452 +               r = run->runs + index;
94454 +               /*
94455 +                * If one of the ranges was not allocated,
94456 +                * then we have to split the location we just matched
94457 +                * and insert the current one.
94458 +                * In the common case this requires the tail to be
94459 +                * reinserted by a recursive call.
94460 +                */
94461 +               if (((lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) ||
94462 +                   (lcn != SPARSE_LCN && lcn != r->lcn + (vcn - r->vcn))) {
94463 +                       CLST to_eat = vcn - r->vcn;
94464 +                       CLST Tovcn = to_eat + len;
94466 +                       should_add_tail = Tovcn < r->len;
94468 +                       if (should_add_tail) {
94469 +                               tail_lcn = r->lcn == SPARSE_LCN
94470 +                                                  ? SPARSE_LCN
94471 +                                                  : (r->lcn + Tovcn);
94472 +                               tail_vcn = r->vcn + Tovcn;
94473 +                               tail_len = r->len - Tovcn;
94474 +                       }
94476 +                       if (to_eat > 0) {
94477 +                               r->len = to_eat;
94478 +                               inrange = false;
94479 +                               index += 1;
94480 +                               goto requires_new_range;
94481 +                       }
94483 +                       /* lcn should match the one we are going to add. */
94484 +                       r->lcn = lcn;
94485 +               }
94487 +               /*
94488 +                * If the existing range fits, then we are done.
94489 +                * Otherwise extend the found one and fall back to the range join code.
94490 +                */
94491 +               if (r->vcn + r->len < vcn + len)
94492 +                       r->len += len - ((r->vcn + r->len) - vcn);
94493 +       }
94495 +       /*
94496 +        * Now normalize everything starting from the insertion point.
94497 +        * It is possible that no insertion was needed, in the case where
94498 +        * the start point lies within the range of the entry
94499 +        * that 'index' points to.
94500 +        */
94501 +       if (inrange && index > 0)
94502 +               index -= 1;
94503 +       run_consolidate(run, index);
94504 +       run_consolidate(run, index + 1);
94506 +       /*
94507 +        * A special case:
94508 +        * we have to add an extra range as a tail.
94509 +        */
94510 +       if (should_add_tail &&
94511 +           !run_add_entry(run, tail_vcn, tail_lcn, tail_len, is_mft))
94512 +               return false;
94514 +       return true;
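+/*
+ * Editor's sketch with assumed values: building a mapping that contains
+ * a sparse hole in the middle. run_consolidate() keeps the tree
+ * normalized after every insertion.
+ */
+static bool sketch_build_run(struct runs_tree *run)
+{
+       return run_add_entry(run, 0, 100, 4, false) && /* vcn 0-3 -> lcn 100-103 */
+              run_add_entry(run, 4, SPARSE_LCN, 4, false) && /* vcn 4-7: hole */
+              run_add_entry(run, 8, 200, 4, false); /* vcn 8-11 -> lcn 200-203 */
+}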
94517 +/* Helper for attr_collapse_range, which is a helper for fallocate(collapse_range). */
94518 +bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
94520 +       size_t index, eat;
94521 +       struct ntfs_run *r, *e, *eat_start, *eat_end;
94522 +       CLST end;
94524 +       if (WARN_ON(!run_lookup(run, vcn, &index)))
94525 +               return true; /* should never be here */
94527 +       e = run->runs + run->count;
94528 +       r = run->runs + index;
94529 +       end = vcn + len;
94531 +       if (vcn > r->vcn) {
94532 +               if (r->vcn + r->len <= end) {
94533 +                       /* collapse tail of run */
94534 +                       r->len = vcn - r->vcn;
94535 +               } else if (r->lcn == SPARSE_LCN) {
94536 +                       /* collapse a middle part of a sparse run */
94537 +                       r->len -= len;
94538 +               } else {
94539 +                       /* collapse a middle part of normal run, split */
94540 +                       if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
94541 +                               return false;
94542 +                       return run_collapse_range(run, vcn, len);
94543 +               }
94545 +               r += 1;
94546 +       }
94548 +       eat_start = r;
94549 +       eat_end = r;
94551 +       for (; r < e; r++) {
94552 +               CLST d;
94554 +               if (r->vcn >= end) {
94555 +                       r->vcn -= len;
94556 +                       continue;
94557 +               }
94559 +               if (r->vcn + r->len <= end) {
94560 +                       /* eat this run */
94561 +                       eat_end = r + 1;
94562 +                       continue;
94563 +               }
94565 +               d = end - r->vcn;
94566 +               if (r->lcn != SPARSE_LCN)
94567 +                       r->lcn += d;
94568 +               r->len -= d;
94569 +               r->vcn -= len - d;
94570 +       }
94572 +       eat = eat_end - eat_start;
94573 +       memmove(eat_start, eat_end, (e - eat_end) * sizeof(*r));
94574 +       run->count -= eat;
94576 +       return true;
94580 + * run_get_entry
94581 + *
94582 + * returns the index-th mapped region
94583 + */
94584 +bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
94585 +                  CLST *lcn, CLST *len)
94587 +       const struct ntfs_run *r;
94589 +       if (index >= run->count)
94590 +               return false;
94592 +       r = run->runs + index;
94594 +       if (!r->len)
94595 +               return false;
94597 +       if (vcn)
94598 +               *vcn = r->vcn;
94599 +       if (lcn)
94600 +               *lcn = r->lcn;
94601 +       if (len)
94602 +               *len = r->len;
94603 +       return true;
94607 + * run_packed_size
94608 + *
94609 + * calculates the packed size of an int64
94610 + */
94611 +#ifdef __BIG_ENDIAN
94612 +static inline int run_packed_size(const s64 n)
94614 +       const u8 *p = (const u8 *)&n + sizeof(n) - 1;
94616 +       if (n >= 0) {
94617 +               if (p[-7] || p[-6] || p[-5] || p[-4])
94618 +                       p -= 4;
94619 +               if (p[-3] || p[-2])
94620 +                       p -= 2;
94621 +               if (p[-1])
94622 +                       p -= 1;
94623 +               if (p[0] & 0x80)
94624 +                       p -= 1;
94625 +       } else {
94626 +               if (p[-7] != 0xff || p[-6] != 0xff || p[-5] != 0xff ||
94627 +                   p[-4] != 0xff)
94628 +                       p -= 4;
94629 +               if (p[-3] != 0xff || p[-2] != 0xff)
94630 +                       p -= 2;
94631 +               if (p[-1] != 0xff)
94632 +                       p -= 1;
94633 +               if (!(p[0] & 0x80))
94634 +                       p -= 1;
94635 +       }
94636 +       return (const u8 *)&n + sizeof(n) - p;
94639 +/* Fully trusted function. It does not check 'size' for errors. */
94640 +static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
94642 +       const u8 *p = (u8 *)&v;
94644 +       switch (size) {
94645 +       case 8:
94646 +               run_buf[7] = p[0];
94647 +               fallthrough;
94648 +       case 7:
94649 +               run_buf[6] = p[1];
94650 +               fallthrough;
94651 +       case 6:
94652 +               run_buf[5] = p[2];
94653 +               fallthrough;
94654 +       case 5:
94655 +               run_buf[4] = p[3];
94656 +               fallthrough;
94657 +       case 4:
94658 +               run_buf[3] = p[4];
94659 +               fallthrough;
94660 +       case 3:
94661 +               run_buf[2] = p[5];
94662 +               fallthrough;
94663 +       case 2:
94664 +               run_buf[1] = p[6];
94665 +               fallthrough;
94666 +       case 1:
94667 +               run_buf[0] = p[7];
94668 +       }
94671 +/* Fully trusted function. It does not check 'size' for errors. */
94672 +static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
94674 +       u8 *p = (u8 *)&v;
94676 +       switch (size) {
94677 +       case 8:
94678 +               p[0] = run_buf[7];
94679 +               fallthrough;
94680 +       case 7:
94681 +               p[1] = run_buf[6];
94682 +               fallthrough;
94683 +       case 6:
94684 +               p[2] = run_buf[5];
94685 +               fallthrough;
94686 +       case 5:
94687 +               p[3] = run_buf[4];
94688 +               fallthrough;
94689 +       case 4:
94690 +               p[4] = run_buf[3];
94691 +               fallthrough;
94692 +       case 3:
94693 +               p[5] = run_buf[2];
94694 +               fallthrough;
94695 +       case 2:
94696 +               p[6] = run_buf[1];
94697 +               fallthrough;
94698 +       case 1:
94699 +               p[7] = run_buf[0];
94700 +       }
94701 +       return v;
94704 +#else
94706 +static inline int run_packed_size(const s64 n)
94708 +       const u8 *p = (const u8 *)&n;
94710 +       if (n >= 0) {
94711 +               if (p[7] || p[6] || p[5] || p[4])
94712 +                       p += 4;
94713 +               if (p[3] || p[2])
94714 +                       p += 2;
94715 +               if (p[1])
94716 +                       p += 1;
94717 +               if (p[0] & 0x80)
94718 +                       p += 1;
94719 +       } else {
94720 +               if (p[7] != 0xff || p[6] != 0xff || p[5] != 0xff ||
94721 +                   p[4] != 0xff)
94722 +                       p += 4;
94723 +               if (p[3] != 0xff || p[2] != 0xff)
94724 +                       p += 2;
94725 +               if (p[1] != 0xff)
94726 +                       p += 1;
94727 +               if (!(p[0] & 0x80))
94728 +                       p += 1;
94729 +       }
94731 +       return 1 + p - (const u8 *)&n;
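+/*
+ * Editor's self-check sketch for the computation above: positive values
+ * must keep the sign bit of the top stored byte clear, so 0x80 already
+ * needs two bytes, while -1 packs into a single 0xff byte.
+ */
+static inline void sketch_check_packed_size(void)
+{
+       WARN_ON(run_packed_size(1) != 1);
+       WARN_ON(run_packed_size(0x80) != 2);
+       WARN_ON(run_packed_size(-1) != 1);
+       WARN_ON(run_packed_size(0x12345) != 3);
+}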
94734 +/* Fully trusted function. It does not check 'size' for errors. */
94735 +static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
94737 +       const u8 *p = (u8 *)&v;
94739 +       /* memcpy( run_buf, &v, size); is it faster? */
94740 +       switch (size) {
94741 +       case 8:
94742 +               run_buf[7] = p[7];
94743 +               fallthrough;
94744 +       case 7:
94745 +               run_buf[6] = p[6];
94746 +               fallthrough;
94747 +       case 6:
94748 +               run_buf[5] = p[5];
94749 +               fallthrough;
94750 +       case 5:
94751 +               run_buf[4] = p[4];
94752 +               fallthrough;
94753 +       case 4:
94754 +               run_buf[3] = p[3];
94755 +               fallthrough;
94756 +       case 3:
94757 +               run_buf[2] = p[2];
94758 +               fallthrough;
94759 +       case 2:
94760 +               run_buf[1] = p[1];
94761 +               fallthrough;
94762 +       case 1:
94763 +               run_buf[0] = p[0];
94764 +       }
94767 +/* Fully trusted function. It does not check 'size' for errors. */
94768 +static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
94770 +       u8 *p = (u8 *)&v;
94772 +       /* memcpy( &v, run_buf, size); is it faster? */
94773 +       switch (size) {
94774 +       case 8:
94775 +               p[7] = run_buf[7];
94776 +               fallthrough;
94777 +       case 7:
94778 +               p[6] = run_buf[6];
94779 +               fallthrough;
94780 +       case 6:
94781 +               p[5] = run_buf[5];
94782 +               fallthrough;
94783 +       case 5:
94784 +               p[4] = run_buf[4];
94785 +               fallthrough;
94786 +       case 4:
94787 +               p[3] = run_buf[3];
94788 +               fallthrough;
94789 +       case 3:
94790 +               p[2] = run_buf[2];
94791 +               fallthrough;
94792 +       case 2:
94793 +               p[1] = run_buf[1];
94794 +               fallthrough;
94795 +       case 1:
94796 +               p[0] = run_buf[0];
94797 +       }
94798 +       return v;
94800 +#endif
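+/*
+ * Editor's worked example for the pair above: run_pack_s64(buf, 2, 0x0102)
+ * stores the value in little-endian order on either host, so
+ * buf[0] == 0x02 and buf[1] == 0x01; run_unpack_s64(buf, 2, 0) then
+ * restores 0x0102.
+ */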
94803 + * run_pack
94804 + *
94805 + * packs runs into the buffer
94806 + * packed_vcns - how many vcns we have packed
94807 + * packed_size - how many bytes of run_buf we have used
94808 + */
94809 +int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
94810 +            u32 run_buf_size, CLST *packed_vcns)
94812 +       CLST next_vcn, vcn, lcn;
94813 +       CLST prev_lcn = 0;
94814 +       CLST evcn1 = svcn + len;
94815 +       int packed_size = 0;
94816 +       size_t i;
94817 +       bool ok;
94818 +       s64 dlcn;
94819 +       int offset_size, size_size, tmp;
94821 +       next_vcn = vcn = svcn;
94823 +       *packed_vcns = 0;
94825 +       if (!len)
94826 +               goto out;
94828 +       ok = run_lookup_entry(run, vcn, &lcn, &len, &i);
94830 +       if (!ok)
94831 +               goto error;
94833 +       if (next_vcn != vcn)
94834 +               goto error;
94836 +       for (;;) {
94837 +               next_vcn = vcn + len;
94838 +               if (next_vcn > evcn1)
94839 +                       len = evcn1 - vcn;
94841 +               /* How many bytes are required to pack len? */
94842 +               size_size = run_packed_size(len);
94844 +               /* offset_size - how many bytes the packed dlcn occupies */
94845 +               if (lcn == SPARSE_LCN) {
94846 +                       offset_size = 0;
94847 +                       dlcn = 0;
94848 +               } else {
94849 +                       /* NOTE: lcn can be less than prev_lcn! */
94850 +                       dlcn = (s64)lcn - prev_lcn;
94851 +                       offset_size = run_packed_size(dlcn);
94852 +                       prev_lcn = lcn;
94853 +               }
94855 +               tmp = run_buf_size - packed_size - 2 - offset_size;
94856 +               if (tmp <= 0)
94857 +                       goto out;
94859 +               /* Can we store this entire run? */
94860 +               if (tmp < size_size)
94861 +                       goto out;
94863 +               if (run_buf) {
94864 +                       /* pack run header */
94865 +                       run_buf[0] = ((u8)(size_size | (offset_size << 4)));
94866 +                       run_buf += 1;
94868 +                       /* Pack the length of run */
94869 +                       run_pack_s64(run_buf, size_size, len);
94871 +                       run_buf += size_size;
94872 +                       /* Pack the offset from previous lcn */
94873 +                       run_pack_s64(run_buf, offset_size, dlcn);
94874 +                       run_buf += offset_size;
94875 +               }
94877 +               packed_size += 1 + offset_size + size_size;
94878 +               *packed_vcns += len;
94880 +               if (packed_size + 1 >= run_buf_size || next_vcn >= evcn1)
94881 +                       goto out;
94883 +               ok = run_get_entry(run, ++i, &vcn, &lcn, &len);
94884 +               if (!ok)
94885 +                       goto error;
94887 +               if (next_vcn != vcn)
94888 +                       goto error;
94889 +       }
94891 +out:
94892 +       /* Store the terminating zero. */
94893 +       if (run_buf)
94894 +               run_buf[0] = 0;
94896 +       return packed_size + 1;
94898 +error:
94899 +       return -EOPNOTSUPP;
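+/*
+ * Editor's worked example of the packed format, with an assumed run: a
+ * single run of len 0x20 at lcn 0x30, packed from svcn 0, becomes the
+ * header byte 0x11 (size_size 1 in the low nibble, offset_size 1 in the
+ * high nibble), one length byte 0x20, one dlcn byte 0x30 (prev_lcn
+ * starts at 0) and the terminating zero: 11 20 30 00, so run_pack()
+ * returns 4.
+ */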
94903 + * run_unpack
94904 + *
94905 + * unpacks packed runs from "run_buf"
94906 + * returns a negative error code, or the number of bytes actually used
94907 + */
94908 +int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
94909 +              CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
94910 +              u32 run_buf_size)
94912 +       u64 prev_lcn, vcn64, lcn, next_vcn;
94913 +       const u8 *run_last, *run_0;
94914 +       bool is_mft = ino == MFT_REC_MFT;
94916 +       /* Check for empty */
94917 +       if (evcn + 1 == svcn)
94918 +               return 0;
94920 +       if (evcn < svcn)
94921 +               return -EINVAL;
94923 +       run_0 = run_buf;
94924 +       run_last = run_buf + run_buf_size;
94925 +       prev_lcn = 0;
94926 +       vcn64 = svcn;
94928 +       /* Read all runs in the chain. */
94930 +       while (run_buf < run_last) {
94931 +               /* size_size - how many bytes the packed len occupies */
94932 +               u8 size_size = *run_buf & 0xF;
94933 +               /* offset_size - how many bytes the packed dlcn occupies */
94934 +               u8 offset_size = *run_buf++ >> 4;
94935 +               u64 len;
94937 +               if (!size_size)
94938 +                       break;
94940 +               /*
94941 +                * Unpack runs.
94942 +                * NOTE: runs are stored in little-endian order.
94943 +                * "len" is an unsigned value, "dlcn" is signed.
94944 +                * A large positive number requires 5 bytes to store,
94945 +                * e.g.: 05 FF 7E FF FF 00 00 00
94946 +                */
94947 +               if (size_size > 8)
94948 +                       return -EINVAL;
94950 +               len = run_unpack_s64(run_buf, size_size, 0);
94951 +               /* skip size_size */
94952 +               run_buf += size_size;
94954 +               if (!len)
94955 +                       return -EINVAL;
94957 +               if (!offset_size)
94958 +                       lcn = SPARSE_LCN64;
94959 +               else if (offset_size <= 8) {
94960 +                       s64 dlcn;
94962 +                       /* The initial value of dlcn is -1 or 0, from the sign bit. */
94963 +                       dlcn = (run_buf[offset_size - 1] & 0x80) ? (s64)-1 : 0;
94964 +                       dlcn = run_unpack_s64(run_buf, offset_size, dlcn);
94965 +                       /* skip offset_size */
94966 +                       run_buf += offset_size;
94968 +                       if (!dlcn)
94969 +                               return -EINVAL;
94970 +                       lcn = prev_lcn + dlcn;
94971 +                       prev_lcn = lcn;
94972 +               } else
94973 +                       return -EINVAL;
94975 +               next_vcn = vcn64 + len;
94976 +               /* check boundary */
94977 +               if (next_vcn > evcn + 1)
94978 +                       return -EINVAL;
94980 +#ifndef CONFIG_NTFS3_64BIT_CLUSTER
94981 +               if (next_vcn > 0x100000000ull || (lcn + len) > 0x100000000ull) {
94982 +                       ntfs_err(
94983 +                               sbi->sb,
94984 +                               "This driver is compiled without CONFIG_NTFS3_64BIT_CLUSTER (like the Windows driver).\n"
94985 +                               "Volume contains 64 bits run: vcn %llx, lcn %llx, len %llx.\n"
94986 +                               "Activate CONFIG_NTFS3_64BIT_CLUSTER to process this case",
94987 +                               vcn64, lcn, len);
94988 +                       return -EOPNOTSUPP;
94989 +               }
94990 +#endif
94991 +               if (lcn != SPARSE_LCN64 && lcn + len > sbi->used.bitmap.nbits) {
94992 +                       /* The lcn range is outside the volume. */
94993 +                       return -EINVAL;
94994 +               }
94996 +               if (!run)
94997 +                       ; /* called from check_attr(fslog.c) to check run */
94998 +               else if (run == RUN_DEALLOCATE) {
94999 +                       /* called from ni_delete_all to free clusters without storing in run */
95000 +                       if (lcn != SPARSE_LCN64)
95001 +                               mark_as_free_ex(sbi, lcn, len, true);
95002 +               } else if (vcn64 >= vcn) {
95003 +                       if (!run_add_entry(run, vcn64, lcn, len, is_mft))
95004 +                               return -ENOMEM;
95005 +               } else if (next_vcn > vcn) {
95006 +                       u64 dlen = vcn - vcn64;
95008 +                       if (!run_add_entry(run, vcn, lcn + dlen, len - dlen,
95009 +                                          is_mft))
95010 +                               return -ENOMEM;
95011 +               }
95013 +               vcn64 = next_vcn;
95014 +       }
95016 +       if (vcn64 != evcn + 1) {
95017 +               /* Unexpected length of unpacked runs. */
95018 +               return -EINVAL;
95019 +       }
95021 +       return run_buf - run_0;
95024 +#ifdef NTFS3_CHECK_FREE_CLST
95026 + * run_unpack_ex
95027 + *
95028 + * unpacks packed runs from "run_buf"
95029 + * checks that the unpacked runs are marked as used in the bitmap
95030 + * returns a negative error code, or the number of bytes actually used
95031 + */
95032 +int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
95033 +                 CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
95034 +                 u32 run_buf_size)
95036 +       int ret, err;
95037 +       CLST next_vcn, lcn, len;
95038 +       size_t index;
95039 +       bool ok;
95040 +       struct wnd_bitmap *wnd;
95042 +       ret = run_unpack(run, sbi, ino, svcn, evcn, vcn, run_buf, run_buf_size);
95043 +       if (ret <= 0)
95044 +               return ret;
95046 +       if (!sbi->used.bitmap.sb || !run || run == RUN_DEALLOCATE)
95047 +               return ret;
95049 +       if (ino == MFT_REC_BADCLUST)
95050 +               return ret;
95052 +       next_vcn = vcn = svcn;
95053 +       wnd = &sbi->used.bitmap;
95055 +       for (ok = run_lookup_entry(run, vcn, &lcn, &len, &index);
95056 +            next_vcn <= evcn;
95057 +            ok = run_get_entry(run, ++index, &vcn, &lcn, &len)) {
95058 +               if (!ok || next_vcn != vcn)
95059 +                       return -EINVAL;
95061 +               next_vcn = vcn + len;
95063 +               if (lcn == SPARSE_LCN)
95064 +                       continue;
95066 +               if (sbi->flags & NTFS_FLAGS_NEED_REPLAY)
95067 +                       continue;
95069 +               down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
95070 +               /* Check for free blocks */
95071 +               ok = wnd_is_used(wnd, lcn, len);
95072 +               up_read(&wnd->rw_lock);
95073 +               if (ok)
95074 +                       continue;
95076 +               /* Looks like volume is corrupted */
95077 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
95079 +               if (down_write_trylock(&wnd->rw_lock)) {
95080 +                       /* mark all zero bits as used in range [lcn, lcn+len) */
95081 +                       CLST i, lcn_f = 0, len_f = 0;
95083 +                       err = 0;
95084 +                       for (i = 0; i < len; i++) {
95085 +                               if (wnd_is_free(wnd, lcn + i, 1)) {
95086 +                                       if (!len_f)
95087 +                                               lcn_f = lcn + i;
95088 +                                       len_f += 1;
95089 +                               } else if (len_f) {
95090 +                                       err = wnd_set_used(wnd, lcn_f, len_f);
95091 +                                       len_f = 0;
95092 +                                       if (err)
95093 +                                               break;
95094 +                               }
95095 +                       }
95097 +                       if (len_f)
95098 +                               err = wnd_set_used(wnd, lcn_f, len_f);
95100 +                       up_write(&wnd->rw_lock);
95101 +                       if (err)
95102 +                               return err;
95103 +               }
95104 +       }
95106 +       return ret;
95108 +#endif
95111 + * run_get_highest_vcn
95112 + *
95113 + * returns the highest vcn from a mapping pairs array
95114 + * it is used while replaying the log file
95115 + */
95116 +int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn)
95118 +       u64 vcn64 = vcn;
95119 +       u8 size_size;
95121 +       while ((size_size = *run_buf & 0xF)) {
95122 +               u8 offset_size = *run_buf++ >> 4;
95123 +               u64 len;
95125 +               if (size_size > 8 || offset_size > 8)
95126 +                       return -EINVAL;
95128 +               len = run_unpack_s64(run_buf, size_size, 0);
95129 +               if (!len)
95130 +                       return -EINVAL;
95132 +               run_buf += size_size + offset_size;
95133 +               vcn64 += len;
95135 +#ifndef CONFIG_NTFS3_64BIT_CLUSTER
95136 +               if (vcn64 > 0x100000000ull)
95137 +                       return -EINVAL;
95138 +#endif
95139 +       }
95141 +       *highest_vcn = vcn64 - 1;
95142 +       return 0;
95144 diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
95145 new file mode 100644
95146 index 000000000000..c563431248bf
95147 --- /dev/null
95148 +++ b/fs/ntfs3/super.c
95149 @@ -0,0 +1,1500 @@
95150 +// SPDX-License-Identifier: GPL-2.0
95152 + *
95153 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
95154 + *
95155 + *
95156 + *                 terminology
95157 + *
95158 + * cluster - allocation unit     - 512,1K,2K,4K,...,2M
95159 + * vcn - virtual cluster number  - offset inside the file in clusters
95160 + * vbo - virtual byte offset     - offset inside the file in bytes
95161 + * lcn - logical cluster number  - 0 based cluster in clusters heap
95162 + * lbo - logical byte offset     - absolute position inside volume
95163 + * run - maps vcn to lcn         - stored in attributes in packed form
95164 + * attr - attribute segment      - std/name/data etc records inside MFT
95165 + * mi  - mft inode               - one MFT record(usually 1024 bytes or 4K), consists of attributes
95166 + * ni  - ntfs inode              - extends linux inode. consists of one or more mft inodes
95167 + * index - unit inside directory - 2K, 4K, <=page size, does not depend on cluster size
95168 + *
95169 + * TODO: Implement
95170 + * https://docs.microsoft.com/en-us/windows/wsl/file-permissions
95171 + */
95173 +#include <linux/backing-dev.h>
95174 +#include <linux/blkdev.h>
95175 +#include <linux/buffer_head.h>
95176 +#include <linux/exportfs.h>
95177 +#include <linux/fs.h>
95178 +#include <linux/iversion.h>
95179 +#include <linux/module.h>
95180 +#include <linux/nls.h>
95181 +#include <linux/parser.h>
95182 +#include <linux/seq_file.h>
95183 +#include <linux/statfs.h>
95185 +#include "debug.h"
95186 +#include "ntfs.h"
95187 +#include "ntfs_fs.h"
95188 +#ifdef CONFIG_NTFS3_LZX_XPRESS
95189 +#include "lib/lib.h"
95190 +#endif
95192 +#ifdef CONFIG_PRINTK
95194 + * Trace warnings/notices/errors
95195 + * Thanks to Joe Perches <joe@perches.com> for the implementation
95196 + */
95197 +void ntfs_printk(const struct super_block *sb, const char *fmt, ...)
95199 +       struct va_format vaf;
95200 +       va_list args;
95201 +       int level;
95202 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
95204 +       /* Should we use different ratelimits for warnings/notices/errors? */
95205 +       if (!___ratelimit(&sbi->msg_ratelimit, "ntfs3"))
95206 +               return;
95208 +       va_start(args, fmt);
95210 +       level = printk_get_level(fmt);
95211 +       vaf.fmt = printk_skip_level(fmt);
95212 +       vaf.va = &args;
95213 +       printk("%c%cntfs3: %s: %pV\n", KERN_SOH_ASCII, level, sb->s_id, &vaf);
95215 +       va_end(args);
95218 +static char s_name_buf[512];
95219 +static atomic_t s_name_buf_cnt = ATOMIC_INIT(1); // 1 means 'free s_name_buf'
95221 +/* Print warnings/notices/errors about an inode, using its name or inode number. */
95222 +void ntfs_inode_printk(struct inode *inode, const char *fmt, ...)
95224 +       struct super_block *sb = inode->i_sb;
95225 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
95226 +       char *name;
95227 +       va_list args;
95228 +       struct va_format vaf;
95229 +       int level;
95231 +       if (!___ratelimit(&sbi->msg_ratelimit, "ntfs3"))
95232 +               return;
95234 +       /* Use the statically allocated buffer, if possible. */
95235 +       name = atomic_dec_and_test(&s_name_buf_cnt)
95236 +                      ? s_name_buf
95237 +                      : kmalloc(sizeof(s_name_buf), GFP_NOFS);
95239 +       if (name) {
95240 +               struct dentry *de = d_find_alias(inode);
95241 +               const u32 name_len = ARRAY_SIZE(s_name_buf) - 1;
95243 +               if (de) {
95244 +                       spin_lock(&de->d_lock);
95245 +                       snprintf(name, name_len, " \"%s\"", de->d_name.name);
95246 +                       spin_unlock(&de->d_lock);
95247 +                       name[name_len] = 0; /* to be sure */
95248 +               } else {
95249 +                       name[0] = 0;
95250 +               }
95251 +               dput(de); /* cocci warns if placed in branch "if (de)" */
95252 +       }
95254 +       va_start(args, fmt);
95256 +       level = printk_get_level(fmt);
95257 +       vaf.fmt = printk_skip_level(fmt);
95258 +       vaf.va = &args;
95260 +       printk("%c%cntfs3: %s: ino=%lx,%s %pV\n", KERN_SOH_ASCII, level,
95261 +              sb->s_id, inode->i_ino, name ? name : "", &vaf);
95263 +       va_end(args);
95265 +       atomic_inc(&s_name_buf_cnt);
95266 +       if (name != s_name_buf)
95267 +               kfree(name);
95269 +#endif
95272 + * Shared memory struct.
95273 + *
95274 + * The on-disk ntfs upcase table is created by the ntfs formatter.
95275 + * The 'upcase' table is 128K bytes of memory,
95276 + * which we should read into memory when mounting.
95277 + * Several ntfs volumes are likely to use the same 'upcase' table,
95278 + * so it is a good idea to share the in-memory 'upcase' table between volumes.
95279 + * Unfortunately WinXP/Vista/Win7 use different upcase tables.
95280 + */
95281 +static DEFINE_SPINLOCK(s_shared_lock);
95283 +static struct {
95284 +       void *ptr;
95285 +       u32 len;
95286 +       int cnt;
95287 +} s_shared[8];
95290 + * ntfs_set_shared
95291 + *
95292 + * Returns 'ptr' if pointer was saved in shared memory
95293 + * Returns NULL if pointer was not shared
95294 + */
95295 +void *ntfs_set_shared(void *ptr, u32 bytes)
95297 +       void *ret = NULL;
95298 +       int i, j = -1;
95300 +       spin_lock(&s_shared_lock);
95301 +       for (i = 0; i < ARRAY_SIZE(s_shared); i++) {
95302 +               if (!s_shared[i].cnt) {
95303 +                       j = i;
95304 +               } else if (bytes == s_shared[i].len &&
95305 +                          !memcmp(s_shared[i].ptr, ptr, bytes)) {
95306 +                       s_shared[i].cnt += 1;
95307 +                       ret = s_shared[i].ptr;
95308 +                       break;
95309 +               }
95310 +       }
95312 +       if (!ret && j != -1) {
95313 +               s_shared[j].ptr = ptr;
95314 +               s_shared[j].len = bytes;
95315 +               s_shared[j].cnt = 1;
95316 +               ret = ptr;
95317 +       }
95318 +       spin_unlock(&s_shared_lock);
95320 +       return ret;
95324 + * ntfs_put_shared
95325 + *
95326 + * Returns 'ptr' if pointer is not shared anymore
95327 + * Returns NULL if pointer is still shared
95328 + */
95329 +void *ntfs_put_shared(void *ptr)
95331 +       void *ret = ptr;
95332 +       int i;
95334 +       spin_lock(&s_shared_lock);
95335 +       for (i = 0; i < ARRAY_SIZE(s_shared); i++) {
95336 +               if (s_shared[i].cnt && s_shared[i].ptr == ptr) {
95337 +                       if (--s_shared[i].cnt)
95338 +                               ret = NULL;
95339 +                       break;
95340 +               }
95341 +       }
95342 +       spin_unlock(&s_shared_lock);
95344 +       return ret;
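/*
 * ntfs_set_shared()/ntfs_put_shared() above form a small deduplication
 * cache: identical 128K upcase tables from several mounts collapse onto
 * one refcounted allocation in a fixed 8-slot array. A standalone sketch
 * of the same idea (single-threaded, so the spinlock is omitted; names
 * and sizes are illustrative):
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NSLOTS 8
static struct { void *ptr; size_t len; int cnt; } shared[NSLOTS];

/* Returns 'ptr' or an existing identical buffer if cached; NULL if full. */
static void *set_shared(void *ptr, size_t len)
{
	int i, free_slot = -1;

	for (i = 0; i < NSLOTS; i++) {
		if (!shared[i].cnt) {
			free_slot = i;
		} else if (shared[i].len == len &&
			   !memcmp(shared[i].ptr, ptr, len)) {
			shared[i].cnt++; /* duplicate found: share it */
			return shared[i].ptr;
		}
	}
	if (free_slot < 0)
		return NULL;
	shared[free_slot].ptr = ptr;
	shared[free_slot].len = len;
	shared[free_slot].cnt = 1;
	return ptr;
}

/* Returns 'ptr' when the last user dropped it (caller frees), else NULL. */
static void *put_shared(void *ptr)
{
	int i;

	for (i = 0; i < NSLOTS; i++)
		if (shared[i].cnt && shared[i].ptr == ptr)
			return --shared[i].cnt ? NULL : ptr;
	return ptr; /* never was cached */
}

int main(void)
{
	char *a = strdup("upcase-table");
	char *b = strdup("upcase-table");
	void *sb;

	set_shared(a, 13);      /* first mount caches its table */
	sb = set_shared(b, 13); /* second mount finds the duplicate */

	/* Mirrors the mount path: adopt the shared copy, drop our own. */
	if (sb && sb != (void *)b) {
		free(b);
		b = sb;
	}
	printf("shared: %d\n", (void *)a == (void *)b); /* prints 1 */
	free(put_shared(b)); /* NULL: 'a' still holds a reference */
	free(put_shared(a)); /* == a: last user frees for real */
	return 0;
}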
95347 +static inline void clear_mount_options(struct ntfs_mount_options *options)
95349 +       unload_nls(options->nls);
95352 +enum Opt {
95353 +       Opt_uid,
95354 +       Opt_gid,
95355 +       Opt_umask,
95356 +       Opt_dmask,
95357 +       Opt_fmask,
95358 +       Opt_immutable,
95359 +       Opt_discard,
95360 +       Opt_force,
95361 +       Opt_sparse,
95362 +       Opt_nohidden,
95363 +       Opt_showmeta,
95364 +       Opt_acl,
95365 +       Opt_noatime,
95366 +       Opt_nls,
95367 +       Opt_prealloc,
95368 +       Opt_no_acs_rules,
95369 +       Opt_err,
95372 +static const match_table_t ntfs_tokens = {
95373 +       { Opt_uid, "uid=%u" },
95374 +       { Opt_gid, "gid=%u" },
95375 +       { Opt_umask, "umask=%o" },
95376 +       { Opt_dmask, "dmask=%o" },
95377 +       { Opt_fmask, "fmask=%o" },
95378 +       { Opt_immutable, "sys_immutable" },
95379 +       { Opt_discard, "discard" },
95380 +       { Opt_force, "force" },
95381 +       { Opt_sparse, "sparse" },
95382 +       { Opt_nohidden, "nohidden" },
95383 +       { Opt_acl, "acl" },
95384 +       { Opt_noatime, "noatime" },
95385 +       { Opt_showmeta, "showmeta" },
95386 +       { Opt_nls, "nls=%s" },
95387 +       { Opt_prealloc, "prealloc" },
95388 +       { Opt_no_acs_rules, "no_acs_rules" },
95389 +       { Opt_err, NULL },
95392 +static noinline int ntfs_parse_options(struct super_block *sb, char *options,
95393 +                                      int silent,
95394 +                                      struct ntfs_mount_options *opts)
95396 +       char *p;
95397 +       substring_t args[MAX_OPT_ARGS];
95398 +       int option;
95399 +       char nls_name[30];
95400 +       struct nls_table *nls;
95402 +       opts->fs_uid = current_uid();
95403 +       opts->fs_gid = current_gid();
95404 +       opts->fs_fmask_inv = opts->fs_dmask_inv = ~current_umask();
95405 +       nls_name[0] = 0;
95407 +       if (!options)
95408 +               goto out;
95410 +       while ((p = strsep(&options, ","))) {
95411 +               int token;
95413 +               if (!*p)
95414 +                       continue;
95416 +               token = match_token(p, ntfs_tokens, args);
95417 +               switch (token) {
95418 +               case Opt_immutable:
95419 +                       opts->sys_immutable = 1;
95420 +                       break;
95421 +               case Opt_uid:
95422 +                       if (match_int(&args[0], &option))
95423 +                               return -EINVAL;
95424 +                       opts->fs_uid = make_kuid(current_user_ns(), option);
95425 +                       if (!uid_valid(opts->fs_uid))
95426 +                               return -EINVAL;
95427 +                       opts->uid = 1;
95428 +                       break;
95429 +               case Opt_gid:
95430 +                       if (match_int(&args[0], &option))
95431 +                               return -EINVAL;
95432 +                       opts->fs_gid = make_kgid(current_user_ns(), option);
95433 +                       if (!gid_valid(opts->fs_gid))
95434 +                               return -EINVAL;
95435 +                       opts->gid = 1;
95436 +                       break;
95437 +               case Opt_umask:
95438 +                       if (match_octal(&args[0], &option))
95439 +                               return -EINVAL;
95440 +                       opts->fs_fmask_inv = opts->fs_dmask_inv = ~option;
95441 +                       opts->fmask = opts->dmask = 1;
95442 +                       break;
95443 +               case Opt_dmask:
95444 +                       if (match_octal(&args[0], &option))
95445 +                               return -EINVAL;
95446 +                       opts->fs_dmask_inv = ~option;
95447 +                       opts->dmask = 1;
95448 +                       break;
95449 +               case Opt_fmask:
95450 +                       if (match_octal(&args[0], &option))
95451 +                               return -EINVAL;
95452 +                       opts->fs_fmask_inv = ~option;
95453 +                       opts->fmask = 1;
95454 +                       break;
95455 +               case Opt_discard:
95456 +                       opts->discard = 1;
95457 +                       break;
95458 +               case Opt_force:
95459 +                       opts->force = 1;
95460 +                       break;
95461 +               case Opt_sparse:
95462 +                       opts->sparse = 1;
95463 +                       break;
95464 +               case Opt_nohidden:
95465 +                       opts->nohidden = 1;
95466 +                       break;
95467 +               case Opt_acl:
95468 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
95469 +                       sb->s_flags |= SB_POSIXACL;
95470 +                       break;
95471 +#else
95472 +                       ntfs_err(sb, "support for ACL not compiled in!");
95473 +                       return -EINVAL;
95474 +#endif
95475 +               case Opt_noatime:
95476 +                       sb->s_flags |= SB_NOATIME;
95477 +                       break;
95478 +               case Opt_showmeta:
95479 +                       opts->showmeta = 1;
95480 +                       break;
95481 +               case Opt_nls:
95482 +                       match_strlcpy(nls_name, &args[0], sizeof(nls_name));
95483 +                       break;
95484 +               case Opt_prealloc:
95485 +                       opts->prealloc = 1;
95486 +                       break;
95487 +               case Opt_no_acs_rules:
95488 +                       opts->no_acs_rules = 1;
95489 +                       break;
95490 +               default:
95491 +                       if (!silent)
95492 +                               ntfs_err(
95493 +                                       sb,
95494 +                                       "Unrecognized mount option \"%s\" or missing value",
95495 +                                       p);
95496 +                       //return -EINVAL;
95497 +               }
95498 +       }
95500 +out:
95501 +       if (!strcmp(nls_name[0] ? nls_name : CONFIG_NLS_DEFAULT, "utf8")) {
95502 +               /* For UTF-8 use utf16s_to_utf8s/utf8s_to_utf16s instead of nls */
95503 +               nls = NULL;
95504 +       } else if (nls_name[0]) {
95505 +               nls = load_nls(nls_name);
95506 +               if (!nls) {
95507 +                       ntfs_err(sb, "failed to load \"%s\"", nls_name);
95508 +                       return -EINVAL;
95509 +               }
95510 +       } else {
95511 +               nls = load_nls_default();
95512 +               if (!nls) {
95513 +                       ntfs_err(sb, "failed to load default nls");
95514 +                       return -EINVAL;
95515 +               }
95516 +       }
95517 +       opts->nls = nls;
95519 +       return 0;
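/*
 * Given the token table above, the mount data string is a comma-separated
 * option list. A hedged sketch of passing one via mount(2); the device
 * and mountpoint paths here are invented for the example:
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* "noatime" may also be requested as a token in the data string */
	if (mount("/dev/sdb1", "/mnt/win", "ntfs3", MS_NOATIME,
		  "uid=1000,gid=1000,umask=022,discard,nls=utf8")) {
		perror("mount");
		return 1;
	}
	return 0;
}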
95522 +static int ntfs_remount(struct super_block *sb, int *flags, char *data)
95524 +       int err, ro_rw;
95525 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
95526 +       struct ntfs_mount_options old_opts;
95527 +       char *orig_data = kstrdup(data, GFP_KERNEL);
95529 +       if (data && !orig_data)
95530 +               return -ENOMEM;
95532 +       /* Store original options */
95533 +       memcpy(&old_opts, &sbi->options, sizeof(old_opts));
95534 +       clear_mount_options(&sbi->options);
95535 +       memset(&sbi->options, 0, sizeof(sbi->options));
95537 +       err = ntfs_parse_options(sb, data, 0, &sbi->options);
95538 +       if (err)
95539 +               goto restore_opts;
95541 +       ro_rw = sb_rdonly(sb) && !(*flags & SB_RDONLY);
95542 +       if (ro_rw && (sbi->flags & NTFS_FLAGS_NEED_REPLAY)) {
95543 +               ntfs_warn(
95544 +                       sb,
95545 +                       "Couldn't remount rw because journal is not replayed. Please umount/remount instead\n");
95546 +               err = -EINVAL;
95547 +               goto restore_opts;
95548 +       }
95550 +       sync_filesystem(sb);
95552 +       if (ro_rw && (sbi->volume.flags & VOLUME_FLAG_DIRTY) &&
95553 +           !sbi->options.force) {
95554 +               ntfs_warn(sb, "volume is dirty and \"force\" flag is not set!");
95555 +               err = -EINVAL;
95556 +               goto restore_opts;
95557 +       }
95559 +       clear_mount_options(&old_opts);
95561 +       *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME) |
95562 +                SB_NODIRATIME | SB_NOATIME;
95563 +       ntfs_info(sb, "re-mounted. Opts: %s", orig_data);
95564 +       err = 0;
95565 +       goto out;
95567 +restore_opts:
95568 +       clear_mount_options(&sbi->options);
95569 +       memcpy(&sbi->options, &old_opts, sizeof(old_opts));
95571 +out:
95572 +       kfree(orig_data);
95573 +       return err;
95576 +static struct kmem_cache *ntfs_inode_cachep;
95578 +static struct inode *ntfs_alloc_inode(struct super_block *sb)
95580 +       struct ntfs_inode *ni = kmem_cache_alloc(ntfs_inode_cachep, GFP_NOFS);
95582 +       if (!ni)
95583 +               return NULL;
95585 +       memset(ni, 0, offsetof(struct ntfs_inode, vfs_inode));
95587 +       mutex_init(&ni->ni_lock);
95589 +       return &ni->vfs_inode;
95592 +static void ntfs_i_callback(struct rcu_head *head)
95594 +       struct inode *inode = container_of(head, struct inode, i_rcu);
95595 +       struct ntfs_inode *ni = ntfs_i(inode);
95597 +       mutex_destroy(&ni->ni_lock);
95599 +       kmem_cache_free(ntfs_inode_cachep, ni);
95602 +static void ntfs_destroy_inode(struct inode *inode)
95604 +       call_rcu(&inode->i_rcu, ntfs_i_callback);
95607 +static void init_once(void *foo)
95609 +       struct ntfs_inode *ni = foo;
95611 +       inode_init_once(&ni->vfs_inode);
95614 +/* noinline to reduce binary size */
95615 +static noinline void put_ntfs(struct ntfs_sb_info *sbi)
95617 +       ntfs_free(sbi->new_rec);
95618 +       ntfs_vfree(ntfs_put_shared(sbi->upcase));
95619 +       ntfs_free(sbi->def_table);
95621 +       wnd_close(&sbi->mft.bitmap);
95622 +       wnd_close(&sbi->used.bitmap);
95624 +       if (sbi->mft.ni)
95625 +               iput(&sbi->mft.ni->vfs_inode);
95627 +       if (sbi->security.ni)
95628 +               iput(&sbi->security.ni->vfs_inode);
95630 +       if (sbi->reparse.ni)
95631 +               iput(&sbi->reparse.ni->vfs_inode);
95633 +       if (sbi->objid.ni)
95634 +               iput(&sbi->objid.ni->vfs_inode);
95636 +       if (sbi->volume.ni)
95637 +               iput(&sbi->volume.ni->vfs_inode);
95639 +       ntfs_update_mftmirr(sbi, 0);
95641 +       indx_clear(&sbi->security.index_sii);
95642 +       indx_clear(&sbi->security.index_sdh);
95643 +       indx_clear(&sbi->reparse.index_r);
95644 +       indx_clear(&sbi->objid.index_o);
95645 +       ntfs_free(sbi->compress.lznt);
95646 +#ifdef CONFIG_NTFS3_LZX_XPRESS
95647 +       xpress_free_decompressor(sbi->compress.xpress);
95648 +       lzx_free_decompressor(sbi->compress.lzx);
95649 +#endif
95650 +       clear_mount_options(&sbi->options);
95652 +       ntfs_free(sbi);
95655 +static void ntfs_put_super(struct super_block *sb)
95657 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
95659 +       /* Mark rw ntfs as clean, if possible */
95660 +       ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);
95662 +       put_ntfs(sbi);
95664 +       sync_blockdev(sb->s_bdev);
95667 +static int ntfs_statfs(struct dentry *dentry, struct kstatfs *buf)
95669 +       struct super_block *sb = dentry->d_sb;
95670 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
95671 +       struct wnd_bitmap *wnd = &sbi->used.bitmap;
95673 +       buf->f_type = sb->s_magic;
95674 +       buf->f_bsize = sbi->cluster_size;
95675 +       buf->f_blocks = wnd->nbits;
95677 +       buf->f_bfree = buf->f_bavail = wnd_zeroes(wnd);
95678 +       buf->f_fsid.val[0] = sbi->volume.ser_num;
95679 +       buf->f_fsid.val[1] = (sbi->volume.ser_num >> 32);
95680 +       buf->f_namelen = NTFS_NAME_LEN;
95682 +       return 0;
95685 +static int ntfs_show_options(struct seq_file *m, struct dentry *root)
95687 +       struct super_block *sb = root->d_sb;
95688 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
95689 +       struct ntfs_mount_options *opts = &sbi->options;
95690 +       struct user_namespace *user_ns = seq_user_ns(m);
95692 +       if (opts->uid)
95693 +               seq_printf(m, ",uid=%u",
95694 +                          from_kuid_munged(user_ns, opts->fs_uid));
95695 +       if (opts->gid)
95696 +               seq_printf(m, ",gid=%u",
95697 +                          from_kgid_munged(user_ns, opts->fs_gid));
95698 +       if (opts->fmask)
95699 +               seq_printf(m, ",fmask=%04o", ~opts->fs_fmask_inv);
95700 +       if (opts->dmask)
95701 +               seq_printf(m, ",dmask=%04o", ~opts->fs_dmask_inv);
95702 +       if (opts->nls)
95703 +               seq_printf(m, ",nls=%s", opts->nls->charset);
95704 +       else
95705 +               seq_puts(m, ",nls=utf8");
95706 +       if (opts->sys_immutable)
95707 +               seq_puts(m, ",sys_immutable");
95708 +       if (opts->discard)
95709 +               seq_puts(m, ",discard");
95710 +       if (opts->sparse)
95711 +               seq_puts(m, ",sparse");
95712 +       if (opts->showmeta)
95713 +               seq_puts(m, ",showmeta");
95714 +       if (opts->nohidden)
95715 +               seq_puts(m, ",nohidden");
95716 +       if (opts->force)
95717 +               seq_puts(m, ",force");
95718 +       if (opts->no_acs_rules)
95719 +               seq_puts(m, ",no_acs_rules");
95720 +       if (opts->prealloc)
95721 +               seq_puts(m, ",prealloc");
95722 +       if (sb->s_flags & SB_POSIXACL)
95723 +               seq_puts(m, ",acl");
95724 +       if (sb->s_flags & SB_NOATIME)
95725 +               seq_puts(m, ",noatime");
95727 +       return 0;
95730 +/* super_operations::sync_fs */
95731 +static int ntfs_sync_fs(struct super_block *sb, int wait)
95733 +       int err = 0, err2;
95734 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
95735 +       struct ntfs_inode *ni;
95736 +       struct inode *inode;
95738 +       ni = sbi->security.ni;
95739 +       if (ni) {
95740 +               inode = &ni->vfs_inode;
95741 +               err2 = _ni_write_inode(inode, wait);
95742 +               if (err2 && !err)
95743 +                       err = err2;
95744 +       }
95746 +       ni = sbi->objid.ni;
95747 +       if (ni) {
95748 +               inode = &ni->vfs_inode;
95749 +               err2 = _ni_write_inode(inode, wait);
95750 +               if (err2 && !err)
95751 +                       err = err2;
95752 +       }
95754 +       ni = sbi->reparse.ni;
95755 +       if (ni) {
95756 +               inode = &ni->vfs_inode;
95757 +               err2 = _ni_write_inode(inode, wait);
95758 +               if (err2 && !err)
95759 +                       err = err2;
95760 +       }
95762 +       if (!err)
95763 +               ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);
95765 +       ntfs_update_mftmirr(sbi, wait);
95767 +       return err;
95770 +static const struct super_operations ntfs_sops = {
95771 +       .alloc_inode = ntfs_alloc_inode,
95772 +       .destroy_inode = ntfs_destroy_inode,
95773 +       .evict_inode = ntfs_evict_inode,
95774 +       .put_super = ntfs_put_super,
95775 +       .statfs = ntfs_statfs,
95776 +       .show_options = ntfs_show_options,
95777 +       .sync_fs = ntfs_sync_fs,
95778 +       .remount_fs = ntfs_remount,
95779 +       .write_inode = ntfs3_write_inode,
95782 +static struct inode *ntfs_export_get_inode(struct super_block *sb, u64 ino,
95783 +                                          u32 generation)
95785 +       struct MFT_REF ref;
95786 +       struct inode *inode;
95788 +       ref.low = cpu_to_le32(ino);
95789 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
95790 +       ref.high = cpu_to_le16(ino >> 32);
95791 +#else
95792 +       ref.high = 0;
95793 +#endif
95794 +       ref.seq = cpu_to_le16(generation);
95796 +       inode = ntfs_iget5(sb, &ref, NULL);
95797 +       if (!IS_ERR(inode) && is_bad_inode(inode)) {
95798 +               iput(inode);
95799 +               inode = ERR_PTR(-ESTALE);
95800 +       }
95802 +       return inode;
95805 +static struct dentry *ntfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
95806 +                                       int fh_len, int fh_type)
95808 +       return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
95809 +                                   ntfs_export_get_inode);
95812 +static struct dentry *ntfs_fh_to_parent(struct super_block *sb, struct fid *fid,
95813 +                                       int fh_len, int fh_type)
95815 +       return generic_fh_to_parent(sb, fid, fh_len, fh_type,
95816 +                                   ntfs_export_get_inode);
95819 +/* TODO: == ntfs_sync_inode */
95820 +static int ntfs_nfs_commit_metadata(struct inode *inode)
95822 +       return _ni_write_inode(inode, 1);
95825 +static const struct export_operations ntfs_export_ops = {
95826 +       .fh_to_dentry = ntfs_fh_to_dentry,
95827 +       .fh_to_parent = ntfs_fh_to_parent,
95828 +       .get_parent = ntfs3_get_parent,
95829 +       .commit_metadata = ntfs_nfs_commit_metadata,
95832 +/* Returns Gb,Mb to print with "%u.%02u Gb" */
95833 +static u32 format_size_gb(const u64 bytes, u32 *mb)
95835 +       /* Do a simple right 30-bit shift of the 64-bit value */
95836 +       u64 kbytes = bytes >> 10;
95837 +       u32 kbytes32 = kbytes;
95839 +       *mb = (100 * (kbytes32 & 0xfffff) + 0x7ffff) >> 20;
95840 +       if (*mb >= 100)
95841 +               *mb = 99;
95843 +       return (kbytes32 >> 20) | (((u32)(kbytes >> 32)) << 12);
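/*
 * A standalone check of the arithmetic above: shift to KiB, keep the low
 * 20 bits as the sub-GiB remainder, and scale it to 0..99 hundredths with
 * rounding (0x7ffff is just under half of 2^20). Same math, userspace
 * types:
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t demo_format_size_gb(uint64_t bytes, uint32_t *mb)
{
	uint64_t kbytes = bytes >> 10;
	uint32_t kbytes32 = (uint32_t)kbytes;

	*mb = (100 * (kbytes32 & 0xfffff) + 0x7ffff) >> 20;
	if (*mb >= 100)
		*mb = 99;
	return (kbytes32 >> 20) | ((uint32_t)(kbytes >> 32) << 12);
}

int main(void)
{
	uint32_t mb;
	uint32_t gb = demo_format_size_gb(0x60000000ull, &mb); /* 1.5 GiB */

	printf("%u.%02u Gb\n", gb, mb); /* prints "1.50 Gb" */
	return 0;
}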
95846 +static u32 true_sectors_per_clst(const struct NTFS_BOOT *boot)
95848 +       return boot->sectors_per_clusters <= 0x80
95849 +                      ? boot->sectors_per_clusters
95850 +                      : (1u << (0 - boot->sectors_per_clusters));
95853 +/* Initializes internal info from the on-disk boot sector */
95854 +static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
95855 +                              u64 dev_size)
95857 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
95858 +       int err;
95859 +       u32 mb, gb, boot_sector_size, sct_per_clst, record_size;
95860 +       u64 sectors, clusters, fs_size, mlcn, mlcn2;
95861 +       struct NTFS_BOOT *boot;
95862 +       struct buffer_head *bh;
95863 +       struct MFT_REC *rec;
95864 +       u16 fn, ao;
95866 +       sbi->volume.blocks = dev_size >> PAGE_SHIFT;
95868 +       bh = ntfs_bread(sb, 0);
95869 +       if (!bh)
95870 +               return -EIO;
95872 +       err = -EINVAL;
95873 +       boot = (struct NTFS_BOOT *)bh->b_data;
95875 +       if (memcmp(boot->system_id, "NTFS    ", sizeof("NTFS    ") - 1))
95876 +               goto out;
95878 +       /* 0x55AA is not mandatory. Thanks Maxim Suhanov */
95879 +       /*if (0x55 != boot->boot_magic[0] || 0xAA != boot->boot_magic[1])
95880 +        *      goto out;
95881 +        */
95883 +       boot_sector_size = (u32)boot->bytes_per_sector[1] << 8;
95884 +       if (boot->bytes_per_sector[0] || boot_sector_size < SECTOR_SIZE ||
95885 +           !is_power_of2(boot_sector_size)) {
95886 +               goto out;
95887 +       }
95889 +       /* cluster size: 512, 1K, 2K, 4K, ... 2M */
95890 +       sct_per_clst = true_sectors_per_clst(boot);
95891 +       if (!is_power_of2(sct_per_clst))
95892 +               goto out;
95894 +       mlcn = le64_to_cpu(boot->mft_clst);
95895 +       mlcn2 = le64_to_cpu(boot->mft2_clst);
95896 +       sectors = le64_to_cpu(boot->sectors_per_volume);
95898 +       if (mlcn * sct_per_clst >= sectors)
95899 +               goto out;
95901 +       if (mlcn2 * sct_per_clst >= sectors)
95902 +               goto out;
95904 +       /* Check MFT record size */
95905 +       if ((boot->record_size < 0 &&
95906 +            SECTOR_SIZE > (2U << (-boot->record_size))) ||
95907 +           (boot->record_size >= 0 && !is_power_of2(boot->record_size))) {
95908 +               goto out;
95909 +       }
95911 +       /* Check index record size */
95912 +       if ((boot->index_size < 0 &&
95913 +            SECTOR_SIZE > (2U << (-boot->index_size))) ||
95914 +           (boot->index_size >= 0 && !is_power_of2(boot->index_size))) {
95915 +               goto out;
95916 +       }
95918 +       sbi->sector_size = boot_sector_size;
95919 +       sbi->sector_bits = blksize_bits(boot_sector_size);
95920 +       fs_size = (sectors + 1) << sbi->sector_bits;
95922 +       gb = format_size_gb(fs_size, &mb);
95924 +       /*
95925 +        * - Volume formatted and mounted with the same sector size
95926 +        * - Volume formatted 4K and mounted as 512
95927 +        * - Volume formatted 512 and mounted as 4K
95928 +        */
95929 +       if (sbi->sector_size != sector_size) {
95930 +               ntfs_warn(sb,
95931 +                         "Different NTFS' sector size and media sector size");
95932 +               dev_size += sector_size - 1;
95933 +       }
95935 +       sbi->cluster_size = boot_sector_size * sct_per_clst;
95936 +       sbi->cluster_bits = blksize_bits(sbi->cluster_size);
95938 +       sbi->mft.lbo = mlcn << sbi->cluster_bits;
95939 +       sbi->mft.lbo2 = mlcn2 << sbi->cluster_bits;
95941 +       if (sbi->cluster_size < sbi->sector_size)
95942 +               goto out;
95944 +       sbi->cluster_mask = sbi->cluster_size - 1;
95945 +       sbi->cluster_mask_inv = ~(u64)sbi->cluster_mask;
95946 +       sbi->record_size = record_size = boot->record_size < 0
95947 +                                                ? 1 << (-boot->record_size)
95948 +                                                : (u32)boot->record_size
95949 +                                                          << sbi->cluster_bits;
95951 +       if (record_size > MAXIMUM_BYTES_PER_MFT)
95952 +               goto out;
95954 +       sbi->record_bits = blksize_bits(record_size);
95955 +       sbi->attr_size_tr = (5 * record_size >> 4); // ~320 bytes
95957 +       sbi->max_bytes_per_attr =
95958 +               record_size - QuadAlign(MFTRECORD_FIXUP_OFFSET_1) -
95959 +               QuadAlign(((record_size >> SECTOR_SHIFT) * sizeof(short))) -
95960 +               QuadAlign(sizeof(enum ATTR_TYPE));
95962 +       sbi->index_size = boot->index_size < 0
95963 +                                 ? 1u << (-boot->index_size)
95964 +                                 : (u32)boot->index_size << sbi->cluster_bits;
95966 +       sbi->volume.ser_num = le64_to_cpu(boot->serial_num);
95967 +       sbi->volume.size = sectors << sbi->sector_bits;
95969 +       /* warning if RAW volume */
95970 +       if (dev_size < fs_size) {
95971 +               u32 mb0, gb0;
95973 +               gb0 = format_size_gb(dev_size, &mb0);
95974 +               ntfs_warn(
95975 +                       sb,
95976 +                       "RAW NTFS volume: Filesystem size %u.%02u Gb > volume size %u.%02u Gb. Mount in read-only",
95977 +                       gb, mb, gb0, mb0);
95978 +               sb->s_flags |= SB_RDONLY;
95979 +       }
95981 +       clusters = sbi->volume.size >> sbi->cluster_bits;
95982 +#ifndef CONFIG_NTFS3_64BIT_CLUSTER
95983 +       /* 32 bits per cluster */
95984 +       if (clusters >> 32) {
95985 +               ntfs_notice(
95986 +                       sb,
95987 +                       "NTFS %u.%02u Gb is too big to use 32 bits per cluster",
95988 +                       gb, mb);
95989 +               goto out;
95990 +       }
95991 +#elif BITS_PER_LONG < 64
95992 +#error "CONFIG_NTFS3_64BIT_CLUSTER incompatible in 32 bit OS"
95993 +#endif
95995 +       sbi->used.bitmap.nbits = clusters;
95997 +       rec = ntfs_zalloc(record_size);
95998 +       if (!rec) {
95999 +               err = -ENOMEM;
96000 +               goto out;
96001 +       }
96003 +       sbi->new_rec = rec;
96004 +       rec->rhdr.sign = NTFS_FILE_SIGNATURE;
96005 +       rec->rhdr.fix_off = cpu_to_le16(MFTRECORD_FIXUP_OFFSET_1);
96006 +       fn = (sbi->record_size >> SECTOR_SHIFT) + 1;
96007 +       rec->rhdr.fix_num = cpu_to_le16(fn);
96008 +       ao = QuadAlign(MFTRECORD_FIXUP_OFFSET_1 + sizeof(short) * fn);
96009 +       rec->attr_off = cpu_to_le16(ao);
96010 +       rec->used = cpu_to_le32(ao + QuadAlign(sizeof(enum ATTR_TYPE)));
96011 +       rec->total = cpu_to_le32(sbi->record_size);
96012 +       ((struct ATTRIB *)Add2Ptr(rec, ao))->type = ATTR_END;
96014 +       if (sbi->cluster_size < PAGE_SIZE)
96015 +               sb_set_blocksize(sb, sbi->cluster_size);
96017 +       sbi->block_mask = sb->s_blocksize - 1;
96018 +       sbi->blocks_per_cluster = sbi->cluster_size >> sb->s_blocksize_bits;
96019 +       sbi->volume.blocks = sbi->volume.size >> sb->s_blocksize_bits;
96021 +       /* Maximum size for normal files */
96022 +       sbi->maxbytes = (clusters << sbi->cluster_bits) - 1;
96024 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
96025 +       if (clusters >= (1ull << (64 - sbi->cluster_bits)))
96026 +               sbi->maxbytes = -1;
96027 +       sbi->maxbytes_sparse = -1;
96028 +#else
96029 +       /* Maximum size for sparse file */
96030 +       sbi->maxbytes_sparse = (1ull << (sbi->cluster_bits + 32)) - 1;
96031 +#endif
96033 +       err = 0;
96035 +out:
96036 +       brelse(bh);
96038 +       return err;
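/*
 * The boot sector stores record and index sizes in the signed-byte
 * convention decoded above: a non-negative value counts clusters, a
 * negative value means 2^(-value) bytes. A standalone check
 * (cluster_bits = 12, i.e. 4K clusters, is assumed for the demo):
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t decode_size(int8_t v, unsigned int cluster_bits)
{
	return v < 0 ? 1u << -v : (uint32_t)v << cluster_bits;
}

int main(void)
{
	/* 0xF6 read as s8 is -10: MFT records of 1 << 10 = 1024 bytes */
	printf("%u\n", decode_size((int8_t)0xF6, 12)); /* 1024 */
	printf("%u\n", decode_size(1, 12));            /* one cluster = 4096 */
	return 0;
}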
96041 +/* Try to mount */
96042 +static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
96044 +       int err;
96045 +       struct ntfs_sb_info *sbi;
96046 +       struct block_device *bdev = sb->s_bdev;
96047 +       struct inode *bd_inode = bdev->bd_inode;
96048 +       struct request_queue *rq = bdev_get_queue(bdev);
96049 +       struct inode *inode = NULL;
96050 +       struct ntfs_inode *ni;
96051 +       size_t i, tt;
96052 +       CLST vcn, lcn, len;
96053 +       struct ATTRIB *attr;
96054 +       const struct VOLUME_INFO *info;
96055 +       u32 idx, done, bytes;
96056 +       struct ATTR_DEF_ENTRY *t;
96057 +       u16 *upcase = NULL;
96058 +       u16 *shared;
96059 +       bool is_ro;
96060 +       struct MFT_REF ref;
96062 +       ref.high = 0;
96064 +       sbi = ntfs_zalloc(sizeof(struct ntfs_sb_info));
96065 +       if (!sbi)
96066 +               return -ENOMEM;
96068 +       sb->s_fs_info = sbi;
96069 +       sbi->sb = sb;
96070 +       sb->s_flags |= SB_NODIRATIME;
96071 +       sb->s_magic = 0x7366746e; // "ntfs"
96072 +       sb->s_op = &ntfs_sops;
96073 +       sb->s_export_op = &ntfs_export_ops;
96074 +       sb->s_time_gran = NTFS_TIME_GRAN; // 100 nsec
96075 +       sb->s_xattr = ntfs_xattr_handlers;
96077 +       ratelimit_state_init(&sbi->msg_ratelimit, DEFAULT_RATELIMIT_INTERVAL,
96078 +                            DEFAULT_RATELIMIT_BURST);
96080 +       err = ntfs_parse_options(sb, data, silent, &sbi->options);
96081 +       if (err)
96082 +               goto out;
96084 +       if (rq && blk_queue_discard(rq) && rq->limits.discard_granularity) {
96087 +               sbi->discard_granularity = rq->limits.discard_granularity;
96088 +               sbi->discard_granularity_mask_inv =
96089 +                       ~(u64)(sbi->discard_granularity - 1);
96090 +       }
96092 +       sb_set_blocksize(sb, PAGE_SIZE);
96094 +       /* parse boot */
96095 +       err = ntfs_init_from_boot(sb, rq ? queue_logical_block_size(rq) : 512,
96096 +                                 bd_inode->i_size);
96097 +       if (err)
96098 +               goto out;
96100 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
96101 +       sb->s_maxbytes = MAX_LFS_FILESIZE;
96102 +#else
96103 +       sb->s_maxbytes = 0xFFFFFFFFull << sbi->cluster_bits;
96104 +#endif
96106 +       mutex_init(&sbi->compress.mtx_lznt);
96107 +#ifdef CONFIG_NTFS3_LZX_XPRESS
96108 +       mutex_init(&sbi->compress.mtx_xpress);
96109 +       mutex_init(&sbi->compress.mtx_lzx);
96110 +#endif
96112 +       /*
96113 +        * Load $Volume. This should be done before the LogFile,
96114 +        * because 'sbi->volume.ni' is used by 'ntfs_set_state'.
96115 +        */
96116 +       ref.low = cpu_to_le32(MFT_REC_VOL);
96117 +       ref.seq = cpu_to_le16(MFT_REC_VOL);
96118 +       inode = ntfs_iget5(sb, &ref, &NAME_VOLUME);
96119 +       if (IS_ERR(inode)) {
96120 +               err = PTR_ERR(inode);
96121 +               ntfs_err(sb, "Failed to load $Volume.");
96122 +               inode = NULL;
96123 +               goto out;
96124 +       }
96126 +       ni = ntfs_i(inode);
96128 +       /* Load and save label (not necessary) */
96129 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_LABEL, NULL, 0, NULL, NULL);
96131 +       if (!attr) {
96132 +               /* It is ok if no ATTR_LABEL */
96133 +       } else if (!attr->non_res && !is_attr_ext(attr)) {
96134 +               /* $AttrDef allows labels to be up to 128 symbols */
96135 +               err = utf16s_to_utf8s(resident_data(attr),
96136 +                                     le32_to_cpu(attr->res.data_size) >> 1,
96137 +                                     UTF16_LITTLE_ENDIAN, sbi->volume.label,
96138 +                                     sizeof(sbi->volume.label));
96139 +               if (err < 0)
96140 +                       sbi->volume.label[0] = 0;
96141 +       } else {
96142 +               /* should we break mounting here? */
96143 +               //err = -EINVAL;
96144 +               //goto out;
96145 +       }
96147 +       attr = ni_find_attr(ni, attr, NULL, ATTR_VOL_INFO, NULL, 0, NULL, NULL);
96148 +       if (!attr || is_attr_ext(attr)) {
96149 +               err = -EINVAL;
96150 +               goto out;
96151 +       }
96153 +       info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
96154 +       if (!info) {
96155 +               err = -EINVAL;
96156 +               goto out;
96157 +       }
96159 +       sbi->volume.major_ver = info->major_ver;
96160 +       sbi->volume.minor_ver = info->minor_ver;
96161 +       sbi->volume.flags = info->flags;
96163 +       sbi->volume.ni = ni;
96164 +       inode = NULL;
96166 +       /* Load $MFTMirr to estimate recs_mirr */
96167 +       ref.low = cpu_to_le32(MFT_REC_MIRR);
96168 +       ref.seq = cpu_to_le16(MFT_REC_MIRR);
96169 +       inode = ntfs_iget5(sb, &ref, &NAME_MIRROR);
96170 +       if (IS_ERR(inode)) {
96171 +               err = PTR_ERR(inode);
96172 +               ntfs_err(sb, "Failed to load $MFTMirr.");
96173 +               inode = NULL;
96174 +               goto out;
96175 +       }
96177 +       sbi->mft.recs_mirr =
96178 +               ntfs_up_cluster(sbi, inode->i_size) >> sbi->record_bits;
96180 +       iput(inode);
96182 +       /* Load LogFile to replay */
96183 +       ref.low = cpu_to_le32(MFT_REC_LOG);
96184 +       ref.seq = cpu_to_le16(MFT_REC_LOG);
96185 +       inode = ntfs_iget5(sb, &ref, &NAME_LOGFILE);
96186 +       if (IS_ERR(inode)) {
96187 +               err = PTR_ERR(inode);
96188 +               ntfs_err(sb, "Failed to load \x24LogFile.");
96189 +               inode = NULL;
96190 +               goto out;
96191 +       }
96193 +       ni = ntfs_i(inode);
96195 +       err = ntfs_loadlog_and_replay(ni, sbi);
96196 +       if (err)
96197 +               goto out;
96199 +       iput(inode);
96200 +       inode = NULL;
96202 +       is_ro = sb_rdonly(sbi->sb);
96204 +       if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
96205 +               if (!is_ro) {
96206 +                       ntfs_warn(sb,
96207 +                                 "failed to replay log file. Can't mount rw!");
96208 +                       err = -EINVAL;
96209 +                       goto out;
96210 +               }
96211 +       } else if (sbi->volume.flags & VOLUME_FLAG_DIRTY) {
96212 +               if (!is_ro && !sbi->options.force) {
96213 +                       ntfs_warn(
96214 +                               sb,
96215 +                               "volume is dirty and \"force\" flag is not set!");
96216 +                       err = -EINVAL;
96217 +                       goto out;
96218 +               }
96219 +       }
96221 +       /* Load $MFT */
96222 +       ref.low = cpu_to_le32(MFT_REC_MFT);
96223 +       ref.seq = cpu_to_le16(1);
96225 +       inode = ntfs_iget5(sb, &ref, &NAME_MFT);
96226 +       if (IS_ERR(inode)) {
96227 +               err = PTR_ERR(inode);
96228 +               ntfs_err(sb, "Failed to load $MFT.");
96229 +               inode = NULL;
96230 +               goto out;
96231 +       }
96233 +       ni = ntfs_i(inode);
96235 +       sbi->mft.used = ni->i_valid >> sbi->record_bits;
96236 +       tt = inode->i_size >> sbi->record_bits;
96237 +       sbi->mft.next_free = MFT_REC_USER;
96239 +       err = wnd_init(&sbi->mft.bitmap, sb, tt);
96240 +       if (err)
96241 +               goto out;
96243 +       err = ni_load_all_mi(ni);
96244 +       if (err)
96245 +               goto out;
96247 +       sbi->mft.ni = ni;
96249 +       /* Load $BadClus */
96250 +       ref.low = cpu_to_le32(MFT_REC_BADCLUST);
96251 +       ref.seq = cpu_to_le16(MFT_REC_BADCLUST);
96252 +       inode = ntfs_iget5(sb, &ref, &NAME_BADCLUS);
96253 +       if (IS_ERR(inode)) {
96254 +               err = PTR_ERR(inode);
96255 +               ntfs_err(sb, "Failed to load $BadClus.");
96256 +               inode = NULL;
96257 +               goto out;
96258 +       }
96260 +       ni = ntfs_i(inode);
96262 +       for (i = 0; run_get_entry(&ni->file.run, i, &vcn, &lcn, &len); i++) {
96263 +               if (lcn == SPARSE_LCN)
96264 +                       continue;
96266 +               if (!sbi->bad_clusters)
96267 +                       ntfs_notice(sb, "Volume contains bad blocks");
96269 +               sbi->bad_clusters += len;
96270 +       }
96272 +       iput(inode);
96274 +       /* Load $Bitmap */
96275 +       ref.low = cpu_to_le32(MFT_REC_BITMAP);
96276 +       ref.seq = cpu_to_le16(MFT_REC_BITMAP);
96277 +       inode = ntfs_iget5(sb, &ref, &NAME_BITMAP);
96278 +       if (IS_ERR(inode)) {
96279 +               err = PTR_ERR(inode);
96280 +               ntfs_err(sb, "Failed to load $Bitmap.");
96281 +               inode = NULL;
96282 +               goto out;
96283 +       }
96285 +       ni = ntfs_i(inode);
96287 +#ifndef CONFIG_NTFS3_64BIT_CLUSTER
96288 +       if (inode->i_size >> 32) {
96289 +               err = -EINVAL;
96290 +               goto out;
96291 +       }
96292 +#endif
96294 +       /* Check bitmap boundary */
96295 +       tt = sbi->used.bitmap.nbits;
96296 +       if (inode->i_size < bitmap_size(tt)) {
96297 +               err = -EINVAL;
96298 +               goto out;
96299 +       }
96301 +       /* Not necessary */
96302 +       sbi->used.bitmap.set_tail = true;
96303 +       err = wnd_init(&sbi->used.bitmap, sbi->sb, tt);
96304 +       if (err)
96305 +               goto out;
96307 +       iput(inode);
96309 +       /* Compute the mft zone */
96310 +       err = ntfs_refresh_zone(sbi);
96311 +       if (err)
96312 +               goto out;
96314 +       /* Load $AttrDef */
96315 +       ref.low = cpu_to_le32(MFT_REC_ATTR);
96316 +       ref.seq = cpu_to_le16(MFT_REC_ATTR);
96317 +       inode = ntfs_iget5(sbi->sb, &ref, &NAME_ATTRDEF);
96318 +       if (IS_ERR(inode)) {
96319 +               err = PTR_ERR(inode);
96320 +               ntfs_err(sb, "Failed to load $AttrDef -> %d", err);
96321 +               inode = NULL;
96322 +               goto out;
96323 +       }
96325 +       if (inode->i_size < sizeof(struct ATTR_DEF_ENTRY)) {
96326 +               err = -EINVAL;
96327 +               goto out;
96328 +       }
96329 +       bytes = inode->i_size;
96330 +       sbi->def_table = t = ntfs_malloc(bytes);
96331 +       if (!t) {
96332 +               err = -ENOMEM;
96333 +               goto out;
96334 +       }
96336 +       for (done = idx = 0; done < bytes; done += PAGE_SIZE, idx++) {
96337 +               unsigned long tail = bytes - done;
96338 +               struct page *page = ntfs_map_page(inode->i_mapping, idx);
96340 +               if (IS_ERR(page)) {
96341 +                       err = PTR_ERR(page);
96342 +                       goto out;
96343 +               }
96344 +               memcpy(Add2Ptr(t, done), page_address(page),
96345 +                      min(PAGE_SIZE, tail));
96346 +               ntfs_unmap_page(page);
96348 +               if (!idx && ATTR_STD != t->type) {
96349 +                       err = -EINVAL;
96350 +                       goto out;
96351 +               }
96352 +       }
96354 +       t += 1;
96355 +       sbi->def_entries = 1;
96356 +       done = sizeof(struct ATTR_DEF_ENTRY);
96357 +       sbi->reparse.max_size = MAXIMUM_REPARSE_DATA_BUFFER_SIZE;
96358 +       sbi->ea_max_size = 0x10000; /* default formatter value */
96360 +       while (done + sizeof(struct ATTR_DEF_ENTRY) <= bytes) {
96361 +               u32 t32 = le32_to_cpu(t->type);
96362 +               u64 sz = le64_to_cpu(t->max_sz);
96364 +               if ((t32 & 0xF) || le32_to_cpu(t[-1].type) >= t32)
96365 +                       break;
96367 +               if (t->type == ATTR_REPARSE)
96368 +                       sbi->reparse.max_size = sz;
96369 +               else if (t->type == ATTR_EA)
96370 +                       sbi->ea_max_size = sz;
96372 +               done += sizeof(struct ATTR_DEF_ENTRY);
96373 +               t += 1;
96374 +               sbi->def_entries += 1;
96375 +       }
96376 +       iput(inode);
96378 +       /* Load $UpCase */
96379 +       ref.low = cpu_to_le32(MFT_REC_UPCASE);
96380 +       ref.seq = cpu_to_le16(MFT_REC_UPCASE);
96381 +       inode = ntfs_iget5(sb, &ref, &NAME_UPCASE);
96382 +       if (IS_ERR(inode)) {
96383 +               err = PTR_ERR(inode);
96384 +               ntfs_err(sb, "Failed to load \x24LogFile.");
96385 +               inode = NULL;
96386 +               goto out;
96387 +       }
96389 +       ni = ntfs_i(inode);
96391 +       if (inode->i_size != 0x10000 * sizeof(short)) {
96392 +               err = -EINVAL;
96393 +               goto out;
96394 +       }
96396 +       sbi->upcase = upcase = ntfs_vmalloc(0x10000 * sizeof(short));
96397 +       if (!upcase) {
96398 +               err = -ENOMEM;
96399 +               goto out;
96400 +       }
96402 +       for (idx = 0; idx < (0x10000 * sizeof(short) >> PAGE_SHIFT); idx++) {
96403 +               const __le16 *src;
96404 +               u16 *dst = Add2Ptr(upcase, idx << PAGE_SHIFT);
96405 +               struct page *page = ntfs_map_page(inode->i_mapping, idx);
96407 +               if (IS_ERR(page)) {
96408 +                       err = PTR_ERR(page);
96409 +                       goto out;
96410 +               }
96412 +               src = page_address(page);
96414 +#ifdef __BIG_ENDIAN
96415 +               for (i = 0; i < PAGE_SIZE / sizeof(u16); i++)
96416 +                       *dst++ = le16_to_cpu(*src++);
96417 +#else
96418 +               memcpy(dst, src, PAGE_SIZE);
96419 +#endif
96420 +               ntfs_unmap_page(page);
96421 +       }
96423 +       shared = ntfs_set_shared(upcase, 0x10000 * sizeof(short));
96424 +       if (shared && upcase != shared) {
96425 +               sbi->upcase = shared;
96426 +               ntfs_vfree(upcase);
96427 +       }
96429 +       iput(inode);
96430 +       inode = NULL;
96432 +       if (is_ntfs3(sbi)) {
96433 +               /* Load $Secure */
96434 +               err = ntfs_security_init(sbi);
96435 +               if (err)
96436 +                       goto out;
96438 +               /* Load $Extend */
96439 +               err = ntfs_extend_init(sbi);
96440 +               if (err)
96441 +                       goto load_root;
96443 +               /* Load $Extend\$Reparse */
96444 +               err = ntfs_reparse_init(sbi);
96445 +               if (err)
96446 +                       goto load_root;
96448 +               /* Load $Extend\$ObjId */
96449 +               err = ntfs_objid_init(sbi);
96450 +               if (err)
96451 +                       goto load_root;
96452 +       }
96454 +load_root:
96455 +       /* Load root */
96456 +       ref.low = cpu_to_le32(MFT_REC_ROOT);
96457 +       ref.seq = cpu_to_le16(MFT_REC_ROOT);
96458 +       inode = ntfs_iget5(sb, &ref, &NAME_ROOT);
96459 +       if (IS_ERR(inode)) {
96460 +               err = PTR_ERR(inode);
96461 +               ntfs_err(sb, "Failed to load root.");
96462 +               inode = NULL;
96463 +               goto out;
96464 +       }
96466 +       ni = ntfs_i(inode);
96468 +       sb->s_root = d_make_root(inode);
96470 +       if (!sb->s_root) {
96471 +               err = -EINVAL;
96472 +               goto out;
96473 +       }
96475 +       return 0;
96477 +out:
96478 +       iput(inode);
96480 +       if (sb->s_root) {
96481 +               d_drop(sb->s_root);
96482 +               sb->s_root = NULL;
96483 +       }
96485 +       put_ntfs(sbi);
96487 +       sb->s_fs_info = NULL;
96488 +       return err;
96491 +void ntfs_unmap_meta(struct super_block *sb, CLST lcn, CLST len)
96493 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
96494 +       struct block_device *bdev = sb->s_bdev;
96495 +       sector_t devblock = (u64)lcn * sbi->blocks_per_cluster;
96496 +       unsigned long blocks = (u64)len * sbi->blocks_per_cluster;
96497 +       unsigned long cnt = 0;
96498 +       unsigned long limit = global_zone_page_state(NR_FREE_PAGES)
96499 +                             << (PAGE_SHIFT - sb->s_blocksize_bits);
96501 +       if (limit >= 0x2000)
96502 +               limit -= 0x1000;
96503 +       else if (limit < 32)
96504 +               limit = 32;
96505 +       else
96506 +               limit >>= 1;
96508 +       while (blocks--) {
96509 +               clean_bdev_aliases(bdev, devblock++, 1);
96510 +               if (cnt++ >= limit) {
96511 +                       sync_blockdev(bdev);
96512 +                       cnt = 0;
96513 +               }
96514 +       }
96518 + * ntfs_discard
96519 + *
96520 + * issue a discard request (trim for SSD)
96521 + */
96522 +int ntfs_discard(struct ntfs_sb_info *sbi, CLST lcn, CLST len)
96524 +       int err;
96525 +       u64 lbo, bytes, start, end;
96526 +       struct super_block *sb;
96528 +       if (sbi->used.next_free_lcn == lcn + len)
96529 +               sbi->used.next_free_lcn = lcn;
96531 +       if (sbi->flags & NTFS_FLAGS_NODISCARD)
96532 +               return -EOPNOTSUPP;
96534 +       if (!sbi->options.discard)
96535 +               return -EOPNOTSUPP;
96537 +       lbo = (u64)lcn << sbi->cluster_bits;
96538 +       bytes = (u64)len << sbi->cluster_bits;
96540 +       /* Align up 'start' on discard_granularity */
96541 +       start = (lbo + sbi->discard_granularity - 1) &
96542 +               sbi->discard_granularity_mask_inv;
96543 +       /* Align down 'end' on discard_granularity */
96544 +       end = (lbo + bytes) & sbi->discard_granularity_mask_inv;
96546 +       sb = sbi->sb;
96547 +       if (start >= end)
96548 +               return 0;
96550 +       err = blkdev_issue_discard(sb->s_bdev, start >> 9, (end - start) >> 9,
96551 +                                  GFP_NOFS, 0);
96553 +       if (err == -EOPNOTSUPP)
96554 +               sbi->flags |= NTFS_FLAGS_NODISCARD;
96556 +       return err;
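/*
 * Only whole discard-granularity units may be trimmed, so ntfs_discard()
 * above rounds the start up and the end down with the precomputed
 * inverse mask, and issues nothing for a collapsed range. The same
 * arithmetic standalone (a 64 KiB granularity is assumed for the demo):
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t granularity = 0x10000; /* 64 KiB, power of two */
	uint64_t mask_inv = ~(granularity - 1);
	uint64_t lbo = 0x12345, bytes = 0x30000;
	uint64_t start = (lbo + granularity - 1) & mask_inv; /* align up */
	uint64_t end = (lbo + bytes) & mask_inv;             /* align down */

	if (start < end) /* here: sectors 256..511 */
		printf("discard sectors %llu..%llu\n",
		       (unsigned long long)(start >> 9),
		       (unsigned long long)((end >> 9) - 1));
	return 0;
}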
96559 +static struct dentry *ntfs_mount(struct file_system_type *fs_type, int flags,
96560 +                                const char *dev_name, void *data)
96562 +       return mount_bdev(fs_type, flags, dev_name, data, ntfs_fill_super);
96565 +static struct file_system_type ntfs_fs_type = {
96566 +       .owner = THIS_MODULE,
96567 +       .name = "ntfs3",
96568 +       .mount = ntfs_mount,
96569 +       .kill_sb = kill_block_super,
96570 +       .fs_flags = FS_REQUIRES_DEV,
96573 +static int __init init_ntfs_fs(void)
96575 +       int err;
96577 +       pr_notice("ntfs3: Index binary search\n");
96578 +       pr_notice("ntfs3: Hot fix free clusters\n");
96579 +       pr_notice("ntfs3: Max link count %u\n", NTFS_LINK_MAX);
96581 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
96582 +       pr_notice("ntfs3: Enabled Linux POSIX ACLs support\n");
96583 +#endif
96584 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
96585 +       pr_notice("ntfs3: Activated 64 bits per cluster\n");
96586 +#else
96587 +       pr_notice("ntfs3: Activated 32 bits per cluster\n");
96588 +#endif
96589 +#ifdef CONFIG_NTFS3_LZX_XPRESS
96590 +       pr_notice("ntfs3: Read-only lzx/xpress compression included\n");
96591 +#endif
96593 +       err = ntfs3_init_bitmap();
96594 +       if (err)
96595 +               return err;
96597 +       ntfs_inode_cachep = kmem_cache_create(
96598 +               "ntfs_inode_cache", sizeof(struct ntfs_inode), 0,
96599 +               (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT),
96600 +               init_once);
96601 +       if (!ntfs_inode_cachep) {
96602 +               err = -ENOMEM;
96603 +               goto out1;
96604 +       }
96606 +       err = register_filesystem(&ntfs_fs_type);
96607 +       if (err)
96608 +               goto out;
96610 +       return 0;
96611 +out:
96612 +       kmem_cache_destroy(ntfs_inode_cachep);
96613 +out1:
96614 +       ntfs3_exit_bitmap();
96615 +       return err;
96618 +static void __exit exit_ntfs_fs(void)
96620 +       if (ntfs_inode_cachep) {
96621 +               rcu_barrier();
96622 +               kmem_cache_destroy(ntfs_inode_cachep);
96623 +       }
96625 +       unregister_filesystem(&ntfs_fs_type);
96626 +       ntfs3_exit_bitmap();
96629 +MODULE_LICENSE("GPL");
96630 +MODULE_DESCRIPTION("ntfs3 read/write filesystem");
96631 +MODULE_INFO(behaviour, "Index binary search");
96632 +MODULE_INFO(behaviour, "Hot fix free clusters");
96633 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
96634 +MODULE_INFO(behaviour, "Enabled Linux POSIX ACLs support");
96635 +#endif
96636 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
96637 +MODULE_INFO(cluster, "Activated 64 bits per cluster");
96638 +#else
96639 +MODULE_INFO(cluster, "Activated 32 bits per cluster");
96640 +#endif
96641 +#ifdef CONFIG_NTFS3_LZX_XPRESS
96642 +MODULE_INFO(compression, "Read-only lzx/xpress compression included");
96643 +#endif
96645 +MODULE_AUTHOR("Konstantin Komarov");
96646 +MODULE_ALIAS_FS("ntfs3");
96648 +module_init(init_ntfs_fs);
96649 +module_exit(exit_ntfs_fs);
96650 diff --git a/fs/ntfs3/upcase.c b/fs/ntfs3/upcase.c
96651 new file mode 100644
96652 index 000000000000..9617382aca64
96653 --- /dev/null
96654 +++ b/fs/ntfs3/upcase.c
96655 @@ -0,0 +1,105 @@
96656 +// SPDX-License-Identifier: GPL-2.0
96658 + *
96659 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
96660 + *
96661 + */
96662 +#include <linux/blkdev.h>
96663 +#include <linux/buffer_head.h>
96664 +#include <linux/module.h>
96665 +#include <linux/nls.h>
96667 +#include "debug.h"
96668 +#include "ntfs.h"
96669 +#include "ntfs_fs.h"
96671 +static inline u16 upcase_unicode_char(const u16 *upcase, u16 chr)
96673 +       if (chr < 'a')
96674 +               return chr;
96676 +       if (chr <= 'z')
96677 +               return chr - ('a' - 'A');
96679 +       return upcase[chr];
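/*
 * upcase_unicode_char() above short-circuits ASCII (anything below 'a'
 * is already upper case; 'a'..'z' is plain arithmetic) and only indexes
 * the 64K table for everything else. A standalone check with a stand-in
 * table (a real on-disk $UpCase table carries such mappings):
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t table[0x10000]; /* stand-in for the $UpCase table */

static uint16_t upcase_char(const uint16_t *upcase, uint16_t chr)
{
	if (chr < 'a')
		return chr;
	if (chr <= 'z')
		return chr - ('a' - 'A');
	return upcase[chr];
}

int main(void)
{
	uint32_t i;

	for (i = 0; i < 0x10000; i++)
		table[i] = (uint16_t)i; /* identity ... */
	table[0x00e4] = 0x00c4;         /* ... plus one sample: a-umlaut */

	printf("%c %c %04x\n", upcase_char(table, 'q'),
	       upcase_char(table, 'Q'), upcase_char(table, 0x00e4));
	/* prints "Q Q 00c4" */
	return 0;
}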
96683 + * Thanks Kari Argillander <kari.argillander@gmail.com> for the idea and the 'bothcase' implementation.
96684 + *
96685 + * The straightforward way to compare names:
96686 + * - compare case insensitively;
96687 + * - if the names are equal and 'bothcase' is set, then
96688 + * - compare case sensitively.
96689 + * The straightforward code scans the input names twice in the worst case.
96690 + * The optimized code below scans the input names only once.
96691 + */
96692 +int ntfs_cmp_names(const __le16 *s1, size_t l1, const __le16 *s2, size_t l2,
96693 +                  const u16 *upcase, bool bothcase)
96695 +       int diff1 = 0;
96696 +       int diff2;
96697 +       size_t len = min(l1, l2);
96699 +       if (!bothcase && upcase)
96700 +               goto case_insensitive;
96702 +       for (; len; s1++, s2++, len--) {
96703 +               diff1 = le16_to_cpu(*s1) - le16_to_cpu(*s2);
96704 +               if (diff1) {
96705 +                       if (bothcase && upcase)
96706 +                               goto case_insensitive;
96708 +                       return diff1;
96709 +               }
96710 +       }
96711 +       return l1 - l2;
96713 +case_insensitive:
96714 +       for (; len; s1++, s2++, len--) {
96715 +               diff2 = upcase_unicode_char(upcase, le16_to_cpu(*s1)) -
96716 +                       upcase_unicode_char(upcase, le16_to_cpu(*s2));
96717 +               if (diff2)
96718 +                       return diff2;
96719 +       }
96721 +       diff2 = l1 - l2;
96722 +       return diff2 ? diff2 : diff1;
96725 +int ntfs_cmp_names_cpu(const struct cpu_str *uni1, const struct le_str *uni2,
96726 +                      const u16 *upcase, bool bothcase)
96728 +       const u16 *s1 = uni1->name;
96729 +       const __le16 *s2 = uni2->name;
96730 +       size_t l1 = uni1->len;
96731 +       size_t l2 = uni2->len;
96732 +       size_t len = min(l1, l2);
96733 +       int diff1 = 0;
96734 +       int diff2;
96736 +       if (!bothcase && upcase)
96737 +               goto case_insensitive;
96739 +       for (; len; s1++, s2++, len--) {
96740 +               diff1 = *s1 - le16_to_cpu(*s2);
96741 +               if (diff1) {
96742 +                       if (bothcase && upcase)
96743 +                               goto case_insensitive;
96745 +                       return diff1;
96746 +               }
96747 +       }
96748 +       return l1 - l2;
96750 +case_insensitive:
96751 +       for (; len; s1++, s2++, len--) {
96752 +               diff2 = upcase_unicode_char(upcase, *s1) -
96753 +                       upcase_unicode_char(upcase, le16_to_cpu(*s2));
96754 +               if (diff2)
96755 +                       return diff2;
96756 +       }
96758 +       diff2 = l1 - l2;
96759 +       return diff2 ? diff2 : diff1;
96761 diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
96762 new file mode 100644
96763 index 000000000000..759df507c92c
96764 --- /dev/null
96765 +++ b/fs/ntfs3/xattr.c
96766 @@ -0,0 +1,1046 @@
96767 +// SPDX-License-Identifier: GPL-2.0
96769 + *
96770 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
96771 + *
96772 + */
96774 +#include <linux/blkdev.h>
96775 +#include <linux/buffer_head.h>
96776 +#include <linux/fs.h>
96777 +#include <linux/nls.h>
96778 +#include <linux/posix_acl.h>
96779 +#include <linux/posix_acl_xattr.h>
96780 +#include <linux/xattr.h>
96782 +#include "debug.h"
96783 +#include "ntfs.h"
96784 +#include "ntfs_fs.h"
96786 +// clang-format off
96787 +#define SYSTEM_DOS_ATTRIB    "system.dos_attrib"
96788 +#define SYSTEM_NTFS_ATTRIB   "system.ntfs_attrib"
96789 +#define SYSTEM_NTFS_SECURITY "system.ntfs_security"
96790 +// clang-format on
96792 +static inline size_t unpacked_ea_size(const struct EA_FULL *ea)
96794 +       return ea->size ? le32_to_cpu(ea->size)
96795 +                       : DwordAlign(struct_size(
96796 +                                 ea, name,
96797 +                                 1 + ea->name_len + le16_to_cpu(ea->elength)));
96800 +static inline size_t packed_ea_size(const struct EA_FULL *ea)
96802 +       return struct_size(ea, name,
96803 +                          1 + ea->name_len + le16_to_cpu(ea->elength)) -
96804 +              offsetof(struct EA_FULL, flags);
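/*
 * The packed and unpacked sizes above differ because on-disk EA records
 * are 4-byte aligned and the packed form drops the leading run-time
 * fields. DwordAlign()/QuadAlign() are defined elsewhere in this patch;
 * the usual mask-based rounding is assumed in this sketch:
 */
#include <stdio.h>

#define DwordAlign(n) (((n) + 3u) & ~3u) /* round up to a multiple of 4 */
#define QuadAlign(n) (((n) + 7u) & ~7u)  /* round up to a multiple of 8 */

int main(void)
{
	printf("%u %u\n", DwordAlign(13), QuadAlign(13)); /* 16 16 */
	printf("%u %u\n", DwordAlign(16), QuadAlign(17)); /* 16 24 */
	return 0;
}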
96808 + * find_ea
96809 + *
96810 + * Assumes there is at least one xattr in the list.
96811 + */
96812 +static inline bool find_ea(const struct EA_FULL *ea_all, u32 bytes,
96813 +                          const char *name, u8 name_len, u32 *off)
96815 +       *off = 0;
96817 +       if (!ea_all || !bytes)
96818 +               return false;
96820 +       for (;;) {
96821 +               const struct EA_FULL *ea = Add2Ptr(ea_all, *off);
96822 +               u32 next_off = *off + unpacked_ea_size(ea);
96824 +               if (next_off > bytes)
96825 +                       return false;
96827 +               if (ea->name_len == name_len &&
96828 +                   !memcmp(ea->name, name, name_len))
96829 +                       return true;
96831 +               *off = next_off;
96832 +               if (next_off >= bytes)
96833 +                       return false;
96834 +       }
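/*
 * find_ea() above walks a packed array of variable-length records,
 * advancing by each record's own unpacked size and bailing out when the
 * next offset would step past 'bytes'. The same walk over a simplified
 * record layout (the toy struct below is invented for the demo; it is
 * not the on-disk EA_FULL format):
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct rec {
	uint32_t size; /* total bytes of this record, including header */
	uint8_t name_len;
	char name[];
};

static int find_rec(const void *buf, uint32_t bytes, const char *name,
		    uint8_t name_len, uint32_t *off)
{
	*off = 0;
	while (*off + sizeof(struct rec) <= bytes) {
		const struct rec *r =
			(const void *)((const char *)buf + *off);
		uint32_t next = *off + r->size;

		if (r->size < sizeof(struct rec) || next > bytes)
			return 0; /* truncated or corrupt: stop */
		if (r->name_len == name_len &&
		    !memcmp(r->name, name, name_len))
			return 1; /* found; *off is the record offset */
		*off = next;
	}
	return 0;
}

int main(void)
{
	uint32_t storage[16] = { 0 }; /* 64 aligned bytes */
	unsigned char *buf = (unsigned char *)storage;
	struct rec *r = (struct rec *)buf;
	uint32_t off;
	int found;

	/* two records packed back to back, 16 bytes each */
	r->size = 16;
	r->name_len = 6;
	memcpy(r->name, "user.a", 6);
	r = (struct rec *)(buf + 16);
	r->size = 16;
	r->name_len = 7;
	memcpy(r->name, "user.bb", 7);

	found = find_rec(buf, 32, "user.bb", 7, &off);
	printf("found=%d off=%u\n", found, off); /* found=1 off=16 */
	return 0;
}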
96838 + * ntfs_read_ea
96839 + *
96840 + * Reads all extended attributes.
96841 + * ea - newly allocated memory
96842 + * info - pointer into resident data
96843 + */
96844 +static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
96845 +                       size_t add_bytes, const struct EA_INFO **info)
96847 +       int err;
96848 +       struct ATTR_LIST_ENTRY *le = NULL;
96849 +       struct ATTRIB *attr_info, *attr_ea;
96850 +       void *ea_p;
96851 +       u32 size;
96853 +       static_assert(le32_to_cpu(ATTR_EA_INFO) < le32_to_cpu(ATTR_EA));
96855 +       *ea = NULL;
96856 +       *info = NULL;
96858 +       attr_info =
96859 +               ni_find_attr(ni, NULL, &le, ATTR_EA_INFO, NULL, 0, NULL, NULL);
96860 +       attr_ea =
96861 +               ni_find_attr(ni, attr_info, &le, ATTR_EA, NULL, 0, NULL, NULL);
96863 +       if (!attr_ea || !attr_info)
96864 +               return 0;
96866 +       *info = resident_data_ex(attr_info, sizeof(struct EA_INFO));
96867 +       if (!*info)
96868 +               return -EINVAL;
96870 +       /* Check Ea limit */
96871 +       size = le32_to_cpu((*info)->size);
96872 +       if (size > ni->mi.sbi->ea_max_size)
96873 +               return -EFBIG;
96875 +       if (attr_size(attr_ea) > ni->mi.sbi->ea_max_size)
96876 +               return -EFBIG;
96878 +       /* Allocate memory for packed Ea */
96879 +       ea_p = ntfs_malloc(size + add_bytes);
96880 +       if (!ea_p)
96881 +               return -ENOMEM;
96883 +       if (attr_ea->non_res) {
96884 +               struct runs_tree run;
96886 +               run_init(&run);
96888 +               err = attr_load_runs(attr_ea, ni, &run, NULL);
96889 +               if (!err)
96890 +                       err = ntfs_read_run_nb(ni->mi.sbi, &run, 0, ea_p, size,
96891 +                                              NULL);
96892 +               run_close(&run);
96894 +               if (err)
96895 +                       goto out;
96896 +       } else {
96897 +               void *p = resident_data_ex(attr_ea, size);
96899 +               if (!p) {
96900 +                       err = -EINVAL;
96901 +                       goto out;
96902 +               }
96903 +               memcpy(ea_p, p, size);
96904 +       }
96906 +       memset(Add2Ptr(ea_p, size), 0, add_bytes);
96907 +       *ea = ea_p;
96908 +       return 0;
96910 +out:
96911 +       ntfs_free(ea_p);
96912 +       *ea = NULL;
96913 +       return err;
96917 + * ntfs_list_ea
96918 + *
96919 + * Copy a list of xattr names into the buffer
96920 + * provided, or compute the buffer size required
96921 + *
96922 + * Returns a negative error number on failure, or the number of bytes
96923 + * used / required on success.
96924 + */
96925 +static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
96926 +                           size_t bytes_per_buffer)
96928 +       const struct EA_INFO *info;
96929 +       struct EA_FULL *ea_all = NULL;
96930 +       const struct EA_FULL *ea;
96931 +       u32 off, size;
96932 +       int err;
96933 +       size_t ret;
96935 +       err = ntfs_read_ea(ni, &ea_all, 0, &info);
96936 +       if (err)
96937 +               return err;
96939 +       if (!info || !ea_all)
96940 +               return 0;
96942 +       size = le32_to_cpu(info->size);
96944 +       /* Enumerate all xattrs */
96945 +       for (ret = 0, off = 0; off < size; off += unpacked_ea_size(ea)) {
96946 +               ea = Add2Ptr(ea_all, off);
96948 +               if (buffer) {
96949 +                       if (ret + ea->name_len + 1 > bytes_per_buffer) {
96950 +                               err = -ERANGE;
96951 +                               goto out;
96952 +                       }
96954 +                       memcpy(buffer + ret, ea->name, ea->name_len);
96955 +                       buffer[ret + ea->name_len] = 0;
96956 +               }
96958 +               ret += ea->name_len + 1;
96959 +       }
96961 +out:
96962 +       ntfs_free(ea_all);
96963 +       return err ? err : ret;
96966 +static int ntfs_get_ea(struct inode *inode, const char *name, size_t name_len,
96967 +                      void *buffer, size_t size, size_t *required)
96969 +       struct ntfs_inode *ni = ntfs_i(inode);
96970 +       const struct EA_INFO *info;
96971 +       struct EA_FULL *ea_all = NULL;
96972 +       const struct EA_FULL *ea;
96973 +       u32 off, len;
96974 +       int err;
96976 +       if (!(ni->ni_flags & NI_FLAG_EA))
96977 +               return -ENODATA;
96979 +       if (!required)
96980 +               ni_lock(ni);
96982 +       len = 0;
96984 +       if (name_len > 255) {
96985 +               err = -ENAMETOOLONG;
96986 +               goto out;
96987 +       }
96989 +       err = ntfs_read_ea(ni, &ea_all, 0, &info);
96990 +       if (err)
96991 +               goto out;
96993 +       if (!info)
96994 +               goto out;
96996 +       /* Find the requested xattr */
96997 +       if (!find_ea(ea_all, le32_to_cpu(info->size), name, name_len, &off)) {
96998 +               err = -ENODATA;
96999 +               goto out;
97000 +       }
97001 +       ea = Add2Ptr(ea_all, off);
97003 +       len = le16_to_cpu(ea->elength);
97004 +       if (!buffer) {
97005 +               err = 0;
97006 +               goto out;
97007 +       }
97009 +       if (len > size) {
97010 +               err = -ERANGE;
97011 +               if (required)
97012 +                       *required = len;
97013 +               goto out;
97014 +       }
97016 +       memcpy(buffer, ea->name + ea->name_len + 1, len);
97017 +       err = 0;
97019 +out:
97020 +       ntfs_free(ea_all);
97021 +       if (!required)
97022 +               ni_unlock(ni);
97024 +       return err ? err : len;
97027 +static noinline int ntfs_set_ea(struct inode *inode, const char *name,
97028 +                               size_t name_len, const void *value,
97029 +                               size_t val_size, int flags, int locked)
97031 +       struct ntfs_inode *ni = ntfs_i(inode);
97032 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
97033 +       int err;
97034 +       struct EA_INFO ea_info;
97035 +       const struct EA_INFO *info;
97036 +       struct EA_FULL *new_ea;
97037 +       struct EA_FULL *ea_all = NULL;
97038 +       size_t add, new_pack;
97039 +       u32 off, size;
97040 +       __le16 size_pack;
97041 +       struct ATTRIB *attr;
97042 +       struct ATTR_LIST_ENTRY *le;
97043 +       struct mft_inode *mi;
97044 +       struct runs_tree ea_run;
97045 +       u64 new_sz;
97046 +       void *p;
97048 +       if (!locked)
97049 +               ni_lock(ni);
97051 +       run_init(&ea_run);
97053 +       if (name_len > 255) {
97054 +               err = -ENAMETOOLONG;
97055 +               goto out;
97056 +       }
97058 +       add = DwordAlign(struct_size(ea_all, name, 1 + name_len + val_size));
97060 +       err = ntfs_read_ea(ni, &ea_all, add, &info);
97061 +       if (err)
97062 +               goto out;
97064 +       if (!info) {
97065 +               memset(&ea_info, 0, sizeof(ea_info));
97066 +               size = 0;
97067 +               size_pack = 0;
97068 +       } else {
97069 +               memcpy(&ea_info, info, sizeof(ea_info));
97070 +               size = le32_to_cpu(ea_info.size);
97071 +               size_pack = ea_info.size_pack;
97072 +       }
97074 +       if (info && find_ea(ea_all, size, name, name_len, &off)) {
97075 +               struct EA_FULL *ea;
97076 +               size_t ea_sz;
97078 +               if (flags & XATTR_CREATE) {
97079 +                       err = -EEXIST;
97080 +                       goto out;
97081 +               }
97083 +               /* Remove current xattr */
97084 +               ea = Add2Ptr(ea_all, off);
97085 +               if (ea->flags & FILE_NEED_EA)
97086 +                       le16_add_cpu(&ea_info.count, -1);
97088 +               ea_sz = unpacked_ea_size(ea);
97090 +               le16_add_cpu(&ea_info.size_pack, 0 - packed_ea_size(ea));
97092 +               memmove(ea, Add2Ptr(ea, ea_sz), size - off - ea_sz);
97094 +               size -= ea_sz;
97095 +               memset(Add2Ptr(ea_all, size), 0, ea_sz);
97097 +               ea_info.size = cpu_to_le32(size);
97099 +               if ((flags & XATTR_REPLACE) && !val_size)
97100 +                       goto update_ea;
97101 +       } else {
97102 +               if (flags & XATTR_REPLACE) {
97103 +                       err = -ENODATA;
97104 +                       goto out;
97105 +               }
97107 +               if (!ea_all) {
97108 +                       ea_all = ntfs_zalloc(add);
97109 +                       if (!ea_all) {
97110 +                               err = -ENOMEM;
97111 +                               goto out;
97112 +                       }
97113 +               }
97114 +       }
97116 +       /* Append the new xattr */
97117 +       new_ea = Add2Ptr(ea_all, size);
97118 +       new_ea->size = cpu_to_le32(add);
97119 +       new_ea->flags = 0;
97120 +       new_ea->name_len = name_len;
97121 +       new_ea->elength = cpu_to_le16(val_size);
97122 +       memcpy(new_ea->name, name, name_len);
97123 +       new_ea->name[name_len] = 0;
97124 +       memcpy(new_ea->name + name_len + 1, value, val_size);
97125 +       new_pack = le16_to_cpu(ea_info.size_pack) + packed_ea_size(new_ea);
97127 +       /* should fit into 16 bits */
97128 +       if (new_pack > 0xffff) {
97129 +               err = -EFBIG; // -EINVAL?
97130 +               goto out;
97131 +       }
97132 +       ea_info.size_pack = cpu_to_le16(new_pack);
97134 +       /* new size of ATTR_EA */
97135 +       size += add;
97136 +       if (size > sbi->ea_max_size) {
97137 +               err = -EFBIG; // -EINVAL?
97138 +               goto out;
97139 +       }
97140 +       ea_info.size = cpu_to_le32(size);
97142 +update_ea:
97144 +       if (!info) {
97145 +               /* Create xattr */
97146 +               if (!size) {
97147 +                       err = 0;
97148 +                       goto out;
97149 +               }
97151 +               err = ni_insert_resident(ni, sizeof(struct EA_INFO),
97152 +                                        ATTR_EA_INFO, NULL, 0, NULL, NULL);
97153 +               if (err)
97154 +                       goto out;
97156 +               err = ni_insert_resident(ni, 0, ATTR_EA, NULL, 0, NULL, NULL);
97157 +               if (err)
97158 +                       goto out;
97159 +       }
97161 +       new_sz = size;
97162 +       err = attr_set_size(ni, ATTR_EA, NULL, 0, &ea_run, new_sz, &new_sz,
97163 +                           false, NULL);
97164 +       if (err)
97165 +               goto out;
97167 +       le = NULL;
97168 +       attr = ni_find_attr(ni, NULL, &le, ATTR_EA_INFO, NULL, 0, NULL, &mi);
97169 +       if (!attr) {
97170 +               err = -EINVAL;
97171 +               goto out;
97172 +       }
97174 +       if (!size) {
97175 +               /* delete xattr, ATTR_EA_INFO */
97176 +               err = ni_remove_attr_le(ni, attr, le);
97177 +               if (err)
97178 +                       goto out;
97179 +       } else {
97180 +               p = resident_data_ex(attr, sizeof(struct EA_INFO));
97181 +               if (!p) {
97182 +                       err = -EINVAL;
97183 +                       goto out;
97184 +               }
97185 +               memcpy(p, &ea_info, sizeof(struct EA_INFO));
97186 +               mi->dirty = true;
97187 +       }
97189 +       le = NULL;
97190 +       attr = ni_find_attr(ni, NULL, &le, ATTR_EA, NULL, 0, NULL, &mi);
97191 +       if (!attr) {
97192 +               err = -EINVAL;
97193 +               goto out;
97194 +       }
97196 +       if (!size) {
97197 +               /* delete xattr, ATTR_EA */
97198 +               err = ni_remove_attr_le(ni, attr, le);
97199 +               if (err)
97200 +                       goto out;
97201 +       } else if (attr->non_res) {
97202 +               err = ntfs_sb_write_run(sbi, &ea_run, 0, ea_all, size);
97203 +               if (err)
97204 +                       goto out;
97205 +       } else {
97206 +               p = resident_data_ex(attr, size);
97207 +               if (!p) {
97208 +                       err = -EINVAL;
97209 +                       goto out;
97210 +               }
97211 +               memcpy(p, ea_all, size);
97212 +               mi->dirty = true;
97213 +       }
97215 +       /* Check whether we deleted the last xattr */
97216 +       if (size)
97217 +               ni->ni_flags |= NI_FLAG_EA;
97218 +       else
97219 +               ni->ni_flags &= ~NI_FLAG_EA;
97221 +       if (ea_info.size_pack != size_pack)
97222 +               ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
97223 +       mark_inode_dirty(&ni->vfs_inode);
97225 +out:
97226 +       if (!locked)
97227 +               ni_unlock(ni);
97229 +       run_close(&ea_run);
97230 +       ntfs_free(ea_all);
97232 +       return err;
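The XATTR_CREATE / XATTR_REPLACE handling above follows the setxattr(2)
flag contract, with one ntfs3 twist visible in the code: XATTR_REPLACE
with a zero-length value removes the attribute entirely. A minimal
userspace sketch (path and attribute name purely illustrative):

#include <errno.h>
#include <string.h>
#include <sys/xattr.h>

int main(void)
{
	const char *val = "demo";

	/* XATTR_CREATE fails with EEXIST if "user.comment" already exists */
	if (setxattr("/mnt/file", "user.comment", val, strlen(val),
		     XATTR_CREATE) == -1 && errno == EEXIST)
		/* XATTR_REPLACE fails with ENODATA if it does not exist */
		return setxattr("/mnt/file", "user.comment", val,
				strlen(val), XATTR_REPLACE);

	return 0;
}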
97235 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
97236 +static inline void ntfs_posix_acl_release(struct posix_acl *acl)
97238 +       if (acl && refcount_dec_and_test(&acl->a_refcount))
97239 +               kfree(acl);
97242 +static struct posix_acl *ntfs_get_acl_ex(struct user_namespace *mnt_userns,
97243 +                                        struct inode *inode, int type,
97244 +                                        int locked)
97246 +       struct ntfs_inode *ni = ntfs_i(inode);
97247 +       const char *name;
97248 +       size_t name_len;
97249 +       struct posix_acl *acl;
97250 +       size_t req;
97251 +       int err;
97252 +       void *buf;
97254 +       /* allocate PATH_MAX bytes */
97255 +       buf = __getname();
97256 +       if (!buf)
97257 +               return ERR_PTR(-ENOMEM);
97259 +       /* Possible values of 'type' were already checked above */
97260 +       if (type == ACL_TYPE_ACCESS) {
97261 +               name = XATTR_NAME_POSIX_ACL_ACCESS;
97262 +               name_len = sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1;
97263 +       } else {
97264 +               name = XATTR_NAME_POSIX_ACL_DEFAULT;
97265 +               name_len = sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1;
97266 +       }
97268 +       if (!locked)
97269 +               ni_lock(ni);
97271 +       err = ntfs_get_ea(inode, name, name_len, buf, PATH_MAX, &req);
97273 +       if (!locked)
97274 +               ni_unlock(ni);
97276 +       /* Translate extended attribute to acl */
97277 +       if (err > 0) {
97278 +               acl = posix_acl_from_xattr(mnt_userns, buf, err);
97279 +               if (!IS_ERR(acl))
97280 +                       set_cached_acl(inode, type, acl);
97281 +       } else {
97282 +               acl = err == -ENODATA ? NULL : ERR_PTR(err);
97283 +       }
97285 +       __putname(buf);
97287 +       return acl;
97291 + * ntfs_get_acl
97292 + *
97293 + * inode_operations::get_acl
97294 + */
97295 +struct posix_acl *ntfs_get_acl(struct inode *inode, int type)
97297 +       /* TODO: init_user_ns? */
97298 +       return ntfs_get_acl_ex(&init_user_ns, inode, type, 0);
97301 +static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
97302 +                                   struct inode *inode, struct posix_acl *acl,
97303 +                                   int type, int locked)
97305 +       const char *name;
97306 +       size_t size, name_len;
97307 +       void *value = NULL;
97308 +       int err = 0;
97310 +       if (S_ISLNK(inode->i_mode))
97311 +               return -EOPNOTSUPP;
97313 +       switch (type) {
97314 +       case ACL_TYPE_ACCESS:
97315 +               if (acl) {
97316 +                       umode_t mode = inode->i_mode;
97318 +                       err = posix_acl_equiv_mode(acl, &mode);
97319 +                       if (err < 0)
97320 +                               return err;
97322 +                       if (inode->i_mode != mode) {
97323 +                               inode->i_mode = mode;
97324 +                               mark_inode_dirty(inode);
97325 +                       }
97327 +                       if (!err) {
97328 +                               /*
97329 +                                * acl can be exactly represented in the
97330 +                                * traditional file mode permission bits
97331 +                                */
97332 +                               acl = NULL;
97333 +                               goto out;
97334 +                       }
97335 +               }
97336 +               name = XATTR_NAME_POSIX_ACL_ACCESS;
97337 +               name_len = sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1;
97338 +               break;
97340 +       case ACL_TYPE_DEFAULT:
97341 +               if (!S_ISDIR(inode->i_mode))
97342 +                       return acl ? -EACCES : 0;
97343 +               name = XATTR_NAME_POSIX_ACL_DEFAULT;
97344 +               name_len = sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1;
97345 +               break;
97347 +       default:
97348 +               return -EINVAL;
97349 +       }
97351 +       if (!acl)
97352 +               goto out;
97354 +       size = posix_acl_xattr_size(acl->a_count);
97355 +       value = ntfs_malloc(size);
97356 +       if (!value)
97357 +               return -ENOMEM;
97359 +       err = posix_acl_to_xattr(mnt_userns, acl, value, size);
97360 +       if (err)
97361 +               goto out;
97363 +       err = ntfs_set_ea(inode, name, name_len, value, size, 0, locked);
97364 +       if (err)
97365 +               goto out;
97367 +       inode->i_flags &= ~S_NOSEC;
97369 +out:
97370 +       if (!err)
97371 +               set_cached_acl(inode, type, acl);
97373 +       kfree(value);
97375 +       return err;
97379 + * ntfs_set_acl
97380 + *
97381 + * inode_operations::set_acl
97382 + */
97383 +int ntfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
97384 +                struct posix_acl *acl, int type)
97386 +       return ntfs_set_acl_ex(mnt_userns, inode, acl, type, 0);
97389 +static int ntfs_xattr_get_acl(struct user_namespace *mnt_userns,
97390 +                             struct inode *inode, int type, void *buffer,
97391 +                             size_t size)
97393 +       struct posix_acl *acl;
97394 +       int err;
97396 +       if (!(inode->i_sb->s_flags & SB_POSIXACL))
97397 +               return -EOPNOTSUPP;
97399 +       acl = ntfs_get_acl(inode, type);
97400 +       if (IS_ERR(acl))
97401 +               return PTR_ERR(acl);
97403 +       if (!acl)
97404 +               return -ENODATA;
97406 +       err = posix_acl_to_xattr(mnt_userns, acl, buffer, size);
97407 +       ntfs_posix_acl_release(acl);
97409 +       return err;
97412 +static int ntfs_xattr_set_acl(struct user_namespace *mnt_userns,
97413 +                             struct inode *inode, int type, const void *value,
97414 +                             size_t size)
97416 +       struct posix_acl *acl;
97417 +       int err;
97419 +       if (!(inode->i_sb->s_flags & SB_POSIXACL))
97420 +               return -EOPNOTSUPP;
97422 +       if (!inode_owner_or_capable(mnt_userns, inode))
97423 +               return -EPERM;
97425 +       if (!value)
97426 +               return 0;
97428 +       acl = posix_acl_from_xattr(mnt_userns, value, size);
97429 +       if (IS_ERR(acl))
97430 +               return PTR_ERR(acl);
97432 +       if (acl) {
97433 +               err = posix_acl_valid(mnt_userns, acl);
97434 +               if (err)
97435 +                       goto release_and_out;
97436 +       }
97438 +       err = ntfs_set_acl(mnt_userns, inode, acl, type);
97440 +release_and_out:
97441 +       ntfs_posix_acl_release(acl);
97442 +       return err;
97446 + * Initialize the ACLs of a new inode. Called from ntfs_create_inode.
97447 + */
97448 +int ntfs_init_acl(struct user_namespace *mnt_userns, struct inode *inode,
97449 +                 struct inode *dir)
97451 +       struct posix_acl *default_acl, *acl;
97452 +       int err;
97454 +       /*
97455 +        * TODO refactoring lock
97456 +        * ni_lock(dir) ... -> posix_acl_create(dir,...) -> ntfs_get_acl -> ni_lock(dir)
97457 +        */
97458 +       inode->i_default_acl = NULL;
97460 +       default_acl = ntfs_get_acl_ex(mnt_userns, dir, ACL_TYPE_DEFAULT, 1);
97462 +       if (!default_acl || default_acl == ERR_PTR(-EOPNOTSUPP)) {
97463 +               inode->i_mode &= ~current_umask();
97464 +               err = 0;
97465 +               goto out;
97466 +       }
97468 +       if (IS_ERR(default_acl)) {
97469 +               err = PTR_ERR(default_acl);
97470 +               goto out;
97471 +       }
97473 +       acl = default_acl;
97474 +       err = __posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
97475 +       if (err < 0)
97476 +               goto out1;
97477 +       if (!err) {
97478 +               posix_acl_release(acl);
97479 +               acl = NULL;
97480 +       }
97482 +       if (!S_ISDIR(inode->i_mode)) {
97483 +               posix_acl_release(default_acl);
97484 +               default_acl = NULL;
97485 +       }
97487 +       if (default_acl)
97488 +               err = ntfs_set_acl_ex(mnt_userns, inode, default_acl,
97489 +                                     ACL_TYPE_DEFAULT, 1);
97491 +       if (!acl)
97492 +               inode->i_acl = NULL;
97493 +       else if (!err)
97494 +               err = ntfs_set_acl_ex(mnt_userns, inode, acl, ACL_TYPE_ACCESS,
97495 +                                     1);
97497 +       posix_acl_release(acl);
97498 +out1:
97499 +       posix_acl_release(default_acl);
97501 +out:
97502 +       return err;
97504 +#endif
97507 + * ntfs_acl_chmod
97508 + *
97509 + * helper for 'ntfs3_setattr'
97510 + */
97511 +int ntfs_acl_chmod(struct user_namespace *mnt_userns, struct inode *inode)
97513 +       struct super_block *sb = inode->i_sb;
97515 +       if (!(sb->s_flags & SB_POSIXACL))
97516 +               return 0;
97518 +       if (S_ISLNK(inode->i_mode))
97519 +               return -EOPNOTSUPP;
97521 +       return posix_acl_chmod(mnt_userns, inode, inode->i_mode);
97525 + * ntfs_permission
97526 + *
97527 + * inode_operations::permission
97528 + */
97529 +int ntfs_permission(struct user_namespace *mnt_userns, struct inode *inode,
97530 +                   int mask)
97532 +       if (ntfs_sb(inode->i_sb)->options.no_acs_rules) {
97533 +               /* "no access rules" mode - allow all changes */
97534 +               return 0;
97535 +       }
97537 +       return generic_permission(mnt_userns, inode, mask);
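/*
 * Usage note (mount syntax assumed, not taken from this patch): the check
 * above is driven by the ntfs3 mount option of the same name, e.g.
 *
 *     mount -t ntfs3 -o no_acs_rules /dev/sdX /mnt
 *
 * in which mode ntfs_permission() grants every access unconditionally.
 */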
97541 + * ntfs_listxattr
97542 + *
97543 + * inode_operations::listxattr
97544 + */
97545 +ssize_t ntfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
97547 +       struct inode *inode = d_inode(dentry);
97548 +       struct ntfs_inode *ni = ntfs_i(inode);
97549 +       ssize_t ret;
97551 +       if (!(ni->ni_flags & NI_FLAG_EA)) {
97552 +               /* no xattr in file */
97553 +               return 0;
97554 +       }
97556 +       ni_lock(ni);
97558 +       ret = ntfs_list_ea(ni, buffer, size);
97560 +       ni_unlock(ni);
97562 +       return ret;
97565 +static int ntfs_getxattr(const struct xattr_handler *handler, struct dentry *de,
97566 +                        struct inode *inode, const char *name, void *buffer,
97567 +                        size_t size)
97569 +       int err;
97570 +       struct ntfs_inode *ni = ntfs_i(inode);
97571 +       size_t name_len = strlen(name);
97573 +       /* Dispatch request */
97574 +       if (name_len == sizeof(SYSTEM_DOS_ATTRIB) - 1 &&
97575 +           !memcmp(name, SYSTEM_DOS_ATTRIB, sizeof(SYSTEM_DOS_ATTRIB))) {
97576 +               /* system.dos_attrib */
97577 +               if (!buffer) {
97578 +                       err = sizeof(u8);
97579 +               } else if (size < sizeof(u8)) {
97580 +                       err = -ENODATA;
97581 +               } else {
97582 +                       err = sizeof(u8);
97583 +                       *(u8 *)buffer = le32_to_cpu(ni->std_fa);
97584 +               }
97585 +               goto out;
97586 +       }
97588 +       if (name_len == sizeof(SYSTEM_NTFS_ATTRIB) - 1 &&
97589 +           !memcmp(name, SYSTEM_NTFS_ATTRIB, sizeof(SYSTEM_NTFS_ATTRIB))) {
97590 +               /* system.ntfs_attrib */
97591 +               if (!buffer) {
97592 +                       err = sizeof(u32);
97593 +               } else if (size < sizeof(u32)) {
97594 +                       err = -ENODATA;
97595 +               } else {
97596 +                       err = sizeof(u32);
97597 +                       *(u32 *)buffer = le32_to_cpu(ni->std_fa);
97598 +               }
97599 +               goto out;
97600 +       }
97602 +       if (name_len == sizeof(SYSTEM_NTFS_SECURITY) - 1 &&
97603 +           !memcmp(name, SYSTEM_NTFS_SECURITY, sizeof(SYSTEM_NTFS_SECURITY))) {
97604 +               /* system.ntfs_security */
97605 +               struct SECURITY_DESCRIPTOR_RELATIVE *sd = NULL;
97606 +               size_t sd_size = 0;
97608 +               if (!is_ntfs3(ni->mi.sbi)) {
97609 +                       /* We would need to fetch NT4-style security here; not supported */
97610 +                       err = -EINVAL;
97611 +                       goto out;
97612 +               } else if (le32_to_cpu(ni->std_security_id) <
97613 +                          SECURITY_ID_FIRST) {
97614 +                       err = -ENOENT;
97615 +                       goto out;
97616 +               }
97618 +               err = ntfs_get_security_by_id(ni->mi.sbi, ni->std_security_id,
97619 +                                             &sd, &sd_size);
97620 +               if (err)
97621 +                       goto out;
97623 +               if (!is_sd_valid(sd, sd_size)) {
97624 +                       ntfs_inode_warn(
97625 +                               inode,
97626 +                               "looks like you get incorrect security descriptor id=%u",
97627 +                               ni->std_security_id);
97628 +               }
97630 +               if (!buffer) {
97631 +                       err = sd_size;
97632 +               } else if (size < sd_size) {
97633 +                       err = -ENODATA;
97634 +               } else {
97635 +                       err = sd_size;
97636 +                       memcpy(buffer, sd, sd_size);
97637 +               }
97638 +               ntfs_free(sd);
97639 +               goto out;
97640 +       }
97642 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
97643 +       if ((name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1 &&
97644 +            !memcmp(name, XATTR_NAME_POSIX_ACL_ACCESS,
97645 +                    sizeof(XATTR_NAME_POSIX_ACL_ACCESS))) ||
97646 +           (name_len == sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1 &&
97647 +            !memcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT,
97648 +                    sizeof(XATTR_NAME_POSIX_ACL_DEFAULT)))) {
97649 +               /* TODO: init_user_ns? */
97650 +               err = ntfs_xattr_get_acl(
97651 +                       &init_user_ns, inode,
97652 +                       name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1
97653 +                               ? ACL_TYPE_ACCESS
97654 +                               : ACL_TYPE_DEFAULT,
97655 +                       buffer, size);
97656 +               goto out;
97657 +       }
97658 +#endif
97659 +       /* Deal with NTFS extended attributes */
97660 +       err = ntfs_get_ea(inode, name, name_len, buffer, size, NULL);
97662 +out:
97663 +       return err;
97667 + * ntfs_setxattr
97668 + *
97669 + * inode_operations::setxattr
97670 + */
97671 +static noinline int ntfs_setxattr(const struct xattr_handler *handler,
97672 +                                 struct user_namespace *mnt_userns,
97673 +                                 struct dentry *de, struct inode *inode,
97674 +                                 const char *name, const void *value,
97675 +                                 size_t size, int flags)
97677 +       int err = -EINVAL;
97678 +       struct ntfs_inode *ni = ntfs_i(inode);
97679 +       size_t name_len = strlen(name);
97680 +       enum FILE_ATTRIBUTE new_fa;
97682 +       /* Dispatch request */
97683 +       if (name_len == sizeof(SYSTEM_DOS_ATTRIB) - 1 &&
97684 +           !memcmp(name, SYSTEM_DOS_ATTRIB, sizeof(SYSTEM_DOS_ATTRIB))) {
97685 +               if (sizeof(u8) != size)
97686 +                       goto out;
97687 +               new_fa = cpu_to_le32(*(u8 *)value);
97688 +               goto set_new_fa;
97689 +       }
97691 +       if (name_len == sizeof(SYSTEM_NTFS_ATTRIB) - 1 &&
97692 +           !memcmp(name, SYSTEM_NTFS_ATTRIB, sizeof(SYSTEM_NTFS_ATTRIB))) {
97693 +               if (size != sizeof(u32))
97694 +                       goto out;
97695 +               new_fa = cpu_to_le32(*(u32 *)value);
97697 +               if (S_ISREG(inode->i_mode)) {
97698 +                       /* Process compressed/sparse files in a special way */
97699 +                       ni_lock(ni);
97700 +                       err = ni_new_attr_flags(ni, new_fa);
97701 +                       ni_unlock(ni);
97702 +                       if (err)
97703 +                               goto out;
97704 +               }
97705 +set_new_fa:
97706 +               /*
97707 +                * Thanks Mark Harmstone:
97708 +                * keep the directory bit consistent
97709 +                */
97710 +               if (S_ISDIR(inode->i_mode))
97711 +                       new_fa |= FILE_ATTRIBUTE_DIRECTORY;
97712 +               else
97713 +                       new_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
97715 +               if (ni->std_fa != new_fa) {
97716 +                       ni->std_fa = new_fa;
97717 +                       if (new_fa & FILE_ATTRIBUTE_READONLY)
97718 +                               inode->i_mode &= ~0222;
97719 +                       else
97720 +                               inode->i_mode |= 0222;
97721 +                       /* std attribute always in primary record */
97722 +                       ni->mi.dirty = true;
97723 +                       mark_inode_dirty(inode);
97724 +               }
97725 +               err = 0;
97727 +               goto out;
97728 +       }
97730 +       if (name_len == sizeof(SYSTEM_NTFS_SECURITY) - 1 &&
97731 +           !memcmp(name, SYSTEM_NTFS_SECURITY, sizeof(SYSTEM_NTFS_SECURITY))) {
97732 +               /* system.ntfs_security */
97733 +               __le32 security_id;
97734 +               bool inserted;
97735 +               struct ATTR_STD_INFO5 *std;
97737 +               if (!is_ntfs3(ni->mi.sbi)) {
97738 +                       /*
97739 +                        * We would need to replace ATTR_SECURE here.
97740 +                        * Skip it, since that is an NT4 feature.
97741 +                        */
97742 +                       err = -EINVAL;
97743 +                       goto out;
97744 +               }
97746 +               if (!is_sd_valid(value, size)) {
97747 +                       err = -EINVAL;
97748 +                       ntfs_inode_warn(
97749 +                               inode,
97750 +                               "you try to set invalid security descriptor");
97751 +                       goto out;
97752 +               }
97754 +               err = ntfs_insert_security(ni->mi.sbi, value, size,
97755 +                                          &security_id, &inserted);
97756 +               if (err)
97757 +                       goto out;
97759 +               ni_lock(ni);
97760 +               std = ni_std5(ni);
97761 +               if (!std) {
97762 +                       err = -EINVAL;
97763 +               } else if (std->security_id != security_id) {
97764 +                       std->security_id = ni->std_security_id = security_id;
97765 +                       /* std attribute always in primary record */
97766 +                       ni->mi.dirty = true;
97767 +                       mark_inode_dirty(&ni->vfs_inode);
97768 +               }
97769 +               ni_unlock(ni);
97770 +               goto out;
97771 +       }
97773 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
97774 +       if ((name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1 &&
97775 +            !memcmp(name, XATTR_NAME_POSIX_ACL_ACCESS,
97776 +                    sizeof(XATTR_NAME_POSIX_ACL_ACCESS))) ||
97777 +           (name_len == sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1 &&
97778 +            !memcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT,
97779 +                    sizeof(XATTR_NAME_POSIX_ACL_DEFAULT)))) {
97780 +               /* TODO: init_user_ns? */
97781 +               err = ntfs_xattr_set_acl(
97782 +                       &init_user_ns, inode,
97783 +                       name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1
97784 +                               ? ACL_TYPE_ACCESS
97785 +                               : ACL_TYPE_DEFAULT,
97786 +                       value, size);
97787 +               goto out;
97788 +       }
97789 +#endif
97790 +       /* Deal with NTFS extended attributes */
97791 +       err = ntfs_set_ea(inode, name, name_len, value, size, flags, 0);
97793 +out:
97794 +       return err;
97797 +static bool ntfs_xattr_user_list(struct dentry *dentry)
97799 +       return true;
97802 +static const struct xattr_handler ntfs_xattr_handler = {
97803 +       .prefix = "",
97804 +       .get = ntfs_getxattr,
97805 +       .set = ntfs_setxattr,
97806 +       .list = ntfs_xattr_user_list,
97809 +const struct xattr_handler *ntfs_xattr_handlers[] = {
97810 +       &ntfs_xattr_handler,
97811 +       NULL,
97813 diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
97814 index 0b2891c6c71e..2846b943e80c 100644
97815 --- a/fs/overlayfs/copy_up.c
97816 +++ b/fs/overlayfs/copy_up.c
97817 @@ -932,7 +932,7 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
97818  static int ovl_copy_up_flags(struct dentry *dentry, int flags)
97820         int err = 0;
97821 -       const struct cred *old_cred = ovl_override_creds(dentry->d_sb);
97822 +       const struct cred *old_cred;
97823         bool disconnected = (dentry->d_flags & DCACHE_DISCONNECTED);
97825         /*
97826 @@ -943,6 +943,7 @@ static int ovl_copy_up_flags(struct dentry *dentry, int flags)
97827         if (WARN_ON(disconnected && d_is_dir(dentry)))
97828                 return -EIO;
97830 +       old_cred = ovl_override_creds(dentry->d_sb);
97831         while (!err) {
97832                 struct dentry *next;
97833                 struct dentry *parent = NULL;
97834 diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
97835 index 3fe05fb5d145..71e264e2f16b 100644
97836 --- a/fs/overlayfs/namei.c
97837 +++ b/fs/overlayfs/namei.c
97838 @@ -919,6 +919,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
97839                         continue;
97841                 if ((uppermetacopy || d.metacopy) && !ofs->config.metacopy) {
97842 +                       dput(this);
97843                         err = -EPERM;
97844                         pr_warn_ratelimited("refusing to follow metacopy origin for (%pd2)\n", dentry);
97845                         goto out_put;
97846 diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
97847 index 95cff83786a5..2322f854533c 100644
97848 --- a/fs/overlayfs/overlayfs.h
97849 +++ b/fs/overlayfs/overlayfs.h
97850 @@ -319,9 +319,6 @@ int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry,
97851                        enum ovl_xattr ox, const void *value, size_t size,
97852                        int xerr);
97853  int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry);
97854 -void ovl_set_flag(unsigned long flag, struct inode *inode);
97855 -void ovl_clear_flag(unsigned long flag, struct inode *inode);
97856 -bool ovl_test_flag(unsigned long flag, struct inode *inode);
97857  bool ovl_inuse_trylock(struct dentry *dentry);
97858  void ovl_inuse_unlock(struct dentry *dentry);
97859  bool ovl_is_inuse(struct dentry *dentry);
97860 @@ -335,6 +332,21 @@ char *ovl_get_redirect_xattr(struct ovl_fs *ofs, struct dentry *dentry,
97861                              int padding);
97862  int ovl_sync_status(struct ovl_fs *ofs);
97864 +static inline void ovl_set_flag(unsigned long flag, struct inode *inode)
97866 +       set_bit(flag, &OVL_I(inode)->flags);
97869 +static inline void ovl_clear_flag(unsigned long flag, struct inode *inode)
97871 +       clear_bit(flag, &OVL_I(inode)->flags);
97874 +static inline bool ovl_test_flag(unsigned long flag, struct inode *inode)
97876 +       return test_bit(flag, &OVL_I(inode)->flags);
97879  static inline bool ovl_is_impuredir(struct super_block *sb,
97880                                     struct dentry *dentry)
97882 @@ -439,6 +451,18 @@ int ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
97883                         struct dentry *dentry, int level);
97884  int ovl_indexdir_cleanup(struct ovl_fs *ofs);
97887 + * Can we iterate real dir directly?
97888 + *
97889 + * Non-merge dir may contain whiteouts from a time it was a merge upper, before
97890 + * lower dir was removed under it and possibly before it was rotated from upper
97891 + * to lower layer.
97892 + */
97893 +static inline bool ovl_dir_is_real(struct dentry *dir)
97895 +       return !ovl_test_flag(OVL_WHITEOUTS, d_inode(dir));
97898  /* inode.c */
97899  int ovl_set_nlink_upper(struct dentry *dentry);
97900  int ovl_set_nlink_lower(struct dentry *dentry);
97901 diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
97902 index f404a78e6b60..cc1e80257064 100644
97903 --- a/fs/overlayfs/readdir.c
97904 +++ b/fs/overlayfs/readdir.c
97905 @@ -319,18 +319,6 @@ static inline int ovl_dir_read(struct path *realpath,
97906         return err;
97910 - * Can we iterate real dir directly?
97911 - *
97912 - * Non-merge dir may contain whiteouts from a time it was a merge upper, before
97913 - * lower dir was removed under it and possibly before it was rotated from upper
97914 - * to lower layer.
97915 - */
97916 -static bool ovl_dir_is_real(struct dentry *dir)
97918 -       return !ovl_test_flag(OVL_WHITEOUTS, d_inode(dir));
97921  static void ovl_dir_reset(struct file *file)
97923         struct ovl_dir_file *od = file->private_data;
97924 diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
97925 index fdd72f1a9c5e..787ce7c38fba 100644
97926 --- a/fs/overlayfs/super.c
97927 +++ b/fs/overlayfs/super.c
97928 @@ -380,6 +380,8 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
97929                            ofs->config.metacopy ? "on" : "off");
97930         if (ofs->config.ovl_volatile)
97931                 seq_puts(m, ",volatile");
97932 +       if (ofs->config.userxattr)
97933 +               seq_puts(m, ",userxattr");
97934         return 0;
97937 @@ -1826,7 +1828,8 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
97938   * - upper/work dir of any overlayfs instance
97939   */
97940  static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs,
97941 -                          struct dentry *dentry, const char *name)
97942 +                          struct dentry *dentry, const char *name,
97943 +                          bool is_lower)
97945         struct dentry *next = dentry, *parent;
97946         int err = 0;
97947 @@ -1838,7 +1841,7 @@ static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs,
97949         /* Walk back ancestors to root (inclusive) looking for traps */
97950         while (!err && parent != next) {
97951 -               if (ovl_lookup_trap_inode(sb, parent)) {
97952 +               if (is_lower && ovl_lookup_trap_inode(sb, parent)) {
97953                         err = -ELOOP;
97954                         pr_err("overlapping %s path\n", name);
97955                 } else if (ovl_is_inuse(parent)) {
97956 @@ -1864,7 +1867,7 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
97958         if (ovl_upper_mnt(ofs)) {
97959                 err = ovl_check_layer(sb, ofs, ovl_upper_mnt(ofs)->mnt_root,
97960 -                                     "upperdir");
97961 +                                     "upperdir", false);
97962                 if (err)
97963                         return err;
97965 @@ -1875,7 +1878,8 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
97966                  * workbasedir.  In that case, we already have their traps in
97967                  * inode cache and we will catch that case on lookup.
97968                  */
97969 -               err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir");
97970 +               err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir",
97971 +                                     false);
97972                 if (err)
97973                         return err;
97974         }
97975 @@ -1883,7 +1887,7 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
97976         for (i = 1; i < ofs->numlayer; i++) {
97977                 err = ovl_check_layer(sb, ofs,
97978                                       ofs->layers[i].mnt->mnt_root,
97979 -                                     "lowerdir");
97980 +                                     "lowerdir", true);
97981                 if (err)
97982                         return err;
97983         }
97984 diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
97985 index 7f5a01a11f97..404a0a32ddf6 100644
97986 --- a/fs/overlayfs/util.c
97987 +++ b/fs/overlayfs/util.c
97988 @@ -422,18 +422,20 @@ void ovl_inode_update(struct inode *inode, struct dentry *upperdentry)
97989         }
97992 -static void ovl_dentry_version_inc(struct dentry *dentry, bool impurity)
97993 +static void ovl_dir_version_inc(struct dentry *dentry, bool impurity)
97995         struct inode *inode = d_inode(dentry);
97997         WARN_ON(!inode_is_locked(inode));
97998 +       WARN_ON(!d_is_dir(dentry));
97999         /*
98000 -        * Version is used by readdir code to keep cache consistent.  For merge
98001 -        * dirs all changes need to be noted.  For non-merge dirs, cache only
98002 -        * contains impure (ones which have been copied up and have origins)
98003 -        * entries, so only need to note changes to impure entries.
98004 +        * Version is used by readdir code to keep cache consistent.
98005 +        * For merge dirs (or dirs with origin) all changes need to be noted.
98006 +        * For non-merge dirs, cache contains only impure entries (i.e. ones
98007 +        * which have been copied up and have origins), so only need to note
98008 +        * changes to impure entries.
98009          */
98010 -       if (OVL_TYPE_MERGE(ovl_path_type(dentry)) || impurity)
98011 +       if (!ovl_dir_is_real(dentry) || impurity)
98012                 OVL_I(inode)->version++;
98015 @@ -442,7 +444,7 @@ void ovl_dir_modified(struct dentry *dentry, bool impurity)
98016         /* Copy mtime/ctime */
98017         ovl_copyattr(d_inode(ovl_dentry_upper(dentry)), d_inode(dentry));
98019 -       ovl_dentry_version_inc(dentry, impurity);
98020 +       ovl_dir_version_inc(dentry, impurity);
98023  u64 ovl_dentry_version_get(struct dentry *dentry)
98024 @@ -638,21 +640,6 @@ int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry)
98025         return err;
98028 -void ovl_set_flag(unsigned long flag, struct inode *inode)
98030 -       set_bit(flag, &OVL_I(inode)->flags);
98033 -void ovl_clear_flag(unsigned long flag, struct inode *inode)
98035 -       clear_bit(flag, &OVL_I(inode)->flags);
98038 -bool ovl_test_flag(unsigned long flag, struct inode *inode)
98040 -       return test_bit(flag, &OVL_I(inode)->flags);
98043  /**
98044   * Caller must hold a reference to inode to prevent it from being freed while
98045   * it is marked inuse.
98046 diff --git a/fs/proc/array.c b/fs/proc/array.c
98047 index bb87e4d89cd8..7ec59171f197 100644
98048 --- a/fs/proc/array.c
98049 +++ b/fs/proc/array.c
98050 @@ -342,8 +342,10 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
98051         seq_put_decimal_ull(m, "NoNewPrivs:\t", task_no_new_privs(p));
98052  #ifdef CONFIG_SECCOMP
98053         seq_put_decimal_ull(m, "\nSeccomp:\t", p->seccomp.mode);
98054 +#ifdef CONFIG_SECCOMP_FILTER
98055         seq_put_decimal_ull(m, "\nSeccomp_filters:\t",
98056                             atomic_read(&p->seccomp.filter_count));
98057 +#endif
98058  #endif
98059         seq_puts(m, "\nSpeculation_Store_Bypass:\t");
98060         switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
98061 diff --git a/fs/proc/generic.c b/fs/proc/generic.c
98062 index bc86aa87cc41..5600da30e289 100644
98063 --- a/fs/proc/generic.c
98064 +++ b/fs/proc/generic.c
98065 @@ -756,7 +756,7 @@ int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
98066         while (1) {
98067                 next = pde_subdir_first(de);
98068                 if (next) {
98069 -                       if (unlikely(pde_is_permanent(root))) {
98070 +                       if (unlikely(pde_is_permanent(next))) {
98071                                 write_unlock(&proc_subdir_lock);
98072                                 WARN(1, "removing permanent /proc entry '%s/%s'",
98073                                         next->parent->name, next->name);
98074 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
98075 index e862cab69583..d292f20c4e3d 100644
98076 --- a/fs/proc/task_mmu.c
98077 +++ b/fs/proc/task_mmu.c
98078 @@ -19,6 +19,7 @@
98079  #include <linux/shmem_fs.h>
98080  #include <linux/uaccess.h>
98081  #include <linux/pkeys.h>
98082 +#include <linux/mm_inline.h>
98084  #include <asm/elf.h>
98085  #include <asm/tlb.h>
98086 @@ -1718,7 +1719,7 @@ static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
98087         if (PageSwapCache(page))
98088                 md->swapcache += nr_pages;
98090 -       if (PageActive(page) || PageUnevictable(page))
98091 +       if (PageUnevictable(page) || page_is_active(compound_head(page), NULL))
98092                 md->active += nr_pages;
98094         if (PageWriteback(page))
98095 diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
98096 index d963ae7902f9..67b194ba1b03 100644
98097 --- a/fs/pstore/platform.c
98098 +++ b/fs/pstore/platform.c
98099 @@ -218,7 +218,7 @@ static int zbufsize_842(size_t size)
98100  #if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
98101  static int zbufsize_zstd(size_t size)
98103 -       return ZSTD_compressBound(size);
98104 +       return zstd_compress_bound(size);
98106  #endif
98108 diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
98109 index 7b1128398976..89d492916dea 100644
98110 --- a/fs/squashfs/file.c
98111 +++ b/fs/squashfs/file.c
98112 @@ -211,11 +211,11 @@ static long long read_indexes(struct super_block *sb, int n,
98113   * If the skip factor is limited in this way then the file will use multiple
98114   * slots.
98115   */
98116 -static inline int calculate_skip(int blocks)
98117 +static inline int calculate_skip(u64 blocks)
98119 -       int skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
98120 +       u64 skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
98121                  * SQUASHFS_META_INDEXES);
98122 -       return min(SQUASHFS_CACHED_BLKS - 1, skip + 1);
98123 +       return min((u64) SQUASHFS_CACHED_BLKS - 1, skip + 1);
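/*
 * Worked check of why the wider type matters: with a 128 KiB block size a
 * file crosses INT_MAX blocks at 2^31 * 2^17 bytes = 256 TiB, a size a
 * crafted or corrupted image can claim, so 'blocks' and the division are
 * done in u64 to avoid the signed overflow of the old int arithmetic.
 */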
98127 diff --git a/fs/squashfs/zstd_wrapper.c b/fs/squashfs/zstd_wrapper.c
98128 index b7cb1faa652d..6967c0aae801 100644
98129 --- a/fs/squashfs/zstd_wrapper.c
98130 +++ b/fs/squashfs/zstd_wrapper.c
98131 @@ -34,7 +34,7 @@ static void *zstd_init(struct squashfs_sb_info *msblk, void *buff)
98132                 goto failed;
98133         wksp->window_size = max_t(size_t,
98134                         msblk->block_size, SQUASHFS_METADATA_SIZE);
98135 -       wksp->mem_size = ZSTD_DStreamWorkspaceBound(wksp->window_size);
98136 +       wksp->mem_size = zstd_dstream_workspace_bound(wksp->window_size);
98137         wksp->mem = vmalloc(wksp->mem_size);
98138         if (wksp->mem == NULL)
98139                 goto failed;
98140 @@ -63,15 +63,15 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
98141         struct squashfs_page_actor *output)
98143         struct workspace *wksp = strm;
98144 -       ZSTD_DStream *stream;
98145 +       zstd_dstream *stream;
98146         size_t total_out = 0;
98147         int error = 0;
98148 -       ZSTD_inBuffer in_buf = { NULL, 0, 0 };
98149 -       ZSTD_outBuffer out_buf = { NULL, 0, 0 };
98150 +       zstd_in_buffer in_buf = { NULL, 0, 0 };
98151 +       zstd_out_buffer out_buf = { NULL, 0, 0 };
98152         struct bvec_iter_all iter_all = {};
98153         struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
98155 -       stream = ZSTD_initDStream(wksp->window_size, wksp->mem, wksp->mem_size);
98156 +       stream = zstd_init_dstream(wksp->window_size, wksp->mem, wksp->mem_size);
98158         if (!stream) {
98159                 ERROR("Failed to initialize zstd decompressor\n");
98160 @@ -116,14 +116,14 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
98161                 }
98163                 total_out -= out_buf.pos;
98164 -               zstd_err = ZSTD_decompressStream(stream, &out_buf, &in_buf);
98165 +               zstd_err = zstd_decompress_stream(stream, &out_buf, &in_buf);
98166                 total_out += out_buf.pos; /* add the additional data produced */
98167                 if (zstd_err == 0)
98168                         break;
98170 -               if (ZSTD_isError(zstd_err)) {
98171 +               if (zstd_is_error(zstd_err)) {
98172                         ERROR("zstd decompression error: %d\n",
98173 -                                       (int)ZSTD_getErrorCode(zstd_err));
98174 +                                       (int)zstd_get_error_code(zstd_err));
98175                         error = -EIO;
98176                         break;
98177                 }
98178 diff --git a/fs/stat.c b/fs/stat.c
98179 index fbc171d038aa..1fa38bdec1a6 100644
98180 --- a/fs/stat.c
98181 +++ b/fs/stat.c
98182 @@ -86,12 +86,20 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
98183         /* SB_NOATIME means filesystem supplies dummy atime value */
98184         if (inode->i_sb->s_flags & SB_NOATIME)
98185                 stat->result_mask &= ~STATX_ATIME;
98187 +       /*
98188 +        * Note: If you add another clause to set an attribute flag, please
98189 +        * update attributes_mask below.
98190 +        */
98191         if (IS_AUTOMOUNT(inode))
98192                 stat->attributes |= STATX_ATTR_AUTOMOUNT;
98194         if (IS_DAX(inode))
98195                 stat->attributes |= STATX_ATTR_DAX;
98197 +       stat->attributes_mask |= (STATX_ATTR_AUTOMOUNT |
98198 +                                 STATX_ATTR_DAX);
98200         mnt_userns = mnt_user_ns(path->mnt);
98201         if (inode->i_op->getattr)
98202                 return inode->i_op->getattr(mnt_userns, path, stat,
98203 diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
98204 index 0f8a6a16421b..1929ec63a0cb 100644
98205 --- a/fs/ubifs/replay.c
98206 +++ b/fs/ubifs/replay.c
98207 @@ -223,7 +223,8 @@ static bool inode_still_linked(struct ubifs_info *c, struct replay_entry *rino)
98208          */
98209         list_for_each_entry_reverse(r, &c->replay_list, list) {
98210                 ubifs_assert(c, r->sqnum >= rino->sqnum);
98211 -               if (key_inum(c, &r->key) == key_inum(c, &rino->key))
98212 +               if (key_inum(c, &r->key) == key_inum(c, &rino->key) &&
98213 +                   key_type(c, &r->key) == UBIFS_INO_KEY)
98214                         return r->deletion == 0;
98216         }
98217 diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
98218 index 472b3039eabb..902e5f7e6642 100644
98219 --- a/fs/xfs/libxfs/xfs_attr.c
98220 +++ b/fs/xfs/libxfs/xfs_attr.c
98221 @@ -928,6 +928,7 @@ xfs_attr_node_addname(
98222          * Search to see if name already exists, and get back a pointer
98223          * to where it should go.
98224          */
98225 +       error = 0;
98226         retval = xfs_attr_node_hasname(args, &state);
98227         if (retval != -ENOATTR && retval != -EEXIST)
98228                 goto out;
98229 diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
98230 index fcde59c65a81..cb3d6b1c655d 100644
98231 --- a/include/crypto/acompress.h
98232 +++ b/include/crypto/acompress.h
98233 @@ -165,6 +165,8 @@ static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
98234   * crypto_free_acomp() -- free ACOMPRESS tfm handle
98235   *
98236   * @tfm:       ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
98237 + *
98238 + * If @tfm is a NULL or error pointer, this function does nothing.
98239   */
98240  static inline void crypto_free_acomp(struct crypto_acomp *tfm)
98242 diff --git a/include/crypto/aead.h b/include/crypto/aead.h
98243 index fcc12c593ef8..e728469c4ccc 100644
98244 --- a/include/crypto/aead.h
98245 +++ b/include/crypto/aead.h
98246 @@ -185,6 +185,8 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
98247  /**
98248   * crypto_free_aead() - zeroize and free aead handle
98249   * @tfm: cipher handle to be freed
98250 + *
98251 + * If @tfm is a NULL or error pointer, this function does nothing.
98252   */
98253  static inline void crypto_free_aead(struct crypto_aead *tfm)
98255 diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
98256 index 1d3aa252caba..5764b46bd1ec 100644
98257 --- a/include/crypto/akcipher.h
98258 +++ b/include/crypto/akcipher.h
98259 @@ -174,6 +174,8 @@ static inline struct crypto_akcipher *crypto_akcipher_reqtfm(
98260   * crypto_free_akcipher() - free AKCIPHER tfm handle
98261   *
98262   * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
98263 + *
98264 + * If @tfm is a NULL or error pointer, this function does nothing.
98265   */
98266  static inline void crypto_free_akcipher(struct crypto_akcipher *tfm)
98268 diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
98269 index 3a1c72fdb7cf..dabaee698718 100644
98270 --- a/include/crypto/chacha.h
98271 +++ b/include/crypto/chacha.h
98272 @@ -47,13 +47,18 @@ static inline void hchacha_block(const u32 *state, u32 *out, int nrounds)
98273                 hchacha_block_generic(state, out, nrounds);
98276 -void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
98277 -static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
98278 +static inline void chacha_init_consts(u32 *state)
98280         state[0]  = 0x61707865; /* "expa" */
98281         state[1]  = 0x3320646e; /* "nd 3" */
98282         state[2]  = 0x79622d32; /* "2-by" */
98283         state[3]  = 0x6b206574; /* "te k" */
98286 +void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
98287 +static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
98289 +       chacha_init_consts(state);
98290         state[4]  = key[0];
98291         state[5]  = key[1];
98292         state[6]  = key[2];
98293 diff --git a/include/crypto/hash.h b/include/crypto/hash.h
98294 index 13f8a6a54ca8..b2bc1e46e86a 100644
98295 --- a/include/crypto/hash.h
98296 +++ b/include/crypto/hash.h
98297 @@ -281,6 +281,8 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
98298  /**
98299   * crypto_free_ahash() - zeroize and free the ahash handle
98300   * @tfm: cipher handle to be freed
98301 + *
98302 + * If @tfm is a NULL or error pointer, this function does nothing.
98303   */
98304  static inline void crypto_free_ahash(struct crypto_ahash *tfm)
98306 @@ -724,6 +726,8 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
98307  /**
98308   * crypto_free_shash() - zeroize and free the message digest handle
98309   * @tfm: cipher handle to be freed
98310 + *
98311 + * If @tfm is a NULL or error pointer, this function does nothing.
98312   */
98313  static inline void crypto_free_shash(struct crypto_shash *tfm)
98315 diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h
98316 index 064e52ca5248..196aa769f296 100644
98317 --- a/include/crypto/internal/poly1305.h
98318 +++ b/include/crypto/internal/poly1305.h
98319 @@ -18,7 +18,8 @@
98320   * only the ε-almost-∆-universal hash function (not the full MAC) is computed.
98321   */
98323 -void poly1305_core_setkey(struct poly1305_core_key *key, const u8 *raw_key);
98324 +void poly1305_core_setkey(struct poly1305_core_key *key,
98325 +                         const u8 raw_key[POLY1305_BLOCK_SIZE]);
98326  static inline void poly1305_core_init(struct poly1305_state *state)
98328         *state = (struct poly1305_state){};
98329 diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
98330 index 88b591215d5c..cccceadc164b 100644
98331 --- a/include/crypto/kpp.h
98332 +++ b/include/crypto/kpp.h
98333 @@ -154,6 +154,8 @@ static inline void crypto_kpp_set_flags(struct crypto_kpp *tfm, u32 flags)
98334   * crypto_free_kpp() - free KPP tfm handle
98335   *
98336   * @tfm: KPP tfm handle allocated with crypto_alloc_kpp()
98337 + *
98338 + * If @tfm is a NULL or error pointer, this function does nothing.
98339   */
98340  static inline void crypto_free_kpp(struct crypto_kpp *tfm)
98342 diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
98343 index f1f67fc749cf..090692ec3bc7 100644
98344 --- a/include/crypto/poly1305.h
98345 +++ b/include/crypto/poly1305.h
98346 @@ -58,8 +58,10 @@ struct poly1305_desc_ctx {
98347         };
98348  };
98350 -void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key);
98351 -void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key);
98352 +void poly1305_init_arch(struct poly1305_desc_ctx *desc,
98353 +                       const u8 key[POLY1305_KEY_SIZE]);
98354 +void poly1305_init_generic(struct poly1305_desc_ctx *desc,
98355 +                          const u8 key[POLY1305_KEY_SIZE]);
98357  static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key)
98359 diff --git a/include/crypto/rng.h b/include/crypto/rng.h
98360 index 8b4b844b4eef..17bb3673d3c1 100644
98361 --- a/include/crypto/rng.h
98362 +++ b/include/crypto/rng.h
98363 @@ -111,6 +111,8 @@ static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
98364  /**
98365   * crypto_free_rng() - zeroize and free RNG handle
98366   * @tfm: cipher handle to be freed
98367 + *
98368 + * If @tfm is a NULL or error pointer, this function does nothing.
98369   */
98370  static inline void crypto_free_rng(struct crypto_rng *tfm)
98372 diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
98373 index 6a733b171a5d..ef0fc9ed4342 100644
98374 --- a/include/crypto/skcipher.h
98375 +++ b/include/crypto/skcipher.h
98376 @@ -196,6 +196,8 @@ static inline struct crypto_tfm *crypto_skcipher_tfm(
98377  /**
98378   * crypto_free_skcipher() - zeroize and free cipher handle
98379   * @tfm: cipher handle to be freed
98380 + *
98381 + * If @tfm is a NULL or error pointer, this function does nothing.
98382   */
98383  static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
98385 diff --git a/include/keys/trusted-type.h b/include/keys/trusted-type.h
98386 index a94c03a61d8f..b2ed3481c6a0 100644
98387 --- a/include/keys/trusted-type.h
98388 +++ b/include/keys/trusted-type.h
98389 @@ -30,6 +30,7 @@ struct trusted_key_options {
98390         uint16_t keytype;
98391         uint32_t keyhandle;
98392         unsigned char keyauth[TPM_DIGEST_SIZE];
98393 +       uint32_t blobauth_len;
98394         unsigned char blobauth[TPM_DIGEST_SIZE];
98395         uint32_t pcrinfo_len;
98396         unsigned char pcrinfo[MAX_PCRINFO_SIZE];
98397 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
98398 index 158aefae1030..40c48e30f1eb 100644
98399 --- a/include/linux/blkdev.h
98400 +++ b/include/linux/blkdev.h
98401 @@ -620,6 +620,7 @@ struct request_queue {
98403  #define QUEUE_FLAG_MQ_DEFAULT  ((1 << QUEUE_FLAG_IO_STAT) |            \
98404                                  (1 << QUEUE_FLAG_SAME_COMP) |          \
98405 +                                (1 << QUEUE_FLAG_SAME_FORCE) |         \
98406                                  (1 << QUEUE_FLAG_NOWAIT))
98408  void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
98409 diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
98410 index 971b33aca13d..99bc82342ca0 100644
98411 --- a/include/linux/bpf_verifier.h
98412 +++ b/include/linux/bpf_verifier.h
98413 @@ -299,10 +299,11 @@ struct bpf_verifier_state_list {
98414  };
98416  /* Possible states for alu_state member. */
98417 -#define BPF_ALU_SANITIZE_SRC           1U
98418 -#define BPF_ALU_SANITIZE_DST           2U
98419 +#define BPF_ALU_SANITIZE_SRC           (1U << 0)
98420 +#define BPF_ALU_SANITIZE_DST           (1U << 1)
98421  #define BPF_ALU_NEG_VALUE              (1U << 2)
98422  #define BPF_ALU_NON_POINTER            (1U << 3)
98423 +#define BPF_ALU_IMMEDIATE              (1U << 4)
98424  #define BPF_ALU_SANITIZE               (BPF_ALU_SANITIZE_SRC | \
98425                                          BPF_ALU_SANITIZE_DST)
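/*
 * Note: (1U << 0) == 1U and (1U << 1) == 2U, so BPF_ALU_SANITIZE_SRC and
 * BPF_ALU_SANITIZE_DST keep their previous values; rewriting them as
 * shifts just lines the flags up as bits and makes room for the new
 * BPF_ALU_IMMEDIATE bit at (1U << 4).
 */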
98427 diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
98428 index 4f2f79de083e..bd5744360cfa 100644
98429 --- a/include/linux/cgroup.h
98430 +++ b/include/linux/cgroup.h
98431 @@ -432,6 +432,18 @@ static inline void cgroup_put(struct cgroup *cgrp)
98432         css_put(&cgrp->self);
98435 +extern struct mutex cgroup_mutex;
98437 +static inline void cgroup_lock(void)
98439 +       mutex_lock(&cgroup_mutex);
98442 +static inline void cgroup_unlock(void)
98444 +       mutex_unlock(&cgroup_mutex);
98447  /**
98448   * task_css_set_check - obtain a task's css_set with extra access conditions
98449   * @task: the task to obtain css_set for
98450 @@ -446,7 +458,6 @@ static inline void cgroup_put(struct cgroup *cgrp)
98451   * as locks used during the cgroup_subsys::attach() methods.
98452   */
98453  #ifdef CONFIG_PROVE_RCU
98454 -extern struct mutex cgroup_mutex;
98455  extern spinlock_t css_set_lock;
98456  #define task_css_set_check(task, __c)                                  \
98457         rcu_dereference_check((task)->cgroups,                          \
98458 @@ -704,6 +715,8 @@ struct cgroup;
98459  static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; }
98460  static inline void css_get(struct cgroup_subsys_state *css) {}
98461  static inline void css_put(struct cgroup_subsys_state *css) {}
98462 +static inline void cgroup_lock(void) {}
98463 +static inline void cgroup_unlock(void) {}
98464  static inline int cgroup_attach_task_all(struct task_struct *from,
98465                                          struct task_struct *t) { return 0; }
98466  static inline int cgroupstats_build(struct cgroupstats *stats,
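
The cgroup.h change above hoists the cgroup_mutex declaration out of the CONFIG_PROVE_RCU block and adds cgroup_lock()/cgroup_unlock() wrappers, with empty stubs in the !CONFIG_CGROUPS branch so callers compile either way. A hedged sketch of the intended usage, which avoids naming the mutex directly:

/* Sketch: serialize against cgroup hierarchy changes via the new wrappers. */
cgroup_lock();
/* ... read or walk state protected by cgroup_mutex ... */
cgroup_unlock();
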
98467 diff --git a/include/linux/compat.h b/include/linux/compat.h
98468 index 6e65be753603..d4c1b402b962 100644
98469 --- a/include/linux/compat.h
98470 +++ b/include/linux/compat.h
98471 @@ -365,6 +365,17 @@ struct compat_robust_list_head {
98472         compat_uptr_t                   list_op_pending;
98473  };
98475 +struct compat_futex_waitv {
98476 +       compat_uptr_t uaddr;
98477 +       compat_uint_t val;
98478 +       compat_uint_t flags;
98481 +struct compat_futex_requeue {
98482 +       compat_uptr_t uaddr;
98483 +       compat_uint_t flags;
98486  #ifdef CONFIG_COMPAT_OLD_SIGACTION
98487  struct compat_old_sigaction {
98488         compat_uptr_t                   sa_handler;
98489 @@ -654,6 +665,18 @@ asmlinkage long
98490  compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
98491                            compat_size_t __user *len_ptr);
98493 +/* kernel/futex2.c */
98494 +asmlinkage long compat_sys_futex_waitv(struct compat_futex_waitv *waiters,
98495 +                                      compat_uint_t nr_futexes, compat_uint_t flags,
98496 +                                      struct __kernel_timespec __user *timo);
98498 +asmlinkage long compat_sys_futex_requeue(struct compat_futex_requeue *uaddr1,
98499 +                                        struct compat_futex_requeue *uaddr2,
98500 +                                        compat_uint_t nr_wake,
98501 +                                        compat_uint_t nr_requeue,
98502 +                                        compat_uint_t cmpval,
98503 +                                        compat_uint_t flags);
98505  /* kernel/itimer.c */
98506  asmlinkage long compat_sys_getitimer(int which,
98507                                      struct old_itimerval32 __user *it);
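
The compat structs above mirror the native futex2 ABI with 32-bit pointer fields, so the compat entry points must widen uaddr via compat_ptr() after copying from userspace. A minimal sketch of that conversion (the native struct futex_waitv layout with uaddr/val/flags members is assumed from the futex2 series, and the helper name is hypothetical):

/* Sketch only: convert one 32-bit waiter descriptor to its native form. */
static int example_get_compat_waitv(struct futex_waitv *dst,
				    const struct compat_futex_waitv __user *src)
{
	struct compat_futex_waitv tmp;

	if (copy_from_user(&tmp, src, sizeof(tmp)))
		return -EFAULT;

	dst->uaddr = compat_ptr(tmp.uaddr);	/* widen the 32-bit user pointer */
	dst->val   = tmp.val;
	dst->flags = tmp.flags;
	return 0;
}
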
98508 diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
98509 index 153734816b49..d5b9c8d40c18 100644
98510 --- a/include/linux/console_struct.h
98511 +++ b/include/linux/console_struct.h
98512 @@ -101,6 +101,7 @@ struct vc_data {
98513         unsigned int    vc_rows;
98514         unsigned int    vc_size_row;            /* Bytes per row */
98515         unsigned int    vc_scan_lines;          /* # of scan lines */
98516 +       unsigned int    vc_cell_height;         /* CRTC character cell height */
98517         unsigned long   vc_origin;              /* [!] Start of real screen */
98518         unsigned long   vc_scr_end;             /* [!] End of real screen */
98519         unsigned long   vc_visible_origin;      /* [!] Top of visible window */
98520 diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
98521 index bceb06498521..4f4556232dcf 100644
98522 --- a/include/linux/context_tracking.h
98523 +++ b/include/linux/context_tracking.h
98524 @@ -131,16 +131,26 @@ static __always_inline void guest_enter_irqoff(void)
98525         }
98528 -static __always_inline void guest_exit_irqoff(void)
98529 +static __always_inline void context_tracking_guest_exit(void)
98531         if (context_tracking_enabled())
98532                 __context_tracking_exit(CONTEXT_GUEST);
98535 -       instrumentation_begin();
98536 +static __always_inline void vtime_account_guest_exit(void)
98538         if (vtime_accounting_enabled_this_cpu())
98539                 vtime_guest_exit(current);
98540         else
98541                 current->flags &= ~PF_VCPU;
98544 +static __always_inline void guest_exit_irqoff(void)
98546 +       context_tracking_guest_exit();
98548 +       instrumentation_begin();
98549 +       vtime_account_guest_exit();
98550         instrumentation_end();
98553 @@ -159,12 +169,19 @@ static __always_inline void guest_enter_irqoff(void)
98554         instrumentation_end();
98557 +static __always_inline void context_tracking_guest_exit(void) { }
98559 +static __always_inline void vtime_account_guest_exit(void)
98561 +       vtime_account_kernel(current);
98562 +       current->flags &= ~PF_VCPU;
98565  static __always_inline void guest_exit_irqoff(void)
98567         instrumentation_begin();
98568         /* Flush the guest cputime we spent on the guest */
98569 -       vtime_account_kernel(current);
98570 -       current->flags &= ~PF_VCPU;
98571 +       vtime_account_guest_exit();
98572         instrumentation_end();
98574  #endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
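
The context-tracking refactor above splits guest_exit_irqoff() into two steps so a hypervisor can leave CONTEXT_GUEST immediately and defer the cputime flush until instrumentation is allowed. A hedged sketch of the call order a KVM-style exit path could now use (the interrupt-handling step in the middle is the motivating assumption, not shown in this hunk):

context_tracking_guest_exit();		/* leave CONTEXT_GUEST right away */

/* ... handle pending host interrupts; they are still charged to the guest ... */

instrumentation_begin();
vtime_account_guest_exit();		/* flush guest cputime and clear PF_VCPU */
instrumentation_end();
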
98575 diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
98576 index f14adb882338..cc7c3fda2aa6 100644
98577 --- a/include/linux/cpuhotplug.h
98578 +++ b/include/linux/cpuhotplug.h
98579 @@ -135,6 +135,7 @@ enum cpuhp_state {
98580         CPUHP_AP_RISCV_TIMER_STARTING,
98581         CPUHP_AP_CLINT_TIMER_STARTING,
98582         CPUHP_AP_CSKY_TIMER_STARTING,
98583 +       CPUHP_AP_TI_GP_TIMER_STARTING,
98584         CPUHP_AP_HYPERV_TIMER_STARTING,
98585         CPUHP_AP_KVM_STARTING,
98586         CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
98587 diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
98588 index 706b68d1359b..13d1f4c14d7b 100644
98589 --- a/include/linux/dma-iommu.h
98590 +++ b/include/linux/dma-iommu.h
98591 @@ -40,6 +40,8 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
98592  void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
98593                 struct iommu_domain *domain);
98595 +extern bool iommu_dma_forcedac;
98597  #else /* CONFIG_IOMMU_DMA */
98599  struct iommu_domain;
98600 diff --git a/include/linux/elevator.h b/include/linux/elevator.h
98601 index 1fe8e105b83b..dcb2f9022c1d 100644
98602 --- a/include/linux/elevator.h
98603 +++ b/include/linux/elevator.h
98604 @@ -34,7 +34,7 @@ struct elevator_mq_ops {
98605         void (*depth_updated)(struct blk_mq_hw_ctx *);
98607         bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
98608 -       bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *, unsigned int);
98609 +       bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int);
98610         int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
98611         void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
98612         void (*requests_merged)(struct request_queue *, struct request *, struct request *);
98613 diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
98614 index 71177b17eee5..66e2423d9feb 100644
98615 --- a/include/linux/firmware/xlnx-zynqmp.h
98616 +++ b/include/linux/firmware/xlnx-zynqmp.h
98617 @@ -354,11 +354,6 @@ int zynqmp_pm_read_pggs(u32 index, u32 *value);
98618  int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype);
98619  int zynqmp_pm_set_boot_health_status(u32 value);
98620  #else
98621 -static inline struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void)
98623 -       return ERR_PTR(-ENODEV);
98626  static inline int zynqmp_pm_get_api_version(u32 *version)
98628         return -ENODEV;
98629 diff --git a/include/linux/freezer.h b/include/linux/freezer.h
98630 index 27828145ca09..504cc97bf475 100644
98631 --- a/include/linux/freezer.h
98632 +++ b/include/linux/freezer.h
98633 @@ -311,6 +311,7 @@ static inline void set_freezable(void) {}
98634  #define wait_event_freezekillable_unsafe(wq, condition)                        \
98635                 wait_event_killable(wq, condition)
98637 +#define pm_freezing (false)
98638  #endif /* !CONFIG_FREEZER */
98640  #endif /* FREEZER_H_INCLUDED */
98641 diff --git a/include/linux/fs.h b/include/linux/fs.h
98642 index ec8f3ddf4a6a..33683ff94cb3 100644
98643 --- a/include/linux/fs.h
98644 +++ b/include/linux/fs.h
98645 @@ -683,6 +683,7 @@ struct inode {
98646         };
98647         atomic64_t              i_version;
98648         atomic64_t              i_sequence; /* see futex */
98649 +       atomic64_t              i_sequence2; /* see futex2 */
98650         atomic_t                i_count;
98651         atomic_t                i_dio_count;
98652         atomic_t                i_writecount;
98653 diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
98654 index 286de0520574..ecf0032a0995 100644
98655 --- a/include/linux/gpio/driver.h
98656 +++ b/include/linux/gpio/driver.h
98657 @@ -624,8 +624,17 @@ void gpiochip_irq_domain_deactivate(struct irq_domain *domain,
98658  bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc,
98659                                 unsigned int offset);
98661 +#ifdef CONFIG_GPIOLIB_IRQCHIP
98662  int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
98663                                 struct irq_domain *domain);
98664 +#else
98665 +static inline int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
98666 +                                             struct irq_domain *domain)
98668 +       WARN_ON(1);
98669 +       return -EINVAL;
98671 +#endif
98673  int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset);
98674  void gpiochip_generic_free(struct gpio_chip *gc, unsigned int offset);
98675 diff --git a/include/linux/hid.h b/include/linux/hid.h
98676 index ef702b3f56e3..3e33eb14118c 100644
98677 --- a/include/linux/hid.h
98678 +++ b/include/linux/hid.h
98679 @@ -262,6 +262,8 @@ struct hid_item {
98680  #define HID_CP_SELECTION       0x000c0080
98681  #define HID_CP_MEDIASELECTION  0x000c0087
98682  #define HID_CP_SELECTDISC      0x000c00ba
98683 +#define HID_CP_VOLUMEUP                0x000c00e9
98684 +#define HID_CP_VOLUMEDOWN      0x000c00ea
98685  #define HID_CP_PLAYBACKSPEED   0x000c00f1
98686  #define HID_CP_PROXIMITY       0x000c0109
98687  #define HID_CP_SPEAKERSYSTEM   0x000c0160
98688 diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
98689 index ba973efcd369..0ba7b3f9029c 100644
98690 --- a/include/linux/huge_mm.h
98691 +++ b/include/linux/huge_mm.h
98692 @@ -443,6 +443,11 @@ static inline bool is_huge_zero_page(struct page *page)
98693         return false;
98696 +static inline bool is_huge_zero_pmd(pmd_t pmd)
98698 +       return false;
98701  static inline bool is_huge_zero_pud(pud_t pud)
98703         return false;
98704 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
98705 index 56622658b215..a670ae129f4b 100644
98706 --- a/include/linux/i2c.h
98707 +++ b/include/linux/i2c.h
98708 @@ -687,6 +687,8 @@ struct i2c_adapter_quirks {
98709  #define I2C_AQ_NO_ZERO_LEN_READ                BIT(5)
98710  #define I2C_AQ_NO_ZERO_LEN_WRITE       BIT(6)
98711  #define I2C_AQ_NO_ZERO_LEN             (I2C_AQ_NO_ZERO_LEN_READ | I2C_AQ_NO_ZERO_LEN_WRITE)
98712 +/* adapter cannot do repeated START */
98713 +#define I2C_AQ_NO_REP_START            BIT(7)
98715  /*
98716   * i2c_adapter is the structure used to identify a physical i2c bus along
98717 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
98718 index 1bc46b88711a..d1f32b33415a 100644
98719 --- a/include/linux/intel-iommu.h
98720 +++ b/include/linux/intel-iommu.h
98721 @@ -372,6 +372,7 @@ enum {
98722  /* PASID cache invalidation granu */
98723  #define QI_PC_ALL_PASIDS       0
98724  #define QI_PC_PASID_SEL                1
98725 +#define QI_PC_GLOBAL           3
98727  #define QI_EIOTLB_ADDR(addr)   ((u64)(addr) & VTD_PAGE_MASK)
98728  #define QI_EIOTLB_IH(ih)       (((u64)ih) << 6)
98729 diff --git a/include/linux/iommu.h b/include/linux/iommu.h
98730 index 5e7fe519430a..9ca6e6b8084d 100644
98731 --- a/include/linux/iommu.h
98732 +++ b/include/linux/iommu.h
98733 @@ -547,7 +547,7 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
98734          * structure can be rewritten.
98735          */
98736         if (gather->pgsize != size ||
98737 -           end < gather->start || start > gather->end) {
98738 +           end + 1 < gather->start || start > gather->end + 1) {
98739                 if (gather->pgsize)
98740                         iommu_iotlb_sync(domain, gather);
98741                 gather->pgsize = size;
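
The widened comparison above fixes an off-by-one in the gather merge test: ranges that merely touch are now coalesced instead of forcing a flush. Worked example: with a pending gather of [0x1000, 0x1fff] and a new page at [0x2000, 0x2fff], the old test start > gather->end (0x2000 > 0x1fff) treated the ranges as disjoint and synced early; the new test start > gather->end + 1 (0x2000 > 0x2000) is false, so both ranges merge into a single [0x1000, 0x2fff] invalidation.
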
98742 diff --git a/include/linux/ioport.h b/include/linux/ioport.h
98743 index 55de385c839c..647744d8514e 100644
98744 --- a/include/linux/ioport.h
98745 +++ b/include/linux/ioport.h
98746 @@ -331,7 +331,7 @@ static inline void irqresource_disabled(struct resource *res, u32 irq)
98748         res->start = irq;
98749         res->end = irq;
98750 -       res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
98751 +       res->flags |= IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
98754  extern struct address_space *iomem_get_mapping(void);
98755 diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
98756 index 05e22770af51..3ccd19f13f5c 100644
98757 --- a/include/linux/ipc_namespace.h
98758 +++ b/include/linux/ipc_namespace.h
98759 @@ -120,6 +120,9 @@ extern int mq_init_ns(struct ipc_namespace *ns);
98760  static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; }
98761  #endif
98763 +extern struct ipc_namespace *get_ipc_ns_exported(struct ipc_namespace *ns);
98764 +extern struct ipc_namespace *show_init_ipc_ns(void);
98766  #if defined(CONFIG_IPC_NS)
98767  extern struct ipc_namespace *copy_ipcs(unsigned long flags,
98768         struct user_namespace *user_ns, struct ipc_namespace *ns);
98769 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
98770 index 1b65e7204344..99dccea4293c 100644
98771 --- a/include/linux/kvm_host.h
98772 +++ b/include/linux/kvm_host.h
98773 @@ -192,8 +192,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
98774                     int len, void *val);
98775  int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
98776                             int len, struct kvm_io_device *dev);
98777 -void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
98778 -                              struct kvm_io_device *dev);
98779 +int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
98780 +                             struct kvm_io_device *dev);
98781  struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
98782                                          gpa_t addr);
98784 diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
98785 index 0c04d39a7967..cff95ed1ee2b 100644
98786 --- a/include/linux/memcontrol.h
98787 +++ b/include/linux/memcontrol.h
98788 @@ -212,6 +212,8 @@ struct obj_cgroup {
98789         };
98790  };
98792 +struct lru_gen_mm_list;
98794  /*
98795   * The memory controller data structure. The memory controller controls both
98796   * page cache and RSS per cgroup. We would eventually like to provide
98797 @@ -335,6 +337,10 @@ struct mem_cgroup {
98798         struct deferred_split deferred_split_queue;
98799  #endif
98801 +#ifdef CONFIG_LRU_GEN
98802 +       struct lru_gen_mm_list *mm_list;
98803 +#endif
98805         struct mem_cgroup_per_node *nodeinfo[0];
98806         /* WARNING: nodeinfo must be the last member here */
98807  };
98808 @@ -1077,7 +1083,6 @@ static inline struct mem_cgroup *page_memcg(struct page *page)
98810  static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
98812 -       WARN_ON_ONCE(!rcu_read_lock_held());
98813         return NULL;
98816 diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h
98817 index 1dbabf1b3cb8..6e0f66a2e727 100644
98818 --- a/include/linux/mfd/da9063/registers.h
98819 +++ b/include/linux/mfd/da9063/registers.h
98820 @@ -1037,6 +1037,9 @@
98821  #define                DA9063_NONKEY_PIN_AUTODOWN      0x02
98822  #define                DA9063_NONKEY_PIN_AUTOFLPRT     0x03
98824 +/* DA9063_REG_CONFIG_J (addr=0x10F) */
98825 +#define DA9063_TWOWIRE_TO                      0x40
98827  /* DA9063_REG_MON_REG_5 (addr=0x116) */
98828  #define DA9063_MON_A8_IDX_MASK                 0x07
98829  #define                DA9063_MON_A8_IDX_NONE          0x00
98830 diff --git a/include/linux/mfd/intel-m10-bmc.h b/include/linux/mfd/intel-m10-bmc.h
98831 index 74d4e193966a..9b54ca13eac3 100644
98832 --- a/include/linux/mfd/intel-m10-bmc.h
98833 +++ b/include/linux/mfd/intel-m10-bmc.h
98834 @@ -11,7 +11,7 @@
98836  #define M10BMC_LEGACY_SYS_BASE         0x300400
98837  #define M10BMC_SYS_BASE                        0x300800
98838 -#define M10BMC_MEM_END                 0x200000fc
98839 +#define M10BMC_MEM_END                 0x1fffffff
98841  /* Register offset of system registers */
98842  #define NIOS2_FW_VERSION               0x0
98843 diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
98844 index 53b89631a1d9..ab07f09f2bad 100644
98845 --- a/include/linux/mlx5/driver.h
98846 +++ b/include/linux/mlx5/driver.h
98847 @@ -1226,7 +1226,7 @@ enum {
98848         MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
98849  };
98851 -static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev)
98852 +static inline bool mlx5_is_roce_init_enabled(struct mlx5_core_dev *dev)
98854         struct devlink *devlink = priv_to_devlink(dev);
98855         union devlink_param_value val;
98856 diff --git a/include/linux/mm.h b/include/linux/mm.h
98857 index 8ba434287387..c0ecb207198c 100644
98858 --- a/include/linux/mm.h
98859 +++ b/include/linux/mm.h
98860 @@ -203,6 +203,9 @@ static inline void __mm_zero_struct_page(struct page *page)
98862  extern int sysctl_max_map_count;
98864 +extern unsigned long sysctl_clean_low_kbytes;
98865 +extern unsigned long sysctl_clean_min_kbytes;
98867  extern unsigned long sysctl_user_reserve_kbytes;
98868  extern unsigned long sysctl_admin_reserve_kbytes;
98870 @@ -1070,6 +1073,8 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
98871  #define ZONES_PGOFF            (NODES_PGOFF - ZONES_WIDTH)
98872  #define LAST_CPUPID_PGOFF      (ZONES_PGOFF - LAST_CPUPID_WIDTH)
98873  #define KASAN_TAG_PGOFF                (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
98874 +#define LRU_GEN_PGOFF          (KASAN_TAG_PGOFF - LRU_GEN_WIDTH)
98875 +#define LRU_USAGE_PGOFF                (LRU_GEN_PGOFF - LRU_USAGE_WIDTH)
98877  /*
98878   * Define the bit shifts to access each section.  For non-existent
98879 @@ -3170,5 +3175,37 @@ extern int sysctl_nr_trim_pages;
98881  void mem_dump_obj(void *object);
98883 +/**
98884 + * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it
98885 + * @seals: the seals to check
98886 + * @vma: the vma to operate on
98887 + *
98888 + * Check whether F_SEAL_FUTURE_WRITE is set; if so, apply the proper checks and
98889 + * handling to the vma flags.  Return 0 if the checks pass, or <0 on error.
98890 + */
98891 +static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
98893 +       if (seals & F_SEAL_FUTURE_WRITE) {
98894 +               /*
98895 +                * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
98896 +                * the "future write" seal is active.
98897 +                */
98898 +               if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
98899 +                       return -EPERM;
98901 +               /*
98902 +                * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
98903 +                * MAP_SHARED and read-only, take care to not allow mprotect to
98904 +                * revert protections on such mappings. Do this only for shared
98905 +                * mappings. For private mappings, we don't need to mask
98906 +                * VM_MAYWRITE as we still want them to be COW-writable.
98907 +                */
98908 +               if (vma->vm_flags & VM_SHARED)
98909 +                       vma->vm_flags &= ~(VM_MAYWRITE);
98910 +       }
98912 +       return 0;
98915  #endif /* __KERNEL__ */
98916  #endif /* _LINUX_MM_H */
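
seal_check_future_write() above is meant to run from a file's mmap handler before the mapping is finalized. A hedged sketch of a caller (example_get_seals() is a hypothetical accessor; in-tree users such as shmem would fetch the memfd seals at this point):

/* Sketch only: reject writable shared mappings of a write-sealed memfd. */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	int ret = seal_check_future_write(example_get_seals(file), vma);

	if (ret)
		return ret;	/* -EPERM for MAP_SHARED + PROT_WRITE on a sealed file */

	/* ... continue with the normal mmap setup ... */
	return 0;
}
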
98917 diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
98918 index 355ea1ee32bd..5eb4b12972ec 100644
98919 --- a/include/linux/mm_inline.h
98920 +++ b/include/linux/mm_inline.h
98921 @@ -79,11 +79,299 @@ static __always_inline enum lru_list page_lru(struct page *page)
98922         return lru;
98925 +#ifdef CONFIG_LRU_GEN
98927 +#ifdef CONFIG_LRU_GEN_ENABLED
98928 +DECLARE_STATIC_KEY_TRUE(lru_gen_static_key);
98929 +#define lru_gen_enabled() static_branch_likely(&lru_gen_static_key)
98930 +#else
98931 +DECLARE_STATIC_KEY_FALSE(lru_gen_static_key);
98932 +#define lru_gen_enabled() static_branch_unlikely(&lru_gen_static_key)
98933 +#endif
98935 +/* We track at most MAX_NR_GENS generations using the sliding window technique. */
98936 +static inline int lru_gen_from_seq(unsigned long seq)
98938 +       return seq % MAX_NR_GENS;
98941 +/* Convert the level of usage to a tier. See the comment on MAX_NR_TIERS. */
98942 +static inline int lru_tier_from_usage(int usage)
98944 +       return order_base_2(usage + 1);
98947 +/* Return a proper index regardless of whether we keep a full history of stats. */
98948 +static inline int sid_from_seq_or_gen(int seq_or_gen)
98950 +       return seq_or_gen % NR_STAT_GENS;
98953 +/* The youngest and the second youngest generations are considered active. */
98954 +static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
98956 +       unsigned long max_seq = READ_ONCE(lruvec->evictable.max_seq);
98958 +       VM_BUG_ON(!max_seq);
98959 +       VM_BUG_ON(gen >= MAX_NR_GENS);
98961 +       return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
98964 +/* Update the sizes of the multigenerational lru. */
98965 +static inline void lru_gen_update_size(struct page *page, struct lruvec *lruvec,
98966 +                                      int old_gen, int new_gen)
98968 +       int file = page_is_file_lru(page);
98969 +       int zone = page_zonenum(page);
98970 +       int delta = thp_nr_pages(page);
98971 +       enum lru_list lru = LRU_FILE * file;
98972 +       struct lrugen *lrugen = &lruvec->evictable;
98974 +       lockdep_assert_held(&lruvec->lru_lock);
98975 +       VM_BUG_ON(old_gen != -1 && old_gen >= MAX_NR_GENS);
98976 +       VM_BUG_ON(new_gen != -1 && new_gen >= MAX_NR_GENS);
98977 +       VM_BUG_ON(old_gen == -1 && new_gen == -1);
98979 +       if (old_gen >= 0)
98980 +               WRITE_ONCE(lrugen->sizes[old_gen][file][zone],
98981 +                          lrugen->sizes[old_gen][file][zone] - delta);
98982 +       if (new_gen >= 0)
98983 +               WRITE_ONCE(lrugen->sizes[new_gen][file][zone],
98984 +                          lrugen->sizes[new_gen][file][zone] + delta);
98986 +       if (old_gen < 0) {
98987 +               if (lru_gen_is_active(lruvec, new_gen))
98988 +                       lru += LRU_ACTIVE;
98989 +               update_lru_size(lruvec, lru, zone, delta);
98990 +               return;
98991 +       }
98993 +       if (new_gen < 0) {
98994 +               if (lru_gen_is_active(lruvec, old_gen))
98995 +                       lru += LRU_ACTIVE;
98996 +               update_lru_size(lruvec, lru, zone, -delta);
98997 +               return;
98998 +       }
99000 +       if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
99001 +               update_lru_size(lruvec, lru, zone, -delta);
99002 +               update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
99003 +       }
99005 +       VM_BUG_ON(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
99008 +/* Add a page to a list of the multigenerational lru. Return true on success. */
99009 +static inline bool lru_gen_addition(struct page *page, struct lruvec *lruvec, bool front)
99011 +       int gen;
99012 +       unsigned long old_flags, new_flags;
99013 +       int file = page_is_file_lru(page);
99014 +       int zone = page_zonenum(page);
99015 +       struct lrugen *lrugen = &lruvec->evictable;
99017 +       if (PageUnevictable(page) || !lrugen->enabled[file])
99018 +               return false;
99019 +       /*
99020 +        * If a page is being faulted in, add it to the youngest generation.
99021 +        * try_walk_mm_list() may look at the size of the youngest generation to
99022 +        * determine if the aging is due.
99023 +        *
99024 +        * If a page can't be evicted immediately, i.e., a shmem page not in
99025 +        * swap cache, a dirty page waiting on writeback, or a page rejected by
99026 +        * evict_lru_gen_pages() due to races, dirty buffer heads, etc., add it
99027 +        * to the second oldest generation.
99028 +        *
99029 +        * If a page could be evicted immediately, i.e., deactivated, rotated by
99030 +        * writeback, or allocated for buffered io, add it to the oldest
99031 +        * generation.
99032 +        */
99033 +       if (PageActive(page))
99034 +               gen = lru_gen_from_seq(lrugen->max_seq);
99035 +       else if ((!file && !PageSwapCache(page)) ||
99036 +                (PageReclaim(page) && (PageDirty(page) || PageWriteback(page))) ||
99037 +                (!PageReferenced(page) && PageWorkingset(page)))
99038 +               gen = lru_gen_from_seq(lrugen->min_seq[file] + 1);
99039 +       else
99040 +               gen = lru_gen_from_seq(lrugen->min_seq[file]);
99042 +       do {
99043 +               old_flags = READ_ONCE(page->flags);
99044 +               VM_BUG_ON_PAGE(old_flags & LRU_GEN_MASK, page);
99046 +               new_flags = (old_flags & ~(LRU_GEN_MASK | BIT(PG_active))) |
99047 +                           ((gen + 1UL) << LRU_GEN_PGOFF);
99048 +               /* see the comment in evict_lru_gen_pages() */
99049 +               if (!(old_flags & BIT(PG_referenced)))
99050 +                       new_flags &= ~(LRU_USAGE_MASK | LRU_TIER_FLAGS);
99051 +       } while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
99053 +       lru_gen_update_size(page, lruvec, -1, gen);
99054 +       if (front)
99055 +               list_add(&page->lru, &lrugen->lists[gen][file][zone]);
99056 +       else
99057 +               list_add_tail(&page->lru, &lrugen->lists[gen][file][zone]);
99059 +       return true;
99062 +/* Delete a page from a list of the multigenerational lru. Return true on success. */
99063 +static inline bool lru_gen_deletion(struct page *page, struct lruvec *lruvec)
99065 +       int gen;
99066 +       unsigned long old_flags, new_flags;
99068 +       do {
99069 +               old_flags = READ_ONCE(page->flags);
99070 +               if (!(old_flags & LRU_GEN_MASK))
99071 +                       return false;
99073 +               VM_BUG_ON_PAGE(PageActive(page), page);
99074 +               VM_BUG_ON_PAGE(PageUnevictable(page), page);
99076 +               gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
99078 +               new_flags = old_flags & ~LRU_GEN_MASK;
99079 +               /* mark page active accordingly */
99080 +               if (lru_gen_is_active(lruvec, gen))
99081 +                       new_flags |= BIT(PG_active);
99082 +       } while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
99084 +       lru_gen_update_size(page, lruvec, gen, -1);
99085 +       list_del(&page->lru);
99087 +       return true;
99090 +/* Activate a page from page cache or swap cache after it's mapped. */
99091 +static inline void lru_gen_activation(struct page *page, struct vm_area_struct *vma)
99093 +       if (!lru_gen_enabled())
99094 +               return;
99096 +       if (PageActive(page) || PageUnevictable(page) || vma_is_dax(vma) ||
99097 +           (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)))
99098 +               return;
99099 +       /*
99100 +        * TODO: pass vm_fault to add_to_page_cache_lru() and
99101 +        * __read_swap_cache_async() so they can activate pages directly when in
99102 +        * the page fault path.
99103 +        */
99104 +       activate_page(page);
99107 +/* Return -1 when a page is not on a list of the multigenerational lru. */
99108 +static inline int page_lru_gen(struct page *page)
99110 +       return ((READ_ONCE(page->flags) & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
99113 +/* This function works regardless of whether the multigenerational lru is enabled. */
99114 +static inline bool page_is_active(struct page *page, struct lruvec *lruvec)
99116 +       struct mem_cgroup *memcg;
99117 +       int gen = page_lru_gen(page);
99118 +       bool active = false;
99120 +       VM_BUG_ON_PAGE(PageTail(page), page);
99122 +       if (gen < 0)
99123 +               return PageActive(page);
99125 +       if (lruvec) {
99126 +               VM_BUG_ON_PAGE(PageUnevictable(page), page);
99127 +               VM_BUG_ON_PAGE(PageActive(page), page);
99128 +               lockdep_assert_held(&lruvec->lru_lock);
99130 +               return lru_gen_is_active(lruvec, gen);
99131 +       }
99133 +       rcu_read_lock();
99135 +       memcg = page_memcg_rcu(page);
99136 +       lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
99137 +       active = lru_gen_is_active(lruvec, gen);
99139 +       rcu_read_unlock();
99141 +       return active;
99144 +/* Return the level of usage of a page. See the comment on MAX_NR_TIERS. */
99145 +static inline int page_tier_usage(struct page *page)
99147 +       unsigned long flags = READ_ONCE(page->flags);
99149 +       return flags & BIT(PG_workingset) ?
99150 +              ((flags & LRU_USAGE_MASK) >> LRU_USAGE_PGOFF) + 1 : 0;
99153 +/* Increment the usage counter after a page is accessed via file descriptors. */
99154 +static inline bool page_inc_usage(struct page *page)
99156 +       unsigned long old_flags, new_flags;
99158 +       if (!lru_gen_enabled())
99159 +               return PageActive(page);
99161 +       do {
99162 +               old_flags = READ_ONCE(page->flags);
99164 +               if (!(old_flags & BIT(PG_workingset)))
99165 +                       new_flags = old_flags | BIT(PG_workingset);
99166 +               else
99167 +                       new_flags = (old_flags & ~LRU_USAGE_MASK) | min(LRU_USAGE_MASK,
99168 +                                   (old_flags & LRU_USAGE_MASK) + BIT(LRU_USAGE_PGOFF));
99170 +               if (old_flags == new_flags)
99171 +                       break;
99172 +       } while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
99174 +       return true;
99177 +#else /* CONFIG_LRU_GEN */
99179 +static inline bool lru_gen_enabled(void)
99181 +       return false;
99184 +static inline bool lru_gen_addition(struct page *page, struct lruvec *lruvec, bool front)
99186 +       return false;
99189 +static inline bool lru_gen_deletion(struct page *page, struct lruvec *lruvec)
99191 +       return false;
99194 +static inline void lru_gen_activation(struct page *page, struct vm_area_struct *vma)
99198 +static inline bool page_is_active(struct page *page, struct lruvec *lruvec)
99200 +       return PageActive(page);
99203 +static inline bool page_inc_usage(struct page *page)
99205 +       return PageActive(page);
99208 +#endif /* CONFIG_LRU_GEN */
99210  static __always_inline void add_page_to_lru_list(struct page *page,
99211                                 struct lruvec *lruvec)
99213         enum lru_list lru = page_lru(page);
99215 +       if (lru_gen_addition(page, lruvec, true))
99216 +               return;
99218         update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
99219         list_add(&page->lru, &lruvec->lists[lru]);
99221 @@ -93,6 +381,9 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page,
99223         enum lru_list lru = page_lru(page);
99225 +       if (lru_gen_addition(page, lruvec, false))
99226 +               return;
99228         update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
99229         list_add_tail(&page->lru, &lruvec->lists[lru]);
99231 @@ -100,6 +391,9 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page,
99232  static __always_inline void del_page_from_lru_list(struct page *page,
99233                                 struct lruvec *lruvec)
99235 +       if (lru_gen_deletion(page, lruvec))
99236 +               return;
99238         list_del(&page->lru);
99239         update_lru_size(lruvec, page_lru(page), page_zonenum(page),
99240                         -thp_nr_pages(page));
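
A worked example of the page->flags encoding used by the helpers above, with values assumed purely for illustration: lru_gen_addition() stores gen + 1 in the LRU_GEN_MASK bits so that 0 can mean "not on a multigenerational list", and page_lru_gen() undoes the bias.

/* Illustration only, not kernel code: MAX_NR_GENS == 4, max_seq == 7 assumed. */
unsigned long max_seq = 7;
int gen = max_seq % 4;		/* lru_gen_from_seq() -> 3 (the youngest) */
unsigned long stored = gen + 1;	/* value placed in the LRU_GEN_MASK bits -> 4 */
int recovered = stored - 1;	/* page_lru_gen() -> 3; a stored 0 means "not listed" */
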
99241 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
99242 index 6613b26a8894..b936703a39a2 100644
99243 --- a/include/linux/mm_types.h
99244 +++ b/include/linux/mm_types.h
99245 @@ -15,6 +15,8 @@
99246  #include <linux/page-flags-layout.h>
99247  #include <linux/workqueue.h>
99248  #include <linux/seqlock.h>
99249 +#include <linux/nodemask.h>
99250 +#include <linux/mmdebug.h>
99252  #include <asm/mmu.h>
99254 @@ -97,10 +99,10 @@ struct page {
99255                 };
99256                 struct {        /* page_pool used by netstack */
99257                         /**
99258 -                        * @dma_addr: might require a 64-bit value even on
99259 +                        * @dma_addr: might require a 64-bit value on
99260                          * 32-bit architectures.
99261                          */
99262 -                       dma_addr_t dma_addr;
99263 +                       unsigned long dma_addr[2];
99264                 };
99265                 struct {        /* slab, slob and slub */
99266                         union {
99267 @@ -383,6 +385,8 @@ struct core_state {
99268         struct completion startup;
99269  };
99271 +#define ANON_AND_FILE 2
99273  struct kioctx_table;
99274  struct mm_struct {
99275         struct {
99276 @@ -561,6 +565,22 @@ struct mm_struct {
99278  #ifdef CONFIG_IOMMU_SUPPORT
99279                 u32 pasid;
99280 +#endif
99281 +#ifdef CONFIG_LRU_GEN
99282 +               struct {
99283 +                       /* the node of a global or per-memcg mm_struct list */
99284 +                       struct list_head list;
99285 +#ifdef CONFIG_MEMCG
99286 +                       /* points to memcg of the owner task above */
99287 +                       struct mem_cgroup *memcg;
99288 +#endif
99289 +                       /* whether this mm_struct has been used since the last walk */
99290 +                       nodemask_t nodes[ANON_AND_FILE];
99291 +#ifndef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
99292 +                       /* the number of CPUs using this mm_struct */
99293 +                       atomic_t nr_cpus;
99294 +#endif
99295 +               } lrugen;
99296  #endif
99297         } __randomize_layout;
99299 @@ -588,6 +608,103 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
99300         return (struct cpumask *)&mm->cpu_bitmap;
99303 +#ifdef CONFIG_LRU_GEN
99305 +void lru_gen_init_mm(struct mm_struct *mm);
99306 +void lru_gen_add_mm(struct mm_struct *mm);
99307 +void lru_gen_del_mm(struct mm_struct *mm);
99308 +#ifdef CONFIG_MEMCG
99309 +int lru_gen_alloc_mm_list(struct mem_cgroup *memcg);
99310 +void lru_gen_free_mm_list(struct mem_cgroup *memcg);
99311 +void lru_gen_migrate_mm(struct mm_struct *mm);
99312 +#endif
99314 +/*
99315 + * Track the usage so mm_structs that haven't been used since the last walk can
99316 + * be skipped. This function adds a theoretical overhead to each context switch,
99317 + * which has not been measurable in practice.
99318 + */
99319 +static inline void lru_gen_switch_mm(struct mm_struct *old, struct mm_struct *new)
99321 +       int file;
99323 +       /* exclude init_mm, efi_mm, etc. */
99324 +       if (!core_kernel_data((unsigned long)old)) {
99325 +               VM_BUG_ON(old == &init_mm);
99327 +               for (file = 0; file < ANON_AND_FILE; file++)
99328 +                       nodes_setall(old->lrugen.nodes[file]);
99330 +#ifndef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
99331 +               atomic_dec(&old->lrugen.nr_cpus);
99332 +               VM_BUG_ON_MM(atomic_read(&old->lrugen.nr_cpus) < 0, old);
99333 +#endif
99334 +       } else
99335 +               VM_BUG_ON_MM(READ_ONCE(old->lrugen.list.prev) ||
99336 +                            READ_ONCE(old->lrugen.list.next), old);
99338 +       if (!core_kernel_data((unsigned long)new)) {
99339 +               VM_BUG_ON(new == &init_mm);
99341 +#ifndef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
99342 +               atomic_inc(&new->lrugen.nr_cpus);
99343 +               VM_BUG_ON_MM(atomic_read(&new->lrugen.nr_cpus) < 0, new);
99344 +#endif
99345 +       } else
99346 +               VM_BUG_ON_MM(READ_ONCE(new->lrugen.list.prev) ||
99347 +                            READ_ONCE(new->lrugen.list.next), new);
99350 +/* Return whether this mm_struct is being used on any CPU. */
99351 +static inline bool lru_gen_mm_is_active(struct mm_struct *mm)
99353 +#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
99354 +       return !cpumask_empty(mm_cpumask(mm));
99355 +#else
99356 +       return atomic_read(&mm->lrugen.nr_cpus);
99357 +#endif
99360 +#else /* CONFIG_LRU_GEN */
99362 +static inline void lru_gen_init_mm(struct mm_struct *mm)
99366 +static inline void lru_gen_add_mm(struct mm_struct *mm)
99370 +static inline void lru_gen_del_mm(struct mm_struct *mm)
99374 +#ifdef CONFIG_MEMCG
99375 +static inline int lru_gen_alloc_mm_list(struct mem_cgroup *memcg)
99377 +       return 0;
99380 +static inline void lru_gen_free_mm_list(struct mem_cgroup *memcg)
99384 +static inline void lru_gen_migrate_mm(struct mm_struct *mm)
99387 +#endif
99389 +static inline void lru_gen_switch_mm(struct mm_struct *old, struct mm_struct *new)
99393 +static inline bool lru_gen_mm_is_active(struct mm_struct *mm)
99395 +       return false;
99398 +#endif /* CONFIG_LRU_GEN */
99400  struct mmu_gather;
99401  extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
99402  extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
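
lru_gen_switch_mm() above is written to be cheap enough for the context-switch path: it only sets nodemask bits on the outgoing mm and adjusts a usage counter. A hedged sketch of the intended call site and its effects (placing it in the scheduler's context switch is an assumption based on the comment, not shown in this hunk):

/* Sketch: on a switch from prev's mm to next's mm */
lru_gen_switch_mm(prev_mm, next_mm);
/* - prev_mm: nodes[] set to all nodes ("used since the last walk"), nr_cpus-- */
/* - next_mm: nr_cpus++; with CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH the
 *   mm_cpumask() is consulted instead of the counter */
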
99403 diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
99404 index 26a3c7bc29ae..a3a4e374f802 100644
99405 --- a/include/linux/mmc/host.h
99406 +++ b/include/linux/mmc/host.h
99407 @@ -302,9 +302,6 @@ struct mmc_host {
99408         u32                     ocr_avail_sdio; /* SDIO-specific OCR */
99409         u32                     ocr_avail_sd;   /* SD-specific OCR */
99410         u32                     ocr_avail_mmc;  /* MMC-specific OCR */
99411 -#ifdef CONFIG_PM_SLEEP
99412 -       struct notifier_block   pm_notify;
99413 -#endif
99414         struct wakeup_source    *ws;            /* Enable consume of uevents */
99415         u32                     max_current_330;
99416         u32                     max_current_300;
99417 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
99418 index 47946cec7584..a22e9e40083f 100644
99419 --- a/include/linux/mmzone.h
99420 +++ b/include/linux/mmzone.h
99421 @@ -285,14 +285,124 @@ static inline bool is_active_lru(enum lru_list lru)
99422         return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
99425 -#define ANON_AND_FILE 2
99427  enum lruvec_flags {
99428         LRUVEC_CONGESTED,               /* lruvec has many dirty pages
99429                                          * backed by a congested BDI
99430                                          */
99431  };
99433 +struct lruvec;
99434 +struct page_vma_mapped_walk;
99436 +#define LRU_GEN_MASK           ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
99437 +#define LRU_USAGE_MASK         ((BIT(LRU_USAGE_WIDTH) - 1) << LRU_USAGE_PGOFF)
99439 +#ifdef CONFIG_LRU_GEN
99442 + * For each lruvec, evictable pages are divided into multiple generations. The
99443 + * youngest and the oldest generation numbers, AKA max_seq and min_seq, are
99444 + * monotonically increasing. The sliding window technique is used to track at
99445 + * most MAX_NR_GENS and at least MIN_NR_GENS generations. An offset within the
99446 + * window, AKA gen, indexes an array of per-type and per-zone lists for the
99447 + * corresponding generation. All pages from this array of lists have gen+1
99448 + * stored in page->flags. 0 is reserved to indicate that pages are not on the
99449 + * lists.
99450 + */
99451 +#define MAX_NR_GENS            ((unsigned int)CONFIG_NR_LRU_GENS)
99454 + * Each generation is then divided into multiple tiers. Tiers represent levels
99455 + * of usage from file descriptors, i.e., mark_page_accessed(). In contrast to
99456 + * moving across generations which requires the lru lock, moving across tiers
99457 + * only involves an atomic operation on page->flags and therefore has a
99458 + * negligible cost.
99459 + *
99460 + * The purposes of tiers are to:
99461 + *   1) estimate whether pages accessed multiple times via file descriptors are
99462 + *   more active than pages accessed only via page tables by separating the two
99463 + *   access types into upper tiers and the base tier and comparing refault rates
99464 + *   across tiers.
99465 + *   2) improve buffered io performance by deferring activations of pages
99466 + *   accessed multiple times until the eviction. That is, activations happen in
99467 + *   the reclaim path, not the access path.
99468 + *
99469 + * Pages accessed N times via file descriptors belong to tier order_base_2(N).
99470 + * The base tier uses the following page flag:
99471 + *   !PageReferenced() -- readahead pages
99472 + *   PageReferenced() -- single-access pages
99473 + * All upper tiers use the following page flags:
99474 + *   PageReferenced() && PageWorkingset() -- multi-access pages
99475 + * in addition to the bits storing N-2 accesses. Therefore, we can support one
99476 + * upper tier without using additional bits in page->flags.
99477 + *
99478 + * Note that
99479 + *   1) PageWorkingset() is always set for upper tiers because we want to
99480 + *   maintain the existing psi behavior.
99481 + *   2) !PageReferenced() && PageWorkingset() is not a valid tier. See the
99482 + *   comment in evict_lru_gen_pages().
99483 + *   3) pages accessed only via page tables belong to the base tier.
99484 + *
99485 + * Pages from the base tier are evicted regardless of the refault rate. Pages
99486 + * from upper tiers will be moved to the next generation if their refault rates
99487 + * are higher than that of the base tier.
99488 + */
99489 +#define MAX_NR_TIERS           ((unsigned int)CONFIG_TIERS_PER_GEN)
99490 +#define LRU_TIER_FLAGS         (BIT(PG_referenced) | BIT(PG_workingset))
99491 +#define LRU_USAGE_SHIFT                (CONFIG_TIERS_PER_GEN - 1)
99493 +/* Whether to keep historical stats for each generation. */
99494 +#ifdef CONFIG_LRU_GEN_STATS
99495 +#define NR_STAT_GENS           ((unsigned int)CONFIG_NR_LRU_GENS)
99496 +#else
99497 +#define NR_STAT_GENS           1U
99498 +#endif
99500 +struct lrugen {
99501 +       /* the aging increments the max generation number */
99502 +       unsigned long max_seq;
99503 +       /* the eviction increments the min generation numbers */
99504 +       unsigned long min_seq[ANON_AND_FILE];
99505 +       /* the birth time of each generation in jiffies */
99506 +       unsigned long timestamps[MAX_NR_GENS];
99507 +       /* the lists of the multigenerational lru */
99508 +       struct list_head lists[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
99509 +       /* the sizes of the multigenerational lru in pages */
99510 +       unsigned long sizes[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
99511 +       /* to determine which type and its tiers to evict */
99512 +       atomic_long_t evicted[NR_STAT_GENS][ANON_AND_FILE][MAX_NR_TIERS];
99513 +       atomic_long_t refaulted[NR_STAT_GENS][ANON_AND_FILE][MAX_NR_TIERS];
99514 +       /* the base tier is inactive and won't be activated */
99515 +       unsigned long activated[NR_STAT_GENS][ANON_AND_FILE][MAX_NR_TIERS - 1];
99516 +       /* arithmetic mean weighted by geometric series 1/2, 1/4, ... */
99517 +       unsigned long avg_total[ANON_AND_FILE][MAX_NR_TIERS];
99518 +       unsigned long avg_refaulted[ANON_AND_FILE][MAX_NR_TIERS];
99519 +       /* reclaim priority to compare across memcgs */
99520 +       atomic_t priority;
99521 +       /* whether the multigenerational lru is enabled */
99522 +       bool enabled[ANON_AND_FILE];
99525 +void lru_gen_init_lruvec(struct lruvec *lruvec);
99526 +void lru_gen_set_state(bool enable, bool main, bool swap);
99527 +void lru_gen_scan_around(struct page_vma_mapped_walk *pvmw);
99529 +#else /* CONFIG_LRU_GEN */
99531 +static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
99535 +static inline void lru_gen_set_state(bool enable, bool main, bool swap)
99539 +static inline void lru_gen_scan_around(struct page_vma_mapped_walk *pvmw)
99543 +#endif /* CONFIG_LRU_GEN */
99545  struct lruvec {
99546         struct list_head                lists[NR_LRU_LISTS];
99547         /* per lruvec lru_lock for memcg */
99548 @@ -310,6 +420,10 @@ struct lruvec {
99549         unsigned long                   refaults[ANON_AND_FILE];
99550         /* Various lruvec state flags (enum lruvec_flags) */
99551         unsigned long                   flags;
99552 +#ifdef CONFIG_LRU_GEN
99553 +       /* unevictable pages are on LRU_UNEVICTABLE */
99554 +       struct lrugen                   evictable;
99555 +#endif
99556  #ifdef CONFIG_MEMCG
99557         struct pglist_data *pgdat;
99558  #endif
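
A worked example of the tier math documented above: lru_tier_from_usage() maps a usage counter to order_base_2(usage + 1), so the tiers fill up geometrically.

/* Illustration: usage -> tier, per lru_tier_from_usage() above.
 * usage 0      -> tier 0 (base tier)
 * usage 1      -> tier 1
 * usage 2..3   -> tier 2
 * usage 4..7   -> tier 3
 * The reachable maximum depends on LRU_USAGE_WIDTH, i.e. CONFIG_TIERS_PER_GEN. */
static int example_tier(int usage)
{
	return order_base_2(usage + 1);
}
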
99559 diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
99560 index 3327239fa2f9..cc29dee508f7 100644
99561 --- a/include/linux/nfs_xdr.h
99562 +++ b/include/linux/nfs_xdr.h
99563 @@ -15,6 +15,8 @@
99564  #define NFS_DEF_FILE_IO_SIZE   (4096U)
99565  #define NFS_MIN_FILE_IO_SIZE   (1024U)
99567 +#define NFS_BITMASK_SZ         3
99569  struct nfs4_string {
99570         unsigned int len;
99571         char *data;
99572 @@ -525,7 +527,8 @@ struct nfs_closeargs {
99573         struct nfs_seqid *      seqid;
99574         fmode_t                 fmode;
99575         u32                     share_access;
99576 -       u32 *                   bitmask;
99577 +       const u32 *             bitmask;
99578 +       u32                     bitmask_store[NFS_BITMASK_SZ];
99579         struct nfs4_layoutreturn_args *lr_args;
99580  };
99582 @@ -608,7 +611,8 @@ struct nfs4_delegreturnargs {
99583         struct nfs4_sequence_args       seq_args;
99584         const struct nfs_fh *fhandle;
99585         const nfs4_stateid *stateid;
99586 -       u32 * bitmask;
99587 +       const u32 *bitmask;
99588 +       u32 bitmask_store[NFS_BITMASK_SZ];
99589         struct nfs4_layoutreturn_args *lr_args;
99590  };
99592 @@ -648,7 +652,8 @@ struct nfs_pgio_args {
99593         union {
99594                 unsigned int            replen;                 /* used by read */
99595                 struct {
99596 -                       u32 *                   bitmask;        /* used by write */
99597 +                       const u32 *             bitmask;        /* used by write */
99598 +                       u32 bitmask_store[NFS_BITMASK_SZ];      /* used by write */
99599                         enum nfs3_stable_how    stable;         /* used by write */
99600                 };
99601         };
99602 diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
99603 index ac398e143c9a..89fe4e3592f9 100644
99604 --- a/include/linux/nodemask.h
99605 +++ b/include/linux/nodemask.h
99606 @@ -486,6 +486,7 @@ static inline int num_node_state(enum node_states state)
99607  #define first_online_node      0
99608  #define first_memory_node      0
99609  #define next_online_node(nid)  (MAX_NUMNODES)
99610 +#define next_memory_node(nid)  (MAX_NUMNODES)
99611  #define nr_node_ids            1U
99612  #define nr_online_nodes                1U
99614 diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
99615 index 7d4ec26d8a3e..df83aaec8498 100644
99616 --- a/include/linux/page-flags-layout.h
99617 +++ b/include/linux/page-flags-layout.h
99618 @@ -24,6 +24,17 @@
99619  #error ZONES_SHIFT -- too many zones configured adjust calculation
99620  #endif
99622 +#ifdef CONFIG_LRU_GEN
99624 + * LRU_GEN_WIDTH is generated from order_base_2(CONFIG_NR_LRU_GENS + 1). The
99625 + * comment on MAX_NR_TIERS explains why we offset by 2 here.
99626 + */
99627 +#define LRU_USAGE_WIDTH                (CONFIG_TIERS_PER_GEN - 2)
99628 +#else
99629 +#define LRU_GEN_WIDTH          0
99630 +#define LRU_USAGE_WIDTH                0
99631 +#endif
99633  #ifdef CONFIG_SPARSEMEM
99634  #include <asm/sparsemem.h>
99636 @@ -56,7 +67,8 @@
99638  #define ZONES_WIDTH            ZONES_SHIFT
99640 -#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
99641 +#if SECTIONS_WIDTH+ZONES_WIDTH+LRU_GEN_WIDTH+LRU_USAGE_WIDTH+NODES_SHIFT \
99642 +       <= BITS_PER_LONG - NR_PAGEFLAGS
99643  #define NODES_WIDTH            NODES_SHIFT
99644  #else
99645  #ifdef CONFIG_SPARSEMEM_VMEMMAP
99646 @@ -83,14 +95,16 @@
99647  #define KASAN_TAG_WIDTH 0
99648  #endif
99650 -#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT+KASAN_TAG_WIDTH \
99651 +#if SECTIONS_WIDTH+ZONES_WIDTH+LRU_GEN_WIDTH+LRU_USAGE_WIDTH+ \
99652 +       NODES_WIDTH+KASAN_TAG_WIDTH+LAST_CPUPID_SHIFT \
99653         <= BITS_PER_LONG - NR_PAGEFLAGS
99654  #define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
99655  #else
99656  #define LAST_CPUPID_WIDTH 0
99657  #endif
99659 -#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \
99660 +#if SECTIONS_WIDTH+ZONES_WIDTH+LRU_GEN_WIDTH+LRU_USAGE_WIDTH+ \
99661 +       NODES_WIDTH+KASAN_TAG_WIDTH+LAST_CPUPID_WIDTH \
99662         > BITS_PER_LONG - NR_PAGEFLAGS
99663  #error "Not enough bits in page flags"
99664  #endif
99665 diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
99666 index 04a34c08e0a6..e58984fca32a 100644
99667 --- a/include/linux/page-flags.h
99668 +++ b/include/linux/page-flags.h
99669 @@ -817,7 +817,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
99670          1UL << PG_private      | 1UL << PG_private_2   |       \
99671          1UL << PG_writeback    | 1UL << PG_reserved    |       \
99672          1UL << PG_slab         | 1UL << PG_active      |       \
99673 -        1UL << PG_unevictable  | __PG_MLOCKED)
99674 +        1UL << PG_unevictable  | __PG_MLOCKED | LRU_GEN_MASK)
99676  /*
99677   * Flags checked when a page is prepped for return by the page allocator.
99678 @@ -828,7 +828,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
99679   * alloc-free cycle to prevent from reusing the page.
99680   */
99681  #define PAGE_FLAGS_CHECK_AT_PREP       \
99682 -       (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
99683 +       ((((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_USAGE_MASK)
99685  #define PAGE_FLAGS_PRIVATE                             \
99686         (1UL << PG_private | 1UL << PG_private_2)
99687 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
99688 index 3f7f89ea5e51..3d478abf411c 100644
99689 --- a/include/linux/perf_event.h
99690 +++ b/include/linux/perf_event.h
99691 @@ -607,6 +607,7 @@ struct swevent_hlist {
99692  #define PERF_ATTACH_TASK_DATA  0x08
99693  #define PERF_ATTACH_ITRACE     0x10
99694  #define PERF_ATTACH_SCHED_CB   0x20
99695 +#define PERF_ATTACH_CHILD      0x40
99697  struct perf_cgroup;
99698  struct perf_buffer;
99699 diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
99700 index 5e772392a379..08dd9b8c055a 100644
99701 --- a/include/linux/pgtable.h
99702 +++ b/include/linux/pgtable.h
99703 @@ -193,7 +193,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
99704  #endif
99706  #ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
99707 -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
99708 +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG)
99709  static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
99710                                             unsigned long address,
99711                                             pmd_t *pmdp)
99712 @@ -214,7 +214,7 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
99713         BUILD_BUG();
99714         return 0;
99716 -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
99717 +#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG */
99718  #endif
99720  #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
99721 diff --git a/include/linux/phy.h b/include/linux/phy.h
99722 index 1a12e4436b5b..8644b097dea3 100644
99723 --- a/include/linux/phy.h
99724 +++ b/include/linux/phy.h
99725 @@ -493,6 +493,7 @@ struct macsec_ops;
99726   * @loopback_enabled: Set true if this PHY has been loopbacked successfully.
99727   * @downshifted_rate: Set true if link speed has been downshifted.
99728   * @is_on_sfp_module: Set true if PHY is located on an SFP module.
99729 + * @mac_managed_pm: Set true if the MAC driver takes care of suspending/resuming the PHY
99730   * @state: State of the PHY for management purposes
99731   * @dev_flags: Device-specific flags used by the PHY driver.
99732   * @irq: IRQ number of the PHY's interrupt (-1 if none)
99733 @@ -567,6 +568,7 @@ struct phy_device {
99734         unsigned loopback_enabled:1;
99735         unsigned downshifted_rate:1;
99736         unsigned is_on_sfp_module:1;
99737 +       unsigned mac_managed_pm:1;
99739         unsigned autoneg:1;
99740         /* The most recently read link state */
99741 diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
99742 index 3f23f6e430bf..cd81e060863c 100644
99743 --- a/include/linux/platform_device.h
99744 +++ b/include/linux/platform_device.h
99745 @@ -359,4 +359,7 @@ static inline int is_sh_early_platform_device(struct platform_device *pdev)
99747  #endif /* CONFIG_SUPERH */
99749 +/* For now only SuperH uses it */
99750 +void early_platform_cleanup(void);
99752  #endif /* _PLATFORM_DEVICE_H_ */
99753 diff --git a/include/linux/pm.h b/include/linux/pm.h
99754 index 482313a8ccfc..628718697679 100644
99755 --- a/include/linux/pm.h
99756 +++ b/include/linux/pm.h
99757 @@ -602,6 +602,7 @@ struct dev_pm_info {
99758         unsigned int            idle_notification:1;
99759         unsigned int            request_pending:1;
99760         unsigned int            deferred_resume:1;
99761 +       unsigned int            needs_force_resume:1;
99762         unsigned int            runtime_auto:1;
99763         bool                    ignore_children:1;
99764         unsigned int            no_callbacks:1;
99765 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
99766 index b492ae00cc90..6c08a085367b 100644
99767 --- a/include/linux/pm_runtime.h
99768 +++ b/include/linux/pm_runtime.h
99769 @@ -265,7 +265,7 @@ static inline void pm_runtime_no_callbacks(struct device *dev) {}
99770  static inline void pm_runtime_irq_safe(struct device *dev) {}
99771  static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }
99773 -static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; }
99774 +static inline bool pm_runtime_has_no_callbacks(struct device *dev) { return false; }
99775  static inline void pm_runtime_mark_last_busy(struct device *dev) {}
99776  static inline void __pm_runtime_use_autosuspend(struct device *dev,
99777                                                 bool use) {}
99778 diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
99779 index 111a40d0d3d5..8d5f4f40fb41 100644
99780 --- a/include/linux/power/bq27xxx_battery.h
99781 +++ b/include/linux/power/bq27xxx_battery.h
99782 @@ -53,7 +53,6 @@ struct bq27xxx_reg_cache {
99783         int capacity;
99784         int energy;
99785         int flags;
99786 -       int power_avg;
99787         int health;
99788  };
99790 diff --git a/include/linux/reset.h b/include/linux/reset.h
99791 index b9109efa2a5c..9700124affa3 100644
99792 --- a/include/linux/reset.h
99793 +++ b/include/linux/reset.h
99794 @@ -47,6 +47,11 @@ static inline int reset_control_reset(struct reset_control *rstc)
99795         return 0;
99798 +static inline int reset_control_rearm(struct reset_control *rstc)
99799 +{
99800 +       return 0;
99801 +}
99802 +
99803  static inline int reset_control_assert(struct reset_control *rstc)
99804  {
99805         return 0;
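A hedged usage sketch for the newly stubbed reset_control_rearm(): with CONFIG_RESET_CONTROLLER=n both calls compile to no-ops, so a driver can pair them unconditionally. example_pulse_reset() is hypothetical.

#include <linux/reset.h>

static int example_pulse_reset(struct reset_control *rstc)
{
        int err;

        err = reset_control_reset(rstc);        /* trigger the pulsed reset */
        if (err)
                return err;
        /* On a later failure path, re-arm a shared pulsed reset so it can
         * be triggered again; a no-op with the stub added above. */
        return reset_control_rearm(rstc);
}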
99806 diff --git a/include/linux/sched.h b/include/linux/sched.h
99807 index ef00bb22164c..98f2e1dc6f90 100644
99808 --- a/include/linux/sched.h
99809 +++ b/include/linux/sched.h
99810 @@ -216,13 +216,40 @@ struct task_group;
99812  extern void scheduler_tick(void);
99814 -#define        MAX_SCHEDULE_TIMEOUT            LONG_MAX
99816 +#define        MAX_SCHEDULE_TIMEOUT    LONG_MAX
99817  extern long schedule_timeout(long timeout);
99818  extern long schedule_timeout_interruptible(long timeout);
99819  extern long schedule_timeout_killable(long timeout);
99820  extern long schedule_timeout_uninterruptible(long timeout);
99821  extern long schedule_timeout_idle(long timeout);
99823 +#ifdef CONFIG_HIGH_RES_TIMERS
99824 +extern long schedule_msec_hrtimeout(long timeout);
99825 +extern long schedule_min_hrtimeout(void);
99826 +extern long schedule_msec_hrtimeout_interruptible(long timeout);
99827 +extern long schedule_msec_hrtimeout_uninterruptible(long timeout);
99828 +#else
99829 +static inline long schedule_msec_hrtimeout(long timeout)
99830 +{
99831 +       return schedule_timeout(msecs_to_jiffies(timeout));
99832 +}
99833 +
99834 +static inline long schedule_min_hrtimeout(void)
99835 +{
99836 +       return schedule_timeout(1);
99837 +}
99838 +
99839 +static inline long schedule_msec_hrtimeout_interruptible(long timeout)
99840 +{
99841 +       return schedule_timeout_interruptible(msecs_to_jiffies(timeout));
99842 +}
99843 +
99844 +static inline long schedule_msec_hrtimeout_uninterruptible(long timeout)
99845 +{
99846 +       return schedule_timeout_uninterruptible(msecs_to_jiffies(timeout));
99847 +}
99848 +#endif
99850  asmlinkage void schedule(void);
99851  extern void schedule_preempt_disabled(void);
99852  asmlinkage void preempt_schedule_irq(void);
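A small sketch of the new hrtimeout helpers; example_short_sleep() is hypothetical. The #else stubs above show the exact fallback behaviour when CONFIG_HIGH_RES_TIMERS=n.

#include <linux/sched.h>

static long example_short_sleep(void)
{
        /* Sleeps ~2 ms with hrtimer precision when CONFIG_HIGH_RES_TIMERS=y;
         * otherwise this is schedule_timeout_interruptible(msecs_to_jiffies(2)),
         * i.e. jiffies resolution, per the stubs above. */
        return schedule_msec_hrtimeout_interruptible(2);
}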
99853 diff --git a/include/linux/smp.h b/include/linux/smp.h
99854 index 70c6f6284dcf..238a3f97a415 100644
99855 --- a/include/linux/smp.h
99856 +++ b/include/linux/smp.h
99857 @@ -73,7 +73,7 @@ void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
99858  void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
99859                            void *info, bool wait, const struct cpumask *mask);
99861 -int smp_call_function_single_async(int cpu, call_single_data_t *csd);
99862 +int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
99864  #ifdef CONFIG_SMP
99866 diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
99867 index 592897fa4f03..643139b1eafe 100644
99868 --- a/include/linux/spi/spi.h
99869 +++ b/include/linux/spi/spi.h
99870 @@ -510,6 +510,9 @@ struct spi_controller {
99872  #define SPI_MASTER_GPIO_SS             BIT(5)  /* GPIO CS must select slave */
99874 +       /* flag indicating the allocation of this struct is devres-managed */
99875 +       bool                    devm_allocated;
99877         /* flag indicating this is an SPI slave controller */
99878         bool                    slave;
99880 diff --git a/include/linux/swap.h b/include/linux/swap.h
99881 index 4cc6ec3bf0ab..0e7532c7db22 100644
99882 --- a/include/linux/swap.h
99883 +++ b/include/linux/swap.h
99884 @@ -344,13 +344,14 @@ extern void lru_add_drain_cpu(int cpu);
99885  extern void lru_add_drain_cpu_zone(struct zone *zone);
99886  extern void lru_add_drain_all(void);
99887  extern void rotate_reclaimable_page(struct page *page);
99888 +extern void activate_page(struct page *page);
99889  extern void deactivate_file_page(struct page *page);
99890  extern void deactivate_page(struct page *page);
99891  extern void mark_page_lazyfree(struct page *page);
99892  extern void swap_setup(void);
99894 -extern void lru_cache_add_inactive_or_unevictable(struct page *page,
99895 -                                               struct vm_area_struct *vma);
99896 +extern void lru_cache_add_page_vma(struct page *page, struct vm_area_struct *vma,
99897 +                                  bool faulting);
99899  /* linux/mm/vmscan.c */
99900  extern unsigned long zone_reclaimable_pages(struct zone *zone);
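To make the renamed helper's new argument concrete, a hypothetical fault-path call site (example_map_new_page() is not from the patch):

#include <linux/swap.h>

static void example_map_new_page(struct page *page, struct vm_area_struct *vma)
{
        /* The third argument is the new 'faulting' flag: true here because
         * this caller would be mapping the page in from a page fault. */
        lru_cache_add_page_vma(page, vma, true);
}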
99901 diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
99902 index 2839dc9a7c01..b07b7d4334a6 100644
99903 --- a/include/linux/syscalls.h
99904 +++ b/include/linux/syscalls.h
99905 @@ -69,6 +69,8 @@ struct io_uring_params;
99906  struct clone_args;
99907  struct open_how;
99908  struct mount_attr;
99909 +struct futex_waitv;
99910 +struct futex_requeue;
99912  #include <linux/types.h>
99913  #include <linux/aio_abi.h>
99914 @@ -619,6 +621,20 @@ asmlinkage long sys_get_robust_list(int pid,
99915  asmlinkage long sys_set_robust_list(struct robust_list_head __user *head,
99916                                     size_t len);
99918 +/* kernel/futex2.c */
99919 +asmlinkage long sys_futex_wait(void __user *uaddr, unsigned int val,
99920 +                              unsigned int flags,
99921 +                              struct __kernel_timespec __user *timo);
99922 +asmlinkage long sys_futex_wake(void __user *uaddr, unsigned int nr_wake,
99923 +                              unsigned int flags);
99924 +asmlinkage long sys_futex_waitv(struct futex_waitv __user *waiters,
99925 +                               unsigned int nr_futexes, unsigned int flags,
99926 +                               struct __kernel_timespec __user *timo);
99927 +asmlinkage long sys_futex_requeue(struct futex_requeue __user *uaddr1,
99928 +                                 struct futex_requeue __user *uaddr2,
99929 +                                 unsigned int nr_wake, unsigned int nr_requeue,
99930 +                                 unsigned int cmpval, unsigned int flags);
99932  /* kernel/hrtimer.c */
99933  asmlinkage long sys_nanosleep(struct __kernel_timespec __user *rqtp,
99934                               struct __kernel_timespec __user *rmtp);
99935 @@ -1300,6 +1316,8 @@ int ksys_ipc(unsigned int call, int first, unsigned long second,
99936         unsigned long third, void __user * ptr, long fifth);
99937  int compat_ksys_ipc(u32 call, int first, int second,
99938         u32 third, u32 ptr, u32 fifth);
99939 +long ksys_futex_wake(void __user *uaddr, unsigned long nr_wake,
99940 +                    unsigned int flags);
99942  /*
99943   * The following kernel syscall equivalents are just wrappers to fs-internal
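A userspace sketch of the futex2 entry points declared above, assuming the __NR_futex_wait/__NR_futex_wake syscall numbers wired up elsewhere in this patch; the wrappers are hypothetical.

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/time_types.h>

static long futex2_wait(void *uaddr, unsigned int val, unsigned int flags,
                        struct __kernel_timespec *timo)
{
        return syscall(__NR_futex_wait, uaddr, val, flags, timo);
}

static long futex2_wake(void *uaddr, unsigned int nr_wake, unsigned int flags)
{
        return syscall(__NR_futex_wake, uaddr, nr_wake, flags);
}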
99944 diff --git a/include/linux/tcp.h b/include/linux/tcp.h
99945 index 48d8a363319e..1bd559c69e83 100644
99946 --- a/include/linux/tcp.h
99947 +++ b/include/linux/tcp.h
99948 @@ -225,7 +225,8 @@ struct tcp_sock {
99949         u8      compressed_ack;
99950         u8      dup_ack_counter:2,
99951                 tlp_retrans:1,  /* TLP is a retransmission */
99952 -               unused:5;
99953 +               fast_ack_mode:2, /* which fast ack mode? */
99954 +               unused:3;
99955         u32     chrono_start;   /* Start time in jiffies of a TCP chrono */
99956         u32     chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
99957         u8      chrono_type:2,  /* current chronograph type */
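Sketch of how a congestion-control module might use the new bits; their meaning is defined by whichever CC algorithm (BBR2 in this patch set) consumes them, and example_cc_init() is hypothetical.

#include <linux/tcp.h>

static void example_cc_init(struct sock *sk)
{
        /* 0 = default ACK behaviour; non-zero selects a fast-ACK mode
         * interpreted by the congestion-control algorithm. */
        tcp_sk(sk)->fast_ack_mode = 1;
}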
99958 diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
99959 index 61c3372d3f32..2f719b471d52 100644
99960 --- a/include/linux/tty_driver.h
99961 +++ b/include/linux/tty_driver.h
99962 @@ -228,7 +228,7 @@
99963   *
99964   *     Called when the device receives a TIOCGICOUNT ioctl. Passed a kernel
99965   *     structure to complete. This method is optional and will only be called
99966 - *     if provided (otherwise EINVAL will be returned).
99967 + *     if provided (otherwise ENOTTY will be returned).
99968   */
99970  #include <linux/export.h>
99971 diff --git a/include/linux/udp.h b/include/linux/udp.h
99972 index aa84597bdc33..ae58ff3b6b5b 100644
99973 --- a/include/linux/udp.h
99974 +++ b/include/linux/udp.h
99975 @@ -51,7 +51,9 @@ struct udp_sock {
99976                                            * different encapsulation layer set
99977                                            * this
99978                                            */
99979 -                        gro_enabled:1; /* Can accept GRO packets */
99980 +                        gro_enabled:1, /* Request GRO aggregation */
99981 +                        accept_udp_l4:1,
99982 +                        accept_udp_fraglist:1;
99983         /*
99984          * Following member retains the information to create a UDP header
99985          * when the socket is uncorked.
99986 @@ -131,8 +133,16 @@ static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
99988  static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
99989  {
99990 -       return !udp_sk(sk)->gro_enabled && skb_is_gso(skb) &&
99991 -              skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4;
99992 +       if (!skb_is_gso(skb))
99993 +               return false;
99994 +
99995 +       if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && !udp_sk(sk)->accept_udp_l4)
99996 +               return true;
99997 +
99998 +       if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST && !udp_sk(sk)->accept_udp_fraglist)
99999 +               return true;
100000 +
100001 +       return false;
100002  }
100003
100004  #define udp_portaddr_for_each_entry(__sk, list) \
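Userspace sketch: opting a socket into UDP GRO, which is what (in the corresponding net/ipv4/udp.c change, not shown here) is expected to set gro_enabled and the new accept_udp_l4 bit that udp_unexpected_gso() now checks.

#include <sys/socket.h>
#include <netinet/in.h>

#ifndef UDP_GRO
#define UDP_GRO 104             /* from the UAPI <linux/udp.h> */
#endif

static int example_enable_udp_gro(int fd)
{
        int one = 1;

        return setsockopt(fd, IPPROTO_UDP, UDP_GRO, &one, sizeof(one));
}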
100005 diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
100006 index 70d681918d01..bf00259493e0 100644
100007 --- a/include/linux/usb/pd.h
100008 +++ b/include/linux/usb/pd.h
100009 @@ -493,4 +493,6 @@ static inline unsigned int rdo_max_power(u32 rdo)
100010  #define PD_N_CAPS_COUNT                (PD_T_NO_RESPONSE / PD_T_SEND_SOURCE_CAP)
100011  #define PD_N_HARD_RESET_COUNT  2
100013 +#define PD_P_SNK_STDBY_MW      2500    /* 2500 mW */
100015  #endif /* __LINUX_USB_PD_H */
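The new macro is the sink standby power budget (2.5 W). As a worked example, at vSafe5V that corresponds to 2500 mW / 5000 mV = 500 mA; the helper below is hypothetical.

static inline u32 example_snk_stdby_ma(u32 mv)
{
        /* mW * 1000 / mV = mA, e.g. 2500 * 1000 / 5000 = 500 mA at 5 V */
        return PD_P_SNK_STDBY_MW * 1000 / mv;
}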
100016 diff --git a/include/linux/zstd.h b/include/linux/zstd.h
100017 index e87f78c9b19c..446ecabcdd02 100644
100018 --- a/include/linux/zstd.h
100019 +++ b/include/linux/zstd.h
100020 @@ -1,138 +1,97 @@
100021 +/* SPDX-License-Identifier: GPL-2.0-only */
100023 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
100024 + * Copyright (c) Yann Collet, Facebook, Inc.
100025   * All rights reserved.
100026   *
100027 - * This source code is licensed under the BSD-style license found in the
100028 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
100029 - * An additional grant of patent rights can be found in the PATENTS file in the
100030 - * same directory.
100032 - * This program is free software; you can redistribute it and/or modify it under
100033 - * the terms of the GNU General Public License version 2 as published by the
100034 - * Free Software Foundation. This program is dual-licensed; you may select
100035 - * either version 2 of the GNU General Public License ("GPL") or BSD license
100036 - * ("BSD").
100037 + * This source code is licensed under both the BSD-style license (found in the
100038 + * LICENSE file in the root directory of https://github.com/facebook/zstd) and
100039 + * the GPLv2 (found in the COPYING file in the root directory of
100040 + * https://github.com/facebook/zstd). You may select, at your option, one of the
100041 + * above-listed licenses.
100042   */
100044 -#ifndef ZSTD_H
100045 -#define ZSTD_H
100046 +#ifndef LINUX_ZSTD_H
100047 +#define LINUX_ZSTD_H
100049 -/* ======   Dependency   ======*/
100050 -#include <linux/types.h>   /* size_t */
100052 + * This is a kernel-style API that wraps the upstream zstd API, which cannot be
100053 + * used directly because the symbols aren't exported. It exposes the minimal
100054 + * functionality which is currently required by users of zstd in the kernel.
100055 + * Expose extra functions from lib/zstd/zstd.h as needed.
100056 + */
100058 +/* ======   Dependency   ====== */
100059 +#include <linux/types.h>
100060 +#include <linux/zstd_errors.h>
100061 +#include <linux/zstd_lib.h>
100063 -/*-*****************************************************************************
100064 - * Introduction
100065 +/* ======   Helper Functions   ====== */
100067 + * zstd_compress_bound() - maximum compressed size in worst case scenario
100068 + * @src_size: The size of the data to compress.
100069   *
100070 - * zstd, short for Zstandard, is a fast lossless compression algorithm,
100071 - * targeting real-time compression scenarios at zlib-level and better
100072 - * compression ratios. The zstd compression library provides in-memory
100073 - * compression and decompression functions. The library supports compression
100074 - * levels from 1 up to ZSTD_maxCLevel() which is 22. Levels >= 20, labeled
100075 - * ultra, should be used with caution, as they require more memory.
100076 - * Compression can be done in:
100077 - *  - a single step, reusing a context (described as Explicit memory management)
100078 - *  - unbounded multiple steps (described as Streaming compression)
100079 - * The compression ratio achievable on small data can be highly improved using
100080 - * compression with a dictionary in:
100081 - *  - a single step (described as Simple dictionary API)
100082 - *  - a single step, reusing a dictionary (described as Fast dictionary API)
100083 - ******************************************************************************/
100085 -/*======  Helper functions  ======*/
100086 + * Return:    The maximum compressed size in the worst case scenario.
100087 + */
100088 +size_t zstd_compress_bound(size_t src_size);
100090  /**
100091 - * enum ZSTD_ErrorCode - zstd error codes
100092 + * zstd_is_error() - tells if a size_t function result is an error code
100093 + * @code:  The function result to check for error.
100094   *
100095 - * Functions that return size_t can be checked for errors using ZSTD_isError()
100096 - * and the ZSTD_ErrorCode can be extracted using ZSTD_getErrorCode().
100097 + * Return: Non-zero iff the code is an error.
100098 + */
100099 +unsigned int zstd_is_error(size_t code);
100102 + * enum zstd_error_code - zstd error codes
100103   */
100104 -typedef enum {
100105 -       ZSTD_error_no_error,
100106 -       ZSTD_error_GENERIC,
100107 -       ZSTD_error_prefix_unknown,
100108 -       ZSTD_error_version_unsupported,
100109 -       ZSTD_error_parameter_unknown,
100110 -       ZSTD_error_frameParameter_unsupported,
100111 -       ZSTD_error_frameParameter_unsupportedBy32bits,
100112 -       ZSTD_error_frameParameter_windowTooLarge,
100113 -       ZSTD_error_compressionParameter_unsupported,
100114 -       ZSTD_error_init_missing,
100115 -       ZSTD_error_memory_allocation,
100116 -       ZSTD_error_stage_wrong,
100117 -       ZSTD_error_dstSize_tooSmall,
100118 -       ZSTD_error_srcSize_wrong,
100119 -       ZSTD_error_corruption_detected,
100120 -       ZSTD_error_checksum_wrong,
100121 -       ZSTD_error_tableLog_tooLarge,
100122 -       ZSTD_error_maxSymbolValue_tooLarge,
100123 -       ZSTD_error_maxSymbolValue_tooSmall,
100124 -       ZSTD_error_dictionary_corrupted,
100125 -       ZSTD_error_dictionary_wrong,
100126 -       ZSTD_error_dictionaryCreation_failed,
100127 -       ZSTD_error_maxCode
100128 -} ZSTD_ErrorCode;
100129 +typedef ZSTD_ErrorCode zstd_error_code;
100131  /**
100132 - * ZSTD_maxCLevel() - maximum compression level available
100133 + * zstd_get_error_code() - translates an error function result to an error code
100134 + * @code:  The function result for which zstd_is_error(code) is true.
100135   *
100136 - * Return: Maximum compression level available.
100137 + * Return: A unique error code for this error.
100138   */
100139 -int ZSTD_maxCLevel(void);
100140 +zstd_error_code zstd_get_error_code(size_t code);
100142  /**
100143 - * ZSTD_compressBound() - maximum compressed size in worst case scenario
100144 - * @srcSize: The size of the data to compress.
100145 + * zstd_get_error_name() - translates an error function result to a string
100146 + * @code:  The function result for which zstd_is_error(code) is true.
100147   *
100148 - * Return:   The maximum compressed size in the worst case scenario.
100149 + * Return: An error string corresponding to the error code.
100150   */
100151 -size_t ZSTD_compressBound(size_t srcSize);
100152 +const char *zstd_get_error_name(size_t code);
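A minimal error-handling sketch for the wrapped error API above; example_check() is hypothetical.

#include <linux/printk.h>

static bool example_check(size_t ret)
{
        if (!zstd_is_error(ret))
                return true;
        pr_warn("zstd: %s (error %d)\n", zstd_get_error_name(ret),
                (int)zstd_get_error_code(ret));
        return false;
}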
100154  /**
100155 - * ZSTD_isError() - tells if a size_t function result is an error code
100156 - * @code:  The function result to check for error.
100157 + * zstd_min_clevel() - minimum allowed compression level
100158   *
100159 - * Return: Non-zero iff the code is an error.
100160 + * Return: The minimum allowed compression level.
100161   */
100162 -static __attribute__((unused)) unsigned int ZSTD_isError(size_t code)
100163 -{
100164 -       return code > (size_t)-ZSTD_error_maxCode;
100165 -}
100166 +int zstd_min_clevel(void);
100168  /**
100169 - * ZSTD_getErrorCode() - translates an error function result to a ZSTD_ErrorCode
100170 - * @functionResult: The result of a function for which ZSTD_isError() is true.
100171 + * zstd_max_clevel() - maximum allowed compression level
100172   *
100173 - * Return:          The ZSTD_ErrorCode corresponding to the functionResult or 0
100174 - *                  if the functionResult isn't an error.
100175 + * Return: The maximum allowed compression level.
100176   */
100177 -static __attribute__((unused)) ZSTD_ErrorCode ZSTD_getErrorCode(
100178 -       size_t functionResult)
100179 -{
100180 -       if (!ZSTD_isError(functionResult))
100181 -               return (ZSTD_ErrorCode)0;
100182 -       return (ZSTD_ErrorCode)(0 - functionResult);
100183 -}
100184 +int zstd_max_clevel(void);
100186 +/* ======   Parameter Selection   ====== */
100188  /**
100189 - * enum ZSTD_strategy - zstd compression search strategy
100190 + * enum zstd_strategy - zstd compression search strategy
100191   *
100192 - * From faster to stronger.
100193 + * From faster to stronger. See zstd_lib.h.
100194   */
100195 -typedef enum {
100196 -       ZSTD_fast,
100197 -       ZSTD_dfast,
100198 -       ZSTD_greedy,
100199 -       ZSTD_lazy,
100200 -       ZSTD_lazy2,
100201 -       ZSTD_btlazy2,
100202 -       ZSTD_btopt,
100203 -       ZSTD_btopt2
100204 -} ZSTD_strategy;
100205 +typedef ZSTD_strategy zstd_strategy;
100207  /**
100208 - * struct ZSTD_compressionParameters - zstd compression parameters
100209 + * struct zstd_compression_parameters - zstd compression parameters
100210   * @windowLog:    Log of the largest match distance. Larger means more
100211   *                compression, and more memory needed during decompression.
100212 - * @chainLog:     Fully searched segment. Larger means more compression, slower,
100213 - *                and more memory (useless for fast).
100214 + * @chainLog:     Fully searched segment. Larger means more compression,
100215 + *                slower, and more memory (useless for fast).
100216   * @hashLog:      Dispatch table. Larger means more compression,
100217   *                slower, and more memory.
100218   * @searchLog:    Number of searches. Larger means more compression and slower.
100219 @@ -141,1017 +100,348 @@ typedef enum {
100220   * @targetLength: Acceptable match size for optimal parser (only). Larger means
100221   *                more compression, and slower.
100222   * @strategy:     The zstd compression strategy.
100224 + * See zstd_lib.h.
100225   */
100226 -typedef struct {
100227 -       unsigned int windowLog;
100228 -       unsigned int chainLog;
100229 -       unsigned int hashLog;
100230 -       unsigned int searchLog;
100231 -       unsigned int searchLength;
100232 -       unsigned int targetLength;
100233 -       ZSTD_strategy strategy;
100234 -} ZSTD_compressionParameters;
100235 +typedef ZSTD_compressionParameters zstd_compression_parameters;
100237  /**
100238 - * struct ZSTD_frameParameters - zstd frame parameters
100239 - * @contentSizeFlag: Controls whether content size will be present in the frame
100240 - *                   header (when known).
100241 - * @checksumFlag:    Controls whether a 32-bit checksum is generated at the end
100242 - *                   of the frame for error detection.
100243 - * @noDictIDFlag:    Controls whether dictID will be saved into the frame header
100244 - *                   when using dictionary compression.
100245 + * struct zstd_frame_parameters - zstd frame parameters
100246 + * @contentSizeFlag: Controls whether content size will be present in the
100247 + *                   frame header (when known).
100248 + * @checksumFlag:    Controls whether a 32-bit checksum is generated at the
100249 + *                   end of the frame for error detection.
100250 + * @noDictIDFlag:    Controls whether dictID will be saved into the frame
100251 + *                   header when using dictionary compression.
100252   *
100253 - * The default value is all fields set to 0.
100254 + * The default value is all fields set to 0. See zstd_lib.h.
100255   */
100256 -typedef struct {
100257 -       unsigned int contentSizeFlag;
100258 -       unsigned int checksumFlag;
100259 -       unsigned int noDictIDFlag;
100260 -} ZSTD_frameParameters;
100261 +typedef ZSTD_frameParameters zstd_frame_parameters;
100263  /**
100264 - * struct ZSTD_parameters - zstd parameters
100265 + * struct zstd_parameters - zstd parameters
100266   * @cParams: The compression parameters.
100267   * @fParams: The frame parameters.
100268   */
100269 -typedef struct {
100270 -       ZSTD_compressionParameters cParams;
100271 -       ZSTD_frameParameters fParams;
100272 -} ZSTD_parameters;
100273 +typedef ZSTD_parameters zstd_parameters;
100275  /**
100276 - * ZSTD_getCParams() - returns ZSTD_compressionParameters for selected level
100277 - * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel().
100278 - * @estimatedSrcSize: The estimated source size to compress or 0 if unknown.
100279 - * @dictSize:         The dictionary size or 0 if a dictionary isn't being used.
100280 + * zstd_get_params() - returns zstd_parameters for selected level
100281 + * @level:              The compression level
100282 + * @estimated_src_size: The estimated source size to compress or 0
100283 + *                      if unknown.
100284   *
100285 - * Return:            The selected ZSTD_compressionParameters.
100286 + * Return:              The selected zstd_parameters.
100287   */
100288 -ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel,
100289 -       unsigned long long estimatedSrcSize, size_t dictSize);
100290 +zstd_parameters zstd_get_params(int level,
100291 +       unsigned long long estimated_src_size);
100294 - * ZSTD_getParams() - returns ZSTD_parameters for selected level
100295 - * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel().
100296 - * @estimatedSrcSize: The estimated source size to compress or 0 if unknown.
100297 - * @dictSize:         The dictionary size or 0 if a dictionary isn't being used.
100299 - * The same as ZSTD_getCParams() except also selects the default frame
100300 - * parameters (all zero).
100302 - * Return:            The selected ZSTD_parameters.
100303 - */
100304 -ZSTD_parameters ZSTD_getParams(int compressionLevel,
100305 -       unsigned long long estimatedSrcSize, size_t dictSize);
100306 +/* ======   Single-pass Compression   ====== */
100308 -/*-*************************************
100309 - * Explicit memory management
100310 - **************************************/
100311 +typedef ZSTD_CCtx zstd_cctx;
100313  /**
100314 - * ZSTD_CCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_CCtx
100315 - * @cParams: The compression parameters to be used for compression.
100316 + * zstd_cctx_workspace_bound() - max memory needed to initialize a zstd_cctx
100317 + * @parameters: The compression parameters to be used.
100318   *
100319   * If multiple compression parameters might be used, the caller must call
100320 - * ZSTD_CCtxWorkspaceBound() for each set of parameters and use the maximum
100321 + * zstd_cctx_workspace_bound() for each set of parameters and use the maximum
100322   * size.
100323   *
100324 - * Return:   A lower bound on the size of the workspace that is passed to
100325 - *           ZSTD_initCCtx().
100326 + * Return:      A lower bound on the size of the workspace that is passed to
100327 + *              zstd_init_cctx().
100328   */
100329 -size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams);
100330 +size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *parameters);
100332  /**
100333 - * struct ZSTD_CCtx - the zstd compression context
100335 - * When compressing many times it is recommended to allocate a context just once
100336 - * and reuse it for each successive compression operation.
100337 - */
100338 -typedef struct ZSTD_CCtx_s ZSTD_CCtx;
100340 - * ZSTD_initCCtx() - initialize a zstd compression context
100341 - * @workspace:     The workspace to emplace the context into. It must outlive
100342 - *                 the returned context.
100343 - * @workspaceSize: The size of workspace. Use ZSTD_CCtxWorkspaceBound() to
100344 - *                 determine how large the workspace must be.
100346 - * Return:         A compression context emplaced into workspace.
100347 - */
100348 -ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize);
100351 - * ZSTD_compressCCtx() - compress src into dst
100352 - * @ctx:         The context. Must have been initialized with a workspace at
100353 - *               least as large as ZSTD_CCtxWorkspaceBound(params.cParams).
100354 - * @dst:         The buffer to compress src into.
100355 - * @dstCapacity: The size of the destination buffer. May be any size, but
100356 - *               ZSTD_compressBound(srcSize) is guaranteed to be large enough.
100357 - * @src:         The data to compress.
100358 - * @srcSize:     The size of the data to compress.
100359 - * @params:      The parameters to use for compression. See ZSTD_getParams().
100361 - * Return:       The compressed size or an error, which can be checked using
100362 - *               ZSTD_isError().
100363 - */
100364 -size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity,
100365 -       const void *src, size_t srcSize, ZSTD_parameters params);
100368 - * ZSTD_DCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_DCtx
100370 - * Return: A lower bound on the size of the workspace that is passed to
100371 - *         ZSTD_initDCtx().
100372 - */
100373 -size_t ZSTD_DCtxWorkspaceBound(void);
100376 - * struct ZSTD_DCtx - the zstd decompression context
100378 - * When decompressing many times it is recommended to allocate a context just
100379 - * once and reuse it for each successive decompression operation.
100380 - */
100381 -typedef struct ZSTD_DCtx_s ZSTD_DCtx;
100383 - * ZSTD_initDCtx() - initialize a zstd decompression context
100384 - * @workspace:     The workspace to emplace the context into. It must outlive
100385 - *                 the returned context.
100386 - * @workspaceSize: The size of workspace. Use ZSTD_DCtxWorkspaceBound() to
100387 - *                 determine how large the workspace must be.
100389 - * Return:         A decompression context emplaced into workspace.
100390 - */
100391 -ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize);
100394 - * ZSTD_decompressDCtx() - decompress zstd compressed src into dst
100395 - * @ctx:         The decompression context.
100396 - * @dst:         The buffer to decompress src into.
100397 - * @dstCapacity: The size of the destination buffer. Must be at least as large
100398 - *               as the decompressed size. If the caller cannot upper bound the
100399 - *               decompressed size, then it's better to use the streaming API.
100400 - * @src:         The zstd compressed data to decompress. Multiple concatenated
100401 - *               frames and skippable frames are allowed.
100402 - * @srcSize:     The exact size of the data to decompress.
100404 - * Return:       The decompressed size or an error, which can be checked using
100405 - *               ZSTD_isError().
100406 - */
100407 -size_t ZSTD_decompressDCtx(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity,
100408 -       const void *src, size_t srcSize);
100410 -/*-************************
100411 - * Simple dictionary API
100412 - **************************/
100415 - * ZSTD_compress_usingDict() - compress src into dst using a dictionary
100416 - * @ctx:         The context. Must have been initialized with a workspace at
100417 - *               least as large as ZSTD_CCtxWorkspaceBound(params.cParams).
100418 - * @dst:         The buffer to compress src into.
100419 - * @dstCapacity: The size of the destination buffer. May be any size, but
100420 - *               ZSTD_compressBound(srcSize) is guaranteed to be large enough.
100421 - * @src:         The data to compress.
100422 - * @srcSize:     The size of the data to compress.
100423 - * @dict:        The dictionary to use for compression.
100424 - * @dictSize:    The size of the dictionary.
100425 - * @params:      The parameters to use for compression. See ZSTD_getParams().
100427 - * Compression using a predefined dictionary. The same dictionary must be used
100428 - * during decompression.
100430 - * Return:       The compressed size or an error, which can be checked using
100431 - *               ZSTD_isError().
100432 - */
100433 -size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity,
100434 -       const void *src, size_t srcSize, const void *dict, size_t dictSize,
100435 -       ZSTD_parameters params);
100438 - * ZSTD_decompress_usingDict() - decompress src into dst using a dictionary
100439 - * @ctx:         The decompression context.
100440 - * @dst:         The buffer to decompress src into.
100441 - * @dstCapacity: The size of the destination buffer. Must be at least as large
100442 - *               as the decompressed size. If the caller cannot upper bound the
100443 - *               decompressed size, then it's better to use the streaming API.
100444 - * @src:         The zstd compressed data to decompress. Multiple concatenated
100445 - *               frames and skippable frames are allowed.
100446 - * @srcSize:     The exact size of the data to decompress.
100447 - * @dict:        The dictionary to use for decompression. The same dictionary
100448 - *               must've been used to compress the data.
100449 - * @dictSize:    The size of the dictionary.
100451 - * Return:       The decompressed size or an error, which can be checked using
100452 - *               ZSTD_isError().
100453 - */
100454 -size_t ZSTD_decompress_usingDict(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity,
100455 -       const void *src, size_t srcSize, const void *dict, size_t dictSize);
100457 -/*-**************************
100458 - * Fast dictionary API
100459 - ***************************/
100462 - * ZSTD_CDictWorkspaceBound() - memory needed to initialize a ZSTD_CDict
100463 - * @cParams: The compression parameters to be used for compression.
100464 + * zstd_init_cctx() - initialize a zstd compression context
100465 + * @workspace:      The workspace to emplace the context into. It must outlive
100466 + *                  the returned context.
100467 + * @workspace_size: The size of workspace. Use zstd_cctx_workspace_bound() to
100468 + *                  determine how large the workspace must be.
100469   *
100470 - * Return:   A lower bound on the size of the workspace that is passed to
100471 - *           ZSTD_initCDict().
100472 - */
100473 -size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams);
100476 - * struct ZSTD_CDict - a digested dictionary to be used for compression
100477 + * Return:          A zstd compression context or NULL on error.
100478   */
100479 -typedef struct ZSTD_CDict_s ZSTD_CDict;
100480 +zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size);
100482  /**
100483 - * ZSTD_initCDict() - initialize a digested dictionary for compression
100484 - * @dictBuffer:    The dictionary to digest. The buffer is referenced by the
100485 - *                 ZSTD_CDict so it must outlive the returned ZSTD_CDict.
100486 - * @dictSize:      The size of the dictionary.
100487 - * @params:        The parameters to use for compression. See ZSTD_getParams().
100488 - * @workspace:     The workspace. It must outlive the returned ZSTD_CDict.
100489 - * @workspaceSize: The workspace size. Must be at least
100490 - *                 ZSTD_CDictWorkspaceBound(params.cParams).
100491 + * zstd_compress_cctx() - compress src into dst with the initialized parameters
100492 + * @cctx:         The context. Must have been initialized with zstd_init_cctx().
100493 + * @dst:          The buffer to compress src into.
100494 + * @dst_capacity: The size of the destination buffer. May be any size, but
100495 + *                ZSTD_compressBound(srcSize) is guaranteed to be large enough.
100496 + * @src:          The data to compress.
100497 + * @src_size:     The size of the data to compress.
100498 + * @parameters:   The compression parameters to be used.
100499   *
100500 - * When compressing multiple messages / blocks with the same dictionary it is
100501 - * recommended to load it just once. The ZSTD_CDict merely references the
100502 - * dictBuffer, so it must outlive the returned ZSTD_CDict.
100504 - * Return:         The digested dictionary emplaced into workspace.
100505 + * Return:        The compressed size or an error, which can be checked using
100506 + *                zstd_is_error().
100507   */
100508 -ZSTD_CDict *ZSTD_initCDict(const void *dictBuffer, size_t dictSize,
100509 -       ZSTD_parameters params, void *workspace, size_t workspaceSize);
100510 +size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
100511 +       const void *src, size_t src_size, const zstd_parameters *parameters);
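Putting the one-shot pieces together, a hedged sketch (a sleeping context is assumed so kvmalloc() is fine; returns 0 on allocation or init failure, otherwise a result to test with zstd_is_error()). example_compress() is hypothetical.

#include <linux/mm.h>
#include <linux/zstd.h>

static size_t example_compress(void *dst, size_t dst_capacity,
                               const void *src, size_t src_size)
{
        const zstd_parameters params = zstd_get_params(3, src_size);
        size_t wksp_size = zstd_cctx_workspace_bound(&params.cParams);
        void *wksp = kvmalloc(wksp_size, GFP_KERNEL);
        zstd_cctx *cctx;
        size_t ret = 0;

        if (!wksp)
                return 0;
        cctx = zstd_init_cctx(wksp, wksp_size);
        if (cctx)
                ret = zstd_compress_cctx(cctx, dst, dst_capacity,
                                         src, src_size, &params);
        kvfree(wksp);
        return ret;     /* callers test with zstd_is_error() */
}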
100514 - * ZSTD_compress_usingCDict() - compress src into dst using a ZSTD_CDict
100515 - * @ctx:         The context. Must have been initialized with a workspace at
100516 - *               least as large as ZSTD_CCtxWorkspaceBound(cParams) where
100517 - *               cParams are the compression parameters used to initialize the
100518 - *               cdict.
100519 - * @dst:         The buffer to compress src into.
100520 - * @dstCapacity: The size of the destination buffer. May be any size, but
100521 - *               ZSTD_compressBound(srcSize) is guaranteed to be large enough.
100522 - * @src:         The data to compress.
100523 - * @srcSize:     The size of the data to compress.
100524 - * @cdict:       The digested dictionary to use for compression.
100525 - * @params:      The parameters to use for compression. See ZSTD_getParams().
100527 - * Compression using a digested dictionary. The same dictionary must be used
100528 - * during decompression.
100530 - * Return:       The compressed size or an error, which can be checked using
100531 - *               ZSTD_isError().
100532 - */
100533 -size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
100534 -       const void *src, size_t srcSize, const ZSTD_CDict *cdict);
100535 +/* ======   Single-pass Decompression   ====== */
100537 +typedef ZSTD_DCtx zstd_dctx;
100539  /**
100540 - * ZSTD_DDictWorkspaceBound() - memory needed to initialize a ZSTD_DDict
100541 + * zstd_dctx_workspace_bound() - max memory needed to initialize a zstd_dctx
100542   *
100543 - * Return:  A lower bound on the size of the workspace that is passed to
100544 - *          ZSTD_initDDict().
100545 - */
100546 -size_t ZSTD_DDictWorkspaceBound(void);
100549 - * struct ZSTD_DDict - a digested dictionary to be used for decompression
100550 + * Return: A lower bound on the size of the workspace that is passed to
100551 + *         zstd_init_dctx().
100552   */
100553 -typedef struct ZSTD_DDict_s ZSTD_DDict;
100554 +size_t zstd_dctx_workspace_bound(void);
100556  /**
100557 - * ZSTD_initDDict() - initialize a digested dictionary for decompression
100558 - * @dictBuffer:    The dictionary to digest. The buffer is referenced by the
100559 - *                 ZSTD_DDict so it must outlive the returned ZSTD_DDict.
100560 - * @dictSize:      The size of the dictionary.
100561 - * @workspace:     The workspace. It must outlive the returned ZSTD_DDict.
100562 - * @workspaceSize: The workspace size. Must be at least
100563 - *                 ZSTD_DDictWorkspaceBound().
100565 - * When decompressing multiple messages / blocks with the same dictionary it is
100566 - * recommended to load it just once. The ZSTD_DDict merely references the
100567 - * dictBuffer, so it must outlive the returned ZSTD_DDict.
100568 + * zstd_init_dctx() - initialize a zstd decompression context
100569 + * @workspace:      The workspace to emplace the context into. It must outlive
100570 + *                  the returned context.
100571 + * @workspace_size: The size of workspace. Use zstd_dctx_workspace_bound() to
100572 + *                  determine how large the workspace must be.
100573   *
100574 - * Return:         The digested dictionary emplaced into workspace.
100575 + * Return:          A zstd decompression context or NULL on error.
100576   */
100577 -ZSTD_DDict *ZSTD_initDDict(const void *dictBuffer, size_t dictSize,
100578 -       void *workspace, size_t workspaceSize);
100579 +zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size);
100581  /**
100582 - * ZSTD_decompress_usingDDict() - decompress src into dst using a ZSTD_DDict
100583 - * @ctx:         The decompression context.
100584 - * @dst:         The buffer to decompress src into.
100585 - * @dstCapacity: The size of the destination buffer. Must be at least as large
100586 - *               as the decompressed size. If the caller cannot upper bound the
100587 - *               decompressed size, then it's better to use the streaming API.
100588 - * @src:         The zstd compressed data to decompress. Multiple concatenated
100589 - *               frames and skippable frames are allowed.
100590 - * @srcSize:     The exact size of the data to decompress.
100591 - * @ddict:       The digested dictionary to use for decompression. The same
100592 - *               dictionary must've been used to compress the data.
100593 + * zstd_decompress_dctx() - decompress zstd compressed src into dst
100594 + * @dctx:         The decompression context.
100595 + * @dst:          The buffer to decompress src into.
100596 + * @dst_capacity: The size of the destination buffer. Must be at least as large
100597 + *                as the decompressed size. If the caller cannot upper bound the
100598 + *                decompressed size, then it's better to use the streaming API.
100599 + * @src:          The zstd compressed data to decompress. Multiple concatenated
100600 + *                frames and skippable frames are allowed.
100601 + * @src_size:     The exact size of the data to decompress.
100602   *
100603 - * Return:       The decompressed size or an error, which can be checked using
100604 - *               ZSTD_isError().
100605 + * Return:        The decompressed size or an error, which can be checked using
100606 + *                zstd_is_error().
100607   */
100608 -size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst,
100609 -       size_t dstCapacity, const void *src, size_t srcSize,
100610 -       const ZSTD_DDict *ddict);
100611 +size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity,
100612 +       const void *src, size_t src_size);
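The decompression side is symmetric; again a sketch under the same assumptions (dst must already be large enough, as the doc above requires). example_decompress() is hypothetical.

static size_t example_decompress(void *dst, size_t dst_capacity,
                                 const void *src, size_t src_size)
{
        size_t wksp_size = zstd_dctx_workspace_bound();
        void *wksp = kvmalloc(wksp_size, GFP_KERNEL);
        zstd_dctx *dctx;
        size_t ret = 0;

        if (!wksp)
                return 0;
        dctx = zstd_init_dctx(wksp, wksp_size);
        if (dctx)
                ret = zstd_decompress_dctx(dctx, dst, dst_capacity,
                                           src, src_size);
        kvfree(wksp);
        return ret;     /* callers test with zstd_is_error() */
}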
100615 -/*-**************************
100616 - * Streaming
100617 - ***************************/
100618 +/* ======   Streaming Buffers   ====== */
100620  /**
100621 - * struct ZSTD_inBuffer - input buffer for streaming
100622 + * struct zstd_in_buffer - input buffer for streaming
100623   * @src:  Start of the input buffer.
100624   * @size: Size of the input buffer.
100625   * @pos:  Position where reading stopped. Will be updated.
100626   *        Necessarily 0 <= pos <= size.
100628 + * See zstd_lib.h.
100629   */
100630 -typedef struct ZSTD_inBuffer_s {
100631 -       const void *src;
100632 -       size_t size;
100633 -       size_t pos;
100634 -} ZSTD_inBuffer;
100635 +typedef ZSTD_inBuffer zstd_in_buffer;
100637  /**
100638 - * struct ZSTD_outBuffer - output buffer for streaming
100639 + * struct zstd_out_buffer - output buffer for streaming
100640   * @dst:  Start of the output buffer.
100641   * @size: Size of the output buffer.
100642   * @pos:  Position where writing stopped. Will be updated.
100643   *        Necessarily 0 <= pos <= size.
100645 + * See zstd_lib.h.
100646   */
100647 -typedef struct ZSTD_outBuffer_s {
100648 -       void *dst;
100649 -       size_t size;
100650 -       size_t pos;
100651 -} ZSTD_outBuffer;
100652 +typedef ZSTD_outBuffer zstd_out_buffer;
100654 +/* ======   Streaming Compression   ====== */
100657 -/*-*****************************************************************************
100658 - * Streaming compression - HowTo
100660 - * A ZSTD_CStream object is required to track streaming operation.
100661 - * Use ZSTD_initCStream() to initialize a ZSTD_CStream object.
100662 - * ZSTD_CStream objects can be reused multiple times on consecutive compression
100663 - * operations. It is recommended to re-use ZSTD_CStream in situations where many
100664 - * streaming operations will be achieved consecutively. Use one separate
100665 - * ZSTD_CStream per thread for parallel execution.
100667 - * Use ZSTD_compressStream() repetitively to consume input stream.
100668 - * The function will automatically update both `pos` fields.
100669 - * Note that it may not consume the entire input, in which case `pos < size`,
100670 - * and it's up to the caller to present again remaining data.
100671 - * It returns a hint for the preferred number of bytes to use as an input for
100672 - * the next function call.
100674 - * At any moment, it's possible to flush whatever data remains within internal
100675 - * buffer, using ZSTD_flushStream(). `output->pos` will be updated. There might
100676 - * still be some content left within the internal buffer if `output->size` is
100677 - * too small. It returns the number of bytes left in the internal buffer and
100678 - * must be called until it returns 0.
100680 - * ZSTD_endStream() instructs to finish a frame. It will perform a flush and
100681 - * write frame epilogue. The epilogue is required for decoders to consider a
100682 - * frame completed. Similar to ZSTD_flushStream(), it may not be able to flush
100683 - * the full content if `output->size` is too small. In which case, call again
100684 - * ZSTD_endStream() to complete the flush. It returns the number of bytes left
100685 - * in the internal buffer and must be called until it returns 0.
100686 - ******************************************************************************/
100687 +typedef ZSTD_CStream zstd_cstream;
100689  /**
100690 - * ZSTD_CStreamWorkspaceBound() - memory needed to initialize a ZSTD_CStream
100691 - * @cParams: The compression parameters to be used for compression.
100692 + * zstd_cstream_workspace_bound() - memory needed to initialize a zstd_cstream
100693 + * @cparams: The compression parameters to be used for compression.
100694   *
100695   * Return:   A lower bound on the size of the workspace that is passed to
100696 - *           ZSTD_initCStream() and ZSTD_initCStream_usingCDict().
100697 - */
100698 -size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams);
100701 - * struct ZSTD_CStream - the zstd streaming compression context
100702 - */
100703 -typedef struct ZSTD_CStream_s ZSTD_CStream;
100705 -/*===== ZSTD_CStream management functions =====*/
100707 - * ZSTD_initCStream() - initialize a zstd streaming compression context
100708 - * @params:         The zstd compression parameters.
100709 - * @pledgedSrcSize: If params.fParams.contentSizeFlag == 1 then the caller must
100710 - *                  pass the source size (zero means empty source). Otherwise,
100711 - *                  the caller may optionally pass the source size, or zero if
100712 - *                  unknown.
100713 - * @workspace:      The workspace to emplace the context into. It must outlive
100714 - *                  the returned context.
100715 - * @workspaceSize:  The size of workspace.
100716 - *                  Use ZSTD_CStreamWorkspaceBound(params.cParams) to determine
100717 - *                  how large the workspace must be.
100719 - * Return:          The zstd streaming compression context.
100720 + *           zstd_init_cstream().
100721   */
100722 -ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params,
100723 -       unsigned long long pledgedSrcSize, void *workspace,
100724 -       size_t workspaceSize);
100725 +size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams);
100727  /**
100728 - * ZSTD_initCStream_usingCDict() - initialize a streaming compression context
100729 - * @cdict:          The digested dictionary to use for compression.
100730 - * @pledgedSrcSize: Optionally the source size, or zero if unknown.
100731 - * @workspace:      The workspace to emplace the context into. It must outlive
100732 - *                  the returned context.
100733 - * @workspaceSize:  The size of workspace. Call ZSTD_CStreamWorkspaceBound()
100734 - *                  with the cParams used to initialize the cdict to determine
100735 - *                  how large the workspace must be.
100736 + * zstd_init_cstream() - initialize a zstd streaming compression context
100737 + * @parameters:       The zstd parameters to use for compression.
100738 + * @pledged_src_size: If params.fParams.contentSizeFlag == 1 then the caller
100739 + *                    must pass the source size (zero means empty source).
100740 + *                    Otherwise, the caller may optionally pass the source
100741 + *                    size, or zero if unknown.
100742 + * @workspace:        The workspace to emplace the context into. It must outlive
100743 + *                    the returned context.
100744 + * @workspace_size:   The size of workspace.
100745 + *                    Use zstd_cstream_workspace_bound(&params->cParams) to
100746 + *                    determine how large the workspace must be.
100747   *
100748 - * Return:          The zstd streaming compression context.
100749 + * Return:            The zstd streaming compression context or NULL on error.
100750   */
100751 -ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict,
100752 -       unsigned long long pledgedSrcSize, void *workspace,
100753 -       size_t workspaceSize);
100754 +zstd_cstream *zstd_init_cstream(const zstd_parameters *parameters,
100755 +       unsigned long long pledged_src_size, void *workspace, size_t workspace_size);
100757 -/*===== Streaming compression functions =====*/
100758  /**
100759 - * ZSTD_resetCStream() - reset the context using parameters from creation
100760 - * @zcs:            The zstd streaming compression context to reset.
100761 - * @pledgedSrcSize: Optionally the source size, or zero if unknown.
100762 + * zstd_reset_cstream() - reset the context using parameters from creation
100763 + * @cstream:          The zstd streaming compression context to reset.
100764 + * @pledged_src_size: Optionally the source size, or zero if unknown.
100765   *
100766   * Resets the context using the parameters from creation. Skips dictionary
100767 - * loading, since it can be reused. If `pledgedSrcSize` is non-zero the frame
100768 + * loading, since it can be reused. If `pledged_src_size` is non-zero the frame
100769   * content size is always written into the frame header.
100770   *
100771 - * Return:          Zero or an error, which can be checked using ZSTD_isError().
100772 + * Return:            Zero or an error, which can be checked using
100773 + *                    zstd_is_error().
100774   */
100775 -size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize);
100776 +size_t zstd_reset_cstream(zstd_cstream *cstream,
100777 +       unsigned long long pledged_src_size);
100779  /**
100780 - * ZSTD_compressStream() - streaming compress some of input into output
100781 - * @zcs:    The zstd streaming compression context.
100782 - * @output: Destination buffer. `output->pos` is updated to indicate how much
100783 - *          compressed data was written.
100784 - * @input:  Source buffer. `input->pos` is updated to indicate how much data was
100785 - *          read. Note that it may not consume the entire input, in which case
100786 - *          `input->pos < input->size`, and it's up to the caller to present
100787 - *          remaining data again.
100788 + * zstd_compress_stream() - streaming compress some of input into output
100789 + * @cstream: The zstd streaming compression context.
100790 + * @output:  Destination buffer. `output->pos` is updated to indicate how much
100791 + *           compressed data was written.
100792 + * @input:   Source buffer. `input->pos` is updated to indicate how much data
100793 + *           was read. Note that it may not consume the entire input, in which
100794 + *           case `input->pos < input->size`, and it's up to the caller to
100795 + *           present remaining data again.
100796   *
100797   * The `input` and `output` buffers may be any size. Guaranteed to make some
100798   * forward progress if `input` and `output` are not empty.
100799   *
100800 - * Return:  A hint for the number of bytes to use as the input for the next
100801 - *          function call or an error, which can be checked using
100802 - *          ZSTD_isError().
100803 + * Return:   A hint for the number of bytes to use as the input for the next
100804 + *           function call or an error, which can be checked using
100805 + *           zstd_is_error().
100806   */
100807 -size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output,
100808 -       ZSTD_inBuffer *input);
100809 +size_t zstd_compress_stream(zstd_cstream *cstream, zstd_out_buffer *output,
100810 +       zstd_in_buffer *input);
100812  /**
100813 - * ZSTD_flushStream() - flush internal buffers into output
100814 - * @zcs:    The zstd streaming compression context.
100815 - * @output: Destination buffer. `output->pos` is updated to indicate how much
100816 - *          compressed data was written.
100817 + * zstd_flush_stream() - flush internal buffers into output
100818 + * @cstream: The zstd streaming compression context.
100819 + * @output:  Destination buffer. `output->pos` is updated to indicate how much
100820 + *           compressed data was written.
100821   *
100822 - * ZSTD_flushStream() must be called until it returns 0, meaning all the data
100823 - * has been flushed. Since ZSTD_flushStream() causes a block to be ended,
100824 + * zstd_flush_stream() must be called until it returns 0, meaning all the data
100825 + * has been flushed. Since zstd_flush_stream() causes a block to be ended,
100826   * calling it too often will degrade the compression ratio.
100827   *
100828 - * Return:  The number of bytes still present within internal buffers or an
100829 - *          error, which can be checked using ZSTD_isError().
100830 + * Return:   The number of bytes still present within internal buffers or an
100831 + *           error, which can be checked using zstd_is_error().
100832   */
100833 -size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output);
100835 - * ZSTD_endStream() - flush internal buffers into output and end the frame
100836 - * @zcs:    The zstd streaming compression context.
100837 - * @output: Destination buffer. `output->pos` is updated to indicate how much
100838 - *          compressed data was written.
100840 - * ZSTD_endStream() must be called until it returns 0, meaning all the data has
100841 - * been flushed and the frame epilogue has been written.
100843 - * Return:  The number of bytes still present within internal buffers or an
100844 - *          error, which can be checked using ZSTD_isError().
100845 - */
100846 -size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output);
100847 +size_t zstd_flush_stream(zstd_cstream *cstream, zstd_out_buffer *output);
100849  /**
100850 - * ZSTD_CStreamInSize() - recommended size for the input buffer
100852 - * Return: The recommended size for the input buffer.
100853 - */
100854 -size_t ZSTD_CStreamInSize(void);
100856 - * ZSTD_CStreamOutSize() - recommended size for the output buffer
100857 + * zstd_end_stream() - flush internal buffers into output and end the frame
100858 + * @cstream: The zstd streaming compression context.
100859 + * @output:  Destination buffer. `output->pos` is updated to indicate how much
100860 + *           compressed data was written.
100861   *
100862 - * When the output buffer is at least this large, it is guaranteed to be large
100863 - * enough to flush at least one complete compressed block.
100864 + * zstd_end_stream() must be called until it returns 0, meaning all the data has
100865 + * been flushed and the frame epilogue has been written.
100866   *
100867 - * Return: The recommended size for the output buffer.
100868 + * Return:   The number of bytes still present within internal buffers or an
100869 + *           error, which can be checked using zstd_is_error().
100870   */
100871 -size_t ZSTD_CStreamOutSize(void);
100872 +size_t zstd_end_stream(zstd_cstream *cstream, zstd_out_buffer *output);
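A sketch of the streaming-compression loop implied by the three calls above: feed input with zstd_compress_stream(), then drain with zstd_end_stream() until it returns 0. emit() stands in for a hypothetical output sink; the cstream is assumed already set up via zstd_init_cstream().

static int example_stream_compress(zstd_cstream *cstream,
                                   const void *src, size_t src_size,
                                   void *obuf, size_t obuf_size)
{
        zstd_in_buffer in = { .src = src, .size = src_size, .pos = 0 };
        size_t ret;

        while (in.pos < in.size) {
                zstd_out_buffer out = { .dst = obuf, .size = obuf_size, .pos = 0 };

                ret = zstd_compress_stream(cstream, &out, &in);
                if (zstd_is_error(ret))
                        return -EIO;
                emit(obuf, out.pos);            /* hypothetical sink */
        }

        do {
                zstd_out_buffer out = { .dst = obuf, .size = obuf_size, .pos = 0 };

                ret = zstd_end_stream(cstream, &out);
                if (zstd_is_error(ret))
                        return -EIO;
                emit(obuf, out.pos);
        } while (ret != 0);     /* 0 => fully flushed, epilogue written */

        return 0;
}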
100874 +/* ======   Streaming Decompression   ====== */
100877 -/*-*****************************************************************************
100878 - * Streaming decompression - HowTo
100880 - * A ZSTD_DStream object is required to track streaming operations.
100881 - * Use ZSTD_initDStream() to initialize a ZSTD_DStream object.
100882 - * ZSTD_DStream objects can be re-used multiple times.
100884 - * Use ZSTD_decompressStream() repetitively to consume your input.
100885 - * The function will update both `pos` fields.
100886 - * If `input->pos < input->size`, some input has not been consumed.
100887 - * It's up to the caller to present again remaining data.
100888 - * If `output->pos < output->size`, decoder has flushed everything it could.
100889 - * Returns 0 iff a frame is completely decoded and fully flushed.
100890 - * Otherwise it returns a suggested next input size that will never load more
100891 - * than the current frame.
100892 - ******************************************************************************/
100893 +typedef ZSTD_DStream zstd_dstream;
100895  /**
100896 - * ZSTD_DStreamWorkspaceBound() - memory needed to initialize a ZSTD_DStream
100897 - * @maxWindowSize: The maximum window size allowed for compressed frames.
100898 + * zstd_dstream_workspace_bound() - memory needed to initialize a zstd_dstream
100899 + * @max_window_size: The maximum window size allowed for compressed frames.
100900   *
100901 - * Return:         A lower bound on the size of the workspace that is passed to
100902 - *                 ZSTD_initDStream() and ZSTD_initDStream_usingDDict().
100903 + * Return:           A lower bound on the size of the workspace that is passed
100904 + *                   to zstd_init_dstream().
100905   */
100906 -size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize);
100907 +size_t zstd_dstream_workspace_bound(size_t max_window_size);
100909  /**
100910 - * struct ZSTD_DStream - the zstd streaming decompression context
100911 - */
100912 -typedef struct ZSTD_DStream_s ZSTD_DStream;
100913 -/*===== ZSTD_DStream management functions =====*/
100915 - * ZSTD_initDStream() - initialize a zstd streaming decompression context
100916 - * @maxWindowSize: The maximum window size allowed for compressed frames.
100917 - * @workspace:     The workspace to emplace the context into. It must outlive
100918 - *                 the returned context.
100919 - * @workspaceSize: The size of workspace.
100920 - *                 Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine
100921 - *                 how large the workspace must be.
100923 - * Return:         The zstd streaming decompression context.
100924 - */
100925 -ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace,
100926 -       size_t workspaceSize);
100928 - * ZSTD_initDStream_usingDDict() - initialize streaming decompression context
100929 - * @maxWindowSize: The maximum window size allowed for compressed frames.
100930 - * @ddict:         The digested dictionary to use for decompression.
100931 - * @workspace:     The workspace to emplace the context into. It must outlive
100932 - *                 the returned context.
100933 - * @workspaceSize: The size of workspace.
100934 - *                 Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine
100935 - *                 how large the workspace must be.
100936 + * zstd_init_dstream() - initialize a zstd streaming decompression context
100937 + * @max_window_size: The maximum window size allowed for compressed frames.
100938 + * @workspace:       The workspace to emplace the context into. It must outlive
100939 + *                   the returned context.
101940 + * @workspace_size:  The size of workspace.
100941 + *                   Use zstd_dstream_workspace_bound(max_window_size) to
100942 + *                   determine how large the workspace must be.
100943   *
100944 - * Return:         The zstd streaming decompression context.
100945 + * Return:           The zstd streaming decompression context.
100946   */
100947 -ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize,
100948 -       const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize);
100949 +zstd_dstream *zstd_init_dstream(size_t max_window_size, void *workspace,
100950 +       size_t workspace_size);
100952 -/*===== Streaming decompression functions =====*/
100953  /**
100954 - * ZSTD_resetDStream() - reset the context using parameters from creation
100955 - * @zds:   The zstd streaming decompression context to reset.
100956 + * zstd_reset_dstream() - reset the context using parameters from creation
100957 + * @dstream: The zstd streaming decompression context to reset.
100958   *
100959   * Resets the context using the parameters from creation. Skips dictionary
100960   * loading, since it can be reused.
100961   *
100962 - * Return: Zero or an error, which can be checked using ZSTD_isError().
100963 + * Return:   Zero or an error, which can be checked using zstd_is_error().
100964   */
100965 -size_t ZSTD_resetDStream(ZSTD_DStream *zds);
100966 +size_t zstd_reset_dstream(zstd_dstream *dstream);
100968  /**
100969 - * ZSTD_decompressStream() - streaming decompress some of input into output
100970 - * @zds:    The zstd streaming decompression context.
100971 - * @output: Destination buffer. `output.pos` is updated to indicate how much
100972 - *          decompressed data was written.
100973 - * @input:  Source buffer. `input.pos` is updated to indicate how much data was
100974 - *          read. Note that it may not consume the entire input, in which case
100975 - *          `input.pos < input.size`, and it's up to the caller to present
100976 - *          remaining data again.
100977 + * zstd_decompress_stream() - streaming decompress some of input into output
100978 + * @dstream: The zstd streaming decompression context.
100979 + * @output:  Destination buffer. `output.pos` is updated to indicate how much
100980 + *           decompressed data was written.
100981 + * @input:   Source buffer. `input.pos` is updated to indicate how much data was
100982 + *           read. Note that it may not consume the entire input, in which case
100983 + *           `input.pos < input.size`, and it's up to the caller to present
100984 + *           remaining data again.
100985   *
100986   * The `input` and `output` buffers may be any size. Guaranteed to make some
100987   * forward progress if `input` and `output` are not empty.
100988 - * ZSTD_decompressStream() will not consume the last byte of the frame until
100989 + * zstd_decompress_stream() will not consume the last byte of the frame until
100990   * the entire frame is flushed.
100991   *
100992 - * Return:  Returns 0 iff a frame is completely decoded and fully flushed.
100993 - *          Otherwise returns a hint for the number of bytes to use as the input
100994 - *          for the next function call or an error, which can be checked using
100995 - *          ZSTD_isError(). The size hint will never load more than the frame.
100996 + * Return:   Returns 0 iff a frame is completely decoded and fully flushed.
100997 + *           Otherwise returns a hint for the number of bytes to use as the
100998 + *           input for the next function call or an error, which can be checked
100999 + *           using zstd_is_error(). The size hint will never load more than the
101000 + *           frame.
101001   */
101002 -size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output,
101003 -       ZSTD_inBuffer *input);
101004 +size_t zstd_decompress_stream(zstd_dstream *dstream, zstd_out_buffer *output,
101005 +       zstd_in_buffer *input);
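/*
 * Editorial sketch, not part of the patch: a minimal decompression loop
 * built only from the functions documented above. It assumes a kernel
 * context (kvmalloc/kvfree, GFP_KERNEL, errno constants) and that
 * zstd_in_buffer/zstd_out_buffer and zstd_is_error() are declared
 * earlier in this header, as the prototypes above imply.
 */
static int example_stream_decompress(const void *comp, size_t comp_size,
				     void *out, size_t out_size,
				     size_t max_window_size)
{
	size_t wksp_size = zstd_dstream_workspace_bound(max_window_size);
	void *wksp = kvmalloc(wksp_size, GFP_KERNEL);
	zstd_in_buffer in = { .src = comp, .size = comp_size, .pos = 0 };
	zstd_out_buffer dst = { .dst = out, .size = out_size, .pos = 0 };
	zstd_dstream *dstream;
	int err = -EIO;

	if (!wksp)
		return -ENOMEM;
	dstream = zstd_init_dstream(max_window_size, wksp, wksp_size);
	if (!dstream)
		goto out;
	for (;;) {
		size_t ret = zstd_decompress_stream(dstream, &dst, &in);

		if (zstd_is_error(ret))
			break;
		if (ret == 0) {		/* frame fully decoded and flushed */
			err = 0;
			break;
		}
		if (in.pos == in.size)	/* hint > 0 but no input left */
			break;
	}
out:
	kvfree(wksp);
	return err;
}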
101008 - * ZSTD_DStreamInSize() - recommended size for the input buffer
101010 - * Return: The recommended size for the input buffer.
101011 - */
101012 -size_t ZSTD_DStreamInSize(void);
101014 - * ZSTD_DStreamOutSize() - recommended size for the output buffer
101016 - * When the output buffer is at least this large, it is guaranteed to be large
101017 - * enough to flush at least one complete decompressed block.
101019 - * Return: The recommended size for the output buffer.
101020 - */
101021 -size_t ZSTD_DStreamOutSize(void);
101024 -/* --- Constants ---*/
101025 -#define ZSTD_MAGICNUMBER            0xFD2FB528   /* >= v0.8.0 */
101026 -#define ZSTD_MAGIC_SKIPPABLE_START  0x184D2A50U
101028 -#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
101029 -#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
101031 -#define ZSTD_WINDOWLOG_MAX_32  27
101032 -#define ZSTD_WINDOWLOG_MAX_64  27
101033 -#define ZSTD_WINDOWLOG_MAX \
101034 -       ((unsigned int)(sizeof(size_t) == 4 \
101035 -               ? ZSTD_WINDOWLOG_MAX_32 \
101036 -               : ZSTD_WINDOWLOG_MAX_64))
101037 -#define ZSTD_WINDOWLOG_MIN 10
101038 -#define ZSTD_HASHLOG_MAX ZSTD_WINDOWLOG_MAX
101039 -#define ZSTD_HASHLOG_MIN        6
101040 -#define ZSTD_CHAINLOG_MAX     (ZSTD_WINDOWLOG_MAX+1)
101041 -#define ZSTD_CHAINLOG_MIN      ZSTD_HASHLOG_MIN
101042 -#define ZSTD_HASHLOG3_MAX      17
101043 -#define ZSTD_SEARCHLOG_MAX    (ZSTD_WINDOWLOG_MAX-1)
101044 -#define ZSTD_SEARCHLOG_MIN      1
101045 -/* only for ZSTD_fast, other strategies are limited to 6 */
101046 -#define ZSTD_SEARCHLENGTH_MAX   7
101047 -/* only for ZSTD_btopt, other strategies are limited to 4 */
101048 -#define ZSTD_SEARCHLENGTH_MIN   3
101049 -#define ZSTD_TARGETLENGTH_MIN   4
101050 -#define ZSTD_TARGETLENGTH_MAX 999
101052 -/* for static allocation */
101053 -#define ZSTD_FRAMEHEADERSIZE_MAX 18
101054 -#define ZSTD_FRAMEHEADERSIZE_MIN  6
101055 -#define ZSTD_frameHeaderSize_prefix 5
101056 -#define ZSTD_frameHeaderSize_min ZSTD_FRAMEHEADERSIZE_MIN
101057 -#define ZSTD_frameHeaderSize_max ZSTD_FRAMEHEADERSIZE_MAX
101058 -/* magic number + skippable frame length */
101059 -#define ZSTD_skippableHeaderSize 8
101062 -/*-*************************************
101063 - * Compressed size functions
101064 - **************************************/
101067 - * ZSTD_findFrameCompressedSize() - returns the size of a compressed frame
101068 - * @src:     Source buffer. It should point to the start of a zstd encoded frame
101069 - *           or a skippable frame.
101070 - * @srcSize: The size of the source buffer. It must be at least as large as the
101071 - *           size of the frame.
101073 - * Return:   The compressed size of the frame pointed to by `src` or an error,
101074 - *           which can be check with ZSTD_isError().
101075 - *           Suitable to pass to ZSTD_decompress() or similar functions.
101076 - */
101077 -size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize);
101079 -/*-*************************************
101080 - * Decompressed size functions
101081 - **************************************/
101083 - * ZSTD_getFrameContentSize() - returns the content size in a zstd frame header
101084 - * @src:     It should point to the start of a zstd encoded frame.
101085 - * @srcSize: The size of the source buffer. It must be at least as large as the
101086 - *           frame header. `ZSTD_frameHeaderSize_max` is always large enough.
101088 - * Return:   The frame content size stored in the frame header if known.
101089 - *           `ZSTD_CONTENTSIZE_UNKNOWN` if the content size isn't stored in the
101090 - *           frame header. `ZSTD_CONTENTSIZE_ERROR` on invalid input.
101091 - */
101092 -unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
101093 +/* ======   Frame Inspection Functions ====== */
101095  /**
101096 - * ZSTD_findDecompressedSize() - returns decompressed size of a series of frames
101097 - * @src:     It should point to the start of a series of zstd encoded and/or
101098 - *           skippable frames.
101099 - * @srcSize: The exact size of the series of frames.
101100 + * zstd_find_frame_compressed_size() - returns the size of a compressed frame
101101 + * @src:      Source buffer. It should point to the start of a zstd encoded
101102 + *            frame or a skippable frame.
101103 + * @src_size: The size of the source buffer. It must be at least as large as the
101104 + *            size of the frame.
101105   *
101106 - * If any zstd encoded frame in the series doesn't have the frame content size
101107 - * set, `ZSTD_CONTENTSIZE_UNKNOWN` is returned. But frame content size is always
101108 - * set when using ZSTD_compress(). The decompressed size can be very large.
101109 - * If the source is untrusted, the decompressed size could be wrong or
101110 - * intentionally modified. Always ensure the result fits within the
101111 - * application's authorized limits. ZSTD_findDecompressedSize() handles multiple
101112 - * frames, and so it must traverse the input to read each frame header. This is
101113 - * efficient as most of the data is skipped, however it does mean that all frame
101114 - * data must be present and valid.
101116 - * Return:   Decompressed size of all the data contained in the frames if known.
101117 - *           `ZSTD_CONTENTSIZE_UNKNOWN` if the decompressed size is unknown.
101118 - *           `ZSTD_CONTENTSIZE_ERROR` if an error occurred.
101119 - */
101120 -unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize);
101122 -/*-*************************************
101123 - * Advanced compression functions
101124 - **************************************/
101126 - * ZSTD_checkCParams() - ensure parameter values remain within authorized range
101127 - * @cParams: The zstd compression parameters.
101129 - * Return:   Zero or an error, which can be checked using ZSTD_isError().
101130 + * Return:    The compressed size of the frame pointed to by `src` or an error,
101131 + *            which can be checked with zstd_is_error().
101132 + *            Suitable to pass to zstd_decompress() or similar functions.
101133   */
101134 -size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams);
101135 +size_t zstd_find_frame_compressed_size(const void *src, size_t src_size);
101137  /**
101138 - * ZSTD_adjustCParams() - optimize parameters for a given srcSize and dictSize
101139 - * @srcSize:  Optionally the estimated source size, or zero if unknown.
101140 - * @dictSize: Optionally the estimated dictionary size, or zero if unknown.
101142 - * Return:    The optimized parameters.
101143 - */
101144 -ZSTD_compressionParameters ZSTD_adjustCParams(
101145 -       ZSTD_compressionParameters cParams, unsigned long long srcSize,
101146 -       size_t dictSize);
101148 -/*--- Advanced decompression functions ---*/
101151 - * ZSTD_isFrame() - returns true iff the buffer starts with a valid frame
101152 - * @buffer: The source buffer to check.
101153 - * @size:   The size of the source buffer, must be at least 4 bytes.
101155 - * Return: True iff the buffer starts with a zstd or skippable frame identifier.
101156 - */
101157 -unsigned int ZSTD_isFrame(const void *buffer, size_t size);
101160 - * ZSTD_getDictID_fromDict() - returns the dictionary id stored in a dictionary
101161 - * @dict:     The dictionary buffer.
101162 - * @dictSize: The size of the dictionary buffer.
101164 - * Return:    The dictionary id stored within the dictionary or 0 if the
101165 - *            dictionary is not a zstd dictionary. If it returns 0 the
101166 - *            dictionary can still be loaded as a content-only dictionary.
101167 - */
101168 -unsigned int ZSTD_getDictID_fromDict(const void *dict, size_t dictSize);
101171 - * ZSTD_getDictID_fromDDict() - returns the dictionary id stored in a ZSTD_DDict
101172 - * @ddict: The ddict to find the id of.
101174 - * Return: The dictionary id stored within `ddict` or 0 if the dictionary is not
101175 - *         a zstd dictionary. If it returns 0 `ddict` will be loaded as a
101176 - *         content-only dictionary.
101177 - */
101178 -unsigned int ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict);
101181 - * ZSTD_getDictID_fromFrame() - returns the dictionary id stored in a zstd frame
101182 - * @src:     Source buffer. It must be a zstd encoded frame.
101183 - * @srcSize: The size of the source buffer. It must be at least as large as the
101184 - *           frame header. `ZSTD_frameHeaderSize_max` is always large enough.
101186 - * Return:   The dictionary id required to decompress the frame stored within
101187 - *           `src` or 0 if the dictionary id could not be decoded. It can return
101188 - *           0 if the frame does not require a dictionary, the dictionary id
101189 - *           wasn't stored in the frame, `src` is not a zstd frame, or `srcSize`
101190 - *           is too small.
101191 - */
101192 -unsigned int ZSTD_getDictID_fromFrame(const void *src, size_t srcSize);
101195 - * struct ZSTD_frameParams - zstd frame parameters stored in the frame header
101196 - * @frameContentSize: The frame content size, or 0 if not present.
101197 + * struct zstd_frame_params - zstd frame parameters stored in the frame header
101198 + * @frameContentSize: The frame content size, or ZSTD_CONTENTSIZE_UNKNOWN if not
101199 + *                    present.
101200   * @windowSize:       The window size, or 0 if the frame is a skippable frame.
101201 + * @blockSizeMax:     The maximum block size.
101202 + * @frameType:        The frame type (zstd or skippable).
101203 + * @headerSize:       The size of the frame header.
101204   * @dictID:           The dictionary id, or 0 if not present.
101205   * @checksumFlag:     Whether a checksum was used.
101207 + * See zstd_lib.h.
101208   */
101209 -typedef struct {
101210 -       unsigned long long frameContentSize;
101211 -       unsigned int windowSize;
101212 -       unsigned int dictID;
101213 -       unsigned int checksumFlag;
101214 -} ZSTD_frameParams;
101215 +typedef ZSTD_frameHeader zstd_frame_header;
101217  /**
101218 - * ZSTD_getFrameParams() - extracts parameters from a zstd or skippable frame
101219 - * @fparamsPtr: On success the frame parameters are written here.
101220 - * @src:        The source buffer. It must point to a zstd or skippable frame.
101221 - * @srcSize:    The size of the source buffer. `ZSTD_frameHeaderSize_max` is
101222 - *              always large enough to succeed.
101223 + * zstd_get_frame_header() - extracts parameters from a zstd or skippable frame
101224 + * @params:   On success the frame parameters are written here.
101225 + * @src:      The source buffer. It must point to a zstd or skippable frame.
101226 + * @src_size: The size of the source buffer.
101227   *
101228 - * Return:      0 on success. If more data is required it returns how many bytes
101229 - *              must be provided to make forward progress. Otherwise it returns
101230 - *              an error, which can be checked using ZSTD_isError().
101231 + * Return:    0 on success. If more data is required it returns how many bytes
101232 + *            must be provided to make forward progress. Otherwise it returns
101233 + *            an error, which can be checked using zstd_is_error().
101234   */
101235 -size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src,
101236 -       size_t srcSize);
101238 -/*-*****************************************************************************
101239 - * Buffer-less and synchronous inner streaming functions
101241 - * This is an advanced API, giving full control over buffer management, for
101242 - * users which need direct control over memory.
101243 - * But it's also a complex one, with many restrictions (documented below).
101244 - * Prefer using normal streaming API for an easier experience
101245 - ******************************************************************************/
101247 -/*-*****************************************************************************
101248 - * Buffer-less streaming compression (synchronous mode)
101250 - * A ZSTD_CCtx object is required to track streaming operations.
101251 - * Use ZSTD_initCCtx() to initialize a context.
101252 - * ZSTD_CCtx object can be re-used multiple times within successive compression
101253 - * operations.
101255 - * Start by initializing a context.
101256 - * Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary
101257 - * compression,
101258 - * or ZSTD_compressBegin_advanced(), for finer parameter control.
101259 - * It's also possible to duplicate a reference context which has already been
101260 - * initialized, using ZSTD_copyCCtx()
101262 - * Then, consume your input using ZSTD_compressContinue().
101263 - * There are some important considerations to keep in mind when using this
101264 - * advanced function :
101265 - * - ZSTD_compressContinue() has no internal buffer. It uses externally provided
101266 - *   buffer only.
101267 - * - Interface is synchronous : input is consumed entirely and produce 1+
101268 - *   (or more) compressed blocks.
101269 - * - Caller must ensure there is enough space in `dst` to store compressed data
101270 - *   under worst case scenario. Worst case evaluation is provided by
101271 - *   ZSTD_compressBound().
101272 - *   ZSTD_compressContinue() doesn't guarantee recover after a failed
101273 - *   compression.
101274 - * - ZSTD_compressContinue() presumes prior input ***is still accessible and
101275 - *   unmodified*** (up to maximum distance size, see WindowLog).
101276 - *   It remembers all previous contiguous blocks, plus one separated memory
101277 - *   segment (which can itself consists of multiple contiguous blocks)
101278 - * - ZSTD_compressContinue() detects that prior input has been overwritten when
101279 - *   `src` buffer overlaps. In which case, it will "discard" the relevant memory
101280 - *   section from its history.
101282 - * Finish a frame with ZSTD_compressEnd(), which will write the last block(s)
101283 - * and optional checksum. It's possible to use srcSize==0, in which case, it
101284 - * will write a final empty block to end the frame. Without last block mark,
101285 - * frames will be considered unfinished (corrupted) by decoders.
101287 - * `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress some new
101288 - * frame.
101289 - ******************************************************************************/
101291 -/*=====   Buffer-less streaming compression functions  =====*/
101292 -size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel);
101293 -size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict,
101294 -       size_t dictSize, int compressionLevel);
101295 -size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict,
101296 -       size_t dictSize, ZSTD_parameters params,
101297 -       unsigned long long pledgedSrcSize);
101298 -size_t ZSTD_copyCCtx(ZSTD_CCtx *cctx, const ZSTD_CCtx *preparedCCtx,
101299 -       unsigned long long pledgedSrcSize);
101300 -size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict,
101301 -       unsigned long long pledgedSrcSize);
101302 -size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
101303 -       const void *src, size_t srcSize);
101304 -size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
101305 -       const void *src, size_t srcSize);
101309 -/*-*****************************************************************************
101310 - * Buffer-less streaming decompression (synchronous mode)
101312 - * A ZSTD_DCtx object is required to track streaming operations.
101313 - * Use ZSTD_initDCtx() to initialize a context.
101314 - * A ZSTD_DCtx object can be re-used multiple times.
101316 - * First typical operation is to retrieve frame parameters, using
101317 - * ZSTD_getFrameParams(). It fills a ZSTD_frameParams structure which provide
101318 - * important information to correctly decode the frame, such as the minimum
101319 - * rolling buffer size to allocate to decompress data (`windowSize`), and the
101320 - * dictionary ID used.
101321 - * Note: content size is optional, it may not be present. 0 means unknown.
101322 - * Note that these values could be wrong, either because of data malformation,
101323 - * or because an attacker is spoofing deliberate false information. As a
101324 - * consequence, check that values remain within valid application range,
101325 - * especially `windowSize`, before allocation. Each application can set its own
101326 - * limit, depending on local restrictions. For extended interoperability, it is
101327 - * recommended to support at least 8 MB.
101328 - * Frame parameters are extracted from the beginning of the compressed frame.
101329 - * Data fragment must be large enough to ensure successful decoding, typically
101330 - * `ZSTD_frameHeaderSize_max` bytes.
101331 - * Result: 0: successful decoding, the `ZSTD_frameParams` structure is filled.
101332 - *        >0: `srcSize` is too small, provide at least this many bytes.
101333 - *        errorCode, which can be tested using ZSTD_isError().
101335 - * Start decompression, with ZSTD_decompressBegin() or
101336 - * ZSTD_decompressBegin_usingDict(). Alternatively, you can copy a prepared
101337 - * context, using ZSTD_copyDCtx().
101339 - * Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue()
101340 - * alternatively.
101341 - * ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize'
101342 - * to ZSTD_decompressContinue().
101343 - * ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will
101344 - * fail.
101346 - * The result of ZSTD_decompressContinue() is the number of bytes regenerated
101347 - * within 'dst' (necessarily <= dstCapacity). It can be zero, which is not an
101348 - * error; it just means ZSTD_decompressContinue() has decoded some metadata
101349 - * item. It can also be an error code, which can be tested with ZSTD_isError().
101351 - * ZSTD_decompressContinue() needs previous data blocks during decompression, up
101352 - * to `windowSize`. They should preferably be located contiguously, prior to
101353 - * current block. Alternatively, a round buffer of sufficient size is also
101354 - * possible. Sufficient size is determined by frame parameters.
101355 - * ZSTD_decompressContinue() is very sensitive to contiguity, if 2 blocks don't
101356 - * follow each other, make sure that either the compressor breaks contiguity at
101357 - * the same place, or that previous contiguous segment is large enough to
101358 - * properly handle maximum back-reference.
101360 - * A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
101361 - * Context can then be reset to start a new decompression.
101363 - * Note: it's possible to know if next input to present is a header or a block,
101364 - * using ZSTD_nextInputType(). This information is not required to properly
101365 - * decode a frame.
101367 - * == Special case: skippable frames ==
101369 - * Skippable frames allow integration of user-defined data into a flow of
101370 - * concatenated frames. Skippable frames will be ignored (skipped) by a
101371 - * decompressor. The format of skippable frames is as follows:
101372 - * a) Skippable frame ID - 4 Bytes, Little endian format, any value from
101373 - *    0x184D2A50 to 0x184D2A5F
101374 - * b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
101375 - * c) Frame Content - any content (User Data) of length equal to Frame Size
101376 - * For skippable frames ZSTD_decompressContinue() always returns 0.
101377 - * For skippable frames ZSTD_getFrameParams() returns fparamsPtr->windowLog==0
101378 - * what means that a frame is skippable.
101379 - * Note: If fparamsPtr->frameContentSize==0, it is ambiguous: the frame might
101380 - *       actually be a zstd encoded frame with no content. For purposes of
101381 - *       decompression, it is valid in both cases to skip the frame using
101382 - *       ZSTD_findFrameCompressedSize() to find its size in bytes.
101383 - * It also returns frame size as fparamsPtr->frameContentSize.
101384 - ******************************************************************************/
101386 -/*=====   Buffer-less streaming decompression functions  =====*/
101387 -size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx);
101388 -size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict,
101389 -       size_t dictSize);
101390 -void   ZSTD_copyDCtx(ZSTD_DCtx *dctx, const ZSTD_DCtx *preparedDCtx);
101391 -size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx);
101392 -size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
101393 -       const void *src, size_t srcSize);
101394 -typedef enum {
101395 -       ZSTDnit_frameHeader,
101396 -       ZSTDnit_blockHeader,
101397 -       ZSTDnit_block,
101398 -       ZSTDnit_lastBlock,
101399 -       ZSTDnit_checksum,
101400 -       ZSTDnit_skippableFrame
101401 -} ZSTD_nextInputType_e;
101402 -ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx);
101404 -/*-*****************************************************************************
101405 - * Block functions
101407 - * Block functions produce and decode raw zstd blocks, without frame metadata.
101408 - * Frame metadata cost is typically ~18 bytes, which can be non-negligible for
101409 - * very small blocks (< 100 bytes). User will have to take in charge required
101410 - * information to regenerate data, such as compressed and content sizes.
101412 - * A few rules to respect:
101413 - * - Compressing and decompressing require a context structure
101414 - *   + Use ZSTD_initCCtx() and ZSTD_initDCtx()
101415 - * - It is necessary to init context before starting
101416 - *   + compression : ZSTD_compressBegin()
101417 - *   + decompression : ZSTD_decompressBegin()
101418 - *   + variants _usingDict() are also allowed
101419 - *   + copyCCtx() and copyDCtx() work too
101420 - * - Block size is limited, it must be <= ZSTD_getBlockSizeMax()
101421 - *   + If you need to compress more, cut data into multiple blocks
101422 - *   + Consider using the regular ZSTD_compress() instead, as frame metadata
101423 - *     costs become negligible when source size is large.
101424 - * - When a block is considered not compressible enough, ZSTD_compressBlock()
101425 - *   result will be zero. In which case, nothing is produced into `dst`.
101426 - *   + User must test for such outcome and deal directly with uncompressed data
101427 - *   + ZSTD_decompressBlock() doesn't accept uncompressed data as input!!!
101428 - *   + In case of multiple successive blocks, decoder must be informed of
101429 - *     uncompressed block existence to follow proper history. Use
101430 - *     ZSTD_insertBlock() in such a case.
101431 - ******************************************************************************/
101433 -/* Define for static allocation */
101434 -#define ZSTD_BLOCKSIZE_ABSOLUTEMAX (128 * 1024)
101435 -/*=====   Raw zstd block functions  =====*/
101436 -size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx);
101437 -size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
101438 -       const void *src, size_t srcSize);
101439 -size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
101440 -       const void *src, size_t srcSize);
101441 -size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart,
101442 -       size_t blockSize);
101443 +size_t zstd_get_frame_header(zstd_frame_header *params, const void *src,
101444 +       size_t src_size);
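/*
 * Editorial sketch, not part of the patch: bound-checking a frame header
 * before committing memory, in the spirit of the buffer-less notes
 * removed above (which suggested supporting at least 8 MB for
 * interoperability). EXAMPLE_MAX_WINDOW is a hypothetical caller policy;
 * kernel errno values are assumed available.
 */
#define EXAMPLE_MAX_WINDOW (8 * 1024 * 1024)

static int example_check_frame(const void *src, size_t src_size)
{
	zstd_frame_header params;
	size_t ret = zstd_get_frame_header(&params, src, src_size);

	if (zstd_is_error(ret))
		return -EINVAL;		/* malformed header */
	if (ret != 0)
		return -EAGAIN;		/* `ret` more bytes are needed */
	if (params.windowSize > EXAMPLE_MAX_WINDOW)
		return -E2BIG;		/* refuse oversized windows */
	return 0;
}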
101446 -#endif  /* ZSTD_H */
101447 +#endif  /* LINUX_ZSTD_H */
101448 diff --git a/include/linux/zstd_errors.h b/include/linux/zstd_errors.h
101449 new file mode 100644
101450 index 000000000000..ccb92064ef03
101451 --- /dev/null
101452 +++ b/include/linux/zstd_errors.h
101453 @@ -0,0 +1,77 @@
101455 + * Copyright (c) Yann Collet, Facebook, Inc.
101456 + * All rights reserved.
101458 + * This source code is licensed under both the BSD-style license (found in the
101459 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
101460 + * in the COPYING file in the root directory of this source tree).
101461 + * You may select, at your option, one of the above-listed licenses.
101462 + */
101464 +#ifndef ZSTD_ERRORS_H_398273423
101465 +#define ZSTD_ERRORS_H_398273423
101468 +/*===== dependency =====*/
101469 +#include <linux/types.h>   /* size_t */
101472 +/* =====   ZSTDERRORLIB_API : control library symbols visibility   ===== */
101473 +#define ZSTDERRORLIB_VISIBILITY
101474 +#define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY
101476 +/*-*********************************************
101477 + *  Error codes list
101478 + *-*********************************************
101479 + *  Error codes _values_ are pinned down since v1.3.1 only.
101480 + *  Therefore, don't rely on values if you may link to any version < v1.3.1.
101482 + *  Only values < 100 are considered stable.
101484 + *  note 1 : this API shall be used with static linking only.
101485 + *           dynamic linking is not yet officially supported.
101486 + *  note 2 : Prefer relying on the enum rather than on its value whenever possible.
101487 + *           This is the only supported way to use the error list < v1.3.1
101488 + *  note 3 : ZSTD_isError() is always correct, whatever the library version.
101489 + **********************************************/
101490 +typedef enum {
101491 +  ZSTD_error_no_error = 0,
101492 +  ZSTD_error_GENERIC  = 1,
101493 +  ZSTD_error_prefix_unknown                = 10,
101494 +  ZSTD_error_version_unsupported           = 12,
101495 +  ZSTD_error_frameParameter_unsupported    = 14,
101496 +  ZSTD_error_frameParameter_windowTooLarge = 16,
101497 +  ZSTD_error_corruption_detected = 20,
101498 +  ZSTD_error_checksum_wrong      = 22,
101499 +  ZSTD_error_dictionary_corrupted      = 30,
101500 +  ZSTD_error_dictionary_wrong          = 32,
101501 +  ZSTD_error_dictionaryCreation_failed = 34,
101502 +  ZSTD_error_parameter_unsupported   = 40,
101503 +  ZSTD_error_parameter_outOfBound    = 42,
101504 +  ZSTD_error_tableLog_tooLarge       = 44,
101505 +  ZSTD_error_maxSymbolValue_tooLarge = 46,
101506 +  ZSTD_error_maxSymbolValue_tooSmall = 48,
101507 +  ZSTD_error_stage_wrong       = 60,
101508 +  ZSTD_error_init_missing      = 62,
101509 +  ZSTD_error_memory_allocation = 64,
101510 +  ZSTD_error_workSpace_tooSmall= 66,
101511 +  ZSTD_error_dstSize_tooSmall = 70,
101512 +  ZSTD_error_srcSize_wrong    = 72,
101513 +  ZSTD_error_dstBuffer_null   = 74,
101514 +  /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */
101515 +  ZSTD_error_frameIndex_tooLarge = 100,
101516 +  ZSTD_error_seekableIO          = 102,
101517 +  ZSTD_error_dstBuffer_wrong     = 104,
101518 +  ZSTD_error_srcBuffer_wrong     = 105,
101519 +  ZSTD_error_maxCode = 120  /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
101520 +} ZSTD_ErrorCode;
101522 +/*! ZSTD_getErrorCode() :
101523 +    convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,
101524 +    which can be used to compare with enum list published above */
101525 +ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
101526 +ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code);   /**< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */
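/*
 * Editorial sketch, not part of the patch: turning a size_t result into
 * a diagnosable error with the functions above. ZSTD_isError() is
 * declared in zstd_lib.h and is assumed visible to the caller here.
 */
static const char *example_describe_result(size_t ret)
{
	if (!ZSTD_isError(ret))
		return "ok";
	switch (ZSTD_getErrorCode(ret)) {
	case ZSTD_error_dstSize_tooSmall:
		return "destination buffer too small";
	case ZSTD_error_corruption_detected:
		return "input is corrupted";
	default:	/* fall back to the generic string table */
		return ZSTD_getErrorString(ZSTD_getErrorCode(ret));
	}
}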
101530 +#endif /* ZSTD_ERRORS_H_398273423 */
101531 diff --git a/include/linux/zstd_lib.h b/include/linux/zstd_lib.h
101532 new file mode 100644
101533 index 000000000000..d81779076217
101534 --- /dev/null
101535 +++ b/include/linux/zstd_lib.h
101536 @@ -0,0 +1,2431 @@
101538 + * Copyright (c) Yann Collet, Facebook, Inc.
101539 + * All rights reserved.
101541 + * This source code is licensed under both the BSD-style license (found in the
101542 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
101543 + * in the COPYING file in the root directory of this source tree).
101544 + * You may select, at your option, one of the above-listed licenses.
101545 + */
101547 +#ifndef ZSTD_H_235446
101548 +#define ZSTD_H_235446
101550 +/* ======   Dependency   ======*/
101551 +#include <linux/limits.h>   /* INT_MAX */
101552 +#include <linux/types.h>   /* size_t */
101555 +/* =====   ZSTDLIB_API : control library symbols visibility   ===== */
101556 +#define ZSTDLIB_VISIBILITY
101557 +#define ZSTDLIB_API ZSTDLIB_VISIBILITY
101560 +/*******************************************************************************
101561 +  Introduction
101563 +  zstd, short for Zstandard, is a fast lossless compression algorithm, targeting
101564 +  real-time compression scenarios at zlib-level and better compression ratios.
101565 +  The zstd compression library provides in-memory compression and decompression
101566 +  functions.
101568 +  The library supports regular compression levels from 1 up to ZSTD_maxCLevel(),
101569 +  which is currently 22. Levels >= 20, labeled `--ultra`, should be used with
101570 +  caution, as they require more memory. The library also offers negative
101571 +  compression levels, which extend the range of speed vs. ratio preferences.
101572 +  The lower the level, the faster the speed (at the cost of compression).
101574 +  Compression can be done in:
101575 +    - a single step (described as Simple API)
101576 +    - a single step, reusing a context (described as Explicit context)
101577 +    - unbounded multiple steps (described as Streaming compression)
101579 +  The compression ratio achievable on small data can be greatly improved using
101580 +  a dictionary. Dictionary compression can be performed in:
101581 +    - a single step (described as Simple dictionary API)
101582 +    - a single step, reusing a dictionary (described as Bulk-processing
101583 +      dictionary API)
101585 +  Advanced experimental functions can be accessed using
101586 +  `#define ZSTD_STATIC_LINKING_ONLY` before including zstd.h.
101588 +  Advanced experimental APIs should never be used with a dynamically-linked
101589 +  library. They are not "stable"; their definitions or signatures may change in
101590 +  the future. Only static linking is allowed.
101591 +*******************************************************************************/
101593 +/*------   Version   ------*/
101594 +#define ZSTD_VERSION_MAJOR    1
101595 +#define ZSTD_VERSION_MINOR    4
101596 +#define ZSTD_VERSION_RELEASE  10
101597 +#define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
101599 +/*! ZSTD_versionNumber() :
101600 + *  Return runtime library version, the value is (MAJOR*100*100 + MINOR*100 + RELEASE). */
101601 +ZSTDLIB_API unsigned ZSTD_versionNumber(void);
101603 +#define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE
101604 +#define ZSTD_QUOTE(str) #str
101605 +#define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str)
101606 +#define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION)
101608 +/*! ZSTD_versionString() :
101609 + *  Return runtime library version, like "1.4.5". Requires v1.3.0+. */
101610 +ZSTDLIB_API const char* ZSTD_versionString(void);
101612 +/* *************************************
101613 + *  Default constant
101614 + ***************************************/
101615 +#ifndef ZSTD_CLEVEL_DEFAULT
101616 +#  define ZSTD_CLEVEL_DEFAULT 3
101617 +#endif
101619 +/* *************************************
101620 + *  Constants
101621 + ***************************************/
101623 +/* All magic numbers are supposed read/written to/from files/memory using little-endian convention */
101624 +#define ZSTD_MAGICNUMBER            0xFD2FB528    /* valid since v0.8.0 */
101625 +#define ZSTD_MAGIC_DICTIONARY       0xEC30A437    /* valid since v0.7.0 */
101626 +#define ZSTD_MAGIC_SKIPPABLE_START  0x184D2A50    /* all 16 values, from 0x184D2A50 to 0x184D2A5F, signal the beginning of a skippable frame */
101627 +#define ZSTD_MAGIC_SKIPPABLE_MASK   0xFFFFFFF0
101629 +#define ZSTD_BLOCKSIZELOG_MAX  17
101630 +#define ZSTD_BLOCKSIZE_MAX     (1<<ZSTD_BLOCKSIZELOG_MAX)
101634 +/***************************************
101635 +*  Simple API
101636 +***************************************/
101637 +/*! ZSTD_compress() :
101638 + *  Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
101639 + *  Hint : compression runs faster if `dstCapacity` >=  `ZSTD_compressBound(srcSize)`.
101640 + *  @return : compressed size written into `dst` (<= `dstCapacity`),
101641 + *            or an error code if it fails (which can be tested using ZSTD_isError()). */
101642 +ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity,
101643 +                            const void* src, size_t srcSize,
101644 +                                  int compressionLevel);
101646 +/*! ZSTD_decompress() :
101647 + *  `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
101648 + *  `dstCapacity` is an upper bound of originalSize to regenerate.
101649 + *  If the user cannot infer a maximum upper bound, it's better to use streaming mode to decompress data.
101650 + *  @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
101651 + *            or an errorCode if it fails (which can be tested using ZSTD_isError()). */
101652 +ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity,
101653 +                              const void* src, size_t compressedSize);
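/*
 * Editorial sketch, not part of the patch: a one-shot round trip with the
 * simple API. ZSTD_compressBound() and ZSTD_isError() are declared under
 * "Helper functions" below; all buffers are caller-provided.
 */
static int example_roundtrip(void *dst, size_t dst_cap,
                             void *tmp, size_t tmp_cap,
                             const void *src, size_t src_size)
{
    size_t csize, dsize;

    /* tmp_cap >= ZSTD_compressBound(src_size) keeps compression fast */
    csize = ZSTD_compress(tmp, tmp_cap, src, src_size, ZSTD_CLEVEL_DEFAULT);
    if (ZSTD_isError(csize))
        return -1;
    /* dst_cap must be an upper bound of the original size */
    dsize = ZSTD_decompress(dst, dst_cap, tmp, csize);
    return (ZSTD_isError(dsize) || dsize != src_size) ? -1 : 0;
}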
101655 +/*! ZSTD_getFrameContentSize() : requires v1.3.0+
101656 + *  `src` should point to the start of a ZSTD encoded frame.
101657 + *  `srcSize` must be at least as large as the frame header.
101658 + *            hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
101659 + *  @return : - decompressed size of `src` frame content, if known
101660 + *            - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
101661 + *            - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
101662 + *   note 1 : a 0 return value means the frame is valid but "empty".
101663 + *   note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode.
101664 + *            When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
101665 + *            In which case, it's necessary to use streaming mode to decompress data.
101666 + *            Optionally, application can rely on some implicit limit,
101667 + *            as ZSTD_decompress() only needs an upper bound of decompressed size.
101668 + *            (For example, data could be necessarily cut into blocks <= 16 KB).
101669 + *   note 3 : decompressed size is always present when compression is completed using single-pass functions,
101670 + *            such as ZSTD_compress(), ZSTD_compressCCtx(), ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().
101671 + *   note 4 : decompressed size can be very large (64-bits value),
101672 + *            potentially larger than what local system can handle as a single memory segment.
101673 + *            In which case, it's necessary to use streaming mode to decompress data.
101674 + *   note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
101675 + *            Always ensure return value fits within application's authorized limits.
101676 + *            Each application can set its own limits.
101677 + *   note 6 : This function replaces ZSTD_getDecompressedSize() */
101678 +#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
101679 +#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
101680 +ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
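/*
 * Editorial sketch, not part of the patch: sizing the destination from
 * the frame header while honoring note 5 above. `limit` stands in for
 * the application's own authorized maximum.
 */
static int example_sized_decompress(void *dst, size_t limit,
                                    const void *src, size_t src_size)
{
    unsigned long long rsize = ZSTD_getFrameContentSize(src, src_size);

    if (rsize == ZSTD_CONTENTSIZE_ERROR)
        return -1;  /* not a valid zstd frame */
    if (rsize == ZSTD_CONTENTSIZE_UNKNOWN)
        return -1;  /* size not stored: fall back to streaming mode */
    if (rsize > limit)
        return -1;  /* untrusted header exceeds local policy */
    return ZSTD_isError(ZSTD_decompress(dst, (size_t)rsize,
                                        src, src_size)) ? -1 : 0;
}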
101682 +/*! ZSTD_getDecompressedSize() :
101683 + *  NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize().
101684 + *  Both functions work the same way, but ZSTD_getDecompressedSize() blends
101685 + *  "empty", "unknown" and "error" results to the same return value (0),
101686 + *  while ZSTD_getFrameContentSize() gives them separate return values.
101687 + * @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */
101688 +ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
101690 +/*! ZSTD_findFrameCompressedSize() :
101691 + * `src` should point to the start of a ZSTD frame or skippable frame.
101692 + * `srcSize` must be >= first frame size
101693 + * @return : the compressed size of the first frame starting at `src`,
101694 + *           suitable to pass as `srcSize` to `ZSTD_decompress` or similar,
101695 + *        or an error code if input is invalid */
101696 +ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
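/*
 * Editorial sketch, not part of the patch: walking a buffer of
 * concatenated frames, advancing by each frame's compressed size as
 * returned by ZSTD_findFrameCompressedSize().
 */
static int example_count_frames(const void *src, size_t src_size)
{
    const char *p = src;
    int frames = 0;

    while (src_size > 0) {
        size_t fsize = ZSTD_findFrameCompressedSize(p, src_size);

        if (ZSTD_isError(fsize))
            return -1;  /* invalid or truncated frame */
        p += fsize;
        src_size -= fsize;
        frames++;
    }
    return frames;
}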
101699 +/*======  Helper functions  ======*/
101700 +#define ZSTD_COMPRESSBOUND(srcSize)   ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0))  /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
101701 +ZSTDLIB_API size_t      ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
101702 +ZSTDLIB_API unsigned    ZSTD_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
101703 +ZSTDLIB_API const char* ZSTD_getErrorName(size_t code);     /*!< provides readable string from an error code */
101704 +ZSTDLIB_API int         ZSTD_minCLevel(void);               /*!< minimum negative compression level allowed */
101705 +ZSTDLIB_API int         ZSTD_maxCLevel(void);               /*!< maximum compression level available */
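/*
 * Editorial sketch, not part of the patch: ZSTD_COMPRESSBOUND() is a
 * compile-time constant, so it can size a static scratch buffer;
 * ZSTD_compressBound() is its run-time counterpart. EXAMPLE_SRC_MAX is
 * a hypothetical caller limit.
 */
#define EXAMPLE_SRC_MAX 4096
static char example_scratch[ZSTD_COMPRESSBOUND(EXAMPLE_SRC_MAX)];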
101708 +/***************************************
101709 +*  Explicit context
101710 +***************************************/
101711 +/*= Compression context
101712 + *  When compressing many times,
101713 + *  it is recommended to allocate a context just once,
101714 + *  and re-use it for each successive compression operation.
101715 + *  This makes the workload friendlier to the system's memory.
101716 + *  Note : re-using context is just a speed / resource optimization.
101717 + *         It doesn't change the compression ratio, which remains identical.
101718 + *  Note 2 : In multi-threaded environments,
101719 + *         use one different context per thread for parallel execution.
101720 + */
101721 +typedef struct ZSTD_CCtx_s ZSTD_CCtx;
101722 +ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);
101723 +ZSTDLIB_API size_t     ZSTD_freeCCtx(ZSTD_CCtx* cctx);  /* accept NULL pointer */
101725 +/*! ZSTD_compressCCtx() :
101726 + *  Same as ZSTD_compress(), using an explicit ZSTD_CCtx.
101727 + *  Important : in order to behave similarly to `ZSTD_compress()`,
101728 + *  this function compresses at requested compression level,
101729 + *  __ignoring any other parameter__ .
101730 + *  If any advanced parameter was set using the advanced API,
101731 + *  they will all be reset. Only `compressionLevel` remains.
101732 + */
101733 +ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
101734 +                                     void* dst, size_t dstCapacity,
101735 +                               const void* src, size_t srcSize,
101736 +                                     int compressionLevel);
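/*
 * Editorial sketch, not part of the patch: reusing one ZSTD_CCtx across
 * many inputs, per the note above. Whether ZSTD_createCCtx() may
 * allocate in this kernel build depends on the allocator shim; the
 * pattern is shown as in upstream zstd. `bufs`/`lens` are hypothetical
 * caller data.
 */
static int example_compress_many(void *dst, size_t dst_cap,
                                 const void **bufs, const size_t *lens,
                                 int n)
{
    ZSTD_CCtx *cctx = ZSTD_createCCtx();
    int i;

    if (cctx == NULL)
        return -1;
    for (i = 0; i < n; i++) {
        size_t ret = ZSTD_compressCCtx(cctx, dst, dst_cap,
                                       bufs[i], lens[i],
                                       ZSTD_CLEVEL_DEFAULT);
        if (ZSTD_isError(ret))
            break;
        /* ... consume the `ret` compressed bytes in dst ... */
    }
    ZSTD_freeCCtx(cctx);
    return (i == n) ? 0 : -1;
}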
101738 +/*= Decompression context
101739 + *  When decompressing many times,
101740 + *  it is recommended to allocate a context only once,
101741 + *  and re-use it for each successive decompression operation.
101742 + *  This makes the workload friendlier to the system's memory.
101743 + *  Use one context per thread for parallel execution. */
101744 +typedef struct ZSTD_DCtx_s ZSTD_DCtx;
101745 +ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx(void);
101746 +ZSTDLIB_API size_t     ZSTD_freeDCtx(ZSTD_DCtx* dctx);  /* accept NULL pointer */
101748 +/*! ZSTD_decompressDCtx() :
101749 + *  Same as ZSTD_decompress(),
101750 + *  requires an allocated ZSTD_DCtx.
101751 + *  Compatible with sticky parameters.
101752 + */
101753 +ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx,
101754 +                                       void* dst, size_t dstCapacity,
101755 +                                 const void* src, size_t srcSize);
101758 +/***************************************
101759 +*  Advanced compression API
101760 +***************************************/
101762 +/* API design :
101763 + *   Parameters are pushed one by one into an existing context,
101764 + *   using ZSTD_CCtx_set*() functions.
101765 + *   Pushed parameters are sticky : they are valid for next compressed frame, and any subsequent frame.
101766 + *   "sticky" parameters are applicable to `ZSTD_compress2()` and `ZSTD_compressStream*()` !
101767 + *   __They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()__ .
101769 + *   It's possible to reset all parameters to "default" using ZSTD_CCtx_reset().
101771 + *   This API supersedes all other "advanced" API entry points in the experimental section.
101772 + *   In the future, we expect to remove from experimental API entry points which are redundant with this API.
101773 + */
101776 +/* Compression strategies, listed from fastest to strongest */
101777 +typedef enum { ZSTD_fast=1,
101778 +               ZSTD_dfast=2,
101779 +               ZSTD_greedy=3,
101780 +               ZSTD_lazy=4,
101781 +               ZSTD_lazy2=5,
101782 +               ZSTD_btlazy2=6,
101783 +               ZSTD_btopt=7,
101784 +               ZSTD_btultra=8,
101785 +               ZSTD_btultra2=9
101786 +               /* note : new strategies _might_ be added in the future.
101787 +                         Only the order (from fast to strong) is guaranteed */
101788 +} ZSTD_strategy;
101791 +typedef enum {
101793 +    /* compression parameters
101794 +     * Note: When compressing with a ZSTD_CDict these parameters are superseded
101795 +     * by the parameters used to construct the ZSTD_CDict.
101796 +     * See ZSTD_CCtx_refCDict() for more info (superseded-by-cdict). */
101797 +    ZSTD_c_compressionLevel=100, /* Set compression parameters according to pre-defined cLevel table.
101798 +                              * Note that exact compression parameters are dynamically determined,
101799 +                              * depending on both compression level and srcSize (when known).
101800 +                              * Default level is ZSTD_CLEVEL_DEFAULT==3.
101801 +                              * Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT.
101802 +                              * Note 1 : it's possible to pass a negative compression level.
101803 +                              * Note 2 : setting a level does not automatically set all other compression parameters
101804 +                              *   to default. Setting this will however eventually dynamically impact the compression
101805 +                              *   parameters which have not been manually set. The manually set
101806 +                              *   ones will 'stick'. */
101807 +    /* Advanced compression parameters :
101808 +     * It's possible to pin down compression parameters to some specific values.
101809 +     * In which case, these values are no longer dynamically selected by the compressor */
101810 +    ZSTD_c_windowLog=101,    /* Maximum allowed back-reference distance, expressed as power of 2.
101811 +                              * This will set a memory budget for streaming decompression,
101812 +                              * with larger values requiring more memory
101813 +                              * and typically compressing more.
101814 +                              * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.
101815 +                              * Special: value 0 means "use default windowLog".
101816 +                              * Note: Using a windowLog greater than ZSTD_WINDOWLOG_LIMIT_DEFAULT
101817 +                              *       requires explicitly allowing such size at streaming decompression stage. */
101818 +    ZSTD_c_hashLog=102,      /* Size of the initial probe table, as a power of 2.
101819 +                              * Resulting memory usage is (1 << (hashLog+2)).
101820 +                              * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
101821 +                              * Larger tables improve compression ratio of strategies <= dFast,
101822 +                              * and improve speed of strategies > dFast.
101823 +                              * Special: value 0 means "use default hashLog". */
101824 +    ZSTD_c_chainLog=103,     /* Size of the multi-probe search table, as a power of 2.
101825 +                              * Resulting memory usage is (1 << (chainLog+2)).
101826 +                              * Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX.
101827 +                              * Larger tables result in better and slower compression.
101828 +                              * This parameter is useless for "fast" strategy.
101829 +                              * It's still useful when using "dfast" strategy,
101830 +                              * in which case it defines a secondary probe table.
101831 +                              * Special: value 0 means "use default chainLog". */
101832 +    ZSTD_c_searchLog=104,    /* Number of search attempts, as a power of 2.
101833 +                              * More attempts result in better and slower compression.
101834 +                              * This parameter is useless for "fast" and "dFast" strategies.
101835 +                              * Special: value 0 means "use default searchLog". */
101836 +    ZSTD_c_minMatch=105,     /* Minimum size of searched matches.
101837 +                              * Note that Zstandard can still find matches of smaller size,
101838 +                              * it just tweaks its search algorithm to look for this size and larger.
101839 +                              * Larger values increase compression and decompression speed, but decrease ratio.
101840 +                              * Must be clamped between ZSTD_MINMATCH_MIN and ZSTD_MINMATCH_MAX.
101841 +                              * Note that currently, for all strategies < btopt, the effective minimum is 4,
101842 +                              * and for all strategies > fast, the effective maximum is 6.
101843 +                              * Special: value 0 means "use default minMatchLength". */
101844 +    ZSTD_c_targetLength=106, /* Impact of this field depends on strategy.
101845 +                              * For strategies btopt, btultra & btultra2:
101846 +                              *     Length of Match considered "good enough" to stop search.
101847 +                              *     Larger values make compression stronger, and slower.
101848 +                              * For strategy fast:
101849 +                              *     Distance between match sampling.
101850 +                              *     Larger values make compression faster, and weaker.
101851 +                              * Special: value 0 means "use default targetLength". */
101852 +    ZSTD_c_strategy=107,     /* See ZSTD_strategy enum definition.
101853 +                              * The higher the value of selected strategy, the more complex it is,
101854 +                              * resulting in stronger and slower compression.
101855 +                              * Special: value 0 means "use default strategy". */
101857 +    /* LDM mode parameters */
101858 +    ZSTD_c_enableLongDistanceMatching=160, /* Enable long distance matching.
101859 +                                     * This parameter is designed to improve compression ratio
101860 +                                     * for large inputs, by finding large matches at long distance.
101861 +                                     * It increases memory usage and window size.
101862 +                                     * Note: enabling this parameter increases the default window size (ZSTD_c_windowLog) to 128 MB
101863 +                                     * except when expressly set to a different value.
101864 +                                     * Note: will be enabled by default if ZSTD_c_windowLog >= 128 MB and
101865 +                                     * compression strategy >= ZSTD_btopt (== compression level 16+) */
101866 +    ZSTD_c_ldmHashLog=161,   /* Size of the table for long distance matching, as a power of 2.
101867 +                              * Larger values increase memory usage and compression ratio,
101868 +                              * but decrease compression speed.
101869 +                              * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX
101870 +                              * default: windowlog - 7.
101871 +                              * Special: value 0 means "automatically determine hashlog". */
101872 +    ZSTD_c_ldmMinMatch=162,  /* Minimum match size for long distance matcher.
101873 +                              * Larger/too small values usually decrease compression ratio.
101874 +                              * Must be clamped between ZSTD_LDM_MINMATCH_MIN and ZSTD_LDM_MINMATCH_MAX.
101875 +                              * Special: value 0 means "use default value" (default: 64). */
101876 +    ZSTD_c_ldmBucketSizeLog=163, /* Log size of each bucket in the LDM hash table for collision resolution.
101877 +                              * Larger values improve collision resolution but decrease compression speed.
101878 +                              * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX.
101879 +                              * Special: value 0 means "use default value" (default: 3). */
101880 +    ZSTD_c_ldmHashRateLog=164, /* Frequency of inserting/looking up entries into the LDM hash table.
101881 +                              * Must be clamped between 0 and (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN).
101882 +                              * Default is MAX(0, (windowLog - ldmHashLog)), optimizing hash table usage.
101883 +                              * Larger values improve compression speed.
101884 +                              * Deviating far from default value will likely result in a compression ratio decrease.
101885 +                              * Special: value 0 means "automatically determine hashRateLog". */
101887 +    /* frame parameters */
101888 +    ZSTD_c_contentSizeFlag=200, /* Content size will be written into frame header _whenever known_ (default:1)
101889 +                              * Content size must be known at the beginning of compression.
101890 +                              * This is automatically the case when using ZSTD_compress2(),
101891 +                              * For streaming scenarios, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */
101892 +    ZSTD_c_checksumFlag=201, /* A 32-bits checksum of content is written at end of frame (default:0) */
101893 +    ZSTD_c_dictIDFlag=202,   /* When applicable, dictionary's ID is written into frame header (default:1) */
101895 +    /* multi-threading parameters */
101896 +    /* These parameters are only active if multi-threading is enabled (compiled with build macro ZSTD_MULTITHREAD).
101897 +     * Otherwise, trying to set any other value than default (0) will be a no-op and return an error.
101898 +     * In a situation where it's unknown if the linked library supports multi-threading or not,
101899 +     * setting ZSTD_c_nbWorkers to any value >= 1 and consulting the return value provides a quick way to check this property.
101900 +     */
101901 +    ZSTD_c_nbWorkers=400,    /* Select how many threads will be spawned to compress in parallel.
101902 +                              * When nbWorkers >= 1, triggers asynchronous mode when invoking ZSTD_compressStream*() :
101903 +                              * ZSTD_compressStream*() consumes input and flush output if possible, but immediately gives back control to caller,
101904 +                              * while compression is performed in parallel, within worker thread(s).
101905 +                              * (note : a strong exception to this rule is when first invocation of ZSTD_compressStream2() sets ZSTD_e_end :
101906 +                              *  in which case, ZSTD_compressStream2() delegates to ZSTD_compress2(), which is always a blocking call).
101907 +                              * More workers improve speed, but also increase memory usage.
101908 +                              * Default value is `0`, aka "single-threaded mode" : no worker is spawned,
101909 +                              * compression is performed inside the caller's thread, and all invocations are blocking */
101910 +    ZSTD_c_jobSize=401,      /* Size of a compression job. This value is enforced only when nbWorkers >= 1.
101911 +                              * Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads.
101912 +                              * 0 means default, which is dynamically determined based on compression parameters.
101913 +                              * Job size must be at least the overlap size, or 1 MB, whichever is larger.
101914 +                              * The minimum size is automatically and transparently enforced. */
101915 +    ZSTD_c_overlapLog=402,   /* Control the overlap size, as a fraction of window size.
101916 +                              * The overlap size is an amount of data reloaded from previous job at the beginning of a new job.
101917 +                              * It helps preserve compression ratio, while each job is compressed in parallel.
101918 +                              * This value is enforced only when nbWorkers >= 1.
101919 +                              * Larger values increase compression ratio, but decrease speed.
101920 +                              * Possible values range from 0 to 9 :
101921 +                              * - 0 means "default" : value will be determined by the library, depending on strategy
101922 +                              * - 1 means "no overlap"
101923 +                              * - 9 means "full overlap", using a full window size.
101924 +                              * Each intermediate rank increases/decreases load size by a factor 2 :
101925 +                              * 9: full window;  8: w/2;  7: w/4;  6: w/8;  5:w/16;  4: w/32;  3:w/64;  2:w/128;  1:no overlap;  0:default
101926 +                              * default value varies between 6 and 9, depending on strategy */
101928 +    /* note : additional experimental parameters are also available
101929 +     * within the experimental section of the API.
101930 +     * At the time of this writing, they include :
101931 +     * ZSTD_c_rsyncable
101932 +     * ZSTD_c_format
101933 +     * ZSTD_c_forceMaxWindow
101934 +     * ZSTD_c_forceAttachDict
101935 +     * ZSTD_c_literalCompressionMode
101936 +     * ZSTD_c_targetCBlockSize
101937 +     * ZSTD_c_srcSizeHint
101938 +     * ZSTD_c_enableDedicatedDictSearch
101939 +     * ZSTD_c_stableInBuffer
101940 +     * ZSTD_c_stableOutBuffer
101941 +     * ZSTD_c_blockDelimiters
101942 +     * ZSTD_c_validateSequences
101943 +     * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
101944 +     * note : never ever use experimentalParam? names directly;
101945 + *        also, the enum values themselves are unstable and can still change.
101946 +     */
101947 +     ZSTD_c_experimentalParam1=500,
101948 +     ZSTD_c_experimentalParam2=10,
101949 +     ZSTD_c_experimentalParam3=1000,
101950 +     ZSTD_c_experimentalParam4=1001,
101951 +     ZSTD_c_experimentalParam5=1002,
101952 +     ZSTD_c_experimentalParam6=1003,
101953 +     ZSTD_c_experimentalParam7=1004,
101954 +     ZSTD_c_experimentalParam8=1005,
101955 +     ZSTD_c_experimentalParam9=1006,
101956 +     ZSTD_c_experimentalParam10=1007,
101957 +     ZSTD_c_experimentalParam11=1008,
101958 +     ZSTD_c_experimentalParam12=1009
101959 +} ZSTD_cParameter;
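+/* Editor's note : an illustrative sketch (not part of the original header) of
+ * the multi-threading capability probe described above; `2` is an arbitrary
+ * worker count, and error handling is reduced to a flag.
+ *
+ *     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
+ *     size_t const ret = ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 2);
+ *     int const mtSupported = !ZSTD_isError(ret);   // error => single-threaded build
+ *     ZSTD_freeCCtx(cctx);
+ */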
101961 +typedef struct {
101962 +    size_t error;
101963 +    int lowerBound;
101964 +    int upperBound;
101965 +} ZSTD_bounds;
101967 +/*! ZSTD_cParam_getBounds() :
101968 + *  All parameters must belong to an interval with lower and upper bounds,
101969 + *  otherwise they will either trigger an error or be automatically clamped.
101970 + * @return : a structure, ZSTD_bounds, which contains
101971 + *         - an error status field, which must be tested using ZSTD_isError()
101972 + *         - lower and upper bounds, both inclusive
101973 + */
101974 +ZSTDLIB_API ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter cParam);
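+/* Editor's note : illustrative sketch (not part of the original header) of a
+ * bounds query, here for ZSTD_c_compressionLevel.
+ *
+ *     ZSTD_bounds const b = ZSTD_cParam_getBounds(ZSTD_c_compressionLevel);
+ *     if (!ZSTD_isError(b.error)) {
+ *         // any level in [b.lowerBound, b.upperBound] (inclusive) is accepted
+ *     }
+ */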
101976 +/*! ZSTD_CCtx_setParameter() :
101977 + *  Set one compression parameter, selected by enum ZSTD_cParameter.
101978 + *  All parameters have valid bounds. Bounds can be queried using ZSTD_cParam_getBounds().
101979 + *  Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).
101980 + *  Setting a parameter is generally only possible during frame initialization (before starting compression).
101981 + *  Exception : when using multi-threading mode (nbWorkers >= 1),
101982 + *              the following parameters can be updated _during_ compression (within same frame):
101983 + *              => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy.
101984 + *              new parameters will be active for next job only (after a flush()).
101985 + * @return : an error code (which can be tested using ZSTD_isError()).
101986 + */
101987 +ZSTDLIB_API size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value);
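+/* Editor's note : illustrative sketch (not part of the original header);
+ * every setter returns an error code, tested with ZSTD_isError(). `cctx` is
+ * assumed to be a valid ZSTD_CCtx*.
+ *
+ *     size_t err;
+ *     err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
+ *     if (ZSTD_isError(err)) return err;   // invalid parameter or value
+ *     err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
+ *     if (ZSTD_isError(err)) return err;
+ */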
101989 +/*! ZSTD_CCtx_setPledgedSrcSize() :
101990 + *  Total input data size to be compressed as a single frame.
101991 + *  Value will be written in frame header, unless explicitly forbidden using ZSTD_c_contentSizeFlag.
101992 + *  This value will also be checked at end of frame, and trigger an error if not respected.
101993 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
101994 + *  Note 1 : pledgedSrcSize==0 actually means zero, aka an empty frame.
101995 + *           In order to mean "unknown content size", pass constant ZSTD_CONTENTSIZE_UNKNOWN.
101996 + *           ZSTD_CONTENTSIZE_UNKNOWN is default value for any new frame.
101997 + *  Note 2 : pledgedSrcSize is only valid once, for the next frame.
101998 + *           It's discarded at the end of the frame, and replaced by ZSTD_CONTENTSIZE_UNKNOWN.
101999 + *  Note 3 : Whenever all input data is provided and consumed in a single round,
102000 + *           for example with ZSTD_compress2(),
102001 + *           or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end),
102002 + *           this value is automatically overridden by srcSize instead.
102003 + */
102004 +ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize);
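+/* Editor's note : illustrative sketch (not part of the original header) of
+ * pledging the source size for a streamed frame; `cctx` and `totalSrcSize`
+ * are assumed to be provided by the caller.
+ *
+ *     ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
+ *     ZSTD_CCtx_setPledgedSrcSize(cctx, (unsigned long long)totalSrcSize);
+ *     // ... then feed exactly totalSrcSize bytes through ZSTD_compressStream2() ...
+ */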
102006 +typedef enum {
102007 +    ZSTD_reset_session_only = 1,
102008 +    ZSTD_reset_parameters = 2,
102009 +    ZSTD_reset_session_and_parameters = 3
102010 +} ZSTD_ResetDirective;
102012 +/*! ZSTD_CCtx_reset() :
102013 + *  There are 2 different things that can be reset, independently or jointly :
102014 + *  - The session : will stop compressing current frame, and make CCtx ready to start a new one.
102015 + *                  Useful after an error, or to interrupt any ongoing compression.
102016 + *                  Any internal data not yet flushed is cancelled.
102017 + *                  Compression parameters and dictionary remain unchanged.
102018 + *                  They will be used to compress next frame.
102019 + *                  Resetting session never fails.
102020 + *  - The parameters : changes all parameters back to "default".
102021 + *                  This removes any reference to any dictionary too.
102022 + *                  Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing);
102023 + *                  otherwise the reset fails, and the function returns an error value (which can be tested using ZSTD_isError()).
102024 + *  - Both : similar to resetting the session, followed by resetting parameters.
102025 + */
102026 +ZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset);
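+/* Editor's note : the three directives side by side (illustrative, not part
+ * of the original header).
+ *
+ *     ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);            // abort frame, keep params + dict
+ *     ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);              // params to default, drop dict
+ *     ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters);  // both of the above
+ */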
102028 +/*! ZSTD_compress2() :
102029 + *  Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.
102030 + *  ZSTD_compress2() always starts a new frame.
102031 + *  Should cctx hold data from a previously unfinished frame, everything about it is forgotten.
102032 + *  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
102033 + *  - The function is always blocking, returns when compression is completed.
102034 + *  Hint : compression runs faster if `dstCapacity` >=  `ZSTD_compressBound(srcSize)`.
102035 + * @return : compressed size written into `dst` (<= `dstCapacity`),
102036 + *           or an error code if it fails (which can be tested using ZSTD_isError()).
102037 + */
102038 +ZSTDLIB_API size_t ZSTD_compress2( ZSTD_CCtx* cctx,
102039 +                                   void* dst, size_t dstCapacity,
102040 +                             const void* src, size_t srcSize);
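+/* Editor's note : illustrative one-shot use of the advanced API (not part of
+ * the original header); `src`/`srcSize` are assumed inputs, and allocation is
+ * shown with malloc() for brevity, which a kernel caller would replace.
+ *
+ *     size_t const dstCapacity = ZSTD_compressBound(srcSize);
+ *     void* const dst = malloc(dstCapacity);
+ *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3);
+ *     size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
+ *     if (ZSTD_isError(cSize)) return cSize;   // compression failed
+ */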
102043 +/***************************************
102044 +*  Advanced decompression API
102045 +***************************************/
102047 +/* The advanced API pushes parameters one by one into an existing DCtx context.
102048 + * Parameters are sticky, and remain valid for all following frames
102049 + * using the same DCtx context.
102050 + * It's possible to reset parameters to default values using ZSTD_DCtx_reset().
102051 + * Note : This API is compatible with existing ZSTD_decompressDCtx() and ZSTD_decompressStream().
102052 + *        Therefore, no new decompression function is necessary.
102053 + */
102055 +typedef enum {
102057 +    ZSTD_d_windowLogMax=100, /* Select a size limit (in power of 2) beyond which
102058 +                              * the streaming API will refuse to allocate memory buffer
102059 +                              * in order to protect the host from unreasonable memory requirements.
102060 +                              * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
102061 +                              * By default, a decompression context accepts window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT).
102062 +                              * Special: value 0 means "use default maximum windowLog". */
102064 +    /* note : additional experimental parameters are also available
102065 +     * within the experimental section of the API.
102066 +     * At the time of this writing, they include :
102067 +     * ZSTD_d_format
102068 +     * ZSTD_d_stableOutBuffer
102069 +     * ZSTD_d_forceIgnoreChecksum
102070 +     * ZSTD_d_refMultipleDDicts
102071 +     * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
102072 +     * note : never ever use experimentalParam? names directly
102073 +     */
102074 +     ZSTD_d_experimentalParam1=1000,
102075 +     ZSTD_d_experimentalParam2=1001,
102076 +     ZSTD_d_experimentalParam3=1002,
102077 +     ZSTD_d_experimentalParam4=1003
102079 +} ZSTD_dParameter;
102081 +/*! ZSTD_dParam_getBounds() :
102082 + *  All parameters must belong to an interval with lower and upper bounds,
102083 + *  otherwise they will either trigger an error or be automatically clamped.
102084 + * @return : a structure, ZSTD_bounds, which contains
102085 + *         - an error status field, which must be tested using ZSTD_isError()
102086 + *         - both lower and upper bounds, inclusive
102087 + */
102088 +ZSTDLIB_API ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam);
102090 +/*! ZSTD_DCtx_setParameter() :
102091 + *  Set one decompression parameter, selected by enum ZSTD_dParameter.
102092 + *  All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds().
102093 + *  Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).
102094 + *  Setting a parameter is only possible during frame initialization (before starting decompression).
102095 + * @return : 0, or an error code (which can be tested using ZSTD_isError()).
102096 + */
102097 +ZSTDLIB_API size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int value);
102099 +/*! ZSTD_DCtx_reset() :
102100 + *  Return a DCtx to clean state.
102101 + *  Session and parameters can be reset jointly or separately.
102102 + *  Parameters can only be reset when no active frame is being decompressed.
102103 + * @return : 0, or an error code, which can be tested with ZSTD_isError()
102104 + */
102105 +ZSTDLIB_API size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset);
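+/* Editor's note : illustrative sketch (not part of the original header) of
+ * bounding decoder memory on untrusted input via ZSTD_d_windowLogMax.
+ *
+ *     ZSTD_DCtx* const dctx = ZSTD_createDCtx();
+ *     size_t const err = ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 25);  // refuse windows > 32 MB
+ *     if (ZSTD_isError(err)) return err;
+ */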
102108 +/****************************
102109 +*  Streaming
102110 +****************************/
102112 +typedef struct ZSTD_inBuffer_s {
102113 +  const void* src;    /**< start of input buffer */
102114 +  size_t size;        /**< size of input buffer */
102115 +  size_t pos;         /**< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */
102116 +} ZSTD_inBuffer;
102118 +typedef struct ZSTD_outBuffer_s {
102119 +  void*  dst;         /**< start of output buffer */
102120 +  size_t size;        /**< size of output buffer */
102121 +  size_t pos;         /**< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */
102122 +} ZSTD_outBuffer;
102126 +/*-***********************************************************************
102127 +*  Streaming compression - HowTo
102129 +*  A ZSTD_CStream object is required to track streaming operation.
102130 +*  Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.
102131 +*  ZSTD_CStream objects can be reused multiple times on consecutive compression operations.
102132 +*  It is recommended to re-use ZSTD_CStream, since it plays nicer with the system's memory by re-using already allocated memory.
102134 +*  For parallel execution, use one separate ZSTD_CStream per thread.
102136 +*  note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing.
102138 +*  Parameters are sticky : when starting a new compression on the same context,
102139 +*  it will re-use the same sticky parameters as previous compression session.
102140 +*  When in doubt, it's recommended to fully initialize the context before usage.
102141 +*  Use ZSTD_CCtx_reset() to reset the context and ZSTD_CCtx_setParameter(),
102142 +*  ZSTD_CCtx_setPledgedSrcSize(), or ZSTD_CCtx_loadDictionary() and friends to
102143 +*  set more specific parameters, the pledged source size, or load a dictionary.
102145 +*  Use ZSTD_compressStream2() with ZSTD_e_continue as many times as necessary to
102146 +*  consume input stream. The function will automatically update both `pos`
102147 +*  fields within `input` and `output`.
102148 +*  Note that the function may not consume the entire input, for example, because
102149 +*  the output buffer is already full, in which case `input.pos < input.size`.
102150 +*  The caller must check if input has been entirely consumed.
102151 +*  If not, the caller must make some room to receive more compressed data,
102152 +*  and then present the remaining input data again.
102153 +*  note: ZSTD_e_continue is guaranteed to make some forward progress when called,
102154 +*        but doesn't guarantee maximal forward progress. This is especially relevant
102155 +*        when compressing with multiple threads. The call won't block if it can
102156 +*        consume some input, but if it can't it will wait for some, but not all,
102157 +*        output to be flushed.
102158 +* @return : provides a minimum amount of data remaining to be flushed from internal buffers
102159 +*           or an error code, which can be tested using ZSTD_isError().
102161 +*  At any moment, it's possible to flush whatever data might remain stuck within internal buffer,
102162 +*  using ZSTD_compressStream2() with ZSTD_e_flush. `output->pos` will be updated.
102163 +*  Note that, if `output->size` is too small, a single invocation with ZSTD_e_flush might not be enough (return code > 0).
102164 +*  In which case, make some room to receive more compressed data, and call again ZSTD_compressStream2() with ZSTD_e_flush.
102165 +*  You must continue calling ZSTD_compressStream2() with ZSTD_e_flush until it returns 0, at which point you can change the
102166 +*  operation.
102167 +*  note: ZSTD_e_flush will flush as much output as possible, meaning when compressing with multiple threads, it will
102168 +*        block until the flush is complete or the output buffer is full.
102169 +*  @return : 0 if internal buffers are entirely flushed,
102170 +*            >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
102171 +*            or an error code, which can be tested using ZSTD_isError().
102173 +*  Calling ZSTD_compressStream2() with ZSTD_e_end instructs to finish a frame.
102174 +*  It will perform a flush and write frame epilogue.
102175 +*  The epilogue is required for decoders to consider a frame completed.
102176 +*  The flush operation is the same, and follows the same rules as calling ZSTD_compressStream2() with ZSTD_e_flush.
102177 +*  You must continue calling ZSTD_compressStream2() with ZSTD_e_end until it returns 0, at which point you are free to
102178 +*  start a new frame.
102179 +*  note: ZSTD_e_end will flush as much output as possible, meaning when compressing with multiple threads, it will
102180 +*        block until the flush is complete or the output buffer is full.
102181 +*  @return : 0 if frame fully completed and fully flushed,
102182 +*            >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
102183 +*            or an error code, which can be tested using ZSTD_isError().
102185 +* *******************************************************************/
102187 +typedef ZSTD_CCtx ZSTD_CStream;  /**< CCtx and CStream are now effectively same object (>= v1.3.0) */
102188 +                                 /* Continue to distinguish them for compatibility with older versions <= v1.2.0 */
102189 +/*===== ZSTD_CStream management functions =====*/
102190 +ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);
102191 +ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs);  /* accept NULL pointer */
102193 +/*===== Streaming compression functions =====*/
102194 +typedef enum {
102195 +    ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal compression ratio */
102196 +    ZSTD_e_flush=1,    /* flush any data provided so far,
102197 +                        * it creates (at least) one new block, that can be decoded immediately on reception;
102198 +                        * frame will continue: any future data can still reference previously compressed data, improving compression.
102199 +                        * note : multithreaded compression will block to flush as much output as possible. */
102200 +    ZSTD_e_end=2       /* flush any remaining data _and_ close current frame.
102201 +                        * note that frame is only closed after compressed data is fully flushed (return value == 0).
102202 +                        * After that point, any additional data starts a new frame.
102203 +                        * note : each frame is independent (does not reference any content from previous frame).
102204 +                        * note : multithreaded compression will block to flush as much output as possible. */
102205 +} ZSTD_EndDirective;
102207 +/*! ZSTD_compressStream2() :
102208 + *  Behaves about the same as ZSTD_compressStream, with additional control on end directive.
102209 + *  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
102210 + *  - Compression parameters cannot be changed once compression is started (save for a list of exceptions in multi-threading mode)
102211 + *  - output->pos must be <= dstCapacity, input->pos must be <= srcSize
102212 + *  - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
102213 + *  - endOp must be a valid directive
102214 + *  - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.
102215 + *  - When nbWorkers>=1, function is non-blocking : it copies a portion of input, distributes jobs to internal worker threads, flushes to output whatever is available,
102216 + *                                                  and then immediately returns, just indicating that there is some data remaining to be flushed.
102217 + *                                                  The function nonetheless guarantees forward progress : it will return only after it reads or writes at least one byte.
102218 + *  - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.
102219 + *  - @return provides a minimum amount of data remaining to be flushed from internal buffers
102220 + *            or an error code, which can be tested using ZSTD_isError().
102221 + *            if @return != 0, flush is not fully completed; there is still some data left within internal buffers.
102222 + *            This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.
102223 + *            For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.
102224 + *  - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),
102225 + *            only ZSTD_e_end or ZSTD_e_flush operations are allowed.
102226 + *            Before starting a new compression job, or changing compression parameters,
102227 + *            it is required to fully flush internal buffers.
102228 + */
102229 +ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
102230 +                                         ZSTD_outBuffer* output,
102231 +                                         ZSTD_inBuffer* input,
102232 +                                         ZSTD_EndDirective endOp);
102235 +/* These buffer sizes are softly recommended.
102236 + * They are not required : ZSTD_compressStream*() happily accepts any buffer size, for both input and output.
102237 + * Respecting the recommended size just makes it a bit easier for ZSTD_compressStream*(),
102238 + * reducing the amount of memory shuffling and buffering, resulting in minor performance savings.
102240 + * However, note that these recommendations are from the perspective of a C caller program.
102241 + * If the streaming interface is invoked from some other language,
102242 + * especially managed ones such as Java or Go, through a foreign function interface such as jni or cgo,
102243 + * a major performance rule is to reduce crossing such interface to an absolute minimum.
102244 + * It's not rare that more time ends up being spent in the interface than in compression itself.
102245 + * In such cases, prefer using large buffers, as large as practical,
102246 + * for both input and output, to reduce the number of roundtrips.
102247 + */
102248 +ZSTDLIB_API size_t ZSTD_CStreamInSize(void);    /**< recommended size for input buffer */
102249 +ZSTDLIB_API size_t ZSTD_CStreamOutSize(void);   /**< recommended size for output buffer. Guarantee to successfully flush at least one complete compressed block. */
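+/* Editor's note : a minimal streaming-compression loop (illustrative, not
+ * part of the original header), combining the HowTo above with the
+ * recommended buffer size; `inBuf`/`inLen` are assumed inputs and the
+ * write-out step is left as a comment.
+ *
+ *     size_t const outCap = ZSTD_CStreamOutSize();
+ *     void* const outBuf = malloc(outCap);
+ *     ZSTD_inBuffer input = { inBuf, inLen, 0 };
+ *     size_t remaining;
+ *     do {
+ *         ZSTD_outBuffer output = { outBuf, outCap, 0 };
+ *         remaining = ZSTD_compressStream2(cctx, &output, &input, ZSTD_e_end);
+ *         if (ZSTD_isError(remaining)) return remaining;
+ *         // write output.pos bytes from outBuf to the destination here
+ *     } while (remaining != 0);   // 0 == frame fully flushed and closed
+ */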
102252 +/* *****************************************************************************
102253 + * This following is a legacy streaming API.
102254 + * It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2().
102255 + * It is redundant, but remains fully supported.
102256 + * Advanced parameters and dictionary compression can only be used through the
102257 + * new API.
102258 + ******************************************************************************/
102260 +/*!
102261 + * Equivalent to:
102263 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
102264 + *     ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
102265 + *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
102266 + */
102267 +ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
102268 +/*!
102269 + * Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue).
102270 + * NOTE: The return value is different. ZSTD_compressStream() returns a hint for
102271 + * the next read size (if non-zero and not an error). ZSTD_compressStream2()
102272 + * returns the minimum nb of bytes left to flush (if non-zero and not an error).
102273 + */
102274 +ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
102275 +/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush). */
102276 +ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
102277 +/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end). */
102278 +ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
102281 +/*-***************************************************************************
102282 +*  Streaming decompression - HowTo
102284 +*  A ZSTD_DStream object is required to track streaming operations.
102285 +*  Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
102286 +*  ZSTD_DStream objects can be re-used multiple times.
102288 +*  Use ZSTD_initDStream() to start a new decompression operation.
102289 +* @return : recommended first input size
102290 +*  Alternatively, use advanced API to set specific properties.
102292 +*  Use ZSTD_decompressStream() repetitively to consume your input.
102293 +*  The function will update both `pos` fields.
102294 +*  If `input.pos < input.size`, some input has not been consumed.
102295 +*  It's up to the caller to present again remaining data.
102296 +*  The function tries to flush all data decoded immediately, respecting output buffer size.
102297 +*  If `output.pos < output.size`, decoder has flushed everything it could.
102298 +*  But if `output.pos == output.size`, there might be some data left within internal buffers.
102299 +*  In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer.
102300 +*  Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX.
102301 +* @return : 0 when a frame is completely decoded and fully flushed,
102302 +*        or an error code, which can be tested using ZSTD_isError(),
102303 +*        or any other value > 0, which means there is still some decoding or flushing to do to complete current frame :
102304 +*                                the return value is a suggested next input size (just a hint for better latency)
102305 +*                                that will never request more than the remaining frame size.
102306 +* *******************************************************************************/
102308 +typedef ZSTD_DCtx ZSTD_DStream;  /**< DCtx and DStream are now effectively same object (>= v1.3.0) */
102309 +                                 /* For compatibility with versions <= v1.2.0, prefer differentiating them. */
102310 +/*===== ZSTD_DStream management functions =====*/
102311 +ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);
102312 +ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds);  /* accept NULL pointer */
102314 +/*===== Streaming decompression functions =====*/
102316 +/* This function is redundant with the advanced API and equivalent to:
102318 + *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
102319 + *     ZSTD_DCtx_refDDict(zds, NULL);
102320 + */
102321 +ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);
102323 +ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
102325 +ZSTDLIB_API size_t ZSTD_DStreamInSize(void);    /*!< recommended size for input buffer */
102326 +ZSTDLIB_API size_t ZSTD_DStreamOutSize(void);   /*!< recommended size for output buffer. Guarantee to successfully flush at least one complete block in all circumstances. */
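+/* Editor's note : the matching streaming-decompression loop (illustrative,
+ * not part of the original header); `cBuf`/`cLen` are assumed inputs.
+ *
+ *     size_t const outCap = ZSTD_DStreamOutSize();
+ *     void* const outBuf = malloc(outCap);
+ *     ZSTD_inBuffer input = { cBuf, cLen, 0 };
+ *     while (input.pos < input.size) {
+ *         ZSTD_outBuffer output = { outBuf, outCap, 0 };
+ *         size_t const ret = ZSTD_decompressStream(dctx, &output, &input);
+ *         if (ZSTD_isError(ret)) return ret;
+ *         // consume output.pos bytes from outBuf here; ret == 0 marks end of frame
+ *     }
+ */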
102329 +/**************************
102330 +*  Simple dictionary API
102331 +***************************/
102332 +/*! ZSTD_compress_usingDict() :
102333 + *  Compression at an explicit compression level using a Dictionary.
102334 + *  A dictionary can be any arbitrary data segment (also called a prefix),
102335 + *  or a buffer with specified information (see dictBuilder/zdict.h).
102336 + *  Note : This function loads the dictionary, resulting in significant startup delay.
102337 + *         It's intended for a dictionary used only once.
102338 + *  Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */
102339 +ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
102340 +                                           void* dst, size_t dstCapacity,
102341 +                                     const void* src, size_t srcSize,
102342 +                                     const void* dict,size_t dictSize,
102343 +                                           int compressionLevel);
102345 +/*! ZSTD_decompress_usingDict() :
102346 + *  Decompression using a known Dictionary.
102347 + *  Dictionary must be identical to the one used during compression.
102348 + *  Note : This function loads the dictionary, resulting in significant startup delay.
102349 + *         It's intended for a dictionary used only once.
102350 + *  Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
102351 +ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
102352 +                                             void* dst, size_t dstCapacity,
102353 +                                       const void* src, size_t srcSize,
102354 +                                       const void* dict,size_t dictSize);
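+/* Editor's note : illustrative round-trip (not part of the original header);
+ * the identical `dict` buffer must be supplied on both sides. All buffers and
+ * sizes are assumed to be set up by the caller.
+ *
+ *     size_t const cSize = ZSTD_compress_usingDict(cctx, dst, dstCap,
+ *                                                  src, srcSize,
+ *                                                  dict, dictSize, 3);
+ *     size_t const dSize = ZSTD_decompress_usingDict(dctx, out, outCap,
+ *                                                    dst, cSize,
+ *                                                    dict, dictSize);
+ */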
102357 +/***********************************
102358 + *  Bulk processing dictionary API
102359 + **********************************/
102360 +typedef struct ZSTD_CDict_s ZSTD_CDict;
102362 +/*! ZSTD_createCDict() :
102363 + *  When compressing multiple messages or blocks using the same dictionary,
102364 + *  it's recommended to digest the dictionary only once, since it's a costly operation.
102365 + *  ZSTD_createCDict() will create a state from digesting a dictionary.
102366 + *  The resulting state can be used for future compression operations with very limited startup cost.
102367 + *  ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
102368 + * @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict.
102369 + *  Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content.
102370 + *  Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer,
102371 + *      in which case the only thing that it transports is the @compressionLevel.
102372 + *      This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively,
102373 + *      expecting a ZSTD_CDict parameter with any data, including those without a known dictionary. */
102374 +ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
102375 +                                         int compressionLevel);
102377 +/*! ZSTD_freeCDict() :
102378 + *  Function frees memory allocated by ZSTD_createCDict().
102379 + *  If a NULL pointer is passed, no operation is performed. */
102380 +ZSTDLIB_API size_t      ZSTD_freeCDict(ZSTD_CDict* CDict);
102382 +/*! ZSTD_compress_usingCDict() :
102383 + *  Compression using a digested Dictionary.
102384 + *  Recommended when same dictionary is used multiple times.
102385 + *  Note : compression level is _decided at dictionary creation time_,
102386 + *     and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */
102387 +ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
102388 +                                            void* dst, size_t dstCapacity,
102389 +                                      const void* src, size_t srcSize,
102390 +                                      const ZSTD_CDict* cdict);
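+/* Editor's note : illustrative reuse pattern (not part of the original
+ * header) : digest once, compress many frames; the CDict may also be shared
+ * read-only across threads, as noted above.
+ *
+ *     ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictSize, 3);
+ *     for (i = 0; i < nbFrames; i++)
+ *         ZSTD_compress_usingCDict(cctx, dst[i], dstCap[i],
+ *                                  src[i], srcSize[i], cdict);
+ *     ZSTD_freeCDict(cdict);
+ */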
102393 +typedef struct ZSTD_DDict_s ZSTD_DDict;
102395 +/*! ZSTD_createDDict() :
102396 + *  Create a digested dictionary, ready to start decompression operation without startup delay.
102397 + *  dictBuffer can be released after DDict creation, as its content is copied inside DDict. */
102398 +ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
102400 +/*! ZSTD_freeDDict() :
102401 + *  Function frees memory allocated with ZSTD_createDDict()
102402 + *  If a NULL pointer is passed, no operation is performed. */
102403 +ZSTDLIB_API size_t      ZSTD_freeDDict(ZSTD_DDict* ddict);
102405 +/*! ZSTD_decompress_usingDDict() :
102406 + *  Decompression using a digested Dictionary.
102407 + *  Recommended when same dictionary is used multiple times. */
102408 +ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
102409 +                                              void* dst, size_t dstCapacity,
102410 +                                        const void* src, size_t srcSize,
102411 +                                        const ZSTD_DDict* ddict);
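+/* Editor's note : the decompression-side mirror of the CDict pattern above
+ * (illustrative, not part of the original header).
+ *
+ *     ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictSize);
+ *     size_t const dSize = ZSTD_decompress_usingDDict(dctx, out, outCap,
+ *                                                     in, inSize, ddict);
+ *     ZSTD_freeDDict(ddict);
+ */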
102414 +/********************************
102415 + *  Dictionary helper functions
102416 + *******************************/
102418 +/*! ZSTD_getDictID_fromDict() :
102419 + *  Provides the dictID stored within dictionary.
102420 + *  if @return == 0, the dictionary is not conformant with Zstandard specification.
102421 + *  It can still be loaded, but as a content-only dictionary. */
102422 +ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
102424 +/*! ZSTD_getDictID_fromDDict() :
102425 + *  Provides the dictID of the dictionary loaded into `ddict`.
102426 + *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
102427 + *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
102428 +ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
102430 +/*! ZSTD_getDictID_fromFrame() :
102431 + *  Provides the dictID required to decompress the frame stored within `src`.
102432 + *  If @return == 0, the dictID could not be decoded.
102433 + *  This could be for one of the following reasons :
102434 + *  - The frame does not require a dictionary to be decoded (most common case).
102435 + *  - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is hidden information.
102436 + *    Note : this use case also happens when using a non-conformant dictionary.
102437 + *  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
102438 + *  - This is not a Zstandard frame.
102439 + *  When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code. */
102440 +ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
102443 +/*******************************************************************************
102444 + * Advanced dictionary and prefix API
102446 + * This API allows dictionaries to be used with ZSTD_compress2(),
102447 + * ZSTD_compressStream2(), and ZSTD_decompress(). Dictionaries are sticky, and
102448 + * only reset with the context is reset with ZSTD_reset_parameters or
102449 + * only reset when the context is reset with ZSTD_reset_parameters or
102450 + ******************************************************************************/
102453 +/*! ZSTD_CCtx_loadDictionary() :
102454 + *  Create an internal CDict from `dict` buffer.
102455 + *  Decompression will have to use same dictionary.
102456 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
102457 + *  Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,
102458 + *           meaning "return to no-dictionary mode".
102459 + *  Note 1 : Dictionary is sticky, it will be used for all future compressed frames.
102460 + *           To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters).
102461 + *  Note 2 : Loading a dictionary involves building tables.
102462 + *           It's also a CPU consuming operation, with non-negligible impact on latency.
102463 + *           Tables are dependent on compression parameters, and for this reason,
102464 + *           compression parameters can no longer be changed after loading a dictionary.
102465 + *  Note 3 :`dict` content will be copied internally.
102466 + *           Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.
102467 + *           In such a case, dictionary buffer must outlive its users.
102468 + *  Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()
102469 + *           to precisely select how dictionary content must be interpreted. */
102470 +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
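+/* Editor's note : illustrative sketch (not part of the original header) of
+ * the sticky-dictionary behaviour described in Note 1.
+ *
+ *     ZSTD_CCtx_loadDictionary(cctx, dictBuf, dictSize);  // applies to all following frames
+ *     ZSTD_compress2(cctx, dst1, cap1, src1, len1);       // uses the dictionary
+ *     ZSTD_compress2(cctx, dst2, cap2, src2, len2);       // still uses it
+ *     ZSTD_CCtx_loadDictionary(cctx, NULL, 0);            // return to no-dictionary mode
+ */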
102472 +/*! ZSTD_CCtx_refCDict() :
102473 + *  Reference a prepared dictionary, to be used for all next compressed frames.
102474 + *  Note that compression parameters are enforced from within CDict,
102475 + *  and supersede any compression parameter previously set within CCtx.
102476 + *  The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs.
102477 + *  The ignored parameters will be used again if the CCtx is returned to no-dictionary mode.
102478 + *  The dictionary will remain valid for future compressed frames using same CCtx.
102479 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
102480 + *  Special : Referencing a NULL CDict means "return to no-dictionary mode".
102481 + *  Note 1 : Currently, only one dictionary can be managed.
102482 + *           Referencing a new dictionary effectively "discards" any previous one.
102483 + *  Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx. */
102484 +ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
102486 +/*! ZSTD_CCtx_refPrefix() :
102487 + *  Reference a prefix (single-usage dictionary) for next compressed frame.
102488 + *  A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end).
102489 + *  Decompression will need same prefix to properly regenerate data.
102490 + *  Compressing with a prefix is similar in outcome to performing a diff and compressing it,
102491 + *  but performs much faster, especially during decompression (compression speed is tunable with compression level).
102492 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
102493 + *  Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary
102494 + *  Note 1 : Prefix buffer is referenced. It **must** outlive compression.
102495 + *           Its content must remain unmodified during compression.
102496 + *  Note 2 : If the intention is to diff some large src data blob with some prior version of itself,
102497 + *           ensure that the window size is large enough to contain the entire source.
102498 + *           See ZSTD_c_windowLog.
102499 + *  Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.
102500 + *           It's a CPU consuming operation, with non-negligible impact on latency.
102501 + *           If there is a need to use the same prefix multiple times, consider loadDictionary instead.
102502 + *  Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent).
102503 + *           Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. */
102504 +ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
102505 +                                 const void* prefix, size_t prefixSize);
102507 +/*! ZSTD_DCtx_loadDictionary() :
102508 + *  Create an internal DDict from dict buffer,
102509 + *  to be used to decompress next frames.
102510 + *  The dictionary remains valid for all future frames, until explicitly invalidated.
102511 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
102512 + *  Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
102513 + *            meaning "return to no-dictionary mode".
102514 + *  Note 1 : Loading a dictionary involves building tables,
102515 + *           which has a non-negligible impact on CPU usage and latency.
102516 + *           It's recommended to "load once, use many times", to amortize the cost.
102517 + *  Note 2 :`dict` content will be copied internally, so `dict` can be released after loading.
102518 + *           Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead.
102519 + *  Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of
102520 + *           how dictionary content is loaded and interpreted.
102521 + */
102522 +ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
102524 +/*! ZSTD_DCtx_refDDict() :
102525 + *  Reference a prepared dictionary, to be used to decompress next frames.
102526 + *  The dictionary remains active for decompression of future frames using same DCtx.
102528 + *  If called with ZSTD_d_refMultipleDDicts enabled, repeated calls of this function
102529 + *  will store the DDict references in a table, and the DDict used for decompression
102530 + *  will be determined at decompression time, as per the dict ID in the frame.
102531 + *  The memory for the table is allocated on the first call to refDDict, and can be
102532 + *  freed with ZSTD_freeDCtx().
102534 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
102535 + *  Note 1 : Currently, only one dictionary can be managed.
102536 + *           Referencing a new dictionary effectively "discards" any previous one.
102537 + *  Special: referencing a NULL DDict means "return to no-dictionary mode".
102538 + *  Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx.
102539 + */
102540 +ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
102542 +/*! ZSTD_DCtx_refPrefix() :
102543 + *  Reference a prefix (single-usage dictionary) to decompress next frame.
102544 + *  This is the reverse operation of ZSTD_CCtx_refPrefix(),
102545 + *  and must use the same prefix as the one used during compression.
102546 + *  Prefix is **only used once**. Reference is discarded at end of frame.
102547 + *  End of frame is reached when ZSTD_decompressStream() returns 0.
102548 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
102549 + *  Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary
102550 + *  Note 2 : Prefix buffer is referenced. It **must** outlive decompression.
102551 + *           Prefix buffer must remain unmodified up to the end of frame,
102552 + *           reached when ZSTD_decompressStream() returns 0.
102553 + *  Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).
102554 + *           Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section)
102555 + *  Note 4 : Referencing a raw content prefix has almost no CPU or memory cost.
102556 + *           A full dictionary is more costly, as it requires building tables.
102557 + */
102558 +ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx,
102559 +                                 const void* prefix, size_t prefixSize);
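+/* Editor's note : illustrative delta-compression round-trip with prefixes
+ * (not part of the original header), matching the diff analogy in
+ * ZSTD_CCtx_refPrefix(); `oldVersion`/`newVersion` are assumed buffers.
+ *
+ *     ZSTD_CCtx_refPrefix(cctx, oldVersion, oldSize);     // single-use on this frame
+ *     size_t const cSize = ZSTD_compress2(cctx, dst, dstCap, newVersion, newSize);
+ *
+ *     ZSTD_DCtx_refPrefix(dctx, oldVersion, oldSize);     // same prefix to decode
+ *     size_t const dSize = ZSTD_decompressDCtx(dctx, out, outCap, dst, cSize);
+ */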
102561 +/* ===   Memory management   === */
102563 +/*! ZSTD_sizeof_*() :
102564 + *  These functions give the _current_ memory usage of selected object.
102565 + *  Note that object memory usage can evolve (increase or decrease) over time. */
102566 +ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
102567 +ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
102568 +ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
102569 +ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
102570 +ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
102571 +ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
102573 +#endif  /* ZSTD_H_235446 */
102576 +/* **************************************************************************************
102577 + *   ADVANCED AND EXPERIMENTAL FUNCTIONS
102578 + ****************************************************************************************
102579 + * The definitions in the following section are considered experimental.
102580 + * They are provided for advanced scenarios.
102581 + * They should never be used with a dynamic library, as prototypes may change in the future.
102582 + * Use them only in association with static linking.
102583 + * ***************************************************************************************/
102585 +#if !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
102586 +#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
102588 +/****************************************************************************************
102589 + *   experimental API (static linking only)
102590 + ****************************************************************************************
102591 + * The following symbols and constants
102592 + * are not planned to join "stable API" status in the near future.
102593 + * They can still change in future versions.
102594 + * Some of them are planned to remain in the static_only section indefinitely.
102595 + * Some of them might be removed in the future (especially when redundant with existing stable functions)
102596 + * ***************************************************************************************/
102598 +#define ZSTD_FRAMEHEADERSIZE_PREFIX(format) ((format) == ZSTD_f_zstd1 ? 5 : 1)   /* minimum input size required to query frame header size */
102599 +#define ZSTD_FRAMEHEADERSIZE_MIN(format)    ((format) == ZSTD_f_zstd1 ? 6 : 2)
102600 +#define ZSTD_FRAMEHEADERSIZE_MAX   18   /* can be useful for static allocation */
102601 +#define ZSTD_SKIPPABLEHEADERSIZE    8
102603 +/* compression parameter bounds */
102604 +#define ZSTD_WINDOWLOG_MAX_32    30
102605 +#define ZSTD_WINDOWLOG_MAX_64    31
102606 +#define ZSTD_WINDOWLOG_MAX     ((int)(sizeof(size_t) == 4 ? ZSTD_WINDOWLOG_MAX_32 : ZSTD_WINDOWLOG_MAX_64))
102607 +#define ZSTD_WINDOWLOG_MIN       10
102608 +#define ZSTD_HASHLOG_MAX       ((ZSTD_WINDOWLOG_MAX < 30) ? ZSTD_WINDOWLOG_MAX : 30)
102609 +#define ZSTD_HASHLOG_MIN          6
102610 +#define ZSTD_CHAINLOG_MAX_32     29
102611 +#define ZSTD_CHAINLOG_MAX_64     30
102612 +#define ZSTD_CHAINLOG_MAX      ((int)(sizeof(size_t) == 4 ? ZSTD_CHAINLOG_MAX_32 : ZSTD_CHAINLOG_MAX_64))
102613 +#define ZSTD_CHAINLOG_MIN        ZSTD_HASHLOG_MIN
102614 +#define ZSTD_SEARCHLOG_MAX      (ZSTD_WINDOWLOG_MAX-1)
102615 +#define ZSTD_SEARCHLOG_MIN        1
102616 +#define ZSTD_MINMATCH_MAX         7   /* only for ZSTD_fast, other strategies are limited to 6 */
102617 +#define ZSTD_MINMATCH_MIN         3   /* only for ZSTD_btopt+, faster strategies are limited to 4 */
102618 +#define ZSTD_TARGETLENGTH_MAX    ZSTD_BLOCKSIZE_MAX
102619 +#define ZSTD_TARGETLENGTH_MIN     0   /* note : comparing this constant to an unsigned results in a tautological test */
102620 +#define ZSTD_STRATEGY_MIN        ZSTD_fast
102621 +#define ZSTD_STRATEGY_MAX        ZSTD_btultra2
102624 +#define ZSTD_OVERLAPLOG_MIN       0
102625 +#define ZSTD_OVERLAPLOG_MAX       9
102627 +#define ZSTD_WINDOWLOG_LIMIT_DEFAULT 27   /* by default, the streaming decoder will refuse any frame
102628 +                                           * requiring larger than (1<<ZSTD_WINDOWLOG_LIMIT_DEFAULT) window size,
102629 +                                           * to preserve host's memory from unreasonable requirements.
102630 +                                           * This limit can be overridden using ZSTD_DCtx_setParameter(,ZSTD_d_windowLogMax,).
102631 +                                           * The limit does not apply for one-pass decoders (such as ZSTD_decompress()), since no additional memory is allocated */
102634 +/* LDM parameter bounds */
102635 +#define ZSTD_LDM_HASHLOG_MIN      ZSTD_HASHLOG_MIN
102636 +#define ZSTD_LDM_HASHLOG_MAX      ZSTD_HASHLOG_MAX
102637 +#define ZSTD_LDM_MINMATCH_MIN        4
102638 +#define ZSTD_LDM_MINMATCH_MAX     4096
102639 +#define ZSTD_LDM_BUCKETSIZELOG_MIN   1
102640 +#define ZSTD_LDM_BUCKETSIZELOG_MAX   8
102641 +#define ZSTD_LDM_HASHRATELOG_MIN     0
102642 +#define ZSTD_LDM_HASHRATELOG_MAX (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
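+/* Editor's note : illustrative LDM setup (not part of the original header);
+ * assumes ZSTD_c_enableLongDistanceMatching from the stable parameter enum,
+ * which is not shown in this excerpt.
+ *
+ *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableLongDistanceMatching, 1);
+ *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 27);   // 128 MB window
+ *     // the ldm* parameters may stay at 0, i.e. "use default value"
+ */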
102644 +/* Advanced parameter bounds */
102645 +#define ZSTD_TARGETCBLOCKSIZE_MIN   64
102646 +#define ZSTD_TARGETCBLOCKSIZE_MAX   ZSTD_BLOCKSIZE_MAX
102647 +#define ZSTD_SRCSIZEHINT_MIN        0
102648 +#define ZSTD_SRCSIZEHINT_MAX        INT_MAX
102650 +/* internal */
102651 +#define ZSTD_HASHLOG3_MAX           17
102654 +/* ---  Advanced types  --- */
102656 +typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params;
102658 +typedef struct {
102659 +    unsigned int offset;      /* The offset of the match. (NOT the same as the offset code)
102660 +                               * If offset == 0 and matchLength == 0, this sequence represents the last
102661 +                               * literals in the block of litLength size.
102662 +                               */
102664 +    unsigned int litLength;   /* Literal length of the sequence. */
102665 +    unsigned int matchLength; /* Match length of the sequence. */
102667 +                              /* Note: Users of this API may provide a sequence with matchLength == litLength == offset == 0.
102668 +                               * In this case, we will treat the sequence as a marker for a block boundary.
102669 +                               */
102671 +    unsigned int rep;         /* Represents which repeat offset is represented by the field 'offset'.
102672 +                               * Ranges from [0, 3].
102673 +                               *
102674 +                               * Repeat offsets are essentially previous offsets from previous sequences sorted in
102675 +                               * recency order. For more detail, see doc/zstd_compression_format.md
102676 +                               *
102677 +                               * If rep == 0, then 'offset' does not contain a repeat offset.
102678 +                               * If rep > 0:
102679 +                               *  If litLength != 0:
102680 +                               *      rep == 1 --> offset == repeat_offset_1
102681 +                               *      rep == 2 --> offset == repeat_offset_2
102682 +                               *      rep == 3 --> offset == repeat_offset_3
102683 +                               *  If litLength == 0:
102684 +                               *      rep == 1 --> offset == repeat_offset_2
102685 +                               *      rep == 2 --> offset == repeat_offset_3
102686 +                               *      rep == 3 --> offset == repeat_offset_1 - 1
102687 +                               *
102688 +                               * Note: This field is optional. ZSTD_generateSequences() will calculate the value of
102689 +                               * 'rep', but repeat offsets do not necessarily need to be calculated from an external
102690 +                               * sequence provider's perspective. For example, ZSTD_compressSequences() does not
102691 +                               * use this 'rep' field at all (as of now).
102692 +                               */
102693 +} ZSTD_Sequence;
102695 +typedef struct {
102696 +    unsigned windowLog;       /**< largest match distance : larger == more compression, more memory needed during decompression */
102697 +    unsigned chainLog;        /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */
102698 +    unsigned hashLog;         /**< dispatch table : larger == faster, more memory */
102699 +    unsigned searchLog;       /**< nb of searches : larger == more compression, slower */
102700 +    unsigned minMatch;        /**< match length searched : larger == faster decompression, sometimes less compression */
102701 +    unsigned targetLength;    /**< acceptable match size for optimal parser (only) : larger == more compression, slower */
102702 +    ZSTD_strategy strategy;   /**< see ZSTD_strategy definition above */
102703 +} ZSTD_compressionParameters;
102705 +typedef struct {
102706 +    int contentSizeFlag; /**< 1: content size will be in frame header (when known) */
102707 +    int checksumFlag;    /**< 1: generate a 32-bit checksum using the XXH64 algorithm at end of frame, for error detection */
102708 +    int noDictIDFlag;    /**< 1: no dictID will be saved into frame header (dictID is only useful for dictionary compression) */
102709 +} ZSTD_frameParameters;
102711 +typedef struct {
102712 +    ZSTD_compressionParameters cParams;
102713 +    ZSTD_frameParameters fParams;
102714 +} ZSTD_parameters;
102716 +typedef enum {
102717 +    ZSTD_dct_auto = 0,       /* dictionary is "full" when starting with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */
102718 +    ZSTD_dct_rawContent = 1, /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */
102719 +    ZSTD_dct_fullDict = 2    /* refuses to load a dictionary if it does not respect Zstandard's specification, starting with ZSTD_MAGIC_DICTIONARY */
102720 +} ZSTD_dictContentType_e;
102722 +typedef enum {
102723 +    ZSTD_dlm_byCopy = 0,  /**< Copy dictionary content internally */
102724 +    ZSTD_dlm_byRef = 1    /**< Reference dictionary content -- the dictionary buffer must outlive its users. */
102725 +} ZSTD_dictLoadMethod_e;
102727 +typedef enum {
102728 +    ZSTD_f_zstd1 = 0,           /* zstd frame format, specified in zstd_compression_format.md (default) */
102729 +    ZSTD_f_zstd1_magicless = 1  /* Variant of zstd frame format, without initial 4-bytes magic number.
102730 +                                 * Useful to save 4 bytes per generated frame.
102731 +                                 * Decoder cannot automatically recognise this format; it must be explicitly instructed to expect it. */
102732 +} ZSTD_format_e;
102734 +typedef enum {
102735 +    /* Note: this enum controls ZSTD_d_forceIgnoreChecksum */
102736 +    ZSTD_d_validateChecksum = 0,
102737 +    ZSTD_d_ignoreChecksum = 1
102738 +} ZSTD_forceIgnoreChecksum_e;
102740 +typedef enum {
102741 +    /* Note: this enum controls ZSTD_d_refMultipleDDicts */
102742 +    ZSTD_rmd_refSingleDDict = 0,
102743 +    ZSTD_rmd_refMultipleDDicts = 1
102744 +} ZSTD_refMultipleDDicts_e;
102746 +typedef enum {
102747 +    /* Note: this enum and the behavior it controls are effectively internal
102748 +     * implementation details of the compressor. They are expected to continue
102749 +     * to evolve and should be considered only in the context of extremely
102750 +     * advanced performance tuning.
102751 +     *
102752 +     * Zstd currently supports the use of a CDict in three ways:
102753 +     *
102754 +     * - The contents of the CDict can be copied into the working context. This
102755 +     *   means that the compression can search both the dictionary and input
102756 +     *   while operating on a single set of internal tables. This makes
102757 +     *   the compression faster per-byte of input. However, the initial copy of
102758 +     *   the CDict's tables incurs a fixed cost at the beginning of the
102759 +     *   compression. For small compressions (< 8 KB), that copy can dominate
102760 +     *   the cost of the compression.
102761 +     *
102762 +     * - The CDict's tables can be used in-place. In this model, compression is
102763 +     *   slower per input byte, because the compressor has to search two sets of
102764 +     *   tables. However, this model incurs no start-up cost (as long as the
102765 +     *   working context's tables can be reused). For small inputs, this can be
102766 +     *   faster than copying the CDict's tables.
102767 +     *
102768 +     * - The CDict's tables are not used at all, and instead we use the working
102769 +     *   context alone to reload the dictionary and use params based on the source
102770 +     *   size. See ZSTD_compress_insertDictionary() and ZSTD_compress_usingDict().
102771 +     *   This method is effective when the dictionary sizes are very small relative
102772 +     *   to the input size, and the input size is fairly large to begin with.
102773 +     *
102774 +     * Zstd has a simple internal heuristic that selects which strategy to use
102775 +     * at the beginning of a compression. However, if experimentation shows that
102776 +     * Zstd is making poor choices, it is possible to override that choice with
102777 +     * this enum.
102778 +     */
102779 +    ZSTD_dictDefaultAttach = 0, /* Use the default heuristic. */
102780 +    ZSTD_dictForceAttach   = 1, /* Never copy the dictionary. */
102781 +    ZSTD_dictForceCopy     = 2, /* Always copy the dictionary. */
102782 +    ZSTD_dictForceLoad     = 3  /* Always reload the dictionary */
102783 +} ZSTD_dictAttachPref_e;
102785 +typedef enum {
102786 +  ZSTD_lcm_auto = 0,          /**< Automatically determine the compression mode based on the compression level.
102787 +                               *   Negative compression levels will be uncompressed, and positive compression
102788 +                               *   levels will be compressed. */
102789 +  ZSTD_lcm_huffman = 1,       /**< Always attempt Huffman compression. Uncompressed literals will still be
102790 +                               *   emitted if Huffman compression is not profitable. */
102791 +  ZSTD_lcm_uncompressed = 2   /**< Always emit uncompressed literals. */
102792 +} ZSTD_literalCompressionMode_e;
102795 +/***************************************
102796 +*  Frame size functions
102797 +***************************************/
102799 +/*! ZSTD_findDecompressedSize() :
102800 + *  `src` should point to the start of a series of ZSTD encoded and/or skippable frames
102801 + *  `srcSize` must be the _exact_ size of this series
102802 + *       (i.e. there should be a frame boundary at `src + srcSize`)
102803 + *  @return : - decompressed size of all data in all successive frames
102804 + *            - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
102805 + *            - if an error occurred: ZSTD_CONTENTSIZE_ERROR
102807 + *   note 1 : decompressed size is an optional field that may not be present, especially in streaming mode.
102808 + *            When `return==ZSTD_CONTENTSIZE_UNKNOWN`, the data to decompress could be any size,
102809 + *            in which case it's necessary to use streaming mode to decompress it.
102810 + *   note 2 : decompressed size is always present when compression is done with ZSTD_compress()
102811 + *   note 3 : decompressed size can be very large (a 64-bit value),
102812 + *            potentially larger than what the local system can handle as a single memory segment,
102813 + *            in which case it's necessary to use streaming mode to decompress the data.
102814 + *   note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
102815 + *            Always ensure result fits within application's authorized limits.
102816 + *            Each application can set its own limits.
102817 + *   note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to
102818 + *            read each contained frame header.  This is fast as most of the data is skipped,
102819 + *            however it does mean that all frame data must be present and valid. */
102820 +ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
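As an illustration of the probe-then-allocate pattern this enables, here is a minimal userspace sketch; it assumes the experimental API is exposed by defining ZSTD_STATIC_LINKING_ONLY before including zstd.h, and the helper name is invented for the example.

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <stdlib.h>

/* Returns a malloc'd buffer holding the decompressed data, or NULL when the
 * size cannot be determined up front (streaming mode would then be needed). */
static void* decompress_whole(const void* src, size_t srcSize, size_t* dstSize)
{
    unsigned long long const bound = ZSTD_findDecompressedSize(src, srcSize);
    if (bound == ZSTD_CONTENTSIZE_ERROR) return NULL;    /* invalid frame(s) */
    if (bound == ZSTD_CONTENTSIZE_UNKNOWN) return NULL;  /* fall back to streaming */
    void* const dst = malloc((size_t)bound);
    if (dst == NULL) return NULL;
    size_t const written = ZSTD_decompress(dst, (size_t)bound, src, srcSize);
    if (ZSTD_isError(written)) { free(dst); return NULL; }
    *dstSize = written;
    return dst;
}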
102822 +/*! ZSTD_decompressBound() :
102823 + *  `src` should point to the start of a series of ZSTD encoded and/or skippable frames
102824 + *  `srcSize` must be the _exact_ size of this series
102825 + *       (i.e. there should be a frame boundary at `src + srcSize`)
102826 + *  @return : - upper-bound for the decompressed size of all data in all successive frames
102827 + *            - if an error occurred: ZSTD_CONTENTSIZE_ERROR
102829 + *  note 1  : an error can occur if `src` contains an invalid or incorrectly formatted frame.
102830 + *  note 2  : the upper-bound is exact when the decompressed size field is available in every ZSTD encoded frame of `src`.
102831 + *            in this case, `ZSTD_findDecompressedSize` and `ZSTD_decompressBound` return the same value.
102832 + *  note 3  : when the decompressed size field isn't available, the upper-bound for that frame is calculated by:
102833 + *              upper-bound = # blocks * min(128 KB, Window_Size)
102834 + */
102835 +ZSTDLIB_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize);
102837 +/*! ZSTD_frameHeaderSize() :
102838 + *  srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX.
102839 + * @return : size of the Frame Header,
102840 + *           or an error code (if srcSize is too small) */
102841 +ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
102843 +typedef enum {
102844 +  ZSTD_sf_noBlockDelimiters = 0,         /* Representation of ZSTD_Sequence has no block delimiters, sequences only */
102845 +  ZSTD_sf_explicitBlockDelimiters = 1    /* Representation of ZSTD_Sequence contains explicit block delimiters */
102846 +} ZSTD_sequenceFormat_e;
102848 +/*! ZSTD_generateSequences() :
102849 + * Generate sequences using ZSTD_compress2, given a source buffer.
102851 + * Each block will end with a dummy sequence
102852 + * with offset == 0, matchLength == 0, and litLength == length of last literals.
102853 + * litLength may be == 0, in which case the (of: 0, ml: 0, ll: 0) sequence
102854 + * simply acts as a block delimiter.
102856 + * zc can be used to insert custom compression params.
102857 + * This function invokes ZSTD_compress2().
102859 + * The output of this function can be fed into ZSTD_compressSequences() with CCtx
102860 + * setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters
102861 + * @return : number of sequences generated
102862 + */
102864 +ZSTDLIB_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
102865 +                                          size_t outSeqsSize, const void* src, size_t srcSize);
102867 +/*! ZSTD_mergeBlockDelimiters() :
102868 + * Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals
102869 + * by merging them into the literals of the next sequence.
102871 + * As such, the final generated result has no explicit representation of block boundaries,
102872 + * and the final last literals segment is not represented in the sequences.
102874 + * The output of this function can be fed into ZSTD_compressSequences() with CCtx
102875 + * setting of ZSTD_c_blockDelimiters as ZSTD_sf_noBlockDelimiters
102876 + * @return : number of sequences left after merging
102877 + */
102878 +ZSTDLIB_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize);
102880 +/*! ZSTD_compressSequences() :
102881 + * Compress an array of ZSTD_Sequence, generated from the original source buffer, into dst.
102882 + * If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.)
102883 + * The entire source is compressed into a single frame.
102885 + * The compression behavior changes based on cctx params. In particular:
102886 + *    If ZSTD_c_blockDelimiters == ZSTD_sf_noBlockDelimiters, the array of ZSTD_Sequence is expected to contain
102887 + *    no block delimiters (defined in ZSTD_Sequence). Block boundaries are roughly determined based on
102888 + *    the block size derived from the cctx, and sequences may be split. This is the default setting.
102890 + *    If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain
102891 + *    block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.
102893 + *    If ZSTD_c_validateSequences == 0, this function will blindly accept the sequences provided. Invalid sequences cause undefined
102894 + *    behavior. If ZSTD_c_validateSequences == 1, then, if a sequence is invalid (see doc/zstd_compression_format.md for
102895 + *    specifics regarding offset/matchlength requirements) then the function will bail out and return an error.
102897 + *    In addition to the two adjustable experimental params, there are other important cctx params.
102898 + *    - ZSTD_c_minMatch MUST be set to a value less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN.
102899 + *    - ZSTD_c_compressionLevel accordingly adjusts the strength of the entropy coder, as it would in typical compression.
102900 + *    - ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset
102901 + *      is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md
102903 + * Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused.
102904 + * Note 2: Once we integrate ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly,
102905 + *         and cannot emit an RLE block that disagrees with the repcode history
102906 + * @return : final compressed size or a ZSTD error.
102907 + */
102908 +ZSTDLIB_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstSize,
102909 +                                  const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
102910 +                                  const void* src, size_t srcSize);
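A hedged sketch of the full sequence round-trip described above (generate, merge delimiters, re-compress); the capacity bound of one ZSTD_Sequence per source byte is a deliberately generous assumption, and error handling is abbreviated.

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <stdlib.h>

/* Returns the compressed size, or a ZSTD error code. */
static size_t compress_via_sequences(void* dst, size_t dstCapacity,
                                     const void* src, size_t srcSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    ZSTD_Sequence* const seqs = malloc(srcSize * sizeof(ZSTD_Sequence));
    size_t nbSeqs = ZSTD_generateSequences(cctx, seqs, srcSize, src, srcSize);
    size_t cSize;
    if (!ZSTD_isError(nbSeqs)) {
        nbSeqs = ZSTD_mergeBlockDelimiters(seqs, nbSeqs);   /* strip delimiters */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_noBlockDelimiters);
        cSize = ZSTD_compressSequences(cctx, dst, dstCapacity, seqs, nbSeqs, src, srcSize);
    } else {
        cSize = nbSeqs;                                     /* propagate error code */
    }
    free(seqs);
    ZSTD_freeCCtx(cctx);
    return cSize;
}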
102913 +/*! ZSTD_writeSkippableFrame() :
102914 + * Generates a zstd skippable frame containing data given by src, and writes it to dst buffer.
102916 + * Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number,
102917 + * ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15.
102918 + * The parameter magicVariant selects which of these magic numbers is used:
102919 + * the frame will begin with ZSTD_MAGIC_SKIPPABLE_START + magicVariant.
102921 + * Returns an error if destination buffer is not large enough, if the source size is not representable
102922 + * with a 4-byte unsigned int, or if the parameter magicVariant is greater than 15 (and therefore invalid).
102924 + * @return : number of bytes written or a ZSTD error.
102925 + */
102926 +ZSTDLIB_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
102927 +                                            const void* src, size_t srcSize, unsigned magicVariant);
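For example, a small sketch that stores application metadata ahead of the real compressed stream; the magic variant 0 and the helper name are arbitrary choices.

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* Emits `meta` as a skippable frame at the start of dst; decoders that don't
 * understand it will simply skip it. Returns bytes written, or a ZSTD error
 * code (e.g. if dst is too small). */
static size_t prepend_metadata(void* dst, size_t dstCapacity,
                               const void* meta, size_t metaSize)
{
    /* magicVariant 0 selects the magic number ZSTD_MAGIC_SKIPPABLE_START + 0 */
    return ZSTD_writeSkippableFrame(dst, dstCapacity, meta, metaSize, 0);
}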
102930 +/***************************************
102931 +*  Memory management
102932 +***************************************/
102934 +/*! ZSTD_estimate*() :
102935 + *  These functions make it possible to estimate memory usage
102936 + *  of a future {D,C}Ctx, before its creation.
102938 + *  ZSTD_estimateCCtxSize() will provide a memory budget large enough
102939 + *  for any compression level up to the selected one.
102940 + *  Note : Unlike ZSTD_estimateCStreamSize*(), this estimate
102941 + *         does not include space for a window buffer.
102942 + *         Therefore, the estimation is only guaranteed for single-shot compressions, not streaming.
102943 + *  The estimate will assume the input may be arbitrarily large,
102944 + *  which is the worst case.
102946 + *  When srcSize can be bound by a known and rather "small" value,
102947 + *  this fact can be used to provide a tighter estimation
102948 + *  because the CCtx compression context will need less memory.
102949 + *  This tighter estimation can be provided by more advanced functions
102950 + *  ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(),
102951 + *  and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter().
102952 + *  Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits.
102954 + *  Note 2 : only single-threaded compression is supported.
102955 + *  ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
102956 + */
102957 +ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
102958 +ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
102959 +ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
102960 +ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);
102962 +/*! ZSTD_estimateCStreamSize() :
102963 + *  ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to the selected one.
102964 + *  It will also consider src size to be arbitrarily "large", which is the worst case.
102965 + *  If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
102966 + *  ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from a compressionLevel.
102967 + *  ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(); only single-threaded compression is supported, and this function will return an error code if ZSTD_c_nbWorkers is >= 1.
102968 + *  Note : CStream size estimation is only correct for single-threaded compression.
102969 + *  ZSTD_DStream memory budget depends on the window size.
102970 + *  This information can be passed manually, using ZSTD_estimateDStreamSize(),
102971 + *  or deduced from a valid frame header, using ZSTD_estimateDStreamSize_fromFrame();
102972 + *  Note : if streaming is initialized with ZSTD_init?Stream_usingDict(),
102973 + *         an internal ?Dict will be created, whose additional size is not estimated here.
102974 + *         In this case, get the total size by adding ZSTD_estimate?DictSize */
102975 +ZSTDLIB_API size_t ZSTD_estimateCStreamSize(int compressionLevel);
102976 +ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
102977 +ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
102978 +ZSTDLIB_API size_t ZSTD_estimateDStreamSize(size_t windowSize);
102979 +ZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
102981 +/*! ZSTD_estimate?DictSize() :
102982 + *  ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict().
102983 + *  ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().
102984 + *  Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller.
102985 + */
102986 +ZSTDLIB_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
102987 +ZSTDLIB_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod);
102988 +ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod);
102990 +/*! ZSTD_initStatic*() :
102991 + *  Initialize an object using a pre-allocated fixed-size buffer.
102992 + *  workspace: The memory area to emplace the object into.
102993 + *             The provided pointer *must be 8-byte aligned*.
102994 + *             Buffer must outlive object.
102995 + *  workspaceSize: Use ZSTD_estimate*Size() to determine
102996 + *                 how large workspace must be to support target scenario.
102997 + * @return : pointer to object (same address as workspace, just different type),
102998 + *           or NULL if error (size too small, incorrect alignment, etc.)
102999 + *  Note : zstd will never resize nor malloc() when using a static buffer.
103000 + *         If the object requires more memory than available,
103001 + *         zstd will just error out (typically ZSTD_error_memory_allocation).
103002 + *  Note 2 : there is no corresponding "free" function.
103003 + *           Since workspace is allocated externally, it must be freed externally too.
103004 + *  Note 3 : cParams : use ZSTD_getCParams() to convert a compression level
103005 + *           into its associated cParams.
103006 + *  Limitation 1 : currently not compatible with internal dictionary creation, triggered by
103007 + *                 ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict().
103008 + *  Limitation 2 : static cctx currently not compatible with multi-threading.
103009 + *  Limitation 3 : static dctx is incompatible with legacy support.
103010 + */
103011 +ZSTDLIB_API ZSTD_CCtx*    ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
103012 +ZSTDLIB_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize);    /**< same as ZSTD_initStaticCCtx() */
103014 +ZSTDLIB_API ZSTD_DCtx*    ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize);
103015 +ZSTDLIB_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize);    /**< same as ZSTD_initStaticDCtx() */
103017 +ZSTDLIB_API const ZSTD_CDict* ZSTD_initStaticCDict(
103018 +                                        void* workspace, size_t workspaceSize,
103019 +                                        const void* dict, size_t dictSize,
103020 +                                        ZSTD_dictLoadMethod_e dictLoadMethod,
103021 +                                        ZSTD_dictContentType_e dictContentType,
103022 +                                        ZSTD_compressionParameters cParams);
103024 +ZSTDLIB_API const ZSTD_DDict* ZSTD_initStaticDDict(
103025 +                                        void* workspace, size_t workspaceSize,
103026 +                                        const void* dict, size_t dictSize,
103027 +                                        ZSTD_dictLoadMethod_e dictLoadMethod,
103028 +                                        ZSTD_dictContentType_e dictContentType);
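A minimal sketch of the static-allocation workflow, pairing ZSTD_estimateCCtxSize() with ZSTD_initStaticCCtx(); returning 0 for a too-small or misaligned workspace is a convention invented for this example.

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* One-shot compression with zero internal allocation: the CCtx lives
 * entirely inside the caller's (8-byte aligned) workspace.
 * Returns the compressed size, or a ZSTD error code. */
static size_t compress_in_workspace(void* workspace, size_t workspaceSize,
                                    void* dst, size_t dstCapacity,
                                    const void* src, size_t srcSize, int level)
{
    ZSTD_CCtx* cctx;
    if (workspaceSize < ZSTD_estimateCCtxSize(level)) return 0;
    cctx = ZSTD_initStaticCCtx(workspace, workspaceSize);
    if (cctx == NULL) return 0;
    /* No ZSTD_freeCCtx(): the workspace belongs to the caller. */
    return ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, level);
}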
103031 +/*! Custom memory allocation :
103032 + *  These prototypes make it possible to pass your own allocation/free functions.
103033 + *  ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below.
103034 + *  All allocation/free operations will be completed using these custom variants instead of regular <stdlib.h> ones.
103035 + */
103036 +typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
103037 +typedef void  (*ZSTD_freeFunction) (void* opaque, void* address);
103038 +typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
103039 +static
103040 +__attribute__((__unused__))
103041 +ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL };  /**< this constant defers to stdlib's functions */
103043 +ZSTDLIB_API ZSTD_CCtx*    ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
103044 +ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
103045 +ZSTDLIB_API ZSTD_DCtx*    ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
103046 +ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
103048 +ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
103049 +                                                  ZSTD_dictLoadMethod_e dictLoadMethod,
103050 +                                                  ZSTD_dictContentType_e dictContentType,
103051 +                                                  ZSTD_compressionParameters cParams,
103052 +                                                  ZSTD_customMem customMem);
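As a sketch of the custom-allocator hook, the following routes zstd's allocations through a byte counter; the function names and the counter are illustrative.

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <stdlib.h>

/* The `opaque` pointer carries user state (here, a running byte total)
 * into the callbacks. */
static void* counting_alloc(void* opaque, size_t size)
{
    *(size_t*)opaque += size;
    return malloc(size);
}

static void counting_free(void* opaque, void* address)
{
    (void)opaque;
    free(address);
}

static ZSTD_CCtx* create_counting_cctx(size_t* totalAllocated)
{
    ZSTD_customMem const cmem = { counting_alloc, counting_free, totalAllocated };
    return ZSTD_createCCtx_advanced(cmem);
}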
103054 +/* ! Thread pool :
103055 + * These prototypes make it possible to share a thread pool among multiple compression contexts.
103056 + * This can limit resources for applications with multiple threads where each one uses
103057 + * a threaded compression mode (via ZSTD_c_nbWorkers parameter).
103058 + * ZSTD_createThreadPool creates a new thread pool with a given number of threads.
103059 + * Note that the pool must remain alive for as long as it is in use.
103060 + * ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use a NULL argument
103061 + * value to fall back to the internal thread pool).
103062 + * ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer.
103063 + */
103064 +typedef struct POOL_ctx_s ZSTD_threadPool;
103065 +ZSTDLIB_API ZSTD_threadPool* ZSTD_createThreadPool(size_t numThreads);
103066 +ZSTDLIB_API void ZSTD_freeThreadPool (ZSTD_threadPool* pool);  /* accept NULL pointer */
103067 +ZSTDLIB_API size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool);
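A short sketch of pool sharing between two contexts, under the assumption that both enable multithreading via ZSTD_c_nbWorkers; error checks are omitted for brevity.

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* Two contexts drawing workers from one shared pool, so the total thread
 * count stays bounded at 4 regardless of how many contexts exist. */
static void shared_pool_demo(void)
{
    ZSTD_threadPool* const pool = ZSTD_createThreadPool(4);
    ZSTD_CCtx* const c1 = ZSTD_createCCtx();
    ZSTD_CCtx* const c2 = ZSTD_createCCtx();
    ZSTD_CCtx_setParameter(c1, ZSTD_c_nbWorkers, 4);
    ZSTD_CCtx_setParameter(c2, ZSTD_c_nbWorkers, 4);
    ZSTD_CCtx_refThreadPool(c1, pool);
    ZSTD_CCtx_refThreadPool(c2, pool);
    /* ... run ZSTD_compressStream2() on both contexts ... */
    ZSTD_freeCCtx(c1);
    ZSTD_freeCCtx(c2);
    ZSTD_freeThreadPool(pool);   /* only after every user is gone */
}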
103070 +/*
103071 + * This API is temporary and is expected to change or disappear in the future!
103072 + */
103073 +ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced2(
103074 +    const void* dict, size_t dictSize,
103075 +    ZSTD_dictLoadMethod_e dictLoadMethod,
103076 +    ZSTD_dictContentType_e dictContentType,
103077 +    const ZSTD_CCtx_params* cctxParams,
103078 +    ZSTD_customMem customMem);
103080 +ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(
103081 +    const void* dict, size_t dictSize,
103082 +    ZSTD_dictLoadMethod_e dictLoadMethod,
103083 +    ZSTD_dictContentType_e dictContentType,
103084 +    ZSTD_customMem customMem);
103087 +/***************************************
103088 +*  Advanced compression functions
103089 +***************************************/
103091 +/*! ZSTD_createCDict_byReference() :
103092 + *  Create a digested dictionary for compression
103093 + *  Dictionary content is just referenced, not duplicated.
103094 + *  As a consequence, `dictBuffer` **must** outlive CDict,
103095 + *  and its content must remain unmodified throughout the lifetime of CDict.
103096 + *  note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */
103097 +ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
103099 +/*! ZSTD_getDictID_fromCDict() :
103100 + *  Provides the dictID of the dictionary loaded into `cdict`.
103101 + *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
103102 + *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
103103 +ZSTDLIB_API unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict);
103105 +/*! ZSTD_getCParams() :
103106 + * @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
103107 + * `estimatedSrcSize` value is optional, select 0 if not known */
103108 +ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
103110 +/*! ZSTD_getParams() :
103111 + *  same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.
103112 + *  All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */
103113 +ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
103115 +/*! ZSTD_checkCParams() :
103116 + *  Ensure param values remain within authorized range.
103117 + * @return 0 on success, or an error code (can be checked with ZSTD_isError()) */
103118 +ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
103120 +/*! ZSTD_adjustCParams() :
103121 + *  optimize params for a given `srcSize` and `dictSize`.
103122 + * `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN.
103123 + * `dictSize` must be `0` when there is no dictionary.
103124 + *  cPar can be invalid : all parameters will be clamped within valid range in the @return struct.
103125 + *  This function never fails (wide contract) */
103126 +ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
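For instance, a hedged sketch that derives cParams for a known-small input and lets ZSTD_adjustCParams() clamp a manual tweak; the level, size, and windowLog values are arbitrary.

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

static ZSTD_compressionParameters small_input_params(void)
{
    ZSTD_compressionParameters cp = ZSTD_getCParams(19, 4096, 0);
    cp.windowLog = 18;                    /* manual tweak, may be out of range */
    cp = ZSTD_adjustCParams(cp, 4096, 0); /* never fails; clamps all fields */
    return cp;
}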
103128 +/*! ZSTD_compress_advanced() :
103129 + *  Note : this function is now DEPRECATED.
103130 + *         It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.
103131 + *  This prototype will be marked as deprecated and generate compilation warning on reaching v1.5.x */
103132 +ZSTDLIB_API size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
103133 +                                          void* dst, size_t dstCapacity,
103134 +                                    const void* src, size_t srcSize,
103135 +                                    const void* dict,size_t dictSize,
103136 +                                          ZSTD_parameters params);
103138 +/*! ZSTD_compress_usingCDict_advanced() :
103139 + *  Note : this function is now REDUNDANT.
103140 + *         It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters.
103141 + *  This prototype will be marked as deprecated and generate compilation warning in some future version */
103142 +ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
103143 +                                              void* dst, size_t dstCapacity,
103144 +                                        const void* src, size_t srcSize,
103145 +                                        const ZSTD_CDict* cdict,
103146 +                                              ZSTD_frameParameters fParams);
103149 +/*! ZSTD_CCtx_loadDictionary_byReference() :
103150 + *  Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx.
103151 + *  It saves some memory, but also requires that `dict` outlives its usage within `cctx` */
103152 +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
103154 +/*! ZSTD_CCtx_loadDictionary_advanced() :
103155 + *  Same as ZSTD_CCtx_loadDictionary(), but gives finer control over
103156 + *  how to load the dictionary (by copy ? by reference ?)
103157 + *  and how to interpret it (automatic ? force raw mode ? full mode only ?) */
103158 +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
103160 +/*! ZSTD_CCtx_refPrefix_advanced() :
103161 + *  Same as ZSTD_CCtx_refPrefix(), but gives finer control over
103162 + *  how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
103163 +ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
103165 +/* ===   experimental parameters   === */
103166 +/* these parameters can be used with ZSTD_setParameter()
103167 + * they are not guaranteed to remain supported in the future */
103169 + /* Enables rsyncable mode,
103170 +  * which makes compressed files more rsync friendly
103171 +  * by adding periodic synchronization points to the compressed data.
103172 +  * The target average block size is ZSTD_c_jobSize / 2.
103173 +  * It's possible to modify the job size to increase or decrease
103174 +  * the granularity of the synchronization point.
103175 +  * If the job size becomes smaller than the window size,
103176 +  * compression ratio will degrade.
103177 +  * NOTE 1: rsyncable mode only works when multithreading is enabled.
103178 +  * NOTE 2: rsyncable performs poorly in combination with long range mode,
103179 +  * since it will decrease the effectiveness of synchronization points,
103180 +  * though mileage may vary.
103181 +  * NOTE 3: Rsyncable mode limits maximum compression speed to ~400 MB/s.
103182 +  * If the selected compression level is already running significantly slower,
103183 +  * the overall speed won't be significantly impacted.
103184 +  */
103185 + #define ZSTD_c_rsyncable ZSTD_c_experimentalParam1
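A two-line sketch of enabling the mode; the worker count of 2 is an arbitrary example, and per NOTE 1 above some multithreading is required.

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

static void enable_rsyncable(ZSTD_CCtx* cctx)
{
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 2);  /* required: MT mode */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_rsyncable, 1);
}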
103187 +/* Select a compression format.
103188 + * The value must be of type ZSTD_format_e.
103189 + * See ZSTD_format_e enum definition for details */
103190 +#define ZSTD_c_format ZSTD_c_experimentalParam2
103192 +/* Force back-reference distances to remain < windowSize,
103193 + * even when referencing into Dictionary content (default:0) */
103194 +#define ZSTD_c_forceMaxWindow ZSTD_c_experimentalParam3
103196 +/* Controls whether the contents of a CDict
103197 + * are used in place, or copied into the working context.
103198 + * Accepts values from the ZSTD_dictAttachPref_e enum.
103199 + * See the comments on that enum for an explanation of the feature. */
103200 +#define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4
103202 +/* Controls how the literals are compressed (default is auto).
103203 + * The value must be of type ZSTD_literalCompressionMode_e.
103204 + * See the ZSTD_literalCompressionMode_e enum definition for details.
103205 + */
103206 +#define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5
103208 +/* Tries to fit compressed block size to be around targetCBlockSize.
103209 + * No target when targetCBlockSize == 0.
103210 + * There is no guarantee on compressed block size (default:0) */
103211 +#define ZSTD_c_targetCBlockSize ZSTD_c_experimentalParam6
103213 +/* User's best guess of source size.
103214 + * Hint is not valid when srcSizeHint == 0.
103215 + * There is no guarantee that the hint is close to the actual source size,
103216 + * but compression ratio may regress significantly if the guess considerably underestimates it */
103217 +#define ZSTD_c_srcSizeHint ZSTD_c_experimentalParam7
103219 +/* Controls whether the new and experimental "dedicated dictionary search
103220 + * structure" can be used. This feature is still rough around the edges, be
103221 + * prepared for surprising behavior!
103223 + * How to use it:
103225 + * When using a CDict, whether to use this feature or not is controlled at
103226 + * CDict creation, and it must be set in a CCtxParams set passed into that
103227 + * construction (via ZSTD_createCDict_advanced2()). A compression will then
103228 + * use the feature or not based on how the CDict was constructed; the value of
103229 + * this param, set in the CCtx, will have no effect.
103231 + * However, when a dictionary buffer is passed into a CCtx, such as via
103232 + * ZSTD_CCtx_loadDictionary(), this param can be set on the CCtx to control
103233 + * whether the CDict that is created internally can use the feature or not.
103235 + * What it does:
103237 + * Normally, the internal data structures of the CDict are analogous to what
103238 + * would be stored in a CCtx after compressing the contents of a dictionary.
103239 + * To an approximation, a compression using a dictionary can then use those
103240 + * data structures to simply continue what is effectively a streaming
103241 + * compression where the simulated compression of the dictionary left off.
103242 + * Which is to say, the search structures in the CDict are normally the same
103243 + * format as in the CCtx.
103245 + * It is possible to do better, since the CDict is not like a CCtx: the search
103246 + * structures are written once during CDict creation, and then are only read
103247 + * after that, while the search structures in the CCtx are both read and
103248 + * written as the compression goes along. This means we can choose a search
103249 + * structure for the dictionary that is read-optimized.
103251 + * This feature enables the use of that different structure.
103253 + * Note that some of the members of the ZSTD_compressionParameters struct have
103254 + * different semantics and constraints in the dedicated search structure. It is
103255 + * highly recommended that you simply set a compression level in the CCtxParams
103256 + * you pass into the CDict creation call, and avoid messing with the cParams
103257 + * directly.
103259 + * Effects:
103261 + * This will only have any effect when the selected ZSTD_strategy
103262 + * implementation supports this feature. Currently, that's limited to
103263 + * ZSTD_greedy, ZSTD_lazy, and ZSTD_lazy2.
103265 + * Note that this means that the CDict tables can no longer be copied into the
103266 + * CCtx, so the dict attachment mode ZSTD_dictForceCopy will no longer be
103267 + * usable. The dictionary can only be attached or reloaded.
103269 + * In general, you should expect compression to be faster--sometimes very much
103270 + * so--and CDict creation to be slightly slower. Eventually, we will probably
103271 + * make this mode the default.
103272 + */
103273 +#define ZSTD_c_enableDedicatedDictSearch ZSTD_c_experimentalParam8
103275 +/* ZSTD_c_stableInBuffer
103276 + * Experimental parameter.
103277 + * Default is 0 == disabled. Set to 1 to enable.
103279 + * Tells the compressor that the ZSTD_inBuffer will ALWAYS be the same
103280 + * between calls, except for the modifications that zstd makes to pos (the
103281 + * caller must not modify pos). This is checked by the compressor, and
103282 + * compression will fail if it ever changes. This means the only flush
103283 + * mode that makes sense is ZSTD_e_end, so zstd will error if ZSTD_e_end
103284 + * is not used. The data in the ZSTD_inBuffer in the range [src, src + pos)
103285 + * MUST not be modified during compression or you will get data corruption.
103287 + * When this flag is enabled zstd won't allocate an input window buffer,
103288 + * because the user guarantees it can reference the ZSTD_inBuffer until
103289 + * the frame is complete. But, it will still allocate an output buffer
103290 + * large enough to fit a block (see ZSTD_c_stableOutBuffer). This will also
103291 + * avoid the memcpy() from the input buffer to the input window buffer.
103293 + * NOTE: ZSTD_compressStream2() will error if ZSTD_e_end is not used.
103294 + * That means this flag cannot be used with ZSTD_compressStream().
103296 + * NOTE: So long as the ZSTD_inBuffer always points to valid memory, using
103297 + * this flag is ALWAYS memory safe, and will never access out-of-bounds
103298 + * memory. However, compression WILL fail if you violate the preconditions.
103300 + * WARNING: The data in the ZSTD_inBuffer in the range [src, src + pos) MUST
103301 + * not be modified during compression or you will get data corruption. This
103302 + * is because zstd needs to reference data in the ZSTD_inBuffer to find
103303 + * matches. Normally zstd maintains its own window buffer for this purpose,
103304 + * but passing this flag tells zstd to use the user provided buffer.
103305 + */
103306 +#define ZSTD_c_stableInBuffer ZSTD_c_experimentalParam9
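A minimal sketch of the single-shot pattern this parameter implies; it assumes a fresh cctx and dstCapacity >= ZSTD_compressBound(srcSize), so a single ZSTD_e_end call can finish the frame.

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* The input buffer is pinned for the whole frame, so zstd skips the
 * window-buffer memcpy. Returns the compressed size on success. */
static size_t compress_stable_input(ZSTD_CCtx* cctx,
                                    void* dst, size_t dstCapacity,
                                    const void* src, size_t srcSize)
{
    ZSTD_inBuffer in = { src, srcSize, 0 };
    ZSTD_outBuffer out = { dst, dstCapacity, 0 };
    size_t remaining;
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_stableInBuffer, 1);
    remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
    if (ZSTD_isError(remaining) || remaining != 0)
        return remaining;   /* error code, or dst was too small */
    return out.pos;
}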
103308 +/* ZSTD_c_stableOutBuffer
103309 + * Experimental parameter.
103310 + * Default is 0 == disabled. Set to 1 to enable.
103312 + * Tells the compressor that the ZSTD_outBuffer will not be resized between
103313 + * calls. Specifically: (out.size - out.pos) will never grow. This gives the
103314 + * compressor the freedom to say: If the compressed data doesn't fit in the
103315 + * output buffer then return ZSTD_error_dstSizeTooSmall. This allows us to
103316 + * always compress directly into the output buffer, instead of compressing
103317 + * into an internal buffer and copying to the output buffer.
103319 + * When this flag is enabled zstd won't allocate an output buffer, because
103320 + * it can write directly to the ZSTD_outBuffer. It will still allocate the
103321 + * input window buffer (see ZSTD_c_stableInBuffer).
103323 + * Zstd will check that (out.size - out.pos) never grows and return an error
103324 + * if it does. While not strictly necessary, this should prevent surprises.
103325 + */
103326 +#define ZSTD_c_stableOutBuffer ZSTD_c_experimentalParam10
103328 +/* ZSTD_c_blockDelimiters
103329 + * Default is 0 == ZSTD_sf_noBlockDelimiters.
103331 + * For use with sequence compression API: ZSTD_compressSequences().
103333 + * Designates whether or not the given array of ZSTD_Sequence contains block delimiters
103334 + * and last literals, which are defined as sequences with offset == 0 and matchLength == 0.
103335 + * See the definition of ZSTD_Sequence for more specifics.
103336 + */
103337 +#define ZSTD_c_blockDelimiters ZSTD_c_experimentalParam11
103339 +/* ZSTD_c_validateSequences
103340 + * Default is 0 == disabled. Set to 1 to enable sequence validation.
103342 + * For use with sequence compression API: ZSTD_compressSequences().
103343 + * Designates whether or not we validate sequences provided to ZSTD_compressSequences()
103344 + * during function execution.
103346 + * Without validation, providing a sequence that does not conform to the zstd spec will cause
103347 + * undefined behavior, and may produce a corrupted block.
103349 + * With validation enabled, if a sequence is invalid (see doc/zstd_compression_format.md for
103350 + * specifics regarding offset/matchlength requirements) then the function will bail out and
103351 + * return an error.
103353 + */
103354 +#define ZSTD_c_validateSequences ZSTD_c_experimentalParam12
103356 +/*! ZSTD_CCtx_getParameter() :
103357 + *  Get the requested compression parameter value, selected by enum ZSTD_cParameter,
103358 + *  and store it into int* value.
103359 + * @return : 0, or an error code (which can be tested with ZSTD_isError()).
103360 + */
103361 +ZSTDLIB_API size_t ZSTD_CCtx_getParameter(const ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value);
103364 +/*! ZSTD_CCtx_params :
103365 + *  Quick howto :
103366 + *  - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure
103367 + *  - ZSTD_CCtxParams_setParameter() : Push parameters one by one into
103368 + *                                     an existing ZSTD_CCtx_params structure.
103369 + *                                     This is similar to
103370 + *                                     ZSTD_CCtx_setParameter().
103371 + *  - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to
103372 + *                                    an existing CCtx.
103373 + *                                    These parameters will be applied to
103374 + *                                    all subsequent frames.
103375 + *  - ZSTD_compressStream2() : Do compression using the CCtx.
103376 + *  - ZSTD_freeCCtxParams() : Free the memory, accept NULL pointer.
103378 + *  This can be used with ZSTD_estimateCCtxSize_usingCCtxParams()
103379 + *  for static allocation of CCtx for single-threaded compression.
103380 + */
103381 +ZSTDLIB_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void);
103382 +ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params);  /* accept NULL pointer */
103384 +/*! ZSTD_CCtxParams_reset() :
103385 + *  Reset params to default values.
103386 + */
103387 +ZSTDLIB_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params);
103389 +/*! ZSTD_CCtxParams_init() :
103390 + *  Initializes the compression parameters of cctxParams according to
103391 + *  compression level. All other parameters are reset to their default values.
103392 + */
103393 +ZSTDLIB_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel);
103395 +/*! ZSTD_CCtxParams_init_advanced() :
103396 + *  Initializes the compression and frame parameters of cctxParams according to
103397 + *  params. All other parameters are reset to their default values.
103398 + */
103399 +ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);
103401 +/*! ZSTD_CCtxParams_setParameter() :
103402 + *  Similar to ZSTD_CCtx_setParameter.
103403 + *  Set one compression parameter, selected by enum ZSTD_cParameter.
103404 + *  Parameters must be applied to a ZSTD_CCtx using
103405 + *  ZSTD_CCtx_setParametersUsingCCtxParams().
103406 + * @result : a code representing success or failure (which can be tested with
103407 + *           ZSTD_isError()).
103408 + */
103409 +ZSTDLIB_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value);
103411 +/*! ZSTD_CCtxParams_getParameter() :
103412 + * Similar to ZSTD_CCtx_getParameter.
103413 + * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.
103414 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
103415 + */
103416 +ZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(const ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value);
103418 +/*! ZSTD_CCtx_setParametersUsingCCtxParams() :
103419 + *  Apply a set of ZSTD_CCtx_params to the compression context.
103420 + *  This can be done even after compression is started,
103421 + *    if nbWorkers==0, this will have no impact until a new compression is started.
103422 + *    if nbWorkers>=1, new parameters will be picked up at next job,
103423 + *       with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated).
103424 + */
103425 +ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams(
103426 +        ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params);
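The quick howto above, as a hedged sketch; the level and the checksum choice are arbitrary examples.

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* Builds a parameter set once, then applies it to a context for all
 * subsequent frames. Returns 0, or a ZSTD error code. */
static size_t apply_params(ZSTD_CCtx* cctx)
{
    size_t err;
    ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
    ZSTD_CCtxParams_init(params, 3);                             /* level-3 defaults */
    ZSTD_CCtxParams_setParameter(params, ZSTD_c_checksumFlag, 1);
    err = ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params);
    ZSTD_freeCCtxParams(params);                                 /* accepts NULL too */
    return err;
}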
103428 +/*! ZSTD_compressStream2_simpleArgs() :
103429 + *  Same as ZSTD_compressStream2(),
103430 + *  but using only integral types as arguments.
103431 + *  This variant might be helpful for binders from dynamic languages
103432 + *  which have trouble handling structures containing memory pointers.
103433 + */
103434 +ZSTDLIB_API size_t ZSTD_compressStream2_simpleArgs (
103435 +                            ZSTD_CCtx* cctx,
103436 +                            void* dst, size_t dstCapacity, size_t* dstPos,
103437 +                      const void* src, size_t srcSize, size_t* srcPos,
103438 +                            ZSTD_EndDirective endOp);
103441 +/***************************************
103442 +*  Advanced decompression functions
103443 +***************************************/
103445 +/*! ZSTD_isFrame() :
103446 + *  Tells if the content of `buffer` starts with a valid Frame Identifier.
103447 + *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
103448 + *  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
103449 + *  Note 3 : Skippable Frame Identifiers are considered valid. */
103450 +ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size);
103452 +/*! ZSTD_createDDict_byReference() :
103453 + *  Create a digested dictionary, ready to start decompression operation without startup delay.
103454 + *  Dictionary content is referenced, and therefore stays in dictBuffer.
103455 + *  It is important that dictBuffer outlives DDict,
103456 + *  it must remain read accessible throughout the lifetime of DDict */
103457 +ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
103459 +/*! ZSTD_DCtx_loadDictionary_byReference() :
103460 + *  Same as ZSTD_DCtx_loadDictionary(),
103461 + *  but references `dict` content instead of copying it into `dctx`.
103462 + *  This saves memory if `dict` remains around.
103463 + *  However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression. */
103464 +ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
103466 +/*! ZSTD_DCtx_loadDictionary_advanced() :
103467 + *  Same as ZSTD_DCtx_loadDictionary(),
103468 + *  but gives direct control over
103469 + *  how to load the dictionary (by copy ? by reference ?)
103470 + *  and how to interpret it (automatic ? force raw mode ? full mode only ?). */
103471 +ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
103473 +/*! ZSTD_DCtx_refPrefix_advanced() :
103474 + *  Same as ZSTD_DCtx_refPrefix(), but gives finer control over
103475 + *  how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
103476 +ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
103478 +/*! ZSTD_DCtx_setMaxWindowSize() :
103479 + *  Refuses allocating internal buffers for frames requiring a window size larger than provided limit.
103480 + *  This protects a decoder context from reserving too much memory for itself (potential attack scenario).
103481 + *  This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
103482 + *  By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT)
103483 + * @return : 0, or an error code (which can be tested using ZSTD_isError()).
103484 + */
103485 +ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize);
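For example, a one-call sketch capping decoder memory; the 8 MiB budget is an arbitrary figure.

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* Hardens a streaming decoder against hostile frames by refusing any
 * frame whose window would need more than 8 MiB. */
static size_t harden_dctx(ZSTD_DCtx* dctx)
{
    return ZSTD_DCtx_setMaxWindowSize(dctx, (size_t)8 << 20);
}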
103487 +/*! ZSTD_DCtx_getParameter() :
103488 + *  Get the requested decompression parameter value, selected by enum ZSTD_dParameter,
103489 + *  and store it into int* value.
103490 + * @return : 0, or an error code (which can be tested with ZSTD_isError()).
103491 + */
103492 +ZSTDLIB_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value);
103494 +/* ZSTD_d_format
103495 + * experimental parameter,
103496 + * allowing selection between ZSTD_format_e input compression formats
103497 + */
103498 +#define ZSTD_d_format ZSTD_d_experimentalParam1
103499 +/* ZSTD_d_stableOutBuffer
103500 + * Experimental parameter.
103501 + * Default is 0 == disabled. Set to 1 to enable.
103503 + * Tells the decompressor that the ZSTD_outBuffer will ALWAYS be the same
103504 + * between calls, except for the modifications that zstd makes to pos (the
103505 + * caller must not modify pos). This is checked by the decompressor, and
103506 + * decompression will fail if it ever changes. Therefore the ZSTD_outBuffer
103507 + * MUST be large enough to fit the entire decompressed frame. This will be
103508 + * checked when the frame content size is known. The data in the ZSTD_outBuffer
103509 + * in the range [dst, dst + pos) MUST not be modified during decompression
103510 + * or you will get data corruption.
103512 + * When this flag is enabled zstd won't allocate an output buffer, because
103513 + * it can write directly to the ZSTD_outBuffer, but it will still allocate
103514 + * an input buffer large enough to fit any compressed block. This will also
103515 + * avoid the memcpy() from the internal output buffer to the ZSTD_outBuffer.
103516 + * If you need to avoid the input buffer allocation use the buffer-less
103517 + * streaming API.
103519 + * NOTE: So long as the ZSTD_outBuffer always points to valid memory, using
103520 + * this flag is ALWAYS memory safe, and will never access out-of-bounds
103521 + * memory. However, decompression WILL fail if you violate the preconditions.
103523 + * WARNING: The data in the ZSTD_outBuffer in the range [dst, dst + pos) MUST
103524 + * not be modified during decompression or you will get data corruption. This
103525 + * is because zstd needs to reference data in the ZSTD_outBuffer to regenerate
103526 + * matches. Normally zstd maintains its own buffer for this purpose, but passing
103527 + * this flag tells zstd to use the user provided buffer.
103528 + */
103529 +#define ZSTD_d_stableOutBuffer ZSTD_d_experimentalParam2
103531 +/* ZSTD_d_forceIgnoreChecksum
103532 + * Experimental parameter.
103533 + * Default is 0 == disabled. Set to 1 to enable
103535 + * Tells the decompressor to skip checksum validation during decompression, regardless
103536 + * of whether checksumming was specified during compression. This offers some
103537 + * slight performance benefits, and may be useful for debugging.
103538 + * Param has values of type ZSTD_forceIgnoreChecksum_e
103539 + */
103540 +#define ZSTD_d_forceIgnoreChecksum ZSTD_d_experimentalParam3
103542 +/* ZSTD_d_refMultipleDDicts
103543 + * Experimental parameter.
103544 + * Default is 0 == disabled. Set to 1 to enable
103546 + * If enabled and dctx is allocated on the heap, then additional memory will be allocated
103547 + * to store references to multiple ZSTD_DDict. That is, multiple calls of ZSTD_DCtx_refDDict()
103548 + * using a given ZSTD_DCtx, rather than overwriting the previous DDict reference, will instead
103549 + * store all references. At decompression time, the appropriate dictID is selected
103550 + * from the set of DDicts based on the dictID in the frame.
103552 + * Usage is simply calling ZSTD_DCtx_refDDict() on multiple dict buffers.
103554 + * Param has values of type ZSTD_refMultipleDDicts_e
103556 + * WARNING: Enabling this parameter and calling ZSTD_DCtx_refDDict(), will trigger memory
103557 + * allocation for the hash table. ZSTD_freeDCtx() also frees this memory.
103558 + * Memory is allocated as per ZSTD_DCtx::customMem.
103560 + * Although this function allocates memory for the table, the user is still responsible for
103561 + * memory management of the underlying ZSTD_DDict* themselves.
103562 + */
103563 +#define ZSTD_d_refMultipleDDicts ZSTD_d_experimentalParam4
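A small sketch of the multiple-DDict flow; the two dictionaries are assumed to have been created by the caller.

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* Registers two dictionaries at once; at decode time the dictID found in
 * each frame header picks the matching DDict automatically. */
static void register_ddicts(ZSTD_DCtx* dctx,
                            const ZSTD_DDict* a, const ZSTD_DDict* b)
{
    ZSTD_DCtx_setParameter(dctx, ZSTD_d_refMultipleDDicts, ZSTD_rmd_refMultipleDDicts);
    ZSTD_DCtx_refDDict(dctx, a);   /* kept, not overwritten... */
    ZSTD_DCtx_refDDict(dctx, b);   /* ...so both stay available */
}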
103566 +/*! ZSTD_DCtx_setFormat() :
103567 + *  Instruct the decoder context about what kind of data to decode next.
103568 + *  This instruction is mandatory to decode data without a fully-formed header,
103569 + *  such as ZSTD_f_zstd1_magicless for example.
103570 + * @return : 0, or an error code (which can be tested using ZSTD_isError()). */
103571 +ZSTDLIB_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
103573 +/*! ZSTD_decompressStream_simpleArgs() :
103574 + *  Same as ZSTD_decompressStream(),
103575 + *  but using only integral types as arguments.
103576 + *  This can be helpful for binders from dynamic languages
103577 + *  which have trouble handling structures containing memory pointers.
103578 + */
103579 +ZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs (
103580 +                            ZSTD_DCtx* dctx,
103581 +                            void* dst, size_t dstCapacity, size_t* dstPos,
103582 +                      const void* src, size_t srcSize, size_t* srcPos);
103585 +/********************************************************************
103586 +*  Advanced streaming functions
103587 +*  Warning : most of these functions are now redundant with the Advanced API.
103588 +*  Once Advanced API reaches "stable" status,
103589 +*  redundant functions will be deprecated, and then at some point removed.
103590 +********************************************************************/
103592 +/*=====   Advanced Streaming compression functions  =====*/
103594 +/*! ZSTD_initCStream_srcSize() :
103595 + * This function is deprecated, and equivalent to:
103596 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
103597 + *     ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
103598 + *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
103599 + *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
103601 + * pledgedSrcSize must be correct. If it is not known at init time, use
103602 + * ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs,
103603 + * "0" also disables the frame content size field. It may be enabled in the future.
103604 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
103605 + */
103606 +ZSTDLIB_API size_t
103607 +ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
103608 +                         int compressionLevel,
103609 +                         unsigned long long pledgedSrcSize);
103611 +/*! ZSTD_initCStream_usingDict() :
103612 + * This function is deprecated, and is equivalent to:
103613 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
103614 + *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
103615 + *     ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
103617 + * Creates an internal CDict (incompatible with static CCtx), except if
103618 + * dict == NULL or dictSize < 8, in which case no dict is used.
103619 + * Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if
103620 + * it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.
103621 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
103622 + */
103623 +ZSTDLIB_API size_t
103624 +ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
103625 +                     const void* dict, size_t dictSize,
103626 +                           int compressionLevel);
103628 +/*! ZSTD_initCStream_advanced() :
103629 + * This function is deprecated, and is approximately equivalent to:
103630 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
103631 + *     // Pseudocode: Set each zstd parameter and leave the rest as-is.
103632 + *     for ((param, value) : params) {
103633 + *         ZSTD_CCtx_setParameter(zcs, param, value);
103634 + *     }
103635 + *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
103636 + *     ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
103638 + * dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy.
103639 + * pledgedSrcSize must be correct.
103640 + * If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
103641 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
103642 + */
103643 +ZSTDLIB_API size_t
103644 +ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
103645 +                    const void* dict, size_t dictSize,
103646 +                          ZSTD_parameters params,
103647 +                          unsigned long long pledgedSrcSize);
103649 +/*! ZSTD_initCStream_usingCDict() :
103650 + * This function is deprecated, and equivalent to:
103651 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
103652 + *     ZSTD_CCtx_refCDict(zcs, cdict);
103654 + * note : cdict will just be referenced, and must outlive compression session
103655 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
103656 + */
103657 +ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
103659 +/*! ZSTD_initCStream_usingCDict_advanced() :
103660 + *   This function is DEPRECATED, and is approximately equivalent to:
103661 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
103662 + *     // Pseudocode: Set each zstd frame parameter and leave the rest as-is.
103663 + *     for ((fParam, value) : fParams) {
103664 + *         ZSTD_CCtx_setParameter(zcs, fParam, value);
103665 + *     }
103666 + *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
103667 + *     ZSTD_CCtx_refCDict(zcs, cdict);
103669 + * same as ZSTD_initCStream_usingCDict(), with control over frame parameters.
103670 + * pledgedSrcSize must be correct. If srcSize is not known at init time, use
103671 + * value ZSTD_CONTENTSIZE_UNKNOWN.
103672 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
103673 + */
103674 +ZSTDLIB_API size_t
103675 +ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
103676 +                               const ZSTD_CDict* cdict,
103677 +                                     ZSTD_frameParameters fParams,
103678 +                                     unsigned long long pledgedSrcSize);
103680 +/*! ZSTD_resetCStream() :
103681 + * This function is deprecated, and is equivalent to:
103682 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
103683 + *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
103685 + *  start a new frame, using same parameters from previous frame.
103686 + *  This is typically useful to skip the dictionary loading stage, since the dictionary will be re-used in place.
103687 + *  Note that zcs must be initialized at least once before using ZSTD_resetCStream().
103688 + *  If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN.
103689 + *  If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.
103690 + *  For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs,
103691 + *  but it will change to mean "empty" in a future version, so use the macro ZSTD_CONTENTSIZE_UNKNOWN instead.
103692 + * @return : 0, or an error code (which can be tested using ZSTD_isError())
103693 + *  Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
103694 + */
103695 +ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
103698 +typedef struct {
103699 +    unsigned long long ingested;   /* nb input bytes read and buffered */
103700 +    unsigned long long consumed;   /* nb input bytes actually compressed */
103701 +    unsigned long long produced;   /* nb of compressed bytes generated and buffered */
103702 +    unsigned long long flushed;    /* nb of compressed bytes flushed : not provided; can be tracked from caller side */
103703 +    unsigned currentJobID;         /* MT only : latest started job nb */
103704 +    unsigned nbActiveWorkers;      /* MT only : nb of workers actively compressing at probe time */
103705 +} ZSTD_frameProgression;
103707 +/* ZSTD_getFrameProgression() :
103708 + * tells how much data has been ingested (read from input),
103709 + * consumed (input actually compressed) and produced (output) for the current frame.
103710 + * Note : (ingested - consumed) is the amount of input data buffered internally, not yet compressed.
103711 + * Aggregates progression inside active worker threads.
103712 + */
103713 +ZSTDLIB_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx);
103715 +/*! ZSTD_toFlushNow() :
103716 + *  Tell how many bytes are ready to be flushed immediately.
103717 + *  Useful for multithreading scenarios (nbWorkers >= 1).
103718 + *  Probe the oldest active job, defined as oldest job not yet entirely flushed,
103719 + *  and check its output buffer.
103720 + * @return : amount of data stored in oldest job and ready to be flushed immediately.
103721 + *  if @return == 0, it means either :
103722 + *  + there is no active job (could be checked with ZSTD_frameProgression()), or
103723 + *  + oldest job is still actively compressing data,
103724 + *    but everything it has produced has also been flushed so far,
103725 + *    therefore flush speed is limited by production speed of oldest job
103726 + *    irrespective of the speed of concurrent (and newer) jobs.
103727 + */
103728 +ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
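A small usage sketch for the two monitoring entry points above, assuming a multi-threaded context (nbWorkers >= 1) being driven from another thread; purely illustrative.

#include <stdio.h>
#include <zstd.h>

/* Probe compression progress without disturbing the worker threads. */
static void report_progress(ZSTD_CCtx *cctx)
{
    ZSTD_frameProgression fp = ZSTD_getFrameProgression(cctx);
    size_t const flushable = ZSTD_toFlushNow(cctx);

    printf("ingested=%llu consumed=%llu produced=%llu buffered=%llu toFlush=%zu\n",
           fp.ingested, fp.consumed, fp.produced,
           fp.ingested - fp.consumed,   /* input buffered, not yet compressed */
           flushable);
}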
103731 +/*=====   Advanced Streaming decompression functions  =====*/
103734 + * This function is deprecated, and is equivalent to:
103736 + *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
103737 + *     ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
103739 + * note: no dictionary will be used if dict == NULL or dictSize < 8
103740 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
103741 + */
103742 +ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
103745 + * This function is deprecated, and is equivalent to:
103747 + *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
103748 + *     ZSTD_DCtx_refDDict(zds, ddict);
103750 + * note : ddict is referenced, it must outlive decompression session
103751 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
103752 + */
103753 +ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
103756 + * This function is deprecated, and is equivalent to:
103758 + *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
103760 + * re-use decompression parameters from previous init; saves dictionary loading
103761 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
103762 + */
103763 +ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
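A sketch of the replacement sequence named in the three deprecation notes above, assuming zds and ddict are created and owned by the caller.

#include <zstd.h>

/* Start (or restart) a decompression session with a digested dictionary.
 * Passing ddict == NULL returns to no-dictionary mode. */
static size_t begin_decompression(ZSTD_DStream *zds, const ZSTD_DDict *ddict)
{
    size_t ret = ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
    if (ZSTD_isError(ret))
        return ret;
    /* ddict is referenced; it must outlive the decompression session. */
    return ZSTD_DCtx_refDDict(zds, ddict);
}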
103766 +/*********************************************************************
103767 +*  Buffer-less and synchronous inner streaming functions
103769 +*  This is an advanced API, giving full control over buffer management, for users who need direct control over memory.
103770 +*  But it's also a complex one, with several restrictions, documented below.
103771 +*  Prefer the normal streaming API for an easier experience.
103772 +********************************************************************* */
103775 +  Buffer-less streaming compression (synchronous mode)
103777 +  A ZSTD_CCtx object is required to track streaming operations.
103778 +  Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.
103779 +  A ZSTD_CCtx object can be re-used multiple times across successive compression operations.
103781 +  Start by initializing a context.
103782 +  Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression,
103783 +  or ZSTD_compressBegin_advanced() for finer parameter control.
103784 +  It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx().
103786 +  Then, consume your input using ZSTD_compressContinue().
103787 +  There are some important considerations to keep in mind when using this advanced function :
103788 +  - ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only.
103789 +  - The interface is synchronous : input is consumed entirely and produces 1+ compressed blocks.
103790 +  - The caller must ensure there is enough space in `dst` to store the compressed data under the worst-case scenario.
103791 +    The worst-case size is provided by ZSTD_compressBound().
103792 +    ZSTD_compressContinue() doesn't guarantee recovery after a failed compression.
103793 +  - ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to the maximum distance size, see WindowLog).
103794 +    It remembers all previous contiguous blocks, plus one separate memory segment (which can itself consist of multiple contiguous blocks).
103795 +  - ZSTD_compressContinue() detects that prior input has been overwritten when the `src` buffer overlaps it,
103796 +    in which case it will "discard" the relevant memory section from its history.
103798 +  Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.
103799 +  It's possible to use srcSize==0, in which case it will write a final empty block to end the frame.
103800 +  Without the last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.
103802 +  A `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
103805 +/*=====   Buffer-less streaming compression functions  =====*/
103806 +ZSTDLIB_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
103807 +ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
103808 +ZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */
103809 +ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */
103810 +ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize);   /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */
103811 +ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**<  note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
103813 +ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
103814 +ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
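One possible shape of the begin/continue/end sequence documented above: a sketch that compresses a buffer in fixed-size chunks, assuming the caller sized `dst` with ZSTD_compressBound() and keeps `src` accessible and unmodified for the whole frame.

#include <zstd.h>

static size_t compress_bufferless(ZSTD_CCtx *cctx,
                                  void *dst, size_t dstCapacity,
                                  const void *src, size_t srcSize,
                                  int level)
{
    const char *ip = (const char *)src;
    char *op = (char *)dst;
    char *const oend = op + dstCapacity;
    size_t const chunk = 64 * 1024;   /* arbitrary chunking for the example */

    size_t ret = ZSTD_compressBegin(cctx, level);
    if (ZSTD_isError(ret))
        return ret;

    while (srcSize > chunk) {
        ret = ZSTD_compressContinue(cctx, op, (size_t)(oend - op), ip, chunk);
        if (ZSTD_isError(ret))
            return ret;
        op += ret; ip += chunk; srcSize -= chunk;
    }
    /* Last call: ZSTD_compressEnd() writes the final block (and checksum). */
    ret = ZSTD_compressEnd(cctx, op, (size_t)(oend - op), ip, srcSize);
    if (ZSTD_isError(ret))
        return ret;
    return (size_t)(op + ret - (char *)dst);
}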
103818 +  Buffer-less streaming decompression (synchronous mode)
103820 +  A ZSTD_DCtx object is required to track streaming operations.
103821 +  Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
103822 +  A ZSTD_DCtx object can be re-used multiple times.
103824 +  First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
103825 +  The frame header is extracted from the beginning of the compressed frame, so providing only the frame's beginning is enough.
103826 +  The data fragment must be large enough to ensure successful decoding.
103827 + `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
103828 +  @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
103829 +           >0 : `srcSize` is too small, please provide at least @result bytes on the next attempt.
103830 +           or an error code, which can be tested using ZSTD_isError().
103832 +  It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
103833 +  such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).
103834 +  Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.
103835 +  As a consequence, check that values remain within valid application range.
103836 +  For example, do not allocate memory blindly, check that `windowSize` is within expectation.
103837 +  Each application can set its own limits, depending on local restrictions.
103838 +  For extended interoperability, it is recommended to support `windowSize` of at least 8 MB.
103840 +  ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes.
103841 +  ZSTD_decompressContinue() is very sensitive to contiguity :
103842 +  if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,
103843 +  or that the previous contiguous segment is large enough to properly handle the maximum back-reference distance.
103844 +  There are multiple ways to guarantee this condition.
103846 +  The most memory-efficient way is to use a round buffer of sufficient size.
103847 +  The sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
103848 +  which can @return an error code if the required value is too large for the current system (in 32-bit mode).
103849 +  In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to the previous one,
103850 +  up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
103851 +  whose maximum size is provided in the `ZSTD_frameHeader` structure, field `blockSizeMax`.
103852 +  At that point, decoding can resume from the beginning of the buffer.
103853 +  Note that already decoded data stored in the buffer should be flushed before being overwritten.
103855 +  There are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory.
103857 +  Finally, if you control the compression process, you can also ignore all buffer size rules,
103858 +  as long as the encoder and decoder progress in "lock-step",
103859 +  i.e. use exactly the same buffer sizes, break contiguity at the same place, etc.
103861 +  Once buffers are setup, start decompression, with ZSTD_decompressBegin().
103862 +  If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().
103864 +  Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternately.
103865 +  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
103866 +  ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
103868 + @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
103869 +  It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
103870 +  It can also be an error code, which can be tested with ZSTD_isError().
103872 +  A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
103873 +  Context can then be reset to start a new decompression.
103875 +  Note : it's possible to know if the next input to present is a header or a block, using ZSTD_nextInputType().
103876 +  This information is not required to properly decode a frame.
103878 +  == Special case : skippable frames ==
103880 +  Skippable frames allow integration of user-defined data into a flow of concatenated frames.
103881 +  Skippable frames will be ignored (skipped) by the decompressor.
103882 +  The format of skippable frames is as follows :
103883 +  a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F
103884 +  b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
103885 +  c) Frame Content - any content (User Data) of length equal to Frame Size
103886 +  For skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame.
103887 +  For skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content.
103890 +/*=====   Buffer-less streaming decompression functions  =====*/
103891 +typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
103892 +typedef struct {
103893 +    unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
103894 +    unsigned long long windowSize;       /* can be very large, up to <= frameContentSize */
103895 +    unsigned blockSizeMax;
103896 +    ZSTD_frameType_e frameType;          /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
103897 +    unsigned headerSize;
103898 +    unsigned dictID;
103899 +    unsigned checksumFlag;
103900 +} ZSTD_frameHeader;
103902 +/*! ZSTD_getFrameHeader() :
103903 + *  decode the Frame Header, or indicate that a larger `srcSize` is required.
103904 + * @return : 0, `zfhPtr` is correctly filled,
103905 + *          >0, `srcSize` is too small, the value is the wanted `srcSize` amount,
103906 + *           or an error code, which can be tested using ZSTD_isError() */
103907 +ZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize);   /**< doesn't consume input */
103908 +/*! ZSTD_getFrameHeader_advanced() :
103909 + *  same as ZSTD_getFrameHeader(),
103910 + *  with added capability to select a format (like ZSTD_f_zstd1_magicless) */
103911 +ZSTDLIB_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
103912 +ZSTDLIB_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize);  /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
103914 +ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
103915 +ZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
103916 +ZSTDLIB_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
103918 +ZSTDLIB_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
103919 +ZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
103921 +/* misc */
103922 +ZSTDLIB_API void   ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
103923 +typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
103924 +ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
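A sketch of the decode loop described above, decompressing a single frame into one contiguous `dst` buffer (which trivially satisfies the contiguity rules); error handling is simplified for illustration.

#include <zstd.h>

static size_t decompress_bufferless(ZSTD_DCtx *dctx,
                                    void *dst, size_t dstCapacity,
                                    const void *src, size_t srcSize)
{
    const char *ip = (const char *)src;
    char *op = (char *)dst;
    char *const oend = op + dstCapacity;

    size_t ret = ZSTD_decompressBegin(dctx);
    if (ZSTD_isError(ret))
        return ret;

    for (;;) {
        size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
        if (toRead == 0)              /* frame fully decoded */
            break;
        if (toRead > srcSize)         /* truncated input */
            return (size_t)-1;        /* sketch-only error signalling */
        ret = ZSTD_decompressContinue(dctx, op, (size_t)(oend - op), ip, toRead);
        if (ZSTD_isError(ret))
            return ret;
        op += ret; ip += toRead; srcSize -= toRead;
    }
    return (size_t)(op - (char *)dst);
}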
103929 +/* ============================ */
103930 +/**       Block level API       */
103931 +/* ============================ */
103934 +    Block functions produce and decode raw zstd blocks, without frame metadata.
103935 +    Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).
103936 +    But users will then have to manage the metadata needed to regenerate the data, such as compressed and content sizes.
103938 +    A few rules to respect :
103939 +    - Compressing and decompressing require a context structure
103940 +      + Use ZSTD_createCCtx() and ZSTD_createDCtx()
103941 +    - It is necessary to initialize the context before starting
103942 +      + compression : any ZSTD_compressBegin*() variant, including with dictionary
103943 +      + decompression : any ZSTD_decompressBegin*() variant, including with dictionary
103944 +      + copyCCtx() and copyDCtx() can be used too
103945 +    - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
103946 +      + If the input is larger than a block size, it's necessary to split the input data into multiple blocks.
103947 +      + For inputs larger than a single block, consider using regular ZSTD_compress() instead.
103948 +        Frame metadata is not that costly, and quickly becomes negligible as the source size grows beyond a block.
103949 +    - When a block is considered not compressible enough, the ZSTD_compressBlock() result will be 0 (zero) !
103950 +      ===> In that case, nothing is produced into `dst` !
103951 +      + The user __must__ test for such an outcome and deal directly with the uncompressed data
103952 +      + A block cannot be declared incompressible if the ZSTD_compressBlock() return value was != 0.
103953 +        Doing so would corrupt the statistics history, leading to potential data corruption.
103954 +      + ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !!
103955 +      + In case of multiple successive blocks, should some of them be uncompressed,
103956 +        the decoder must be informed of their existence in order to maintain a proper history.
103957 +        Use ZSTD_insertBlock() for such a case.
103960 +/*=====   Raw zstd block functions  =====*/
103961 +ZSTDLIB_API size_t ZSTD_getBlockSize   (const ZSTD_CCtx* cctx);
103962 +ZSTDLIB_API size_t ZSTD_compressBlock  (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
103963 +ZSTDLIB_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
103964 +ZSTDLIB_API size_t ZSTD_insertBlock    (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize);  /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */
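A sketch of the incompressible-block rule spelled out above: when ZSTD_compressBlock() returns 0 the producer must ship the block raw, and the consumer must announce it with ZSTD_insertBlock() so history stays consistent. How the is_raw flag travels between the two sides is application framing, assumed here.

#include <string.h>
#include <zstd.h>

/* Producer side: returns bytes written to dst; *is_raw tells the caller
 * whether dst holds raw or compressed data (signalled out of band). */
static size_t emit_block(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
                         const void *src, size_t srcSize, int *is_raw)
{
    size_t const csize = ZSTD_compressBlock(cctx, dst, dstCapacity, src, srcSize);
    if (ZSTD_isError(csize))
        return csize;
    if (csize == 0) {                 /* not compressible: ship it raw */
        if (srcSize > dstCapacity)
            return (size_t)-1;        /* sketch-only error signalling */
        memcpy(dst, src, srcSize);
        *is_raw = 1;
        return srcSize;
    }
    *is_raw = 0;
    return csize;
}

/* Consumer side: raw blocks are inserted into history, not decompressed. */
static size_t consume_block(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
                            const void *blk, size_t blkSize, int is_raw)
{
    if (is_raw) {
        if (blkSize > dstCapacity)
            return (size_t)-1;
        memcpy(dst, blk, blkSize);
        return ZSTD_isError(ZSTD_insertBlock(dctx, dst, blkSize))
               ? (size_t)-1 : blkSize;
    }
    return ZSTD_decompressBlock(dctx, dst, dstCapacity, blk, blkSize);
}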
103967 +#endif   /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */
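As a closing illustration for this header, a short writer sketch for the skippable-frame layout described earlier (4-byte little-endian magic, 4-byte little-endian size, then user content); byte-wise stores keep it endian-independent.

#include <stdint.h>
#include <string.h>

static size_t write_skippable_frame(void *dst, size_t dstCapacity,
                                    const void *data, uint32_t dataSize)
{
    unsigned char *op = (unsigned char *)dst;
    uint32_t const magic = 0x184D2A50;   /* any value up to 0x184D2A5F works */
    if (dstCapacity < 8u + dataSize)
        return 0;                        /* sketch-only error signalling */
    for (int i = 0; i < 4; i++) {
        op[i]     = (unsigned char)(magic    >> (8 * i));   /* little endian */
        op[4 + i] = (unsigned char)(dataSize >> (8 * i));
    }
    memcpy(op + 8, data, dataSize);
    return 8u + dataSize;
}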
103968 diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
103969 index 167ca8c8424f..2fe4019b749f 100644
103970 --- a/include/media/v4l2-ctrls.h
103971 +++ b/include/media/v4l2-ctrls.h
103972 @@ -301,12 +301,14 @@ struct v4l2_ctrl {
103973   *             the control has been applied. This prevents applying controls
103974   *             from a cluster with multiple controls twice (when the first
103975   *             control of a cluster is applied, they all are).
103976 - * @req:       If set, this refers to another request that sets this control.
103977 + * @valid_p_req: If set, then p_req contains the control value for the request.
103978   * @p_req:     If the control handler containing this control reference
103979   *             is bound to a media request, then this points to the
103980 - *             value of the control that should be applied when the request
103981 + *             value of the control that must be applied when the request
103982   *             is executed, or to the value of the control at the time
103983 - *             that the request was completed.
103984 + *             that the request was completed. If @valid_p_req is false,
103985 + *             then this control was never set for this request and the
103986 + *             control will not be updated when this request is applied.
103987   *
103988   * Each control handler has a list of these refs. The list_head is used to
103989   * keep a sorted-by-control-ID list of all controls, while the next pointer
103990 @@ -319,7 +321,7 @@ struct v4l2_ctrl_ref {
103991         struct v4l2_ctrl_helper *helper;
103992         bool from_other_dev;
103993         bool req_done;
103994 -       struct v4l2_ctrl_ref *req;
103995 +       bool valid_p_req;
103996         union v4l2_ctrl_ptr p_req;
103999 @@ -346,7 +348,7 @@ struct v4l2_ctrl_ref {
104000   * @error:     The error code of the first failed control addition.
104001   * @request_is_queued: True if the request was queued.
104002   * @requests:  List to keep track of open control handler request objects.
104003 - *             For the parent control handler (@req_obj.req == NULL) this
104004 + *             For the parent control handler (@req_obj.ops == NULL) this
104005   *             is the list header. When the parent control handler is
104006   *             removed, it has to unbind and put all these requests since
104007   *             they refer to the parent.
104008 diff --git a/include/net/addrconf.h b/include/net/addrconf.h
104009 index 18f783dcd55f..78ea3e332688 100644
104010 --- a/include/net/addrconf.h
104011 +++ b/include/net/addrconf.h
104012 @@ -233,7 +233,6 @@ void ipv6_mc_unmap(struct inet6_dev *idev);
104013  void ipv6_mc_remap(struct inet6_dev *idev);
104014  void ipv6_mc_init_dev(struct inet6_dev *idev);
104015  void ipv6_mc_destroy_dev(struct inet6_dev *idev);
104016 -int ipv6_mc_check_icmpv6(struct sk_buff *skb);
104017  int ipv6_mc_check_mld(struct sk_buff *skb);
104018  void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp);
104020 diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
104021 index ebdd4afe30d2..ca4ac6603b9a 100644
104022 --- a/include/net/bluetooth/hci_core.h
104023 +++ b/include/net/bluetooth/hci_core.h
104024 @@ -704,6 +704,7 @@ struct hci_chan {
104025         struct sk_buff_head data_q;
104026         unsigned int    sent;
104027         __u8            state;
104028 +       bool            amp;
104031  struct hci_conn_params {
104032 diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
104033 index 3c8c59471bc1..2cdc5a0709fe 100644
104034 --- a/include/net/inet_connection_sock.h
104035 +++ b/include/net/inet_connection_sock.h
104036 @@ -134,8 +134,9 @@ struct inet_connection_sock {
104037         u32                       icsk_probes_tstamp;
104038         u32                       icsk_user_timeout;
104040 -       u64                       icsk_ca_priv[104 / sizeof(u64)];
104041 -#define ICSK_CA_PRIV_SIZE      (13 * sizeof(u64))
104042 +/* XXX inflated by temporary internal debugging info */
104043 +#define ICSK_CA_PRIV_SIZE      (216)
104044 +       u64                       icsk_ca_priv[ICSK_CA_PRIV_SIZE / sizeof(u64)];
104047  #define ICSK_TIME_RETRANS      1       /* Retransmit timer */
104048 diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h
104049 index 1d34fe154fe0..434a6158852f 100644
104050 --- a/include/net/netfilter/nf_tables_offload.h
104051 +++ b/include/net/netfilter/nf_tables_offload.h
104052 @@ -4,11 +4,16 @@
104053  #include <net/flow_offload.h>
104054  #include <net/netfilter/nf_tables.h>
104056 +enum nft_offload_reg_flags {
104057 +       NFT_OFFLOAD_F_NETWORK2HOST      = (1 << 0),
104060  struct nft_offload_reg {
104061         u32             key;
104062         u32             len;
104063         u32             base_offset;
104064         u32             offset;
104065 +       u32             flags;
104066         struct nft_data data;
104067         struct nft_data mask;
104069 @@ -45,6 +50,7 @@ struct nft_flow_key {
104070         struct flow_dissector_key_ports                 tp;
104071         struct flow_dissector_key_ip                    ip;
104072         struct flow_dissector_key_vlan                  vlan;
104073 +       struct flow_dissector_key_vlan                  cvlan;
104074         struct flow_dissector_key_eth_addrs             eth_addrs;
104075         struct flow_dissector_key_meta                  meta;
104076  } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
104077 @@ -71,13 +77,17 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net, const struct nft_rul
104078  void nft_flow_rule_destroy(struct nft_flow_rule *flow);
104079  int nft_flow_rule_offload_commit(struct net *net);
104081 -#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)                \
104082 +#define NFT_OFFLOAD_MATCH_FLAGS(__key, __base, __field, __len, __reg, __flags) \
104083         (__reg)->base_offset    =                                       \
104084                 offsetof(struct nft_flow_key, __base);                  \
104085         (__reg)->offset         =                                       \
104086                 offsetof(struct nft_flow_key, __base.__field);          \
104087         (__reg)->len            = __len;                                \
104088         (__reg)->key            = __key;                                \
104089 +       (__reg)->flags          = __flags;
104091 +#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)                \
104092 +       NFT_OFFLOAD_MATCH_FLAGS(__key, __base, __field, __len, __reg, 0)
104094  #define NFT_OFFLOAD_MATCH_EXACT(__key, __base, __field, __len, __reg)  \
104095         NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)         \
104096 diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
104097 index 43c9c5d2bedb..33979017b782 100644
104098 --- a/include/net/nfc/nci_core.h
104099 +++ b/include/net/nfc/nci_core.h
104100 @@ -298,6 +298,7 @@ int nci_nfcc_loopback(struct nci_dev *ndev, void *data, size_t data_len,
104101                       struct sk_buff **resp);
104103  struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev);
104104 +void nci_hci_deallocate(struct nci_dev *ndev);
104105  int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event,
104106                        const u8 *param, size_t param_len);
104107  int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate,
104108 diff --git a/include/net/page_pool.h b/include/net/page_pool.h
104109 index b5b195305346..e05744b9a1bc 100644
104110 --- a/include/net/page_pool.h
104111 +++ b/include/net/page_pool.h
104112 @@ -198,7 +198,17 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
104114  static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
104116 -       return page->dma_addr;
104117 +       dma_addr_t ret = page->dma_addr[0];
104118 +       if (sizeof(dma_addr_t) > sizeof(unsigned long))
104119 +               ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
104120 +       return ret;
104123 +static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
104125 +       page->dma_addr[0] = addr;
104126 +       if (sizeof(dma_addr_t) > sizeof(unsigned long))
104127 +               page->dma_addr[1] = upper_32_bits(addr);
104130  static inline bool is_page_pool_compiled_in(void)
104131 diff --git a/include/net/tcp.h b/include/net/tcp.h
104132 index 963cd86d12dd..5a86fa1d2ff1 100644
104133 --- a/include/net/tcp.h
104134 +++ b/include/net/tcp.h
104135 @@ -799,6 +799,11 @@ static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
104136         return max_t(s64, t1 - t0, 0);
104139 +static inline u32 tcp_stamp32_us_delta(u32 t1, u32 t0)
104141 +       return max_t(s32, t1 - t0, 0);
104144  static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
104146         return tcp_ns_to_ts(skb->skb_mstamp_ns);
104147 @@ -866,16 +871,22 @@ struct tcp_skb_cb {
104148         __u32           ack_seq;        /* Sequence number ACK'd        */
104149         union {
104150                 struct {
104151 +#define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
104152                         /* There is space for up to 24 bytes */
104153 -                       __u32 in_flight:30,/* Bytes in flight at transmit */
104154 -                             is_app_limited:1, /* cwnd not fully used? */
104155 -                             unused:1;
104156 +                       __u32 is_app_limited:1, /* cwnd not fully used? */
104157 +                             delivered_ce:20,
104158 +                             unused:11;
104159                         /* pkts S/ACKed so far upon tx of skb, incl retrans: */
104160                         __u32 delivered;
104161                         /* start of send pipeline phase */
104162 -                       u64 first_tx_mstamp;
104163 +                       u32 first_tx_mstamp;
104164                         /* when we reached the "delivered" count */
104165 -                       u64 delivered_mstamp;
104166 +                       u32 delivered_mstamp;
104167 +#define TCPCB_IN_FLIGHT_BITS 20
104168 +#define TCPCB_IN_FLIGHT_MAX ((1U << TCPCB_IN_FLIGHT_BITS) - 1)
104169 +                       u32 in_flight:20,   /* packets in flight at transmit */
104170 +                           unused2:12;
104171 +                       u32 lost;       /* packets lost so far upon tx of skb */
104172                 } tx;   /* only used for outgoing skbs */
104173                 union {
104174                         struct inet_skb_parm    h4;
104175 @@ -1025,7 +1036,11 @@ enum tcp_ca_ack_event_flags {
104176  #define TCP_CONG_NON_RESTRICTED 0x1
104177  /* Requires ECN/ECT set on all packets */
104178  #define TCP_CONG_NEEDS_ECN     0x2
104179 -#define TCP_CONG_MASK  (TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)
104180 +/* Wants notification of CE events (CA_EVENT_ECN_IS_CE, CA_EVENT_ECN_NO_CE). */
104181 +#define TCP_CONG_WANTS_CE_EVENTS       0x4
104182 +#define TCP_CONG_MASK  (TCP_CONG_NON_RESTRICTED | \
104183 +                        TCP_CONG_NEEDS_ECN | \
104184 +                        TCP_CONG_WANTS_CE_EVENTS)
104186  union tcp_cc_info;
104188 @@ -1045,8 +1060,13 @@ struct ack_sample {
104189   */
104190  struct rate_sample {
104191         u64  prior_mstamp; /* starting timestamp for interval */
104192 +       u32  prior_lost;        /* tp->lost at "prior_mstamp" */
104193         u32  prior_delivered;   /* tp->delivered at "prior_mstamp" */
104194 +       u32  prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
104195 +       u32 tx_in_flight;       /* packets in flight at starting timestamp */
104196 +       s32  lost;              /* number of packets lost over interval */
104197         s32  delivered;         /* number of packets delivered over interval */
104198 +       s32  delivered_ce;      /* packets delivered w/ CE mark over interval */
104199         long interval_us;       /* time for tp->delivered to incr "delivered" */
104200         u32 snd_interval_us;    /* snd interval for delivered packets */
104201         u32 rcv_interval_us;    /* rcv interval for delivered packets */
104202 @@ -1057,6 +1077,7 @@ struct rate_sample {
104203         bool is_app_limited;    /* is sample from packet with bubble in pipe? */
104204         bool is_retrans;        /* is sample from retransmission? */
104205         bool is_ack_delayed;    /* is this (likely) a delayed ACK? */
104206 +       bool is_ece;            /* did this ACK have ECN marked? */
104209  struct tcp_congestion_ops {
104210 @@ -1083,10 +1104,12 @@ struct tcp_congestion_ops {
104211         u32  (*undo_cwnd)(struct sock *sk);
104212         /* hook for packet ack accounting (optional) */
104213         void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
104214 -       /* override sysctl_tcp_min_tso_segs */
104215 -       u32 (*min_tso_segs)(struct sock *sk);
104216 +       /* pick target number of segments per TSO/GSO skb (optional): */
104217 +       u32 (*tso_segs)(struct sock *sk, unsigned int mss_now);
104218         /* returns the multiplier used in tcp_sndbuf_expand (optional) */
104219         u32 (*sndbuf_expand)(struct sock *sk);
104220 +       /* react to a specific lost skb (optional) */
104221 +       void (*skb_marked_lost)(struct sock *sk, const struct sk_buff *skb);
104222         /* call when packets are delivered to update cwnd and pacing rate,
104223          * after all the ca_state processing. (optional)
104224          */
104225 @@ -1132,6 +1155,14 @@ static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
104227  #endif
104229 +static inline bool tcp_ca_wants_ce_events(const struct sock *sk)
104231 +       const struct inet_connection_sock *icsk = inet_csk(sk);
104233 +       return icsk->icsk_ca_ops->flags & (TCP_CONG_NEEDS_ECN |
104234 +                                          TCP_CONG_WANTS_CE_EVENTS);
104237  static inline bool tcp_ca_needs_ecn(const struct sock *sk)
104239         const struct inet_connection_sock *icsk = inet_csk(sk);
104240 @@ -1157,6 +1188,7 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
104243  /* From tcp_rate.c */
104244 +void tcp_set_tx_in_flight(struct sock *sk, struct sk_buff *skb);
104245  void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
104246  void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
104247                             struct rate_sample *rs);
104248 diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
104249 index 2568cb0627ec..fac8e89aed81 100644
104250 --- a/include/scsi/libfcoe.h
104251 +++ b/include/scsi/libfcoe.h
104252 @@ -249,7 +249,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *,
104253                          struct fc_frame *);
104255  /* libfcoe funcs */
104256 -u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int);
104257 +u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], unsigned int, unsigned int);
104258  int fcoe_libfc_config(struct fc_lport *, struct fcoe_ctlr *,
104259                       const struct libfc_function_template *, int init_fcp);
104260  u32 fcoe_fc_crc(struct fc_frame *fp);
104261 diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
104262 index 036eb1f5c133..2f01314de73a 100644
104263 --- a/include/trace/events/sunrpc.h
104264 +++ b/include/trace/events/sunrpc.h
104265 @@ -1141,7 +1141,6 @@ DECLARE_EVENT_CLASS(xprt_writelock_event,
104267  DEFINE_WRITELOCK_EVENT(reserve_xprt);
104268  DEFINE_WRITELOCK_EVENT(release_xprt);
104269 -DEFINE_WRITELOCK_EVENT(transmit_queued);
104271  DECLARE_EVENT_CLASS(xprt_cong_event,
104272         TP_PROTO(
104273 diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
104274 index ce58cff99b66..2778da551846 100644
104275 --- a/include/uapi/asm-generic/unistd.h
104276 +++ b/include/uapi/asm-generic/unistd.h
104277 @@ -864,8 +864,20 @@ __SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
104278  #define __NR_mount_setattr 442
104279  __SYSCALL(__NR_mount_setattr, sys_mount_setattr)
104281 +#define __NR_futex_wait 443
104282 +__SYSCALL(__NR_futex_wait, sys_futex_wait)
104284 +#define __NR_futex_wake 444
104285 +__SYSCALL(__NR_futex_wake, sys_futex_wake)
104287 +#define __NR_futex_waitv 445
104288 +__SC_COMP(__NR_futex_waitv, sys_futex_waitv, compat_sys_futex_waitv)
104290 +#define __NR_futex_requeue 446
104291 +__SC_COMP(__NR_futex_requeue, sys_futex_requeue, compat_sys_futex_requeue)
104293  #undef __NR_syscalls
104294 -#define __NR_syscalls 443
104295 +#define __NR_syscalls 447
104298   * 32 bit systems traditionally used different
104299 diff --git a/include/uapi/linux/futex.h b/include/uapi/linux/futex.h
104300 index a89eb0accd5e..afc3245e5728 100644
104301 --- a/include/uapi/linux/futex.h
104302 +++ b/include/uapi/linux/futex.h
104303 @@ -21,6 +21,7 @@
104304  #define FUTEX_WAKE_BITSET      10
104305  #define FUTEX_WAIT_REQUEUE_PI  11
104306  #define FUTEX_CMP_REQUEUE_PI   12
104307 +#define FUTEX_WAIT_MULTIPLE    31
104309  #define FUTEX_PRIVATE_FLAG     128
104310  #define FUTEX_CLOCK_REALTIME   256
104311 @@ -40,6 +41,39 @@
104312                                          FUTEX_PRIVATE_FLAG)
104313  #define FUTEX_CMP_REQUEUE_PI_PRIVATE   (FUTEX_CMP_REQUEUE_PI | \
104314                                          FUTEX_PRIVATE_FLAG)
104315 +#define FUTEX_WAIT_MULTIPLE_PRIVATE    (FUTEX_WAIT_MULTIPLE | \
104316 +                                        FUTEX_PRIVATE_FLAG)
104318 +/* Size argument to futex2 syscall */
104319 +#define FUTEX_32       2
104321 +#define FUTEX_SIZE_MASK        0x3
104323 +#define FUTEX_SHARED_FLAG 8
104325 +#define FUTEX_WAITV_MAX 128
104328 + * struct futex_waitv - A waiter for vectorized wait
104329 + * @uaddr: User address to wait on
104330 + * @val:   Expected value at uaddr
104331 + * @flags: Flags for this waiter
104332 + */
104333 +struct futex_waitv {
104334 +       void __user *uaddr;
104335 +       unsigned int val;
104336 +       unsigned int flags;
104340 + * struct futex_requeue - Define an address and its flags for requeue operation
104341 + * @uaddr: User address of one of the requeue arguments
104342 + * @flags: Flags for this address
104343 + */
104344 +struct futex_requeue {
104345 +       void __user *uaddr;
104346 +       unsigned int flags;
104350   * Support for robust futexes: the kernel cleans up held futexes at
104351 @@ -150,4 +184,21 @@ struct robust_list_head {
104352    (((op & 0xf) << 28) | ((cmp & 0xf) << 24)            \
104353     | ((oparg & 0xfff) << 12) | (cmparg & 0xfff))
104356 + * Maximum number of multiple futexes to wait for
104357 + */
104358 +#define FUTEX_MULTIPLE_MAX_COUNT       128
104361 + * struct futex_wait_block - Block of futexes to be waited for
104362 + * @uaddr:     User address of the futex
104363 + * @val:       Futex value expected by userspace
104364 + * @bitset:    Bitset for the optional bitmasked wakeup
104365 + */
104366 +struct futex_wait_block {
104367 +       __u32 __user *uaddr;
104368 +       __u32 val;
104369 +       __u32 bitset;
104372  #endif /* _UAPI_LINUX_FUTEX_H */
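A hypothetical userspace sketch of the vectorized wait added by this hunk. The syscall number is read off the asm-generic table above; the 4-argument shape and the FUTEX_32 flag value are assumptions taken from this futex2 series (the interface changed before reaching mainline), so treat this purely as an illustration of struct futex_waitv.

#include <stdint.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_futex_waitv
#define __NR_futex_waitv 445            /* from the unistd.h hunk above */
#endif

struct futex_waitv_compat {             /* mirrors the UAPI struct above */
    void *uaddr;
    unsigned int val;
    unsigned int flags;
};

/* Wait until one of two 32-bit futexes no longer holds its expected value. */
static long wait_any(uint32_t *a, uint32_t va, uint32_t *b, uint32_t vb,
                     struct timespec *timeout)
{
    struct futex_waitv_compat w[2] = {
        { a, va, 2 /* FUTEX_32, per the define above */ },
        { b, vb, 2 },
    };
    /* Argument order (waiters, nr, flags, timeout) is an assumption. */
    return syscall(__NR_futex_waitv, w, 2, 0, timeout);
}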
104373 diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
104374 index 20ee93f0f876..96d52dd9c48a 100644
104375 --- a/include/uapi/linux/inet_diag.h
104376 +++ b/include/uapi/linux/inet_diag.h
104377 @@ -231,9 +231,42 @@ struct tcp_bbr_info {
104378         __u32   bbr_cwnd_gain;          /* cwnd gain shifted left 8 bits */
104381 +/* Phase as reported in netlink/ss stats. */
104382 +enum tcp_bbr2_phase {
104383 +       BBR2_PHASE_INVALID              = 0,
104384 +       BBR2_PHASE_STARTUP              = 1,
104385 +       BBR2_PHASE_DRAIN                = 2,
104386 +       BBR2_PHASE_PROBE_RTT            = 3,
104387 +       BBR2_PHASE_PROBE_BW_UP          = 4,
104388 +       BBR2_PHASE_PROBE_BW_DOWN        = 5,
104389 +       BBR2_PHASE_PROBE_BW_CRUISE      = 6,
104390 +       BBR2_PHASE_PROBE_BW_REFILL      = 7
104393 +struct tcp_bbr2_info {
104394 +       /* u64 bw: bandwidth (app throughput) estimate in Byte per sec: */
104395 +       __u32   bbr_bw_lsb;             /* lower 32 bits of bw */
104396 +       __u32   bbr_bw_msb;             /* upper 32 bits of bw */
104397 +       __u32   bbr_min_rtt;            /* min-filtered RTT in uSec */
104398 +       __u32   bbr_pacing_gain;        /* pacing gain shifted left 8 bits */
104399 +       __u32   bbr_cwnd_gain;          /* cwnd gain shifted left 8 bits */
104400 +       __u32   bbr_bw_hi_lsb;          /* lower 32 bits of bw_hi */
104401 +       __u32   bbr_bw_hi_msb;          /* upper 32 bits of bw_hi */
104402 +       __u32   bbr_bw_lo_lsb;          /* lower 32 bits of bw_lo */
104403 +       __u32   bbr_bw_lo_msb;          /* upper 32 bits of bw_lo */
104404 +       __u8    bbr_mode;               /* current bbr_mode in state machine */
104405 +       __u8    bbr_phase;              /* current state machine phase */
104406 +       __u8    unused1;                /* alignment padding; not used yet */
104407 +       __u8    bbr_version;            /* MUST be at this offset in struct */
104408 +       __u32   bbr_inflight_lo;        /* lower/short-term data volume bound */
104409 +       __u32   bbr_inflight_hi;        /* higher/long-term data volume bound */
104410 +       __u32   bbr_extra_acked;        /* max excess packets ACKed in epoch */
104413  union tcp_cc_info {
104414         struct tcpvegas_info    vegas;
104415         struct tcp_dctcp_info   dctcp;
104416         struct tcp_bbr_info     bbr;
104417 +       struct tcp_bbr2_info    bbr2;
104419  #endif /* _UAPI_INET_DIAG_H_ */
104420 diff --git a/include/uapi/linux/netfilter/xt_SECMARK.h b/include/uapi/linux/netfilter/xt_SECMARK.h
104421 index 1f2a708413f5..beb2cadba8a9 100644
104422 --- a/include/uapi/linux/netfilter/xt_SECMARK.h
104423 +++ b/include/uapi/linux/netfilter/xt_SECMARK.h
104424 @@ -20,4 +20,10 @@ struct xt_secmark_target_info {
104425         char secctx[SECMARK_SECCTX_MAX];
104428 +struct xt_secmark_target_info_v1 {
104429 +       __u8 mode;
104430 +       char secctx[SECMARK_SECCTX_MAX];
104431 +       __u32 secid;
104434  #endif /*_XT_SECMARK_H_target */
104435 diff --git a/include/uapi/linux/tty_flags.h b/include/uapi/linux/tty_flags.h
104436 index 900a32e63424..6a3ac496a56c 100644
104437 --- a/include/uapi/linux/tty_flags.h
104438 +++ b/include/uapi/linux/tty_flags.h
104439 @@ -39,7 +39,7 @@
104440   * WARNING: These flags are no longer used and have been superseded by the
104441   *         TTY_PORT_ flags in the iflags field (and not userspace-visible)
104442   */
104443 -#ifndef _KERNEL_
104444 +#ifndef __KERNEL__
104445  #define ASYNCB_INITIALIZED     31 /* Serial port was initialized */
104446  #define ASYNCB_SUSPENDED       30 /* Serial port is suspended */
104447  #define ASYNCB_NORMAL_ACTIVE   29 /* Normal device is active */
104448 @@ -81,7 +81,7 @@
104449  #define ASYNC_SPD_WARP         (ASYNC_SPD_HI|ASYNC_SPD_SHI)
104450  #define ASYNC_SPD_MASK         (ASYNC_SPD_HI|ASYNC_SPD_VHI|ASYNC_SPD_SHI)
104452 -#ifndef _KERNEL_
104453 +#ifndef __KERNEL__
104454  /* These flags are no longer used (and were always masked from userspace) */
104455  #define ASYNC_INITIALIZED      (1U << ASYNCB_INITIALIZED)
104456  #define ASYNC_NORMAL_ACTIVE    (1U << ASYNCB_NORMAL_ACTIVE)
104457 diff --git a/include/uapi/linux/usb/video.h b/include/uapi/linux/usb/video.h
104458 index d854cb19c42c..bfdae12cdacf 100644
104459 --- a/include/uapi/linux/usb/video.h
104460 +++ b/include/uapi/linux/usb/video.h
104461 @@ -302,9 +302,10 @@ struct uvc_processing_unit_descriptor {
104462         __u8   bControlSize;
104463         __u8   bmControls[2];
104464         __u8   iProcessing;
104465 +       __u8   bmVideoStandards;
104466  } __attribute__((__packed__));
104468 -#define UVC_DT_PROCESSING_UNIT_SIZE(n)                 (9+(n))
104469 +#define UVC_DT_PROCESSING_UNIT_SIZE(n)                 (10+(n))
104471  /* 3.7.2.6. Extension Unit Descriptor */
104472  struct uvc_extension_unit_descriptor {
104473 diff --git a/init/Kconfig b/init/Kconfig
104474 index 5f5c776ef192..b8054b654d61 100644
104475 --- a/init/Kconfig
104476 +++ b/init/Kconfig
104477 @@ -1220,6 +1220,18 @@ config SCHED_AUTOGROUP
104478           desktop applications.  Task group autogeneration is currently based
104479           upon task session.
104481 +config SCHED_AUTOGROUP_DEFAULT_ENABLED
104482 +       bool "Enable automatic process group scheduling feature"
104483 +       default y
104484 +       depends on SCHED_AUTOGROUP
104485 +       help
104486 +         If set, automatic process group scheduling will be enabled by
104487 +         default, but it can be disabled by passing autogroup=0 on the
104488 +         kernel command line during boot, or by writing 0 to the file
104489 +         /proc/sys/kernel/sched_autogroup_enabled.
104491 +         If unsure, say Y.
104493  config SYSFS_DEPRECATED
104494         bool "Enable deprecated sysfs features to support old userspace tools"
104495         depends on SYSFS
104496 @@ -1316,7 +1328,6 @@ config CC_OPTIMIZE_FOR_PERFORMANCE
104498  config CC_OPTIMIZE_FOR_PERFORMANCE_O3
104499         bool "Optimize more for performance (-O3)"
104500 -       depends on ARC
104501         help
104502           Choosing this option will pass "-O3" to your compiler to optimize
104503           the kernel yet more for performance.
104504 @@ -1537,6 +1548,13 @@ config FUTEX
104505           support for "fast userspace mutexes".  The resulting kernel may not
104506           run glibc-based applications correctly.
104508 +config FUTEX2
104509 +       bool "Enable futex2 support" if EXPERT
104510 +       depends on FUTEX
104511 +       default y
104512 +       help
104513 +         Support for the futex2 interface.
104515  config FUTEX_PI
104516         bool
104517         depends on FUTEX && RT_MUTEXES
104518 @@ -2217,8 +2235,8 @@ config MODULE_COMPRESS
104519         bool "Compress modules on installation"
104520         help
104522 -         Compresses kernel modules when 'make modules_install' is run; gzip or
104523 -         xz depending on "Compression algorithm" below.
104524 +         Compresses kernel modules when 'make modules_install' is run; gzip,
104525 +         xz, or zstd depending on "Compression algorithm" below.
104527           module-init-tools MAY support gzip, and kmod MAY support gzip and xz.
104529 @@ -2240,7 +2258,7 @@ choice
104530           This determines which sort of compression will be used during
104531           'make modules_install'.
104533 -         GZIP (default) and XZ are supported.
104534 +         GZIP (default), XZ, and ZSTD are supported.
104536  config MODULE_COMPRESS_GZIP
104537         bool "GZIP"
104538 @@ -2248,6 +2266,9 @@ config MODULE_COMPRESS_GZIP
104539  config MODULE_COMPRESS_XZ
104540         bool "XZ"
104542 +config MODULE_COMPRESS_ZSTD
104543 +       bool "ZSTD"
104545  endchoice
104547  config MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
104548 diff --git a/init/init_task.c b/init/init_task.c
104549 index 3711cdaafed2..8b08c2e19cbb 100644
104550 --- a/init/init_task.c
104551 +++ b/init/init_task.c
104552 @@ -210,7 +210,7 @@ struct task_struct init_task
104553  #ifdef CONFIG_SECURITY
104554         .security       = NULL,
104555  #endif
104556 -#ifdef CONFIG_SECCOMP
104557 +#ifdef CONFIG_SECCOMP_FILTER
104558         .seccomp        = { .filter_count = ATOMIC_INIT(0) },
104559  #endif
104561 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
104562 index 8031464ed4ae..4e4e61111500 100644
104563 --- a/ipc/mqueue.c
104564 +++ b/ipc/mqueue.c
104565 @@ -1004,12 +1004,14 @@ static inline void __pipelined_op(struct wake_q_head *wake_q,
104566                                   struct mqueue_inode_info *info,
104567                                   struct ext_wait_queue *this)
104569 +       struct task_struct *task;
104571         list_del(&this->list);
104572 -       get_task_struct(this->task);
104573 +       task = get_task_struct(this->task);
104575         /* see MQ_BARRIER for purpose/pairing */
104576         smp_store_release(&this->state, STATE_READY);
104577 -       wake_q_add_safe(wake_q, this->task);
104578 +       wake_q_add_safe(wake_q, task);
104581  /* pipelined_send() - send a message directly to the task waiting in
104582 diff --git a/ipc/msg.c b/ipc/msg.c
104583 index acd1bc7af55a..6e6c8e0c9380 100644
104584 --- a/ipc/msg.c
104585 +++ b/ipc/msg.c
104586 @@ -251,11 +251,13 @@ static void expunge_all(struct msg_queue *msq, int res,
104587         struct msg_receiver *msr, *t;
104589         list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
104590 -               get_task_struct(msr->r_tsk);
104591 +               struct task_struct *r_tsk;
104593 +               r_tsk = get_task_struct(msr->r_tsk);
104595                 /* see MSG_BARRIER for purpose/pairing */
104596                 smp_store_release(&msr->r_msg, ERR_PTR(res));
104597 -               wake_q_add_safe(wake_q, msr->r_tsk);
104598 +               wake_q_add_safe(wake_q, r_tsk);
104599         }
104602 diff --git a/ipc/namespace.c b/ipc/namespace.c
104603 index 7bd0766ddc3b..2bb05b2dacd1 100644
104604 --- a/ipc/namespace.c
104605 +++ b/ipc/namespace.c
104606 @@ -172,6 +172,23 @@ void put_ipc_ns(struct ipc_namespace *ns)
104607                         schedule_work(&free_ipc_work);
104608         }
104610 +EXPORT_SYMBOL(put_ipc_ns);
104612 +struct ipc_namespace *get_ipc_ns_exported(struct ipc_namespace *ns)
104614 +       return get_ipc_ns(ns);
104616 +EXPORT_SYMBOL(get_ipc_ns_exported);
104618 +struct ipc_namespace *show_init_ipc_ns(void)
104620 +#if defined(CONFIG_IPC_NS)
104621 +       return &init_ipc_ns;
104622 +#else
104623 +       return NULL;
104624 +#endif
104626 +EXPORT_SYMBOL(show_init_ipc_ns);
104628  static inline struct ipc_namespace *to_ipc_ns(struct ns_common *ns)
104630 diff --git a/ipc/sem.c b/ipc/sem.c
104631 index f6c30a85dadf..7d9c06b0ad6e 100644
104632 --- a/ipc/sem.c
104633 +++ b/ipc/sem.c
104634 @@ -784,12 +784,14 @@ static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
104635  static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
104636                                              struct wake_q_head *wake_q)
104638 -       get_task_struct(q->sleeper);
104639 +       struct task_struct *sleeper;
104641 +       sleeper = get_task_struct(q->sleeper);
104643 +       /* see SEM_BARRIER_2 for purpose/pairing */
104644         smp_store_release(&q->status, error);
104646 -       wake_q_add_safe(wake_q, q->sleeper);
104647 +       wake_q_add_safe(wake_q, sleeper);
104650  static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
104651 diff --git a/kernel/.gitignore b/kernel/.gitignore
104652 index 78701ea37c97..5518835ac35c 100644
104653 --- a/kernel/.gitignore
104654 +++ b/kernel/.gitignore
104655 @@ -1,4 +1,5 @@
104656  # SPDX-License-Identifier: GPL-2.0-only
104657 +/config_data
104658  kheaders.md5
104659  timeconst.h
104660  hz.bc
104661 diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
104662 index 38ef6d06888e..b4a1995149d0 100644
104663 --- a/kernel/Kconfig.hz
104664 +++ b/kernel/Kconfig.hz
104665 @@ -5,7 +5,7 @@
104667  choice
104668         prompt "Timer frequency"
104669 -       default HZ_250
104670 +       default HZ_500
104671         help
104672          Allows the configuration of the timer frequency. It is customary
104673          to have the timer interrupt run at 1000 Hz but 100 Hz may be more
104674 @@ -40,6 +40,13 @@ choice
104675          on SMP and NUMA systems and exactly dividing by both PAL and
104676          NTSC frame rates for video and multimedia work.
104678 +       config HZ_500
104679 +               bool "500 HZ"
104680 +       help
104681 +        500 Hz is a balanced timer frequency. It provides fast interactivity
104682 +        on desktops with great smoothness, without increasing CPU power
104683 +        consumption or sacrificing battery life on laptops.
104685         config HZ_1000
104686                 bool "1000 HZ"
104687         help
104688 @@ -53,6 +60,7 @@ config HZ
104689         default 100 if HZ_100
104690         default 250 if HZ_250
104691         default 300 if HZ_300
104692 +       default 500 if HZ_500
104693         default 1000 if HZ_1000
104695  config SCHED_HRTICK
104696 diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
104697 index 416017301660..293725c44cbb 100644
104698 --- a/kernel/Kconfig.preempt
104699 +++ b/kernel/Kconfig.preempt
104700 @@ -2,7 +2,7 @@
104702  choice
104703         prompt "Preemption Model"
104704 -       default PREEMPT_NONE
104705 +       default PREEMPT
104707  config PREEMPT_NONE
104708         bool "No Forced Preemption (Server)"
104709 diff --git a/kernel/Makefile b/kernel/Makefile
104710 index 320f1f3941b7..caf7fca27b62 100644
104711 --- a/kernel/Makefile
104712 +++ b/kernel/Makefile
104713 @@ -57,6 +57,7 @@ obj-$(CONFIG_PROFILING) += profile.o
104714  obj-$(CONFIG_STACKTRACE) += stacktrace.o
104715  obj-y += time/
104716  obj-$(CONFIG_FUTEX) += futex.o
104717 +obj-$(CONFIG_FUTEX2) += futex2.o
104718  obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
104719  obj-$(CONFIG_SMP) += smp.o
104720  ifneq ($(CONFIG_SMP),y)
104721 @@ -138,10 +139,15 @@ obj-$(CONFIG_SCF_TORTURE_TEST) += scftorture.o
104723  $(obj)/configs.o: $(obj)/config_data.gz
104725 -targets += config_data.gz
104726 -$(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE
104727 +targets += config_data config_data.gz
104728 +$(obj)/config_data.gz: $(obj)/config_data FORCE
104729         $(call if_changed,gzip)
104731 +filechk_cat = cat $<
104733 +$(obj)/config_data: $(KCONFIG_CONFIG) FORCE
104734 +       $(call filechk,cat)
104736  $(obj)/kheaders.o: $(obj)/kheaders_data.tar.xz
104738  quiet_cmd_genikh = CHK     $(obj)/kheaders_data.tar.xz
104739 diff --git a/kernel/bounds.c b/kernel/bounds.c
104740 index 9795d75b09b2..a8cbf2d0b11a 100644
104741 --- a/kernel/bounds.c
104742 +++ b/kernel/bounds.c
104743 @@ -22,6 +22,12 @@ int main(void)
104744         DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS));
104745  #endif
104746         DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
104747 +#ifdef CONFIG_LRU_GEN
104748 +       /* bits needed to represent internal values stored in page->flags */
104749 +       DEFINE(LRU_GEN_WIDTH, order_base_2(CONFIG_NR_LRU_GENS + 1));
104750 +       /* bits needed to represent normalized values for external uses */
104751 +       DEFINE(LRU_GEN_SHIFT, order_base_2(CONFIG_NR_LRU_GENS));
104752 +#endif
104753         /* End of constants */
104755         return 0;
104756 diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
104757 index f25b719ac786..84b3b35fc0d0 100644
104758 --- a/kernel/bpf/ringbuf.c
104759 +++ b/kernel/bpf/ringbuf.c
104760 @@ -221,25 +221,20 @@ static int ringbuf_map_get_next_key(struct bpf_map *map, void *key,
104761         return -ENOTSUPP;
104764 -static size_t bpf_ringbuf_mmap_page_cnt(const struct bpf_ringbuf *rb)
104766 -       size_t data_pages = (rb->mask + 1) >> PAGE_SHIFT;
104768 -       /* consumer page + producer page + 2 x data pages */
104769 -       return RINGBUF_POS_PAGES + 2 * data_pages;
104772  static int ringbuf_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
104774         struct bpf_ringbuf_map *rb_map;
104775 -       size_t mmap_sz;
104777         rb_map = container_of(map, struct bpf_ringbuf_map, map);
104778 -       mmap_sz = bpf_ringbuf_mmap_page_cnt(rb_map->rb) << PAGE_SHIFT;
104780 -       if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) > mmap_sz)
104781 -               return -EINVAL;
104783 +       if (vma->vm_flags & VM_WRITE) {
104784 +               /* allow writable mapping for the consumer_pos only */
104785 +               if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
104786 +                       return -EPERM;
104787 +       } else {
104788 +               vma->vm_flags &= ~VM_MAYWRITE;
104789 +       }
104790 +       /* remap_vmalloc_range() checks size and offset constraints */
104791         return remap_vmalloc_range(vma, rb_map->rb,
104792                                    vma->vm_pgoff + RINGBUF_PGOFF);
104794 @@ -315,6 +310,9 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
104795                 return NULL;
104797         len = round_up(size + BPF_RINGBUF_HDR_SZ, 8);
104798 +       if (len > rb->mask + 1)
104799 +               return NULL;
104801         cons_pos = smp_load_acquire(&rb->consumer_pos);
104803         if (in_nmi()) {
104804 diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
104805 index 0399ac092b36..21247e49fe82 100644
104806 --- a/kernel/bpf/verifier.c
104807 +++ b/kernel/bpf/verifier.c
104808 @@ -1362,9 +1362,7 @@ static bool __reg64_bound_s32(s64 a)
104810  static bool __reg64_bound_u32(u64 a)
104812 -       if (a > U32_MIN && a < U32_MAX)
104813 -               return true;
104814 -       return false;
104815 +       return a > U32_MIN && a < U32_MAX;
104818  static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
104819 @@ -1375,10 +1373,10 @@ static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
104820                 reg->s32_min_value = (s32)reg->smin_value;
104821                 reg->s32_max_value = (s32)reg->smax_value;
104822         }
104823 -       if (__reg64_bound_u32(reg->umin_value))
104824 +       if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
104825                 reg->u32_min_value = (u32)reg->umin_value;
104826 -       if (__reg64_bound_u32(reg->umax_value))
104827                 reg->u32_max_value = (u32)reg->umax_value;
104828 +       }
104830         /* Intersecting with the old var_off might have improved our bounds
104831          * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
104832 @@ -5865,18 +5863,10 @@ enum {
104835  static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
104836 -                             const struct bpf_reg_state *off_reg,
104837 -                             u32 *alu_limit, u8 opcode)
104838 +                             u32 *alu_limit, bool mask_to_left)
104840 -       bool off_is_neg = off_reg->smin_value < 0;
104841 -       bool mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
104842 -                           (opcode == BPF_SUB && !off_is_neg);
104843         u32 max = 0, ptr_limit = 0;
104845 -       if (!tnum_is_const(off_reg->var_off) &&
104846 -           (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
104847 -               return REASON_BOUNDS;
104849         switch (ptr_reg->type) {
104850         case PTR_TO_STACK:
104851                 /* Offset 0 is out-of-bounds, but acceptable start for the
104852 @@ -5942,16 +5932,22 @@ static bool sanitize_needed(u8 opcode)
104853         return opcode == BPF_ADD || opcode == BPF_SUB;
104856 +struct bpf_sanitize_info {
104857 +       struct bpf_insn_aux_data aux;
104858 +       bool mask_to_left;
104861  static int sanitize_ptr_alu(struct bpf_verifier_env *env,
104862                             struct bpf_insn *insn,
104863                             const struct bpf_reg_state *ptr_reg,
104864                             const struct bpf_reg_state *off_reg,
104865                             struct bpf_reg_state *dst_reg,
104866 -                           struct bpf_insn_aux_data *tmp_aux,
104867 +                           struct bpf_sanitize_info *info,
104868                             const bool commit_window)
104870 -       struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : tmp_aux;
104871 +       struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux;
104872         struct bpf_verifier_state *vstate = env->cur_state;
104873 +       bool off_is_imm = tnum_is_const(off_reg->var_off);
104874         bool off_is_neg = off_reg->smin_value < 0;
104875         bool ptr_is_dst_reg = ptr_reg == dst_reg;
104876         u8 opcode = BPF_OP(insn->code);
104877 @@ -5970,7 +5966,16 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
104878         if (vstate->speculative)
104879                 goto do_sim;
104881 -       err = retrieve_ptr_limit(ptr_reg, off_reg, &alu_limit, opcode);
104882 +       if (!commit_window) {
104883 +               if (!tnum_is_const(off_reg->var_off) &&
104884 +                   (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
104885 +                       return REASON_BOUNDS;
104887 +               info->mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
104888 +                                    (opcode == BPF_SUB && !off_is_neg);
104889 +       }
104891 +       err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left);
104892         if (err < 0)
104893                 return err;
104895 @@ -5978,10 +5983,11 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
104896                 /* In commit phase we narrow the masking window based on
104897                  * the observed pointer move after the simulated operation.
104898                  */
104899 -               alu_state = tmp_aux->alu_state;
104900 -               alu_limit = abs(tmp_aux->alu_limit - alu_limit);
104901 +               alu_state = info->aux.alu_state;
104902 +               alu_limit = abs(info->aux.alu_limit - alu_limit);
104903         } else {
104904                 alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
104905 +               alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
104906                 alu_state |= ptr_is_dst_reg ?
104907                              BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
104908         }
104909 @@ -5993,8 +5999,12 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
104910         /* If we're in commit phase, we're done here given we already
104911          * pushed the truncated dst_reg into the speculative verification
104912          * stack.
104913 +        *
104914 +        * Also, when register is a known constant, we rewrite register-based
104915 +        * operation to immediate-based, and thus do not need masking (and as
104916 +        * a consequence, do not need to simulate the zero-truncation either).
104917          */
104918 -       if (commit_window)
104919 +       if (commit_window || off_is_imm)
104920                 return 0;
104922         /* Simulate and find potential out-of-bounds access under
104923 @@ -6139,7 +6149,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
104924             smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
104925         u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
104926             umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
104927 -       struct bpf_insn_aux_data tmp_aux = {};
104928 +       struct bpf_sanitize_info info = {};
104929         u8 opcode = BPF_OP(insn->code);
104930         u32 dst = insn->dst_reg;
104931         int ret;
104932 @@ -6208,7 +6218,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
104934         if (sanitize_needed(opcode)) {
104935                 ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
104936 -                                      &tmp_aux, false);
104937 +                                      &info, false);
104938                 if (ret < 0)
104939                         return sanitize_err(env, insn, ret, off_reg, dst_reg);
104940         }
104941 @@ -6349,7 +6359,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
104942                 return -EACCES;
104943         if (sanitize_needed(opcode)) {
104944                 ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
104945 -                                      &tmp_aux, true);
104946 +                                      &info, true);
104947                 if (ret < 0)
104948                         return sanitize_err(env, insn, ret, off_reg, dst_reg);
104949         }
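
The split into a pre-pass and a commit pass (commit_window) lets the verifier narrow the runtime mask to the pointer movement it actually observed: the limit computed before the simulated operation minus the limit computed after it. A worked example with illustrative numbers:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        /* Pre-pass: 48 bytes of room from the old pointer position.
         * Commit pass: 16 bytes of room remain after the pointer moved.
         * The masking window is narrowed to the 32 bytes actually moved,
         * mirroring alu_limit = abs(info->aux.alu_limit - alu_limit). */
        int pre_limit = 48;
        int commit_limit = 16;

        printf("alu_limit = %d\n", abs(pre_limit - commit_limit));
        return 0;
}
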
104950 @@ -6538,11 +6548,10 @@ static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
104951         s32 smin_val = src_reg->s32_min_value;
104952         u32 umax_val = src_reg->u32_max_value;
104954 -       /* Assuming scalar64_min_max_and will be called so its safe
104955 -        * to skip updating register for known 32-bit case.
104956 -        */
104957 -       if (src_known && dst_known)
104958 +       if (src_known && dst_known) {
104959 +               __mark_reg32_known(dst_reg, var32_off.value);
104960                 return;
104961 +       }
104963         /* We get our minimum from the var_off, since that's inherently
104964          * bitwise.  Our maximum is the minimum of the operands' maxima.
104965 @@ -6562,7 +6571,6 @@ static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
104966                 dst_reg->s32_min_value = dst_reg->u32_min_value;
104967                 dst_reg->s32_max_value = dst_reg->u32_max_value;
104968         }
104972  static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
104973 @@ -6609,11 +6617,10 @@ static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
104974         s32 smin_val = src_reg->s32_min_value;
104975         u32 umin_val = src_reg->u32_min_value;
104977 -       /* Assuming scalar64_min_max_or will be called so it is safe
104978 -        * to skip updating register for known case.
104979 -        */
104980 -       if (src_known && dst_known)
104981 +       if (src_known && dst_known) {
104982 +               __mark_reg32_known(dst_reg, var32_off.value);
104983                 return;
104984 +       }
104986         /* We get our maximum from the var_off, and our minimum is the
104987          * maximum of the operands' minima
104988 @@ -6678,11 +6685,10 @@ static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
104989         struct tnum var32_off = tnum_subreg(dst_reg->var_off);
104990         s32 smin_val = src_reg->s32_min_value;
104992 -       /* Assuming scalar64_min_max_xor will be called so it is safe
104993 -        * to skip updating register for known case.
104994 -        */
104995 -       if (src_known && dst_known)
104996 +       if (src_known && dst_known) {
104997 +               __mark_reg32_known(dst_reg, var32_off.value);
104998                 return;
104999 +       }
105001         /* We get both minimum and maximum from the var32_off. */
105002         dst_reg->u32_min_value = var32_off.value;
105003 @@ -11740,7 +11746,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
105004                         const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
105005                         struct bpf_insn insn_buf[16];
105006                         struct bpf_insn *patch = &insn_buf[0];
105007 -                       bool issrc, isneg;
105008 +                       bool issrc, isneg, isimm;
105009                         u32 off_reg;
105011                         aux = &env->insn_aux_data[i + delta];
105012 @@ -11751,28 +11757,29 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
105013                         isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
105014                         issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
105015                                 BPF_ALU_SANITIZE_SRC;
105016 +                       isimm = aux->alu_state & BPF_ALU_IMMEDIATE;
105018                         off_reg = issrc ? insn->src_reg : insn->dst_reg;
105019 -                       if (isneg)
105020 -                               *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
105021 -                       *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
105022 -                       *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
105023 -                       *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
105024 -                       *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
105025 -                       *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
105026 -                       if (issrc) {
105027 -                               *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
105028 -                                                        off_reg);
105029 -                               insn->src_reg = BPF_REG_AX;
105030 +                       if (isimm) {
105031 +                               *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
105032                         } else {
105033 -                               *patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
105034 -                                                        BPF_REG_AX);
105035 +                               if (isneg)
105036 +                                       *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
105037 +                               *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
105038 +                               *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
105039 +                               *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
105040 +                               *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
105041 +                               *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
105042 +                               *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
105043                         }
105044 +                       if (!issrc)
105045 +                               *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
105046 +                       insn->src_reg = BPF_REG_AX;
105047                         if (isneg)
105048                                 insn->code = insn->code == code_add ?
105049                                              code_sub : code_add;
105050                         *patch++ = *insn;
105051 -                       if (issrc && isneg)
105052 +                       if (issrc && isneg && !isimm)
105053                                 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
105054                         cnt = patch - insn_buf;
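
For a non-constant offset, the rewritten emit sequence above computes a branch-free mask that zeroes the offset whenever it falls outside the verified window, so even a misspeculated (Spectre-style) path cannot move the pointer out of bounds; constant offsets (isimm) skip the dance entirely, since the verifier already knows the exact value. A C rendering of the emitted instructions (a sketch; names are illustrative):

#include <stdint.h>

/* Sketch of the BPF_REG_AX sequence emitted above:
 *   ax = alu_limit; ax -= off; ax |= off; ax = -ax; ax s>>= 63; off &= ax;
 * ax ends up all-ones when 0 <= off <= alu_limit and zero otherwise. */
static uint64_t sanitize_off(uint64_t off, uint32_t alu_limit)
{
        int64_t ax = alu_limit;

        ax -= off;      /* negative iff off > alu_limit */
        ax |= off;      /* also negative iff off has bit 63 set */
        ax = -ax;       /* sign bit now set iff off was in range */
        ax >>= 63;      /* arithmetic shift: all-ones or all-zeros */
        return off & (uint64_t)ax;
}
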
105056 diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
105057 index c10e855a03bc..fe4c01c14ab2 100644
105058 --- a/kernel/dma/swiotlb.c
105059 +++ b/kernel/dma/swiotlb.c
105060 @@ -608,7 +608,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
105061                 enum dma_data_direction dir, unsigned long attrs)
105063         unsigned int offset = swiotlb_align_offset(dev, orig_addr);
105064 -       unsigned int index, i;
105065 +       unsigned int i;
105066 +       int index;
105067         phys_addr_t tlb_addr;
105069         if (no_iotlb_memory)
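
This hunk makes index signed, presumably because the slot-search path can report failure with a negative value; keeping it unsigned makes a sign-based error check unreachable. The classic pitfall, in miniature:

#include <stdio.h>

int main(void)
{
        unsigned int u = -1;    /* wraps to UINT_MAX (4294967295) */
        int s = -1;

        /* An "is it negative?" error check only works on the signed copy. */
        printf("u=%u, s=%d, s<0: %d\n", u, s, s < 0);
        return 0;
}
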
105070 diff --git a/kernel/events/core.c b/kernel/events/core.c
105071 index 03db40f6cba9..c24ea952e7ae 100644
105072 --- a/kernel/events/core.c
105073 +++ b/kernel/events/core.c
105074 @@ -2204,6 +2204,26 @@ static void perf_group_detach(struct perf_event *event)
105075         perf_event__header_size(leader);
105078 +static void sync_child_event(struct perf_event *child_event);
105080 +static void perf_child_detach(struct perf_event *event)
105082 +       struct perf_event *parent_event = event->parent;
105084 +       if (!(event->attach_state & PERF_ATTACH_CHILD))
105085 +               return;
105087 +       event->attach_state &= ~PERF_ATTACH_CHILD;
105089 +       if (WARN_ON_ONCE(!parent_event))
105090 +               return;
105092 +       lockdep_assert_held(&parent_event->child_mutex);
105094 +       sync_child_event(event);
105095 +       list_del_init(&event->child_list);
105098  static bool is_orphaned_event(struct perf_event *event)
105100         return event->state == PERF_EVENT_STATE_DEAD;
105101 @@ -2311,6 +2331,7 @@ group_sched_out(struct perf_event *group_event,
105104  #define DETACH_GROUP   0x01UL
105105 +#define DETACH_CHILD   0x02UL
105108   * Cross CPU call to remove a performance event
105109 @@ -2334,6 +2355,8 @@ __perf_remove_from_context(struct perf_event *event,
105110         event_sched_out(event, cpuctx, ctx);
105111         if (flags & DETACH_GROUP)
105112                 perf_group_detach(event);
105113 +       if (flags & DETACH_CHILD)
105114 +               perf_child_detach(event);
105115         list_del_event(event, ctx);
105117         if (!ctx->nr_events && ctx->is_active) {
105118 @@ -2362,25 +2385,21 @@ static void perf_remove_from_context(struct perf_event *event, unsigned long fla
105120         lockdep_assert_held(&ctx->mutex);
105122 -       event_function_call(event, __perf_remove_from_context, (void *)flags);
105124         /*
105125 -        * The above event_function_call() can NO-OP when it hits
105126 -        * TASK_TOMBSTONE. In that case we must already have been detached
105127 -        * from the context (by perf_event_exit_event()) but the grouping
105128 -        * might still be in-tact.
105129 +        * Because of perf_event_exit_task(), perf_remove_from_context() ought
105130 +        * to work in the face of TASK_TOMBSTONE, unlike every other
105131 +        * event_function_call() user.
105132          */
105133 -       WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
105134 -       if ((flags & DETACH_GROUP) &&
105135 -           (event->attach_state & PERF_ATTACH_GROUP)) {
105136 -               /*
105137 -                * Since in that case we cannot possibly be scheduled, simply
105138 -                * detach now.
105139 -                */
105140 -               raw_spin_lock_irq(&ctx->lock);
105141 -               perf_group_detach(event);
105142 +       raw_spin_lock_irq(&ctx->lock);
105143 +       if (!ctx->is_active) {
105144 +               __perf_remove_from_context(event, __get_cpu_context(ctx),
105145 +                                          ctx, (void *)flags);
105146                 raw_spin_unlock_irq(&ctx->lock);
105147 +               return;
105148         }
105149 +       raw_spin_unlock_irq(&ctx->lock);
105151 +       event_function_call(event, __perf_remove_from_context, (void *)flags);
105155 @@ -11829,12 +11848,12 @@ SYSCALL_DEFINE5(perf_event_open,
105156                         return err;
105157         }
105159 -       err = security_locked_down(LOCKDOWN_PERF);
105160 -       if (err && (attr.sample_type & PERF_SAMPLE_REGS_INTR))
105161 -               /* REGS_INTR can leak data, lockdown must prevent this */
105162 -               return err;
105164 -       err = 0;
105165 +       /* REGS_INTR can leak data, lockdown must prevent this */
105166 +       if (attr.sample_type & PERF_SAMPLE_REGS_INTR) {
105167 +               err = security_locked_down(LOCKDOWN_PERF);
105168 +               if (err)
105169 +                       return err;
105170 +       }
105172         /*
105173          * In cgroup mode, the pid argument is used to pass the fd
105174 @@ -12373,14 +12392,17 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
105176  EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
105178 -static void sync_child_event(struct perf_event *child_event,
105179 -                              struct task_struct *child)
105180 +static void sync_child_event(struct perf_event *child_event)
105182         struct perf_event *parent_event = child_event->parent;
105183         u64 child_val;
105185 -       if (child_event->attr.inherit_stat)
105186 -               perf_event_read_event(child_event, child);
105187 +       if (child_event->attr.inherit_stat) {
105188 +               struct task_struct *task = child_event->ctx->task;
105190 +               if (task && task != TASK_TOMBSTONE)
105191 +                       perf_event_read_event(child_event, task);
105192 +       }
105194         child_val = perf_event_count(child_event);
105196 @@ -12395,60 +12417,53 @@ static void sync_child_event(struct perf_event *child_event,
105199  static void
105200 -perf_event_exit_event(struct perf_event *child_event,
105201 -                     struct perf_event_context *child_ctx,
105202 -                     struct task_struct *child)
105203 +perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx)
105205 -       struct perf_event *parent_event = child_event->parent;
105206 +       struct perf_event *parent_event = event->parent;
105207 +       unsigned long detach_flags = 0;
105209 -       /*
105210 -        * Do not destroy the 'original' grouping; because of the context
105211 -        * switch optimization the original events could've ended up in a
105212 -        * random child task.
105213 -        *
105214 -        * If we were to destroy the original group, all group related
105215 -        * operations would cease to function properly after this random
105216 -        * child dies.
105217 -        *
105218 -        * Do destroy all inherited groups, we don't care about those
105219 -        * and being thorough is better.
105220 -        */
105221 -       raw_spin_lock_irq(&child_ctx->lock);
105222 -       WARN_ON_ONCE(child_ctx->is_active);
105223 +       if (parent_event) {
105224 +               /*
105225 +                * Do not destroy the 'original' grouping; because of the
105226 +                * context switch optimization the original events could've
105227 +                * ended up in a random child task.
105228 +                *
105229 +                * If we were to destroy the original group, all group related
105230 +                * operations would cease to function properly after this
105231 +                * random child dies.
105232 +                *
105233 +                * Do destroy all inherited groups, we don't care about those
105234 +                * and being thorough is better.
105235 +                */
105236 +               detach_flags = DETACH_GROUP | DETACH_CHILD;
105237 +               mutex_lock(&parent_event->child_mutex);
105238 +       }
105240 -       if (parent_event)
105241 -               perf_group_detach(child_event);
105242 -       list_del_event(child_event, child_ctx);
105243 -       perf_event_set_state(child_event, PERF_EVENT_STATE_EXIT); /* is_event_hup() */
105244 -       raw_spin_unlock_irq(&child_ctx->lock);
105245 +       perf_remove_from_context(event, detach_flags);
105247 +       raw_spin_lock_irq(&ctx->lock);
105248 +       if (event->state > PERF_EVENT_STATE_EXIT)
105249 +               perf_event_set_state(event, PERF_EVENT_STATE_EXIT);
105250 +       raw_spin_unlock_irq(&ctx->lock);
105252         /*
105253 -        * Parent events are governed by their filedesc, retain them.
105254 +        * Child events can be freed.
105255          */
105256 -       if (!parent_event) {
105257 -               perf_event_wakeup(child_event);
105258 +       if (parent_event) {
105259 +               mutex_unlock(&parent_event->child_mutex);
105260 +               /*
105261 +                * Kick perf_poll() for is_event_hup().

105262 +                */
105263 +               perf_event_wakeup(parent_event);
105264 +               free_event(event);
105265 +               put_event(parent_event);
105266                 return;
105267         }
105268 -       /*
105269 -        * Child events can be cleaned up.
105270 -        */
105272 -       sync_child_event(child_event, child);
105274         /*
105275 -        * Remove this event from the parent's list
105276 -        */
105277 -       WARN_ON_ONCE(parent_event->ctx->parent_ctx);
105278 -       mutex_lock(&parent_event->child_mutex);
105279 -       list_del_init(&child_event->child_list);
105280 -       mutex_unlock(&parent_event->child_mutex);
105282 -       /*
105283 -        * Kick perf_poll() for is_event_hup().
105284 +        * Parent events are governed by their filedesc, retain them.
105285          */
105286 -       perf_event_wakeup(parent_event);
105287 -       free_event(child_event);
105288 -       put_event(parent_event);
105289 +       perf_event_wakeup(event);
105292  static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
105293 @@ -12505,7 +12520,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
105294         perf_event_task(child, child_ctx, 0);
105296         list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
105297 -               perf_event_exit_event(child_event, child_ctx, child);
105298 +               perf_event_exit_event(child_event, child_ctx);
105300         mutex_unlock(&child_ctx->mutex);
105302 @@ -12765,6 +12780,7 @@ inherit_event(struct perf_event *parent_event,
105303          */
105304         raw_spin_lock_irqsave(&child_ctx->lock, flags);
105305         add_event_to_ctx(child_event, child_ctx);
105306 +       child_event->attach_state |= PERF_ATTACH_CHILD;
105307         raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
105309         /*
105310 diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
105311 index 6addc9780319..4e93e5602723 100644
105312 --- a/kernel/events/uprobes.c
105313 +++ b/kernel/events/uprobes.c
105314 @@ -184,7 +184,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
105315         if (new_page) {
105316                 get_page(new_page);
105317                 page_add_new_anon_rmap(new_page, vma, addr, false);
105318 -               lru_cache_add_inactive_or_unevictable(new_page, vma);
105319 +               lru_cache_add_page_vma(new_page, vma, false);
105320         } else
105321                 /* no new page, just dec_mm_counter for old_page */
105322                 dec_mm_counter(mm, MM_ANONPAGES);
105323 diff --git a/kernel/exit.c b/kernel/exit.c
105324 index 04029e35e69a..e4292717ce37 100644
105325 --- a/kernel/exit.c
105326 +++ b/kernel/exit.c
105327 @@ -422,6 +422,7 @@ void mm_update_next_owner(struct mm_struct *mm)
105328                 goto retry;
105329         }
105330         WRITE_ONCE(mm->owner, c);
105331 +       lru_gen_migrate_mm(mm);
105332         task_unlock(c);
105333         put_task_struct(c);
105335 diff --git a/kernel/fork.c b/kernel/fork.c
105336 index 426cd0c51f9e..c54400f24fb2 100644
105337 --- a/kernel/fork.c
105338 +++ b/kernel/fork.c
105339 @@ -107,6 +107,11 @@
105341  #define CREATE_TRACE_POINTS
105342  #include <trace/events/task.h>
105343 +#ifdef CONFIG_USER_NS
105344 +extern int unprivileged_userns_clone;
105345 +#else
105346 +#define unprivileged_userns_clone 0
105347 +#endif
105350   * Minimum number of threads to boot the kernel
105351 @@ -665,6 +670,7 @@ static void check_mm(struct mm_struct *mm)
105352  #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
105353         VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
105354  #endif
105355 +       VM_BUG_ON_MM(lru_gen_mm_is_active(mm), mm);
105358  #define allocate_mm()  (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
105359 @@ -1055,6 +1061,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
105360                 goto fail_nocontext;
105362         mm->user_ns = get_user_ns(user_ns);
105363 +       lru_gen_init_mm(mm);
105364         return mm;
105366  fail_nocontext:
105367 @@ -1097,6 +1104,7 @@ static inline void __mmput(struct mm_struct *mm)
105368         }
105369         if (mm->binfmt)
105370                 module_put(mm->binfmt->module);
105371 +       lru_gen_del_mm(mm);
105372         mmdrop(mm);
105375 @@ -1128,6 +1136,7 @@ void mmput_async(struct mm_struct *mm)
105376                 schedule_work(&mm->async_put_work);
105377         }
105379 +EXPORT_SYMBOL(mmput_async);
105380  #endif
105382  /**
105383 @@ -1316,6 +1325,8 @@ static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
105384                         put_user(0, tsk->clear_child_tid);
105385                         do_futex(tsk->clear_child_tid, FUTEX_WAKE,
105386                                         1, NULL, NULL, 0, 0);
105387 +                       ksys_futex_wake(tsk->clear_child_tid, 1,
105388 +                                       FUTEX_32 | FUTEX_SHARED_FLAG);
105389                 }
105390                 tsk->clear_child_tid = NULL;
105391         }
105392 @@ -1872,6 +1883,10 @@ static __latent_entropy struct task_struct *copy_process(
105393         if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
105394                 return ERR_PTR(-EINVAL);
105396 +       if ((clone_flags & CLONE_NEWUSER) && !unprivileged_userns_clone)
105397 +               if (!capable(CAP_SYS_ADMIN))
105398 +                       return ERR_PTR(-EPERM);
105400         /*
105401          * Thread groups must share signals as well, and detached threads
105402          * can only be started up within the thread group.
105403 @@ -2521,6 +2536,13 @@ pid_t kernel_clone(struct kernel_clone_args *args)
105404                 get_task_struct(p);
105405         }
105407 +       if (IS_ENABLED(CONFIG_LRU_GEN) && !(clone_flags & CLONE_VM)) {
105408 +               /* lock the task to synchronize with memcg migration */
105409 +               task_lock(p);
105410 +               lru_gen_add_mm(p->mm);
105411 +               task_unlock(p);
105412 +       }
105414         wake_up_new_task(p);
105416         /* forking complete and child started to run, tell ptracer */
105417 @@ -2971,6 +2993,12 @@ int ksys_unshare(unsigned long unshare_flags)
105418         if (unshare_flags & CLONE_NEWNS)
105419                 unshare_flags |= CLONE_FS;
105421 +       if ((unshare_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) {
105422 +               err = -EPERM;
105423 +               if (!capable(CAP_SYS_ADMIN))
105424 +                       goto bad_unshare_out;
105425 +       }
105427         err = check_unshare_flags(unshare_flags);
105428         if (err)
105429                 goto bad_unshare_out;
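
With these two gates (the clone and unshare paths), creating a user namespace without CAP_SYS_ADMIN depends on the unprivileged_userns_clone knob declared above, which is exposed as a sysctl elsewhere in this patch. A minimal probe from userspace (a sketch; error handling trimmed):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

int main(void)
{
        /* Fails with EPERM when unprivileged_userns_clone is 0 and the
         * caller lacks CAP_SYS_ADMIN, per the checks added above. */
        if (unshare(CLONE_NEWUSER) == -1)
                fprintf(stderr, "unshare(CLONE_NEWUSER): %s\n",
                        strerror(errno));
        else
                puts("entered a new user namespace");
        return 0;
}
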
105430 diff --git a/kernel/futex.c b/kernel/futex.c
105431 index 00febd6dea9c..f923d2da4b40 100644
105432 --- a/kernel/futex.c
105433 +++ b/kernel/futex.c
105434 @@ -198,6 +198,8 @@ struct futex_pi_state {
105435   * @rt_waiter:         rt_waiter storage for use with requeue_pi
105436   * @requeue_pi_key:    the requeue_pi target futex key
105437   * @bitset:            bitset for the optional bitmasked wakeup
105438 + * @uaddr:             userspace address of futex
105439 + * @uval:              expected value of the futex
105440   *
105441   * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
105442   * we can wake only the relevant ones (hashed queues may be shared).
105443 @@ -220,6 +222,8 @@ struct futex_q {
105444         struct rt_mutex_waiter *rt_waiter;
105445         union futex_key *requeue_pi_key;
105446         u32 bitset;
105447 +       u32 __user *uaddr;
105448 +       u32 uval;
105449  } __randomize_layout;
105451  static const struct futex_q futex_q_init = {
105452 @@ -2313,6 +2317,29 @@ static int unqueue_me(struct futex_q *q)
105453         return ret;
105457 + * unqueue_multiple() - Remove several futexes from their futex_hash_bucket
105458 + * @q: The list of futexes to unqueue
105459 + * @count: Number of futexes in the list
105461 + * Helper to unqueue a list of futexes. This can't fail.
105463 + * Return:
105464 + *  - >=0 - Index of the last futex that was awoken;
105465 + *  - -1  - If no futex was awoken
105466 + */
105467 +static int unqueue_multiple(struct futex_q *q, int count)
105469 +       int ret = -1;
105470 +       int i;
105472 +       for (i = 0; i < count; i++) {
105473 +               if (!unqueue_me(&q[i]))
105474 +                       ret = i;
105475 +       }
105476 +       return ret;
105480   * PI futexes can not be requeued and must remove themself from the
105481   * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
105482 @@ -2680,6 +2707,205 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
105483         return ret;
105487 + * futex_wait_multiple_setup() - Prepare to wait and enqueue multiple futexes
105488 + * @qs:                The corresponding futex list
105489 + * @count:     The size of the list
105490 + * @flags:     Futex flags (FLAGS_SHARED, etc.)
105491 + * @awaken:    Index of the last awoken futex
105493 + * Prepare multiple futexes in a single step and enqueue them. This may fail if
105494 + * the futex list is invalid or if any futex was already awoken. On success the
105495 + * task is ready for interruptible sleep.
105497 + * Return:
105498 + *  -  1 - One of the futexes was woken by another thread
105499 + *  -  0 - Success
105500 + *  - <0 - -EFAULT, -EWOULDBLOCK or -EINVAL
105501 + */
105502 +static int futex_wait_multiple_setup(struct futex_q *qs, int count,
105503 +                                    unsigned int flags, int *awaken)
105505 +       struct futex_hash_bucket *hb;
105506 +       int ret, i;
105507 +       u32 uval;
105509 +       /*
105510 +        * Enqueuing multiple futexes is tricky, because we need to
105511 +        * enqueue each futex in the list before dealing with the next
105512 +        * one to avoid deadlocking on the hash bucket.  But, before
105513 +        * enqueuing, we need to make sure that current->state is
105514 +        * TASK_INTERRUPTIBLE, so we don't miss any wake events, which
105515 +        * cannot be done before the get_futex_key() of the next key,
105516 +        * because it calls get_user_pages(), which can sleep.  Thus, we
105517 +        * fetch the list of futex keys in two steps: first pin all the
105518 +        * memory behind the keys, and only then read each value and
105519 +        * queue the corresponding futex.
105520 +        */
105521 +retry:
105522 +       for (i = 0; i < count; i++) {
105523 +               qs[i].key = FUTEX_KEY_INIT;
105524 +               ret = get_futex_key(qs[i].uaddr, flags & FLAGS_SHARED,
105525 +                                   &qs[i].key, FUTEX_READ);
105526 +               if (unlikely(ret)) {
105527 +                       return ret;
105528 +               }
105529 +       }
105531 +       set_current_state(TASK_INTERRUPTIBLE);
105533 +       for (i = 0; i < count; i++) {
105534 +               struct futex_q *q = &qs[i];
105536 +               hb = queue_lock(q);
105538 +               ret = get_futex_value_locked(&uval, q->uaddr);
105539 +               if (ret) {
105540 +                       /*
105541 +                        * We need to try to handle the fault, which
105542 +                        * cannot be done without sleep, so we need to
105543 +                        * undo all the work already done, to make sure
105544 +                        * we don't miss any wake ups.  Therefore, clean
105545 +                        * up, handle the fault and retry from the
105546 +                        * beginning.
105547 +                        */
105548 +                       queue_unlock(hb);
105550 +                       /*
105551 +                        * Keys 0..(i-1) are implicitly put
105552 +                        * by unqueue_multiple().
105553 +                        */
105554 +                       *awaken = unqueue_multiple(qs, i);
105556 +                       __set_current_state(TASK_RUNNING);
105558 +                       /*
105559 +                        * On a real fault, prioritize the error even if
105560 +                        * some other futex was awoken.  Userspace gave
105561 +                        * us a bad address, -EFAULT them.
105562 +                        */
105563 +                       ret = get_user(uval, q->uaddr);
105564 +                       if (ret)
105565 +                               return ret;
105567 +                       /*
105568 +                        * Even if the page fault was handled, if
105569 +                        * something was already woken, we can safely
105570 +                        * give up and return success, as a hint for userspace to
105571 +                        * acquire the right futex faster.
105572 +                        */
105573 +                       if (*awaken >= 0)
105574 +                               return 1;
105576 +                       goto retry;
105577 +               }
105579 +               if (uval != q->uval) {
105580 +                       queue_unlock(hb);
105582 +                       /*
105583 +                        * If something was already woken, we can
105584 +                        * safely ignore the error and succeed.
105585 +                        */
105586 +                       *awaken = unqueue_multiple(qs, i);
105587 +                       __set_current_state(TASK_RUNNING);
105588 +                       if (*awaken >= 0)
105589 +                               return 1;
105591 +                       return -EWOULDBLOCK;
105592 +               }
105594 +               /*
105595 +                * The bucket lock can't be held while dealing with the
105596 +                * next futex. Queue each futex at this moment so hb can
105597 +                * be unlocked.
105598 +                */
105599 +               queue_me(&qs[i], hb);
105600 +       }
105601 +       return 0;
105605 + * futex_wait_multiple() - Prepare to wait on and enqueue several futexes
105606 + * @qs:                The list of futexes to wait on
105607 + * @op:                Operation code from futex's syscall
105608 + * @count:     The number of objects
105609 + * @abs_time:  Timeout before giving up and returning to userspace
105611 + * Entry point for the FUTEX_WAIT_MULTIPLE futex operation, this function
105612 + * sleeps on a group of futexes and returns on the first futex that
105613 + * triggered, or after the timeout has elapsed.
105615 + * Return:
105616 + *  - >=0 - Hint to the futex that was awoken
105617 + *  - <0  - On error
105618 + */
105619 +static int futex_wait_multiple(struct futex_q *qs, int op,
105620 +                              u32 count, ktime_t *abs_time)
105622 +       struct hrtimer_sleeper timeout, *to;
105623 +       int ret, flags = 0, hint = 0;
105624 +       unsigned int i;
105626 +       if (!(op & FUTEX_PRIVATE_FLAG))
105627 +               flags |= FLAGS_SHARED;
105629 +       if (op & FUTEX_CLOCK_REALTIME)
105630 +               flags |= FLAGS_CLOCKRT;
105632 +       to = futex_setup_timer(abs_time, &timeout, flags, 0);
105633 +       while (1) {
105634 +               ret = futex_wait_multiple_setup(qs, count, flags, &hint);
105635 +               if (ret) {
105636 +                       if (ret > 0) {
105637 +                               /* A futex was woken during setup */
105638 +                               ret = hint;
105639 +                       }
105640 +                       break;
105641 +               }
105643 +               if (to)
105644 +                       hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
105646 +               /*
105647 +                * Avoid sleeping if another thread already tried to
105648 +                * wake us.
105649 +                */
105650 +               for (i = 0; i < count; i++) {
105651 +                       if (plist_node_empty(&qs[i].list))
105652 +                               break;
105653 +               }
105655 +               if (i == count && (!to || to->task))
105656 +                       freezable_schedule();
105658 +               ret = unqueue_multiple(qs, count);
105660 +               __set_current_state(TASK_RUNNING);
105662 +               if (ret >= 0)
105663 +                       break;
105664 +               if (to && !to->task) {
105665 +                       ret = -ETIMEDOUT;
105666 +                       break;
105667 +               } else if (signal_pending(current)) {
105668 +                       ret = -ERESTARTSYS;
105669 +                       break;
105670 +               }
105671 +               /*
105672 +                * The final case is a spurious wakeup, in
105673 +                * which case we just retry.
105674 +                */
105675 +       }
105677 +       if (to) {
105678 +               hrtimer_cancel(&to->timer);
105679 +               destroy_hrtimer_on_stack(&to->timer);
105680 +       }
105682 +       return ret;
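
From userspace, FUTEX_WAIT_MULTIPLE takes the array of wait blocks as uaddr and the entry count as val, with a relative timeout like FUTEX_WAIT. A minimal caller might look like the sketch below; the opcode value 31 and the wait-block layout come from the uapi part of this patch, outside this excerpt, so treat both as assumptions:

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

#ifndef FUTEX_WAIT_MULTIPLE
#define FUTEX_WAIT_MULTIPLE 31          /* assumed value from the uapi hunk */
#endif

struct futex_wait_block {               /* assumed layout from the uapi hunk */
        uint32_t *uaddr;
        uint32_t val;
        uint32_t bitset;
};

static uint32_t f1, f2;

int main(void)
{
        struct futex_wait_block blocks[2] = {
                { &f1, 0, FUTEX_BITSET_MATCH_ANY },
                { &f2, 0, FUTEX_BITSET_MATCH_ANY },
        };
        struct timespec timeout = { .tv_sec = 1 };

        /* Sleeps on both futexes; returns the index of the woken entry,
         * or -1 with errno set (ETIMEDOUT here, since nobody wakes us). */
        long ret = syscall(SYS_futex, blocks, FUTEX_WAIT_MULTIPLE, 2,
                           &timeout, NULL, 0);
        printf("futex_wait_multiple -> %ld\n", ret);
        return 0;
}
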
105685  static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
105686                       ktime_t *abs_time, u32 bitset)
105688 @@ -3711,8 +3937,7 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
105690         if (op & FUTEX_CLOCK_REALTIME) {
105691                 flags |= FLAGS_CLOCKRT;
105692 -               if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET && \
105693 -                   cmd != FUTEX_WAIT_REQUEUE_PI)
105694 +               if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
105695                         return -ENOSYS;
105696         }
105698 @@ -3759,6 +3984,43 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
105699         return -ENOSYS;
105703 + * futex_read_wait_block - Read an array of futex_wait_block from userspace
105704 + * @uaddr:     Userspace address of the block
105705 + * @count:     Number of blocks to be read
105707 + * This function allocates an array of futex_q (zeroed to initialize the
105708 + * fields) and then, for each futex_wait_block element from userspace,
105709 + * fills a futex_q element with the proper values.
105710 + */
105711 +inline struct futex_q *futex_read_wait_block(u32 __user *uaddr, u32 count)
105713 +       unsigned int i;
105714 +       struct futex_q *qs;
105715 +       struct futex_wait_block fwb;
105716 +       struct futex_wait_block __user *entry =
105717 +               (struct futex_wait_block __user *)uaddr;
105719 +       if (!count || count > FUTEX_MULTIPLE_MAX_COUNT)
105720 +               return ERR_PTR(-EINVAL);
105722 +       qs = kcalloc(count, sizeof(*qs), GFP_KERNEL);
105723 +       if (!qs)
105724 +               return ERR_PTR(-ENOMEM);
105726 +       for (i = 0; i < count; i++) {
105727 +               if (copy_from_user(&fwb, &entry[i], sizeof(fwb))) {
105728 +                       kfree(qs);
105729 +                       return ERR_PTR(-EFAULT);
105730 +               }
105732 +               qs[i].uaddr = fwb.uaddr;
105733 +               qs[i].uval = fwb.val;
105734 +               qs[i].bitset = fwb.bitset;
105735 +       }
105737 +       return qs;
105740  SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
105741                 const struct __kernel_timespec __user *, utime,
105742 @@ -3771,7 +4033,8 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
105744         if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
105745                       cmd == FUTEX_WAIT_BITSET ||
105746 -                     cmd == FUTEX_WAIT_REQUEUE_PI)) {
105747 +                     cmd == FUTEX_WAIT_REQUEUE_PI ||
105748 +                     cmd == FUTEX_WAIT_MULTIPLE)) {
105749                 if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
105750                         return -EFAULT;
105751                 if (get_timespec64(&ts, utime))
105752 @@ -3780,9 +4043,9 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
105753                         return -EINVAL;
105755                 t = timespec64_to_ktime(ts);
105756 -               if (cmd == FUTEX_WAIT)
105757 +               if (cmd == FUTEX_WAIT || cmd == FUTEX_WAIT_MULTIPLE)
105758                         t = ktime_add_safe(ktime_get(), t);
105759 -               else if (!(op & FUTEX_CLOCK_REALTIME))
105760 +               else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME))
105761                         t = timens_ktime_to_host(CLOCK_MONOTONIC, t);
105762                 tp = &t;
105763         }
105764 @@ -3794,6 +4057,25 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
105765             cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
105766                 val2 = (u32) (unsigned long) utime;
105768 +       if (cmd == FUTEX_WAIT_MULTIPLE) {
105769 +               int ret;
105770 +               struct futex_q *qs;
105772 +#ifdef CONFIG_X86_X32
105773 +               if (unlikely(in_x32_syscall()))
105774 +                       return -ENOSYS;
105775 +#endif
105776 +               qs = futex_read_wait_block(uaddr, val);
105778 +               if (IS_ERR(qs))
105779 +                       return PTR_ERR(qs);
105781 +               ret = futex_wait_multiple(qs, op, val, tp);
105782 +               kfree(qs);
105784 +               return ret;
105785 +       }
105787         return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
105790 @@ -3956,6 +4238,58 @@ COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
105791  #endif /* CONFIG_COMPAT */
105793  #ifdef CONFIG_COMPAT_32BIT_TIME
105795 + * struct compat_futex_wait_block - Block of futexes to be waited for
105796 + * @uaddr:     User address of the futex (compatible pointer)
105797 + * @val:       Futex value expected by userspace
105798 + * @bitset:    Bitset for the optional bitmasked wakeup
105799 + */
105800 +struct compat_futex_wait_block {
105801 +       compat_uptr_t   uaddr;
105802 +       __u32 pad;
105803 +       __u32 val;
105804 +       __u32 bitset;
105808 + * compat_futex_read_wait_block - Read an array of futex_wait_block from
105809 + * userspace
105810 + * @uaddr:     Userspace address of the block
105811 + * @count:     Number of blocks to be read
105813 + * This function does the same as futex_read_wait_block(), except that it
105814 + * converts the pointer to the futex from the compat version to the regular one.
105815 + */
105816 +inline struct futex_q *compat_futex_read_wait_block(u32 __user *uaddr,
105817 +                                                   u32 count)
105819 +       unsigned int i;
105820 +       struct futex_q *qs;
105821 +       struct compat_futex_wait_block fwb;
105822 +       struct compat_futex_wait_block __user *entry =
105823 +               (struct compat_futex_wait_block __user *)uaddr;
105825 +       if (!count || count > FUTEX_MULTIPLE_MAX_COUNT)
105826 +               return ERR_PTR(-EINVAL);
105828 +       qs = kcalloc(count, sizeof(*qs), GFP_KERNEL);
105829 +       if (!qs)
105830 +               return ERR_PTR(-ENOMEM);
105832 +       for (i = 0; i < count; i++) {
105833 +               if (copy_from_user(&fwb, &entry[i], sizeof(fwb))) {
105834 +                       kfree(qs);
105835 +                       return ERR_PTR(-EFAULT);
105836 +               }
105838 +               qs[i].uaddr = compat_ptr(fwb.uaddr);
105839 +               qs[i].uval = fwb.val;
105840 +               qs[i].bitset = fwb.bitset;
105841 +       }
105843 +       return qs;
105846  SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
105847                 const struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
105848                 u32, val3)
105849 @@ -3967,16 +4301,17 @@ SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
105851         if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
105852                       cmd == FUTEX_WAIT_BITSET ||
105853 -                     cmd == FUTEX_WAIT_REQUEUE_PI)) {
105854 +                     cmd == FUTEX_WAIT_REQUEUE_PI ||
105855 +                     cmd == FUTEX_WAIT_MULTIPLE)) {
105856                 if (get_old_timespec32(&ts, utime))
105857                         return -EFAULT;
105858                 if (!timespec64_valid(&ts))
105859                         return -EINVAL;
105861                 t = timespec64_to_ktime(ts);
105862 -               if (cmd == FUTEX_WAIT)
105863 +               if (cmd == FUTEX_WAIT || cmd == FUTEX_WAIT_MULTIPLE)
105864                         t = ktime_add_safe(ktime_get(), t);
105865 -               else if (!(op & FUTEX_CLOCK_REALTIME))
105866 +               else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME))
105867                         t = timens_ktime_to_host(CLOCK_MONOTONIC, t);
105868                 tp = &t;
105869         }
105870 @@ -3984,6 +4319,19 @@ SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
105871             cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
105872                 val2 = (int) (unsigned long) utime;
105874 +       if (cmd == FUTEX_WAIT_MULTIPLE) {
105875 +               int ret;
105876 +               struct futex_q *qs = compat_futex_read_wait_block(uaddr, val);
105878 +               if (IS_ERR(qs))
105879 +                       return PTR_ERR(qs);
105881 +               ret = futex_wait_multiple(qs, op, val, tp);
105882 +               kfree(qs);
105884 +               return ret;
105885 +       }
105887         return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
105889  #endif /* CONFIG_COMPAT_32BIT_TIME */
105890 diff --git a/kernel/futex2.c b/kernel/futex2.c
105891 new file mode 100644
105892 index 000000000000..dd6f54ae0220
105893 --- /dev/null
105894 +++ b/kernel/futex2.c
105895 @@ -0,0 +1,1239 @@
105896 +// SPDX-License-Identifier: GPL-2.0-or-later
105898 + * futex2 system call interface by André Almeida <andrealmeid@collabora.com>
105900 + * Copyright 2021 Collabora Ltd.
105902 + * Based on original futex implementation by:
105903 + *  (C) 2002 Rusty Russell, IBM
105904 + *  (C) 2003, 2006 Ingo Molnar, Red Hat Inc.
105905 + *  (C) 2003, 2004 Jamie Lokier
105906 + *  (C) 2006 Thomas Gleixner, Timesys Corp.
105907 + *  (C) 2007 Eric Dumazet
105908 + *  (C) 2009 Darren Hart, IBM
105909 + */
105911 +#include <linux/freezer.h>
105912 +#include <linux/hugetlb.h>
105913 +#include <linux/jhash.h>
105914 +#include <linux/memblock.h>
105915 +#include <linux/pagemap.h>
105916 +#include <linux/sched/wake_q.h>
105917 +#include <linux/spinlock.h>
105918 +#include <linux/syscalls.h>
105919 +#include <uapi/linux/futex.h>
105921 +#ifdef CONFIG_X86_64
105922 +#include <linux/compat.h>
105923 +#endif
105926 + * struct futex_key - Components to build unique key for a futex
105927 + * @pointer: Pointer to current->mm or inode's UUID for file-backed futexes
105928 + * @index: Start address of the page containing the futex, or index of the page
105929 + * @offset: Address offset of uaddr in a page
105930 + */
105931 +struct futex_key {
105932 +       u64 pointer;
105933 +       unsigned long index;
105934 +       unsigned long offset;
105938 + * struct futex_waiter - List entry for a waiter
105939 + * @uaddr:        Virtual address of userspace futex
105940 + * @key:          Information that uniquely identify a futex
105941 + * @list:        List node struct
105942 + * @val:         Expected value for this waiter
105943 + * @flags:        Flags
105944 + * @bucket:       Pointer to the bucket for this waiter
105945 + * @index:        Index of waiter in futexv list
105946 + */
105947 +struct futex_waiter {
105948 +       void __user *uaddr;
105949 +       struct futex_key key;
105950 +       struct list_head list;
105951 +       unsigned int val;
105952 +       unsigned int flags;
105953 +       struct futex_bucket *bucket;
105954 +       unsigned int index;
105958 + * struct futex_waiter_head - List of futexes to be waited on
105959 + * @task:    Task to be woken
105960 + * @hint:    Was someone on this list awakened?
105961 + * @objects: List of futexes
105962 + */
105963 +struct futex_waiter_head {
105964 +       struct task_struct *task;
105965 +       bool hint;
105966 +       struct futex_waiter objects[0];
105970 + * struct futex_bucket - A bucket of futex's hash table
105971 + * @waiters: Number of waiters in the bucket
105972 + * @lock:    Bucket lock
105973 + * @list:    List of waiters on this bucket
105974 + */
105975 +struct futex_bucket {
105976 +       atomic_t waiters;
105977 +       spinlock_t lock;
105978 +       struct list_head list;
105981 +/* Mask for futex2 flag operations */
105982 +#define FUTEX2_MASK (FUTEX_SIZE_MASK | FUTEX_CLOCK_REALTIME | FUTEX_SHARED_FLAG)
105984 +/* Mask for sys_futex_waitv flag */
105985 +#define FUTEXV_MASK (FUTEX_CLOCK_REALTIME)
105987 +/* Mask for each futex in futex_waitv list */
105988 +#define FUTEXV_WAITER_MASK (FUTEX_SIZE_MASK | FUTEX_SHARED_FLAG)
105990 +#define is_object_shared ((futexv->objects[i].flags & FUTEX_SHARED_FLAG) ? true : false)
105992 +#define FUT_OFF_INODE    1 /* We set bit 0 if key has a reference on inode */
105993 +#define FUT_OFF_MMSHARED 2 /* We set bit 1 if key has a reference on mm */
105995 +static struct futex_bucket *futex_table;
105996 +static unsigned int futex2_hashsize;
105999 + * Reflects a new waiter being added to the waitqueue.
106000 + */
106001 +static inline void bucket_inc_waiters(struct futex_bucket *bucket)
106003 +#ifdef CONFIG_SMP
106004 +       atomic_inc(&bucket->waiters);
106005 +       /*
106006 +        * Issue a barrier after adding so futex_wake() will see that the
106007 +        * value has increased
106008 +        */
106009 +       smp_mb__after_atomic();
106010 +#endif
106014 + * Reflects a waiter being removed from the waitqueue by wakeup
106015 + * paths.
106016 + */
106017 +static inline void bucket_dec_waiters(struct futex_bucket *bucket)
106019 +#ifdef CONFIG_SMP
106020 +       atomic_dec(&bucket->waiters);
106021 +#endif
106025 + * Get the number of waiters in a bucket
106026 + */
106027 +static inline int bucket_get_waiters(struct futex_bucket *bucket)
106029 +#ifdef CONFIG_SMP
106030 +       /*
106031 +        * Issue a barrier before reading so we get an updated value from
106032 +        * futex_wait()
106033 +        */
106034 +       smp_mb();
106035 +       return atomic_read(&bucket->waiters);
106036 +#else
106037 +       return 1;
106038 +#endif
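
The counter and barrier pair in these helpers exist so a waker can skip the bucket lock when the bucket is provably empty: the waiter's smp_mb__after_atomic() orders the increment before it re-reads the futex word, and the waker's smp_mb() orders its store to the futex word before reading the counter. A hypothetical wake-side fast path built on them (a sketch, not part of this hunk):

/* Hypothetical wake path: with the barriers above, either this load sees
 * a non-zero waiter count, or the waiter sees the updated futex value
 * and does not sleep, so skipping the lock here cannot lose a wakeup. */
static int futex2_wake_sketch(struct futex_bucket *bucket)
{
        if (!bucket_get_waiters(bucket))
                return 0;               /* nobody can be sleeping here */

        spin_lock(&bucket->lock);
        /* walk bucket->list and wake matching waiters (elided) */
        spin_unlock(&bucket->lock);
        return 1;
}
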
106042 + * futex_get_inode_uuid - Gets a UUID for an inode
106043 + * @inode: inode to get a UUID for
106045 + * Generate a machine-wide unique identifier for this inode.
106047 + * This relies on u64 not wrapping in the lifetime of the machine, which with
106048 + * 1ns resolution means almost 585 years.
106050 + * This further relies on the fact that a well formed program will not unmap
106051 + * the file while it has a (shared) futex waiting on it. This mapping will have
106052 + * a file reference which pins the mount and inode.
106054 + * If for some reason an inode gets evicted and read back in again, it will get
106055 + * a new sequence number and will _NOT_ match, even though it is the exact same
106056 + * file.
106058 + * It is important that match_futex() never has a false positive, especially
106059 + * for PI futexes that can mess up the state. The above argues that false negatives
106060 + * are only possible for malformed programs.
106062 + * Returns: UUID for the given inode
106063 + */
106064 +static u64 futex_get_inode_uuid(struct inode *inode)
106066 +       static atomic64_t i_seq;
106067 +       u64 old;
106069 +       /* Does the inode already have a sequence number? */
106070 +       old = atomic64_read(&inode->i_sequence2);
106072 +       if (likely(old))
106073 +               return old;
106075 +       for (;;) {
106076 +               u64 new = atomic64_add_return(1, &i_seq);
106078 +               if (WARN_ON_ONCE(!new))
106079 +                       continue;
106081 +               old = atomic64_cmpxchg_relaxed(&inode->i_sequence2, 0, new);
106082 +               if (old)
106083 +                       return old;
106084 +               return new;
106085 +       }
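
The pattern here is assign-once id generation under races: the global counter is only advanced when an inode has no sequence number yet, and the cmpxchg guarantees all racing threads agree on a single winner. A standalone C11 analogue of the same pattern (userspace sketch, names invented):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t i_seq;          /* global counter, like i_seq above */

struct obj { _Atomic uint64_t sequence; };

static uint64_t obj_uuid(struct obj *o)
{
        uint64_t old = atomic_load(&o->sequence);

        if (old)                        /* fast path: already assigned */
                return old;

        for (;;) {
                uint64_t next = atomic_fetch_add(&i_seq, 1) + 1;
                uint64_t expected = 0;

                if (!next)              /* 0 means "unassigned"; skip it */
                        continue;
                /* One racing thread installs its id; losers adopt it. */
                if (atomic_compare_exchange_strong(&o->sequence,
                                                   &expected, next))
                        return next;
                return expected;
        }
}
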
106089 + * futex_get_shared_key - Get a key for a shared futex
106090 + * @address: Futex memory address
106091 + * @mm:      Current process mm_struct pointer
106092 + * @key:     Key struct to be filled
106094 + * Returns: 0 on success, error code otherwise
106095 + */
106096 +static int futex_get_shared_key(uintptr_t address, struct mm_struct *mm,
106097 +                               struct futex_key *key)
106099 +       int ret;
106100 +       struct page *page, *tail;
106101 +       struct address_space *mapping;
106103 +again:
106104 +       ret = get_user_pages_fast(address, 1, 0, &page);
106105 +       if (ret < 0)
106106 +               return ret;
106108 +       /*
106109 +        * The treatment of mapping from this point on is critical. The page
106110 +        * lock protects many things but in this context the page lock
106111 +        * stabilizes mapping, prevents inode freeing in the shared
106112 +        * file-backed region case and guards against movement to swap cache.
106113 +        *
106114 +        * Strictly speaking the page lock is not needed in all cases being
106115 +        * considered here and the page lock forces unnecessary serialization.
106116 +        * From this point on, mapping will be re-verified if necessary and
106117 +        * the page lock will be acquired only if it is unavoidable.
106118 +        *
106119 +        * Mapping checks require the head page for any compound page so the
106120 +        * head page and mapping is looked up now. For anonymous pages, it
106121 +        * does not matter if the page splits in the future as the key is
106122 +        * based on the address. For filesystem-backed pages, the tail is
106123 +        * required as the index of the page determines the key. For
106124 +        * base pages, there is no tail page and tail == page.
106125 +        */
106126 +       tail = page;
106127 +       page = compound_head(page);
106128 +       mapping = READ_ONCE(page->mapping);
106130 +       /*
106131 +        * If page->mapping is NULL, then it cannot be a PageAnon
106132 +        * page; but it might be the ZERO_PAGE or in the gate area or
106133 +        * in a special mapping (all cases which we are happy to fail);
106134 +        * or it may have been a good file page when get_user_pages_fast
106135 +        * found it, but truncated or holepunched or subjected to
106136 +        * invalidate_complete_page2 before we got the page lock (also
106137 +        * cases which we are happy to fail).  And we hold a reference,
106138 +        * so refcount care in invalidate_complete_page's remove_mapping
106139 +        * prevents drop_caches from setting mapping to NULL beneath us.
106140 +        *
106141 +        * The case we do have to guard against is when memory pressure made
106142 +        * shmem_writepage move it from filecache to swapcache beneath us:
106143 +        * an unlikely race, but we do need to retry for page->mapping.
106144 +        */
106145 +       if (unlikely(!mapping)) {
106146 +               int shmem_swizzled;
106148 +               /*
106149 +                * Page lock is required to identify which special case above
106150 +                * applies. If this is really a shmem page then the page lock
106151 +                * will prevent unexpected transitions.
106152 +                */
106153 +               lock_page(page);
106154 +               shmem_swizzled = PageSwapCache(page) || page->mapping;
106155 +               unlock_page(page);
106156 +               put_page(page);
106158 +               if (shmem_swizzled)
106159 +                       goto again;
106161 +               return -EFAULT;
106162 +       }
106164 +       /*
106165 +        * Private mappings are handled in a simple way.
106166 +        *
106167 +        * If the futex key is stored on an anonymous page, then the associated
106168 +        * object is the mm which is implicitly pinned by the calling process.
106169 +        *
106170 +        * NOTE: When userspace waits on a MAP_SHARED mapping, even if
106171 +        * it's a read-only handle, it's expected that futexes attach to
106172 +        * the object not the particular process.
106173 +        */
106174 +       if (PageAnon(page)) {
106175 +               key->offset |= FUT_OFF_MMSHARED;
106176 +       } else {
106177 +               struct inode *inode;
106179 +               /*
106180 +                * The associated futex object in this case is the inode and
106181 +                * the page->mapping must be traversed. Ordinarily this should
106182 +                * be stabilised under page lock but it's not strictly
106183 +                * necessary in this case as we just want to pin the inode, not
106184 +                * update the radix tree or anything like that.
106185 +                *
106186 +                * The RCU read lock is taken as the inode is finally freed
106187 +                * under RCU. If the mapping still matches expectations then the
106188 +                * mapping->host can be safely accessed as being a valid inode.
106189 +                */
106190 +               rcu_read_lock();
106192 +               if (READ_ONCE(page->mapping) != mapping) {
106193 +                       rcu_read_unlock();
106194 +                       put_page(page);
106196 +                       goto again;
106197 +               }
106199 +               inode = READ_ONCE(mapping->host);
106200 +               if (!inode) {
106201 +                       rcu_read_unlock();
106202 +                       put_page(page);
106204 +                       goto again;
106205 +               }
106207 +               key->pointer = futex_get_inode_uuid(inode);
106208 +               key->index = (unsigned long)basepage_index(tail);
106209 +               key->offset |= FUT_OFF_INODE;
106211 +               rcu_read_unlock();
106212 +       }
106214 +       put_page(page);
106216 +       return 0;
106220 + * futex_get_bucket - Check if the user address is valid, prepare internal
106221 + *                    data and calculate the hash
106222 + * @uaddr:   futex user address
106223 + * @key:     data that uniquely identifies a futex
106224 + * @shared:  is this a shared futex?
106226 + * For private futexes, each uaddr will be unique for a given mm_struct, and it
106227 + * won't be freed for the lifetime of the process. For shared futexes, check
106228 + * futex_get_shared_key().
106230 + * Return: address of bucket on success, error code otherwise
106231 + */
106232 +static struct futex_bucket *futex_get_bucket(void __user *uaddr,
106233 +                                            struct futex_key *key,
106234 +                                            bool shared)
106236 +       uintptr_t address = (uintptr_t)uaddr;
106237 +       u32 hash_key;
106239 +       /* Checking if uaddr is valid and accessible */
106240 +       if (unlikely(!IS_ALIGNED(address, sizeof(u32))))
106241 +               return ERR_PTR(-EINVAL);
106242 +       if (unlikely(!access_ok(uaddr, sizeof(u32))))
106243 +               return ERR_PTR(-EFAULT);
106245 +       key->offset = address % PAGE_SIZE;
106246 +       address -= key->offset;
106247 +       key->pointer = (u64)address;
106248 +       key->index = (unsigned long)current->mm;
106250 +       if (shared)
106251 +               futex_get_shared_key(address, current->mm, key);
106253 +       /* Generate hash key for this futex using uaddr and current->mm */
106254 +       hash_key = jhash2((u32 *)key, sizeof(*key) / sizeof(u32), 0);
106256 +       /* Since futex2_hashsize is 2^n, subtracting 1 makes a perfect bit mask */
106257 +       return &futex_table[hash_key & (futex2_hashsize - 1)];
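/*
 * Illustrative sketch (not part of the patch): how a private futex key
 * maps to a bucket, mirroring futex_get_bucket() above. jhash2() is
 * kernel-internal, so a trivial stand-in hash is used here; only the
 * offset/pointer/index layout and the power-of-two masking carry over.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sketch_key {
	uint64_t pointer;	/* page-aligned futex address (private case) */
	unsigned long index;	/* stands in for the mm_struct pointer */
	unsigned int offset;	/* offset of the futex word within its page */
};

static uint32_t stand_in_hash(const uint32_t *words, unsigned int n)
{
	uint32_t h = 0;

	while (n--)
		h = h * 2654435761u + *words++;
	return h;
}

int main(void)
{
	const unsigned int hashsize = 1u << 12;	/* must stay a power of two */
	uintptr_t uaddr = 0x7f2a63d01008;	/* made-up futex address */
	struct sketch_key key;

	memset(&key, 0, sizeof(key));	/* zero the padding before hashing */
	key.offset = uaddr % 4096;
	key.pointer = uaddr - key.offset;
	key.index = 0x55aa55aa;

	printf("bucket index = %u\n",
	       stand_in_hash((const uint32_t *)&key,
			     sizeof(key) / sizeof(uint32_t)) & (hashsize - 1));
	return 0;
}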
106261 + * futex_get_user - Get the userspace value on this address
106262 + * @uval:  variable to store the value
106263 + * @uaddr: userspace address
106265 + * Check the comment at futex_enqueue() for more information.
106266 + */
106267 +static int futex_get_user(u32 *uval, u32 __user *uaddr)
106269 +       int ret;
106271 +       pagefault_disable();
106272 +       ret = __get_user(*uval, uaddr);
106273 +       pagefault_enable();
106275 +       return ret;
106279 + * futex_setup_time - Prepare the timeout mechanism and start it.
106280 + * @timo:    Timeout value from userspace
106281 + * @timeout: Pointer to hrtimer handler
106282 + * @flags: Flags from userspace, to decide which clockid to use
106284 + * Return: 0 on success, error code otherwise
106285 + */
106286 +static int futex_setup_time(struct __kernel_timespec __user *timo,
106287 +                           struct hrtimer_sleeper *timeout,
106288 +                           unsigned int flags)
106290 +       ktime_t time;
106291 +       struct timespec64 ts;
106292 +       clockid_t clockid = (flags & FUTEX_CLOCK_REALTIME) ?
106293 +                           CLOCK_REALTIME : CLOCK_MONOTONIC;
106295 +       if (get_timespec64(&ts, timo))
106296 +               return -EFAULT;
106298 +       if (!timespec64_valid(&ts))
106299 +               return -EINVAL;
106301 +       time = timespec64_to_ktime(ts);
106303 +       hrtimer_init_sleeper(timeout, clockid, HRTIMER_MODE_ABS);
106305 +       hrtimer_set_expires(&timeout->timer, time);
106307 +       hrtimer_sleeper_start_expires(timeout, HRTIMER_MODE_ABS);
106309 +       return 0;
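/*
 * Illustrative sketch (not part of the patch): the sleeper above is armed
 * with HRTIMER_MODE_ABS, so callers pass an *absolute* timespec. Without
 * FUTEX_CLOCK_REALTIME in flags it is read against CLOCK_MONOTONIC, so a
 * wait meant to expire two seconds from now is built like this:
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	ts.tv_sec += 2;		/* absolute expiry, not a 2s relative delta */
	printf("expires at %lld.%09ld\n", (long long)ts.tv_sec, (long)ts.tv_nsec);
	return 0;
}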
106313 + * futex_dequeue_multiple - Remove multiple futexes from hash table
106314 + * @futexv: list of waiters
106315 + * @nr:     number of futexes to be removed
106317 + * This function is used if (a) something went wrong while enqueuing, and we
106318 + * need to undo our work (then nr <= nr_futexes) or (b) we woke up, and thus
106319 + * need to remove every waiter, check whether any was indeed woken, and return.
106320 + * Before removing a waiter, we check if it's on the list, since we have no
106321 + * clue which ones have been woken.
106323 + * Return:
106324 + *  * -1  - If no futex was woken during the removal
106325 + *  * >=0 - At least one futex was found woken; index of the last one
106326 + */
106327 +static int futex_dequeue_multiple(struct futex_waiter_head *futexv, unsigned int nr)
106329 +       int i, ret = -1;
106331 +       for (i = 0; i < nr; i++) {
106332 +               spin_lock(&futexv->objects[i].bucket->lock);
106333 +               if (!list_empty(&futexv->objects[i].list)) {
106334 +                       list_del_init(&futexv->objects[i].list);
106335 +                       bucket_dec_waiters(futexv->objects[i].bucket);
106336 +               } else {
106337 +                       ret = i;
106338 +               }
106339 +               spin_unlock(&futexv->objects[i].bucket->lock);
106340 +       }
106342 +       return ret;
106346 + * futex_enqueue - Check the value and enqueue a futex on a wait list
106348 + * @futexv:     List of futexes
106349 + * @nr_futexes: Number of futexes in the list
106350 + * @awakened:  If a futex was awakened during enqueueing, store the index here
106352 + * Get the value from the userspace address and compare it with the expected one.
106354 + * Getting the value from the user futex address:
106356 + * Since we are in a hurry, we use a spin lock and we can't sleep.
106357 + * Try to get the value with page faults disabled (when enabled, we might
106358 + * sleep).
106360 + * If we fail, we aren't sure if the address is invalid or just needs a
106361 + * page fault. Then, release the lock (so we can sleep) and try to get
106362 + * the value with page faults enabled. To trigger page fault
106363 + * handling, we just call __get_user() again. If we sleep with enqueued
106364 + * futexes, we might miss a wake, so dequeue everything before sleeping.
106366 + * If get_user succeeds, this means that the address is valid and we do
106367 + * the work again. Since we just handled the page fault, the page is
106368 + * likely pinned in memory and we should be luckier this time and be
106369 + * able to get the value. If we fail anyway, we will try again.
106371 + * If even with page faults enabled we get an error, this means that
106372 + * the address is not valid and we return from the syscall.
106374 + * If we got an unexpected value or needed to handle a page fault and realized
106375 + * that a futex was awakened, we prioritize this and return success.
106377 + * On success, enqueue the futex in the correct bucket.
106379 + * Return:
106380 + * * 1  - We were woken in the process and nothing is enqueued
106381 + * * 0  - Everything is enqueued and we are ready to sleep
106382 + * * <0 - Something went wrong, nothing is enqueued; error code
106383 + */
106384 +static int futex_enqueue(struct futex_waiter_head *futexv, unsigned int nr_futexes,
106385 +                        int *awakened)
106387 +       int i, ret;
106388 +       u32 uval, val;
106389 +       u32 __user *uaddr;
106390 +       bool retry = false;
106391 +       struct futex_bucket *bucket;
106393 +retry:
106394 +       set_current_state(TASK_INTERRUPTIBLE);
106396 +       for (i = 0; i < nr_futexes; i++) {
106397 +               uaddr = (u32 __user *)futexv->objects[i].uaddr;
106398 +               val = (u32)futexv->objects[i].val;
106400 +               if (is_object_shared && retry) {
106401 +                       struct futex_bucket *tmp =
106402 +                               futex_get_bucket((void __user *)uaddr,
106403 +                                                &futexv->objects[i].key, true);
106404 +                       if (IS_ERR(tmp)) {
106405 +                               __set_current_state(TASK_RUNNING);
106406 +                               futex_dequeue_multiple(futexv, i);
106407 +                               return PTR_ERR(tmp);
106408 +                       }
106409 +                       futexv->objects[i].bucket = tmp;
106410 +               }
106412 +               bucket = futexv->objects[i].bucket;
106414 +               bucket_inc_waiters(bucket);
106415 +               spin_lock(&bucket->lock);
106417 +               ret = futex_get_user(&uval, uaddr);
106419 +               if (unlikely(ret)) {
106420 +                       spin_unlock(&bucket->lock);
106422 +                       bucket_dec_waiters(bucket);
106423 +                       __set_current_state(TASK_RUNNING);
106424 +                       *awakened = futex_dequeue_multiple(futexv, i);
106426 +                       if (*awakened >= 0)
106427 +                               return 1;
106429 +                       if (__get_user(uval, uaddr))
106430 +                               return -EFAULT;
106432 +                       retry = true;
106433 +                       goto retry;
106434 +               }
106436 +               if (uval != val) {
106437 +                       spin_unlock(&bucket->lock);
106439 +                       bucket_dec_waiters(bucket);
106440 +                       __set_current_state(TASK_RUNNING);
106441 +                       *awakened = futex_dequeue_multiple(futexv, i);
106443 +                       if (*awakened >= 0)
106444 +                               return 1;
106446 +                       return -EAGAIN;
106447 +               }
106449 +               list_add_tail(&futexv->objects[i].list, &bucket->list);
106450 +               spin_unlock(&bucket->lock);
106451 +       }
106453 +       return 0;
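/*
 * Illustrative sketch (not part of the patch): the fault-retry protocol
 * the comment above describes, reduced to its shape in user-space terms.
 * try_read() stands in for futex_get_user() (non-blocking, may fail on a
 * non-resident page) and fault_in() for the bare __get_user() call (may
 * block); both stubs and every name here are hypothetical.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_spinlock_t bucket_lock;

static bool try_read(const unsigned int *addr, unsigned int *out)
{
	*out = *addr;	/* pretend this can fail while faults are disabled */
	return true;
}

static bool fault_in(const unsigned int *addr, unsigned int *out)
{
	*out = *addr;	/* pretend this may sleep to fault the page in */
	return true;
}

static int check_and_enqueue(const unsigned int *uaddr, unsigned int expected)
{
	unsigned int cur;

	for (;;) {
		pthread_spin_lock(&bucket_lock);
		if (try_read(uaddr, &cur)) {
			/* a real waiter would be list_add_tail()'ed here */
			pthread_spin_unlock(&bucket_lock);
			return cur == expected ? 0 : -1;	/* -EAGAIN */
		}
		/* we can't sleep under the spinlock: drop it, fault in, retry */
		pthread_spin_unlock(&bucket_lock);
		if (!fault_in(uaddr, &cur))
			return -2;	/* -EFAULT: the address really is bad */
	}
}

int main(void)
{
	unsigned int word = 0;

	pthread_spin_init(&bucket_lock, PTHREAD_PROCESS_PRIVATE);
	return check_and_enqueue(&word, 0);
}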
106457 + * __futex_waitv - Enqueue the list of futexes and wait to be woken
106458 + * @futexv: List of futexes to wait
106459 + * @nr_futexes: Length of futexv
106460 + * @timo:      Timeout
106461 + * @flags:     Timeout flags
106463 + * Return:
106464 + * * >= 0 - Hint of which futex woke us
106465 + * * < 0  - Error code
106466 + */
106467 +static int __futex_waitv(struct futex_waiter_head *futexv, unsigned int nr_futexes,
106468 +                        struct __kernel_timespec __user *timo,
106469 +                        unsigned int flags)
106471 +       int ret;
106472 +       struct hrtimer_sleeper timeout;
106474 +       if (timo) {
106475 +               ret = futex_setup_time(timo, &timeout, flags);
106476 +               if (ret)
106477 +                       return ret;
106478 +       }
106480 +       while (1) {
106481 +               int awakened = -1;
106483 +               ret = futex_enqueue(futexv, nr_futexes, &awakened);
106485 +               if (ret) {
106486 +                       if (awakened >= 0)
106487 +                               ret = awakened;
106488 +                       break;
106489 +               }
106491 +               /* Before sleeping, check if someone was woken */
106492 +               if (!futexv->hint && (!timo || timeout.task))
106493 +                       freezable_schedule();
106495 +               __set_current_state(TASK_RUNNING);
106497 +               /*
106498 +                * One of those things triggered this wake:
106499 +                *
106500 +                * * We have been removed from the bucket. futex_wake() woke
106501 +                *   us. We just need to dequeue and return 0 to userspace.
106502 +                *
106503 +                * However, if no futex was dequeued by a futex_wake():
106504 +                *
106505 +                *   * If there's a timeout and it has expired,
106506 +                *   return -ETIMEDOUT.
106507 +                *
106508 +                * * If there is a signal pending, something wants to kill our
106509 +                *   thread, return -ERESTARTSYS.
106510 +                *
106511 +                * * If there's no signal pending, it was a spurious wake
106512 +                *   (scheduler gave us a chance to do some work, even if we
106513 +                *   don't want to). We need to remove ourselves from the
106514 +                *   bucket and add ourselves again, to prevent losing wakeups in
106515 +                *   the meantime.
106516 +                */
106518 +               ret = futex_dequeue_multiple(futexv, nr_futexes);
106520 +               /* Normal wake */
106521 +               if (ret >= 0)
106522 +                       break;
106524 +               if (timo && !timeout.task) {
106525 +                       ret = -ETIMEDOUT;
106526 +                       break;
106527 +               }
106529 +               if (signal_pending(current)) {
106530 +                       ret = -ERESTARTSYS;
106531 +                       break;
106532 +               }
106534 +               /* Spurious wake, do everything again */
106535 +       }
106537 +       if (timo)
106538 +               hrtimer_cancel(&timeout.timer);
106540 +       return ret;
106544 + * sys_futex_wait - Wait on a futex address if (*uaddr) == val
106545 + * @uaddr: User address of futex
106546 + * @val:   Expected value of futex
106547 + * @flags: Specify the size of futex and the clockid
106548 + * @timo:  Optional absolute timeout.
106550 + * The user thread is put to sleep, waiting for a futex_wake() at uaddr, if the
106551 + * value at *uaddr is the same as val (otherwise, the syscall returns
106552 + * immediately with -EAGAIN).
106554 + * Returns 0 on success, error code otherwise.
106555 + */
106556 +SYSCALL_DEFINE4(futex_wait, void __user *, uaddr, unsigned int, val,
106557 +               unsigned int, flags, struct __kernel_timespec __user *, timo)
106559 +       bool shared = (flags & FUTEX_SHARED_FLAG) ? true : false;
106560 +       unsigned int size = flags & FUTEX_SIZE_MASK;
106561 +       struct futex_waiter *waiter;
106562 +       struct futex_waiter_head *futexv;
106564 +       /* Wrapper for a futex_waiter_head with one element */
106565 +       struct {
106566 +               struct futex_waiter_head futexv;
106567 +               struct futex_waiter waiter;
106568 +       } __packed wait_single;
106570 +       if (flags & ~FUTEX2_MASK)
106571 +               return -EINVAL;
106573 +       if (size != FUTEX_32)
106574 +               return -EINVAL;
106576 +       futexv = &wait_single.futexv;
106577 +       futexv->task = current;
106578 +       futexv->hint = false;
106580 +       waiter = &wait_single.waiter;
106581 +       waiter->index = 0;
106582 +       waiter->val = val;
106583 +       waiter->uaddr = uaddr;
106584 +       memset(&wait_single.waiter.key, 0, sizeof(struct futex_key));
106586 +       INIT_LIST_HEAD(&waiter->list);
106588 +       /* Get an unlocked hash bucket */
106589 +       waiter->bucket = futex_get_bucket(uaddr, &waiter->key, shared);
106590 +       if (IS_ERR(waiter->bucket))
106591 +               return PTR_ERR(waiter->bucket);
106593 +       return __futex_waitv(futexv, 1, timo, flags);
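/*
 * Illustrative sketch (not part of the patch): a user-space wait on a
 * 32-bit futex word through this syscall. __NR_futex_wait and FUTEX_32
 * are assumed to come from the uapi headers this patch installs; nothing
 * here exists in an unpatched kernel.
 */
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/futex.h>	/* patched header: FUTEX_32 */

/* Sleeps until futex_wake(), a signal, or *uaddr != expected (-EAGAIN);
 * a NULL timeout means wait forever. */
static long futex2_wait(uint32_t *uaddr, uint32_t expected)
{
	return syscall(__NR_futex_wait, uaddr, expected, FUTEX_32, NULL);
}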
106596 +#ifdef CONFIG_COMPAT
106598 + * compat_futex_parse_waitv - Parse a waitv array from userspace
106599 + * @futexv:    Kernel side list of waiters to be filled
106600 + * @uwaitv:     Userspace list to be parsed
106601 + * @nr_futexes: Length of futexv
106603 + * Return: 0 on success, error code on failure
106604 + */
106605 +static int compat_futex_parse_waitv(struct futex_waiter_head *futexv,
106606 +                                   struct compat_futex_waitv __user *uwaitv,
106607 +                                   unsigned int nr_futexes)
106609 +       struct futex_bucket *bucket;
106610 +       struct compat_futex_waitv waitv;
106611 +       unsigned int i;
106613 +       for (i = 0; i < nr_futexes; i++) {
106614 +               if (copy_from_user(&waitv, &uwaitv[i], sizeof(waitv)))
106615 +                       return -EFAULT;
106617 +               if ((waitv.flags & ~FUTEXV_WAITER_MASK) ||
106618 +                   (waitv.flags & FUTEX_SIZE_MASK) != FUTEX_32)
106619 +                       return -EINVAL;
106621 +               futexv->objects[i].key.pointer = 0;
106622 +               futexv->objects[i].flags  = waitv.flags;
106623 +               futexv->objects[i].uaddr  = compat_ptr(waitv.uaddr);
106624 +               futexv->objects[i].val    = waitv.val;
106625 +               futexv->objects[i].index  = i;
106627 +               bucket = futex_get_bucket(compat_ptr(waitv.uaddr),
106628 +                                         &futexv->objects[i].key,
106629 +                                         is_object_shared);
106631 +               if (IS_ERR(bucket))
106632 +                       return PTR_ERR(bucket);
106634 +               futexv->objects[i].bucket = bucket;
106636 +               INIT_LIST_HEAD(&futexv->objects[i].list);
106637 +       }
106639 +       return 0;
106642 +COMPAT_SYSCALL_DEFINE4(futex_waitv, struct compat_futex_waitv __user *, waiters,
106643 +                      unsigned int, nr_futexes, unsigned int, flags,
106644 +                      struct __kernel_timespec __user *, timo)
106646 +       struct futex_waiter_head *futexv;
106647 +       int ret;
106649 +       if (flags & ~FUTEXV_MASK)
106650 +               return -EINVAL;
106652 +       if (!nr_futexes || nr_futexes > FUTEX_WAITV_MAX || !waiters)
106653 +               return -EINVAL;
106655 +       futexv = kmalloc((sizeof(struct futex_waiter) * nr_futexes) +
106656 +                        sizeof(*futexv), GFP_KERNEL);
106657 +       if (!futexv)
106658 +               return -ENOMEM;
106660 +       futexv->hint = false;
106661 +       futexv->task = current;
106663 +       ret = compat_futex_parse_waitv(futexv, waiters, nr_futexes);
106665 +       if (!ret)
106666 +               ret = __futex_waitv(futexv, nr_futexes, timo, flags);
106668 +       kfree(futexv);
106670 +       return ret;
106672 +#endif
106675 + * futex_parse_waitv - Parse a waitv array from userspace
106676 + * @futexv:    Kernel side list of waiters to be filled
106677 + * @uwaitv:     Userspace list to be parsed
106678 + * @nr_futexes: Length of futexv
106680 + * Return: 0 on success, error code on failure
106681 + */
106682 +static int futex_parse_waitv(struct futex_waiter_head *futexv,
106683 +                            struct futex_waitv __user *uwaitv,
106684 +                            unsigned int nr_futexes)
106686 +       struct futex_bucket *bucket;
106687 +       struct futex_waitv waitv;
106688 +       unsigned int i;
106690 +       for (i = 0; i < nr_futexes; i++) {
106691 +               if (copy_from_user(&waitv, &uwaitv[i], sizeof(waitv)))
106692 +                       return -EFAULT;
106694 +               if ((waitv.flags & ~FUTEXV_WAITER_MASK) ||
106695 +                   (waitv.flags & FUTEX_SIZE_MASK) != FUTEX_32)
106696 +                       return -EINVAL;
106698 +               futexv->objects[i].key.pointer = 0;
106699 +               futexv->objects[i].flags  = waitv.flags;
106700 +               futexv->objects[i].uaddr  = waitv.uaddr;
106701 +               futexv->objects[i].val    = waitv.val;
106702 +               futexv->objects[i].index  = i;
106704 +               bucket = futex_get_bucket(waitv.uaddr, &futexv->objects[i].key,
106705 +                                         is_object_shared);
106707 +               if (IS_ERR(bucket))
106708 +                       return PTR_ERR(bucket);
106710 +               futexv->objects[i].bucket = bucket;
106712 +               INIT_LIST_HEAD(&futexv->objects[i].list);
106713 +       }
106715 +       return 0;
106719 + * sys_futex_waitv - Wait on a list of futexes
106720 + * @waiters:    List of futexes to wait on
106721 + * @nr_futexes: Length of futexv
106722 + * @flags:      Flag for timeout (monotonic/realtime)
106723 + * @timo:      Optional absolute timeout.
106725 + * Given an array of `struct futex_waitv`, wait on each uaddr. The thread wakes
106726 + * if a futex_wake() is performed at any uaddr. The syscall returns immediately
106727 + * if any waiter has *uaddr != val. *timo is an optional timeout value for the
106728 + * operation. Each waiter has individual flags. The `flags` argument for the
106729 + * syscall should be used solely for specifying the timeout as realtime, if
106730 + * needed. Flags for shared futexes, sizes, etc. should be used on the
106731 + * individual flags of each waiter.
106733 + * Returns the array index of one of the woken futexes. There's no
106734 + * information on how many futexes were awakened, nor any particular attribute
106735 + * of the one returned (if it's the first awakened, if it has the smallest index...).
106736 + */
106737 +SYSCALL_DEFINE4(futex_waitv, struct futex_waitv __user *, waiters,
106738 +               unsigned int, nr_futexes, unsigned int, flags,
106739 +               struct __kernel_timespec __user *, timo)
106741 +       struct futex_waiter_head *futexv;
106742 +       int ret;
106744 +       if (flags & ~FUTEXV_MASK)
106745 +               return -EINVAL;
106747 +       if (!nr_futexes || nr_futexes > FUTEX_WAITV_MAX || !waiters)
106748 +               return -EINVAL;
106750 +       futexv = kmalloc((sizeof(struct futex_waiter) * nr_futexes) +
106751 +                        sizeof(*futexv), GFP_KERNEL);
106752 +       if (!futexv)
106753 +               return -ENOMEM;
106755 +       futexv->hint = false;
106756 +       futexv->task = current;
106758 +#ifdef CONFIG_X86_X32_ABI
106759 +       if (in_x32_syscall()) {
106760 +               ret = compat_futex_parse_waitv(futexv, (struct compat_futex_waitv *)waiters,
106761 +                                              nr_futexes);
106762 +       } else
106763 +#endif
106764 +       {
106765 +               ret = futex_parse_waitv(futexv, waiters, nr_futexes);
106766 +       }
106768 +       if (!ret)
106769 +               ret = __futex_waitv(futexv, nr_futexes, timo, flags);
106771 +       kfree(futexv);
106773 +       return ret;
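/*
 * Illustrative sketch (not part of the patch): waiting on two futex words
 * at once. struct futex_waitv, FUTEX_32 and __NR_futex_waitv are assumed
 * to come from the patched uapi headers. As documented below, per-waiter
 * flags carry the size; the syscall-level flags only select the clock.
 */
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/futex.h>	/* patched header: struct futex_waitv, FUTEX_32 */

/* Returns the array index of a woken entry (0 or 1) on success. */
static long wait_on_either(uint32_t *a, uint32_t va, uint32_t *b, uint32_t vb)
{
	struct futex_waitv w[2] = {
		{ .uaddr = a, .val = va, .flags = FUTEX_32 },
		{ .uaddr = b, .val = vb, .flags = FUTEX_32 },
	};

	return syscall(__NR_futex_waitv, w, 2, 0, NULL);
}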
106777 + * futex_get_parent - For a given futex in a futexv list, get a pointer to the futexv
106778 + * @waiter: Address of futex in the list
106779 + * @index: Index of futex in the list
106781 + * Return: A pointer to its futexv struct
106782 + */
106783 +static inline struct futex_waiter_head *futex_get_parent(uintptr_t waiter,
106784 +                                                        unsigned int index)
106786 +       uintptr_t parent = waiter - sizeof(struct futex_waiter_head)
106787 +                          - (uintptr_t)(index * sizeof(struct futex_waiter));
106789 +       return (struct futex_waiter_head *)parent;
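/*
 * Illustrative sketch (not part of the patch): the subtraction above only
 * works because the waiter array sits directly behind the head, as in the
 * __packed wait_single wrapper in sys_futex_wait(). A user-space analogue:
 */
#include <assert.h>
#include <stdint.h>

struct head { int task; };
struct waiter { int val; };

struct combo {
	struct head h;
	struct waiter w[3];	/* no padding between h and w[] here */
};

static struct head *get_parent(uintptr_t waiter, unsigned int index)
{
	return (struct head *)(waiter - sizeof(struct head)
			       - index * sizeof(struct waiter));
}

int main(void)
{
	struct combo c;

	assert(get_parent((uintptr_t)&c.w[2], 2) == &c.h);
	return 0;
}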
106793 + * futex_mark_wake - Find the task to be woken and add it to the wake queue
106794 + * @waiter: Waiter to be woken
106795 + * @bucket: Bucket to be decremented
106796 + * @wake_q: Wake queue to insert the task
106797 + */
106798 +static void futex_mark_wake(struct futex_waiter *waiter,
106799 +                           struct futex_bucket *bucket,
106800 +                           struct wake_q_head *wake_q)
106802 +       struct task_struct *task;
106803 +       struct futex_waiter_head *parent = futex_get_parent((uintptr_t)waiter,
106804 +                                                           waiter->index);
106806 +       lockdep_assert_held(&bucket->lock);
106807 +       parent->hint = true;
106808 +       task = parent->task;
106809 +       get_task_struct(task);
106810 +       list_del_init(&waiter->list);
106811 +       wake_q_add_safe(wake_q, task);
106812 +       bucket_dec_waiters(bucket);
106815 +static inline bool futex_match(struct futex_key key1, struct futex_key key2)
106817 +       return (key1.index == key2.index &&
106818 +               key1.pointer == key2.pointer &&
106819 +               key1.offset == key2.offset);
106822 +long ksys_futex_wake(void __user *uaddr, unsigned long nr_wake,
106823 +                    unsigned int flags)
106825 +       bool shared = (flags & FUTEX_SHARED_FLAG) ? true : false;
106826 +       unsigned int size = flags & FUTEX_SIZE_MASK;
106827 +       struct futex_waiter waiter, *aux, *tmp;
106828 +       struct futex_bucket *bucket;
106829 +       DEFINE_WAKE_Q(wake_q);
106830 +       int ret = 0;
106832 +       if (flags & ~FUTEX2_MASK)
106833 +               return -EINVAL;
106835 +       if (size != FUTEX_32)
106836 +               return -EINVAL;
106838 +       bucket = futex_get_bucket(uaddr, &waiter.key, shared);
106839 +       if (IS_ERR(bucket))
106840 +               return PTR_ERR(bucket);
106842 +       if (!bucket_get_waiters(bucket) || !nr_wake)
106843 +               return 0;
106845 +       spin_lock(&bucket->lock);
106846 +       list_for_each_entry_safe(aux, tmp, &bucket->list, list) {
106847 +               if (futex_match(waiter.key, aux->key)) {
106848 +                       futex_mark_wake(aux, bucket, &wake_q);
106849 +                       if (++ret >= nr_wake)
106850 +                               break;
106851 +               }
106852 +       }
106853 +       spin_unlock(&bucket->lock);
106855 +       wake_up_q(&wake_q);
106857 +       return ret;
106861 + * sys_futex_wake - Wake a number of futexes waiting on an address
106862 + * @uaddr:   Address of futex to be woken up
106863 + * @nr_wake: Number of futexes waiting at uaddr to be woken up
106864 + * @flags:   Flags for size and shared
106866 + * Wake `nr_wake` threads waiting at uaddr.
106868 + * Returns the number of woken threads on success, error code otherwise.
106869 + */
106870 +SYSCALL_DEFINE3(futex_wake, void __user *, uaddr, unsigned int, nr_wake,
106871 +               unsigned int, flags)
106873 +       return ksys_futex_wake(uaddr, nr_wake, flags);
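/*
 * Illustrative sketch (not part of the patch): the wake side pairing with
 * the wait sketch above; __NR_futex_wake and FUTEX_32 are again assumed
 * to come from the patched uapi headers. The futex word is changed first,
 * so a concurrent futex_wait() seeing the new value fails with -EAGAIN
 * instead of going to sleep and missing this wakeup.
 */
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/futex.h>	/* patched header: FUTEX_32 */

static long futex2_wake_one(uint32_t *uaddr)
{
	__atomic_store_n(uaddr, 1, __ATOMIC_SEQ_CST);	/* publish new state */
	return syscall(__NR_futex_wake, uaddr, 1, FUTEX_32);
}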
106876 +static void futex_double_unlock(struct futex_bucket *b1, struct futex_bucket *b2)
106878 +       spin_unlock(&b1->lock);
106879 +       if (b1 != b2)
106880 +               spin_unlock(&b2->lock);
106883 +static inline int __futex_requeue(struct futex_requeue rq1,
106884 +                                 struct futex_requeue rq2, unsigned int nr_wake,
106885 +                                 unsigned int nr_requeue, unsigned int cmpval,
106886 +                                 bool shared1, bool shared2)
106888 +       struct futex_waiter w1, w2, *aux, *tmp;
106889 +       bool retry = false;
106890 +       struct futex_bucket *b1, *b2;
106891 +       DEFINE_WAKE_Q(wake_q);
106892 +       u32 uval;
106893 +       int ret;
106895 +       b1 = futex_get_bucket(rq1.uaddr, &w1.key, shared1);
106896 +       if (IS_ERR(b1))
106897 +               return PTR_ERR(b1);
106899 +       b2 = futex_get_bucket(rq2.uaddr, &w2.key, shared2);
106900 +       if (IS_ERR(b2))
106901 +               return PTR_ERR(b2);
106903 +retry:
106904 +       if (shared1 && retry) {
106905 +               b1 = futex_get_bucket(rq1.uaddr, &w1.key, shared1);
106906 +               if (IS_ERR(b1))
106907 +                       return PTR_ERR(b1);
106908 +       }
106910 +       if (shared2 && retry) {
106911 +               b2 = futex_get_bucket(rq2.uaddr, &w2.key, shared2);
106912 +               if (IS_ERR(b2))
106913 +                       return PTR_ERR(b2);
106914 +       }
106916 +       bucket_inc_waiters(b2);
106917 +       /*
106918 +        * To ensure the locks are taken in the same order for all threads (and
106919 +        * thus avoiding deadlocks), take the "smaller" one first
106920 +        */
106921 +       if (b1 <= b2) {
106922 +               spin_lock(&b1->lock);
106923 +               if (b1 < b2)
106924 +                       spin_lock_nested(&b2->lock, SINGLE_DEPTH_NESTING);
106925 +       } else {
106926 +               spin_lock(&b2->lock);
106927 +               spin_lock_nested(&b1->lock, SINGLE_DEPTH_NESTING);
106928 +       }
106930 +       ret = futex_get_user(&uval, rq1.uaddr);
106932 +       if (unlikely(ret)) {
106933 +               futex_double_unlock(b1, b2);
106934 +               if (__get_user(uval, (u32 __user *)rq1.uaddr))
106935 +                       return -EFAULT;
106937 +               bucket_dec_waiters(b2);
106938 +               retry = true;
106939 +               goto retry;
106940 +       }
106942 +       if (uval != cmpval) {
106943 +               futex_double_unlock(b1, b2);
106945 +               bucket_dec_waiters(b2);
106946 +               return -EAGAIN;
106947 +       }
106949 +       list_for_each_entry_safe(aux, tmp, &b1->list, list) {
106950 +               if (futex_match(w1.key, aux->key)) {
106951 +                       if (ret < nr_wake) {
106952 +                               futex_mark_wake(aux, b1, &wake_q);
106953 +                               ret++;
106954 +                               continue;
106955 +                       }
106957 +                       if (ret >= nr_wake + nr_requeue)
106958 +                               break;
106960 +                       aux->key.pointer = w2.key.pointer;
106961 +                       aux->key.index = w2.key.index;
106962 +                       aux->key.offset = w2.key.offset;
106964 +                       if (b1 != b2) {
106965 +                               list_del_init(&aux->list);
106966 +                               bucket_dec_waiters(b1);
106968 +                               list_add_tail(&aux->list, &b2->list);
106969 +                               bucket_inc_waiters(b2);
106970 +                       }
106971 +                       ret++;
106972 +               }
106973 +       }
106975 +       futex_double_unlock(b1, b2);
106976 +       wake_up_q(&wake_q);
106977 +       bucket_dec_waiters(b2);
106979 +       return ret;
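/*
 * Illustrative sketch (not part of the patch): the address-ordered double
 * lock used above, in isolation. Always taking the lower-addressed lock
 * first gives every thread the same global order, so two requeues running
 * in opposite directions cannot deadlock; the b1 == b2 case must be
 * locked only once, mirroring futex_double_unlock().
 */
#include <pthread.h>

static void double_lock(pthread_mutex_t *b1, pthread_mutex_t *b2)
{
	if (b1 == b2) {
		pthread_mutex_lock(b1);
		return;
	}
	if (b1 < b2) {
		pthread_mutex_lock(b1);
		pthread_mutex_lock(b2);
	} else {
		pthread_mutex_lock(b2);
		pthread_mutex_lock(b1);
	}
}

static void double_unlock(pthread_mutex_t *b1, pthread_mutex_t *b2)
{
	pthread_mutex_unlock(b1);
	if (b1 != b2)
		pthread_mutex_unlock(b2);
}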
106982 +#ifdef CONFIG_COMPAT
106983 +static int compat_futex_parse_requeue(struct futex_requeue *rq,
106984 +                                     struct compat_futex_requeue __user *uaddr,
106985 +                                     bool *shared)
106987 +       struct compat_futex_requeue tmp;
106989 +       if (copy_from_user(&tmp, uaddr, sizeof(tmp)))
106990 +               return -EFAULT;
106992 +       if (tmp.flags & ~FUTEXV_WAITER_MASK ||
106993 +           (tmp.flags & FUTEX_SIZE_MASK) != FUTEX_32)
106994 +               return -EINVAL;
106996 +       *shared = (tmp.flags & FUTEX_SHARED_FLAG) ? true : false;
106998 +       rq->uaddr = compat_ptr(tmp.uaddr);
106999 +       rq->flags = tmp.flags;
107001 +       return 0;
107004 +COMPAT_SYSCALL_DEFINE6(futex_requeue, struct compat_futex_requeue __user *, uaddr1,
107005 +                      struct compat_futex_requeue __user *, uaddr2,
107006 +                      unsigned int, nr_wake, unsigned int, nr_requeue,
107007 +                      unsigned int, cmpval, unsigned int, flags)
107009 +       struct futex_requeue rq1, rq2;
107010 +       bool shared1, shared2;
107011 +       int ret;
107013 +       if (flags)
107014 +               return -EINVAL;
107016 +       ret = compat_futex_parse_requeue(&rq1, uaddr1, &shared1);
107017 +       if (ret)
107018 +               return ret;
107020 +       ret = compat_futex_parse_requeue(&rq2, uaddr2, &shared2);
107021 +       if (ret)
107022 +               return ret;
107024 +       return __futex_requeue(rq1, rq2, nr_wake, nr_requeue, cmpval, shared1, shared2);
107026 +#endif
107029 + * futex_parse_requeue - Copy a user struct futex_requeue and check its flags
107030 + * @rq:    Kernel struct
107031 + * @uaddr: Address of user struct
107032 + * @shared: Out parameter, defines if this is a shared futex
107034 + * Return: 0 on success, error code otherwise
107035 + */
107036 +static int futex_parse_requeue(struct futex_requeue *rq,
107037 +                              struct futex_requeue __user *uaddr, bool *shared)
107039 +       if (copy_from_user(rq, uaddr, sizeof(*rq)))
107040 +               return -EFAULT;
107042 +       if (rq->flags & ~FUTEXV_WAITER_MASK ||
107043 +           (rq->flags & FUTEX_SIZE_MASK) != FUTEX_32)
107044 +               return -EINVAL;
107046 +       *shared = (rq->flags & FUTEX_SHARED_FLAG) ? true : false;
107048 +       return 0;
107052 + * sys_futex_requeue - Wake futexes at uaddr1 and requeue from uaddr1 to uaddr2
107053 + * @uaddr1:    Address of futexes to be woken/dequeued
107054 + * @uaddr2:    Address for the futexes to be enqueued
107055 + * @nr_wake:   Number of futexes waiting in uaddr1 to be woken up
107056 + * @nr_requeue: Number of futexes to be requeued from uaddr1 to uaddr2
107057 + * @cmpval:    Expected value at uaddr1
107058 + * @flags:     Reserved flags arg for requeue operation expansion. Must be 0.
107060 + * If the value at uaddr1->uaddr equals cmpval, wake up to nr_wake waiters
107061 + * at uaddr1->uaddr and then remove up to nr_requeue waiters from uaddr1->uaddr
107062 + * and add them to the uaddr2->uaddr list. Each uaddr has its own set of flags,
107063 + * which must be defined in struct futex_requeue (such as size, shared, NUMA).
107065 + * Return the number of woken futexes plus the number of requeued ones on
107066 + * success, error code otherwise.
107067 + */
107068 +SYSCALL_DEFINE6(futex_requeue, struct futex_requeue __user *, uaddr1,
107069 +               struct futex_requeue __user *, uaddr2,
107070 +               unsigned int, nr_wake, unsigned int, nr_requeue,
107071 +               unsigned int, cmpval, unsigned int, flags)
107073 +       struct futex_requeue rq1, rq2;
107074 +       bool shared1, shared2;
107075 +       int ret;
107077 +       if (flags)
107078 +               return -EINVAL;
107080 +#ifdef CONFIG_X86_X32_ABI
107081 +       if (in_x32_syscall()) {
107082 +               ret = compat_futex_parse_requeue(&rq1, (struct compat_futex_requeue *)uaddr1,
107083 +                                                &shared1);
107084 +               if (ret)
107085 +                       return ret;
107087 +               ret = compat_futex_parse_requeue(&rq2, (struct compat_futex_requeue *)uaddr2,
107088 +                                                &shared2);
107089 +               if (ret)
107090 +                       return ret;
107091 +       } else
107092 +#endif
107093 +       {
107094 +               ret = futex_parse_requeue(&rq1, uaddr1, &shared1);
107095 +               if (ret)
107096 +                       return ret;
107098 +               ret = futex_parse_requeue(&rq2, uaddr2, &shared2);
107099 +               if (ret)
107100 +                       return ret;
107101 +       }
107103 +       return __futex_requeue(rq1, rq2, nr_wake, nr_requeue, cmpval, shared1, shared2);
107106 +static int __init futex2_init(void)
107108 +       int i;
107109 +       unsigned int futex_shift;
107111 +#if CONFIG_BASE_SMALL
107112 +       futex2_hashsize = 16;
107113 +#else
107114 +       futex2_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
107115 +#endif
107117 +       futex_table = alloc_large_system_hash("futex2", sizeof(struct futex_bucket),
107118 +                                             futex2_hashsize, 0,
107119 +                                             futex2_hashsize < 256 ? HASH_SMALL : 0,
107120 +                                             &futex_shift, NULL,
107121 +                                             futex2_hashsize, futex2_hashsize);
107122 +       futex2_hashsize = 1UL << futex_shift;
107124 +       BUG_ON(!is_power_of_2(futex2_hashsize));
107126 +       for (i = 0; i < futex2_hashsize; i++) {
107127 +               INIT_LIST_HEAD(&futex_table[i].list);
107128 +               spin_lock_init(&futex_table[i].lock);
107129 +               atomic_set(&futex_table[i].waiters, 0);
107130 +       }
107132 +       return 0;
107134 +core_initcall(futex2_init);
107135 diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
107136 index 651a4ad6d711..8e586858bcf4 100644
107137 --- a/kernel/irq/matrix.c
107138 +++ b/kernel/irq/matrix.c
107139 @@ -423,7 +423,9 @@ void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
107140         if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
107141                 return;
107143 -       clear_bit(bit, cm->alloc_map);
107144 +       if (WARN_ON_ONCE(!test_and_clear_bit(bit, cm->alloc_map)))
107145 +               return;
107147         cm->allocated--;
107148         if(managed)
107149                 cm->managed_allocated--;
107150 diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
107151 index 3bf98db9c702..23e7acb5c667 100644
107152 --- a/kernel/kcsan/core.c
107153 +++ b/kernel/kcsan/core.c
107154 @@ -639,8 +639,6 @@ void __init kcsan_init(void)
107156         BUG_ON(!in_task());
107158 -       kcsan_debugfs_init();
107160         for_each_possible_cpu(cpu)
107161                 per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();
107163 diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
107164 index 3c8093a371b1..62a52be8f6ba 100644
107165 --- a/kernel/kcsan/debugfs.c
107166 +++ b/kernel/kcsan/debugfs.c
107167 @@ -261,7 +261,10 @@ static const struct file_operations debugfs_ops =
107168         .release = single_release
107171 -void __init kcsan_debugfs_init(void)
107172 +static int __init kcsan_debugfs_init(void)
107174         debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops);
107175 +       return 0;
107178 +late_initcall(kcsan_debugfs_init);
107179 diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
107180 index 8d4bf3431b3c..87ccdb3b051f 100644
107181 --- a/kernel/kcsan/kcsan.h
107182 +++ b/kernel/kcsan/kcsan.h
107183 @@ -30,11 +30,6 @@ extern bool kcsan_enabled;
107184  void kcsan_save_irqtrace(struct task_struct *task);
107185  void kcsan_restore_irqtrace(struct task_struct *task);
107188 - * Initialize debugfs file.
107189 - */
107190 -void kcsan_debugfs_init(void);
107193   * Statistics counters displayed via debugfs; should only be modified in
107194   * slow-paths.
107195 diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
107196 index 5c3447cf7ad5..33400ff051a8 100644
107197 --- a/kernel/kexec_file.c
107198 +++ b/kernel/kexec_file.c
107199 @@ -740,8 +740,10 @@ static int kexec_calculate_store_digests(struct kimage *image)
107201         sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region);
107202         sha_regions = vzalloc(sha_region_sz);
107203 -       if (!sha_regions)
107204 +       if (!sha_regions) {
107205 +               ret = -ENOMEM;
107206                 goto out_free_desc;
107207 +       }
107209         desc->tfm   = tfm;
107211 diff --git a/kernel/kthread.c b/kernel/kthread.c
107212 index 1578973c5740..3b8dfbc24a22 100644
107213 --- a/kernel/kthread.c
107214 +++ b/kernel/kthread.c
107215 @@ -84,6 +84,25 @@ static inline struct kthread *to_kthread(struct task_struct *k)
107216         return (__force void *)k->set_child_tid;
107220 + * Variant of to_kthread() that doesn't assume @p is a kthread.
107222 + * Per construction; when:
107224 + *   (p->flags & PF_KTHREAD) && p->set_child_tid
107226 + * the task is both a kthread and struct kthread is persistent. However
107227 + * PF_KTHREAD on its own is not, kernel_thread() can exec() (See umh.c and
107228 + * begin_new_exec()).
107229 + */
107230 +static inline struct kthread *__to_kthread(struct task_struct *p)
107232 +       void *kthread = (__force void *)p->set_child_tid;
107233 +       if (kthread && !(p->flags & PF_KTHREAD))
107234 +               kthread = NULL;
107235 +       return kthread;
107238  void free_kthread_struct(struct task_struct *k)
107240         struct kthread *kthread;
107241 @@ -168,8 +187,9 @@ EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
107242   */
107243  void *kthread_func(struct task_struct *task)
107245 -       if (task->flags & PF_KTHREAD)
107246 -               return to_kthread(task)->threadfn;
107247 +       struct kthread *kthread = __to_kthread(task);
107248 +       if (kthread)
107249 +               return kthread->threadfn;
107250         return NULL;
107252  EXPORT_SYMBOL_GPL(kthread_func);
107253 @@ -199,10 +219,11 @@ EXPORT_SYMBOL_GPL(kthread_data);
107254   */
107255  void *kthread_probe_data(struct task_struct *task)
107257 -       struct kthread *kthread = to_kthread(task);
107258 +       struct kthread *kthread = __to_kthread(task);
107259         void *data = NULL;
107261 -       copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
107262 +       if (kthread)
107263 +               copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
107264         return data;
107267 @@ -514,9 +535,9 @@ void kthread_set_per_cpu(struct task_struct *k, int cpu)
107268         set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
107271 -bool kthread_is_per_cpu(struct task_struct *k)
107272 +bool kthread_is_per_cpu(struct task_struct *p)
107274 -       struct kthread *kthread = to_kthread(k);
107275 +       struct kthread *kthread = __to_kthread(p);
107276         if (!kthread)
107277                 return false;
107279 @@ -1303,6 +1324,7 @@ void kthread_use_mm(struct mm_struct *mm)
107280         tsk->mm = mm;
107281         membarrier_update_current_mm(mm);
107282         switch_mm_irqs_off(active_mm, mm, tsk);
107283 +       lru_gen_switch_mm(active_mm, mm);
107284         local_irq_enable();
107285         task_unlock(tsk);
107286  #ifdef finish_arch_post_lock_switch
107287 diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
107288 index f160f1c97ca1..f39c383c7180 100644
107289 --- a/kernel/locking/lockdep.c
107290 +++ b/kernel/locking/lockdep.c
107291 @@ -5731,7 +5731,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
107293         unsigned long flags;
107295 -       trace_lock_acquired(lock, ip);
107296 +       trace_lock_contended(lock, ip);
107298         if (unlikely(!lock_stat || !lockdep_enabled()))
107299                 return;
107300 @@ -5749,7 +5749,7 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
107302         unsigned long flags;
107304 -       trace_lock_contended(lock, ip);
107305 +       trace_lock_acquired(lock, ip);
107307         if (unlikely(!lock_stat || !lockdep_enabled()))
107308                 return;
107309 diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
107310 index a7276aaf2abc..db9301591e3f 100644
107311 --- a/kernel/locking/mutex-debug.c
107312 +++ b/kernel/locking/mutex-debug.c
107313 @@ -57,7 +57,7 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
107314         task->blocked_on = waiter;
107317 -void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
107318 +void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
107319                          struct task_struct *task)
107321         DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
107322 @@ -65,7 +65,7 @@ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
107323         DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
107324         task->blocked_on = NULL;
107326 -       list_del_init(&waiter->list);
107327 +       INIT_LIST_HEAD(&waiter->list);
107328         waiter->task = NULL;
107331 diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
107332 index 1edd3f45a4ec..53e631e1d76d 100644
107333 --- a/kernel/locking/mutex-debug.h
107334 +++ b/kernel/locking/mutex-debug.h
107335 @@ -22,7 +22,7 @@ extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
107336  extern void debug_mutex_add_waiter(struct mutex *lock,
107337                                    struct mutex_waiter *waiter,
107338                                    struct task_struct *task);
107339 -extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
107340 +extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
107341                                 struct task_struct *task);
107342  extern void debug_mutex_unlock(struct mutex *lock);
107343  extern void debug_mutex_init(struct mutex *lock, const char *name,
107344 diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
107345 index 622ebdfcd083..3899157c13b1 100644
107346 --- a/kernel/locking/mutex.c
107347 +++ b/kernel/locking/mutex.c
107348 @@ -194,7 +194,7 @@ static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_wait
107349   * Add @waiter to a given location in the lock wait_list and set the
107350   * FLAG_WAITERS flag if it's the first waiter.
107351   */
107352 -static void __sched
107353 +static void
107354  __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
107355                    struct list_head *list)
107357 @@ -205,6 +205,16 @@ __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
107358                 __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
107361 +static void
107362 +__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
107364 +       list_del(&waiter->list);
107365 +       if (likely(list_empty(&lock->wait_list)))
107366 +               __mutex_clear_flag(lock, MUTEX_FLAGS);
107368 +       debug_mutex_remove_waiter(lock, waiter, current);
107372   * Give up ownership to a specific task, when @task = NULL, this is equivalent
107373   * to a regular unlock. Sets PICKUP on a handoff, clears HANDOF, preserves
107374 @@ -1061,9 +1071,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
107375                         __ww_mutex_check_waiters(lock, ww_ctx);
107376         }
107378 -       mutex_remove_waiter(lock, &waiter, current);
107379 -       if (likely(list_empty(&lock->wait_list)))
107380 -               __mutex_clear_flag(lock, MUTEX_FLAGS);
107381 +       __mutex_remove_waiter(lock, &waiter);
107383         debug_mutex_free_waiter(&waiter);
107385 @@ -1080,7 +1088,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
107387  err:
107388         __set_current_state(TASK_RUNNING);
107389 -       mutex_remove_waiter(lock, &waiter, current);
107390 +       __mutex_remove_waiter(lock, &waiter);
107391  err_early_kill:
107392         spin_unlock(&lock->wait_lock);
107393         debug_mutex_free_waiter(&waiter);
107394 diff --git a/kernel/locking/mutex.h b/kernel/locking/mutex.h
107395 index 1c2287d3fa71..f0c710b1d192 100644
107396 --- a/kernel/locking/mutex.h
107397 +++ b/kernel/locking/mutex.h
107398 @@ -10,12 +10,10 @@
107399   * !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
107400   */
107402 -#define mutex_remove_waiter(lock, waiter, task) \
107403 -               __list_del((waiter)->list.prev, (waiter)->list.next)
107405  #define debug_mutex_wake_waiter(lock, waiter)          do { } while (0)
107406  #define debug_mutex_free_waiter(waiter)                        do { } while (0)
107407  #define debug_mutex_add_waiter(lock, waiter, ti)       do { } while (0)
107408 +#define debug_mutex_remove_waiter(lock, waiter, ti)     do { } while (0)
107409  #define debug_mutex_unlock(lock)                       do { } while (0)
107410  #define debug_mutex_init(lock, name, key)              do { } while (0)
107412 diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
107413 index abba5df50006..b9fab2d55b93 100644
107414 --- a/kernel/locking/rwsem.c
107415 +++ b/kernel/locking/rwsem.c
107416 @@ -668,6 +668,7 @@ rwsem_spin_on_owner(struct rw_semaphore *sem)
107417         struct task_struct *new, *owner;
107418         unsigned long flags, new_flags;
107419         enum owner_state state;
107420 +       int i = 0;
107422         owner = rwsem_owner_flags(sem, &flags);
107423         state = rwsem_owner_state(owner, flags);
107424 @@ -701,7 +702,8 @@ rwsem_spin_on_owner(struct rw_semaphore *sem)
107425                         break;
107426                 }
107428 -               cpu_relax();
107429 +               if (i++ > 1000)
107430 +                       cpu_relax();
107431         }
107432         rcu_read_unlock();
107434 diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
107435 index 575a34b88936..77ae2704e979 100644
107436 --- a/kernel/printk/printk.c
107437 +++ b/kernel/printk/printk.c
107438 @@ -1494,6 +1494,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
107439         struct printk_info info;
107440         unsigned int line_count;
107441         struct printk_record r;
107442 +       u64 max_seq;
107443         char *text;
107444         int len = 0;
107445         u64 seq;
107446 @@ -1512,9 +1513,15 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
107447         prb_for_each_info(clear_seq, prb, seq, &info, &line_count)
107448                 len += get_record_print_text_size(&info, line_count, true, time);
107450 +       /*
107451 +        * Set an upper bound for the next loop to avoid subtracting lengths
107452 +        * that were never added.
107453 +        */
107454 +       max_seq = seq;
107456         /* move first record forward until length fits into the buffer */
107457         prb_for_each_info(clear_seq, prb, seq, &info, &line_count) {
107458 -               if (len <= size)
107459 +               if (len <= size || info.seq >= max_seq)
107460                         break;
107461                 len -= get_record_print_text_size(&info, line_count, true, time);
107462         }
107463 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
107464 index 61db50f7ca86..5f50fdd1d855 100644
107465 --- a/kernel/ptrace.c
107466 +++ b/kernel/ptrace.c
107467 @@ -169,6 +169,21 @@ void __ptrace_unlink(struct task_struct *child)
107468         spin_unlock(&child->sighand->siglock);
107471 +static bool looks_like_a_spurious_pid(struct task_struct *task)
107473 +       if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP))
107474 +               return false;
107476 +       if (task_pid_vnr(task) == task->ptrace_message)
107477 +               return false;
107478 +       /*
107479 +        * The tracee changed its pid but the PTRACE_EVENT_EXEC event
107480 +        * was not wait()'ed, most probably debugger targets the old
107481 +        * leader which was destroyed in de_thread().
107482 +        */
107483 +       return true;
107486  /* Ensure that nothing can wake it up, even SIGKILL */
107487  static bool ptrace_freeze_traced(struct task_struct *task)
107489 @@ -179,7 +194,8 @@ static bool ptrace_freeze_traced(struct task_struct *task)
107490                 return ret;
107492         spin_lock_irq(&task->sighand->siglock);
107493 -       if (task_is_traced(task) && !__fatal_signal_pending(task)) {
107494 +       if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
107495 +           !__fatal_signal_pending(task)) {
107496                 task->state = __TASK_TRACED;
107497                 ret = true;
107498         }
107499 diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig
107500 index 3128b7cf8e1f..abfae9afbdc8 100644
107501 --- a/kernel/rcu/Kconfig
107502 +++ b/kernel/rcu/Kconfig
107503 @@ -189,8 +189,8 @@ config RCU_FAST_NO_HZ
107505  config RCU_BOOST
107506         bool "Enable RCU priority boosting"
107507 -       depends on (RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT) || PREEMPT_RT
107508 -       default y if PREEMPT_RT
107509 +       depends on (RT_MUTEXES && PREEMPT_RCU) || PREEMPT_RT
107510 +       default y
107511         help
107512           This option boosts the priority of preempted RCU readers that
107513           block the current preemptible RCU grace period for too long.
107514 @@ -204,7 +204,7 @@ config RCU_BOOST_DELAY
107515         int "Milliseconds to delay boosting after RCU grace-period start"
107516         range 0 3000
107517         depends on RCU_BOOST
107518 -       default 500
107519 +       default 0
107520         help
107521           This option specifies the time to wait after the beginning of
107522           a given grace period before priority-boosting preempted RCU
107523 diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
107524 index da6f5213fb74..7356764e49a0 100644
107525 --- a/kernel/rcu/tree.c
107526 +++ b/kernel/rcu/tree.c
107527 @@ -1077,7 +1077,6 @@ noinstr void rcu_nmi_enter(void)
107528         } else if (!in_nmi()) {
107529                 instrumentation_begin();
107530                 rcu_irq_enter_check_tick();
107531 -               instrumentation_end();
107532         } else  {
107533                 instrumentation_begin();
107534         }
107535 @@ -3464,7 +3463,7 @@ static void fill_page_cache_func(struct work_struct *work)
107537         for (i = 0; i < rcu_min_cached_objs; i++) {
107538                 bnode = (struct kvfree_rcu_bulk_data *)
107539 -                       __get_free_page(GFP_KERNEL | __GFP_NOWARN);
107540 +                       __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
107542                 if (bnode) {
107543                         raw_spin_lock_irqsave(&krcp->lock, flags);
107544 diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
107545 index 2d603771c7dc..0796a75b6e0e 100644
107546 --- a/kernel/rcu/tree_plugin.h
107547 +++ b/kernel/rcu/tree_plugin.h
107548 @@ -1646,7 +1646,11 @@ static bool wake_nocb_gp(struct rcu_data *rdp, bool force,
107549                 rcu_nocb_unlock_irqrestore(rdp, flags);
107550                 return false;
107551         }
107552 -       del_timer(&rdp->nocb_timer);
107554 +       if (READ_ONCE(rdp->nocb_defer_wakeup) > RCU_NOCB_WAKE_NOT) {
107555 +               WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
107556 +               del_timer(&rdp->nocb_timer);
107557 +       }
107558         rcu_nocb_unlock_irqrestore(rdp, flags);
107559         raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
107560         if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
107561 @@ -2265,7 +2269,6 @@ static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
107562                 return false;
107563         }
107564         ndw = READ_ONCE(rdp->nocb_defer_wakeup);
107565 -       WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
107566         ret = wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
107567         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
107569 diff --git a/kernel/resource.c b/kernel/resource.c
107570 index 627e61b0c124..16e0c7e8ed24 100644
107571 --- a/kernel/resource.c
107572 +++ b/kernel/resource.c
107573 @@ -457,7 +457,7 @@ int walk_system_ram_res(u64 start, u64 end, void *arg,
107575         unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
107577 -       return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
107578 +       return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, false,
107579                                      arg, func);
107582 @@ -470,7 +470,7 @@ int walk_mem_res(u64 start, u64 end, void *arg,
107584         unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
107586 -       return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
107587 +       return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, false,
107588                                      arg, func);
107591 diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c
107592 index 2067080bb235..573b313efe4c 100644
107593 --- a/kernel/sched/autogroup.c
107594 +++ b/kernel/sched/autogroup.c
107595 @@ -5,7 +5,8 @@
107596  #include <linux/nospec.h>
107597  #include "sched.h"
107599 -unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
107600 +unsigned int __read_mostly sysctl_sched_autogroup_enabled =
107601 +               IS_ENABLED(CONFIG_SCHED_AUTOGROUP_DEFAULT_ENABLED) ? 1 : 0;
107602  static struct autogroup autogroup_default;
107603  static atomic_t autogroup_seq_nr;
107605 @@ -197,11 +198,12 @@ void sched_autogroup_exit(struct signal_struct *sig)
107607  static int __init setup_autogroup(char *str)
107609 -       sysctl_sched_autogroup_enabled = 0;
107611 +       unsigned long enabled;
107612 +       if (!kstrtoul(str, 0, &enabled))
107613 +               sysctl_sched_autogroup_enabled = enabled ? 1 : 0;
107614         return 1;
107616 -__setup("noautogroup", setup_autogroup);
107617 +__setup("autogroup=", setup_autogroup);
107619  #ifdef CONFIG_PROC_FS
107621 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
107622 index 98191218d891..cd3bb52378c0 100644
107623 --- a/kernel/sched/core.c
107624 +++ b/kernel/sched/core.c
107625 @@ -928,7 +928,7 @@ DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
107627  static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
107629 -       return clamp_value / UCLAMP_BUCKET_DELTA;
107630 +       return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
107633  static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
107634 @@ -4306,6 +4306,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
107635                  * finish_task_switch()'s mmdrop().
107636                  */
107637                 switch_mm_irqs_off(prev->active_mm, next->mm, next);
107638 +               lru_gen_switch_mm(prev->active_mm, next->mm);
107640                 if (!prev->mm) {                        // from kernel
107641                         /* will mmdrop() in finish_task_switch(). */
107642 @@ -5765,6 +5766,7 @@ int can_nice(const struct task_struct *p, const int nice)
107643         return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
107644                 capable(CAP_SYS_NICE));
107646 +EXPORT_SYMBOL(can_nice);
107648  #ifdef __ARCH_WANT_SYS_NICE
107650 @@ -7597,6 +7599,7 @@ void idle_task_exit(void)
107652         if (mm != &init_mm) {
107653                 switch_mm(mm, &init_mm, current);
107654 +               lru_gen_switch_mm(mm, &init_mm);
107655                 finish_arch_post_lock_switch();
107656         }
107658 @@ -7652,7 +7655,7 @@ static void balance_push(struct rq *rq)
107659          * histerical raisins.
107660          */
107661         if (rq->idle == push_task ||
107662 -           ((push_task->flags & PF_KTHREAD) && kthread_is_per_cpu(push_task)) ||
107663 +           kthread_is_per_cpu(push_task) ||
107664             is_migration_disabled(push_task)) {
107666                 /*
107667 diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
107668 index 486f403a778b..9c8b3ed2199a 100644
107669 --- a/kernel/sched/debug.c
107670 +++ b/kernel/sched/debug.c
107671 @@ -8,8 +8,6 @@
107672   */
107673  #include "sched.h"
107675 -static DEFINE_SPINLOCK(sched_debug_lock);
107678   * This allows printing both to /proc/sched_debug and
107679   * to the console
107680 @@ -470,16 +468,37 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
107681  #endif
107683  #ifdef CONFIG_CGROUP_SCHED
107684 +static DEFINE_SPINLOCK(sched_debug_lock);
107685  static char group_path[PATH_MAX];
107687 -static char *task_group_path(struct task_group *tg)
107688 +static void task_group_path(struct task_group *tg, char *path, int plen)
107690 -       if (autogroup_path(tg, group_path, PATH_MAX))
107691 -               return group_path;
107692 +       if (autogroup_path(tg, path, plen))
107693 +               return;
107695 -       cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
107696 +       cgroup_path(tg->css.cgroup, path, plen);
107699 -       return group_path;
107701 + * Only 1 SEQ_printf_task_group_path() caller can use the full length
107702 + * group_path[] for cgroup path. Other simultaneous callers will have
107703 + * to use a shorter stack buffer. A "..." suffix is appended at the end
107704 + * of the stack buffer so that it will show up in case the output length
107705 + * matches the given buffer size to indicate possible path name truncation.
107706 + */
107707 +#define SEQ_printf_task_group_path(m, tg, fmt...)                      \
107708 +{                                                                      \
107709 +       if (spin_trylock(&sched_debug_lock)) {                          \
107710 +               task_group_path(tg, group_path, sizeof(group_path));    \
107711 +               SEQ_printf(m, fmt, group_path);                         \
107712 +               spin_unlock(&sched_debug_lock);                         \
107713 +       } else {                                                        \
107714 +               char buf[128];                                          \
107715 +               char *bufend = buf + sizeof(buf) - 3;                   \
107716 +               task_group_path(tg, buf, bufend - buf);                 \
107717 +               strcpy(bufend - 1, "...");                              \
107718 +               SEQ_printf(m, fmt, buf);                                \
107719 +       }                                                               \
107721  #endif
107723 @@ -506,7 +525,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
107724         SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
107725  #endif
107726  #ifdef CONFIG_CGROUP_SCHED
107727 -       SEQ_printf(m, " %s", task_group_path(task_group(p)));
107728 +       SEQ_printf_task_group_path(m, task_group(p), " %s")
107729  #endif
107731         SEQ_printf(m, "\n");
107732 @@ -543,7 +562,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
107734  #ifdef CONFIG_FAIR_GROUP_SCHED
107735         SEQ_printf(m, "\n");
107736 -       SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
107737 +       SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
107738  #else
107739         SEQ_printf(m, "\n");
107740         SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
107741 @@ -614,7 +633,7 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
107743  #ifdef CONFIG_RT_GROUP_SCHED
107744         SEQ_printf(m, "\n");
107745 -       SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
107746 +       SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
107747  #else
107748         SEQ_printf(m, "\n");
107749         SEQ_printf(m, "rt_rq[%d]:\n", cpu);
107750 @@ -666,7 +685,6 @@ void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
107751  static void print_cpu(struct seq_file *m, int cpu)
107753         struct rq *rq = cpu_rq(cpu);
107754 -       unsigned long flags;
107756  #ifdef CONFIG_X86
107757         {
107758 @@ -717,13 +735,11 @@ do {                                                                      \
107759         }
107760  #undef P
107762 -       spin_lock_irqsave(&sched_debug_lock, flags);
107763         print_cfs_stats(m, cpu);
107764         print_rt_stats(m, cpu);
107765         print_dl_stats(m, cpu);
107767         print_rq(m, rq, cpu);
107768 -       spin_unlock_irqrestore(&sched_debug_lock, flags);
107769         SEQ_printf(m, "\n");
107772 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
107773 index 794c2cb945f8..883b6fe91ca5 100644
107774 --- a/kernel/sched/fair.c
107775 +++ b/kernel/sched/fair.c
107776 @@ -682,7 +682,13 @@ static u64 __sched_period(unsigned long nr_running)
107777   */
107778  static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
107780 -       u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
107781 +       unsigned int nr_running = cfs_rq->nr_running;
107782 +       u64 slice;
107784 +       if (sched_feat(ALT_PERIOD))
107785 +               nr_running = rq_of(cfs_rq)->cfs.h_nr_running;
107787 +       slice = __sched_period(nr_running + !se->on_rq);
107789         for_each_sched_entity(se) {
107790                 struct load_weight *load;
107791 @@ -699,6 +705,10 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
107792                 }
107793                 slice = __calc_delta(slice, se->load.weight, load);
107794         }
107796 +       if (sched_feat(BASE_SLICE))
107797 +               slice = max(slice, (u64)sysctl_sched_min_granularity);
107799         return slice;
107802 @@ -3941,6 +3951,8 @@ static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
107803         trace_sched_util_est_cfs_tp(cfs_rq);
107806 +#define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / 100)
107809   * Check if a (signed) value is within a specified (unsigned) margin,
107810   * based on the observation that:
107811 @@ -3958,7 +3970,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
107812                                    struct task_struct *p,
107813                                    bool task_sleep)
107815 -       long last_ewma_diff;
107816 +       long last_ewma_diff, last_enqueued_diff;
107817         struct util_est ue;
107819         if (!sched_feat(UTIL_EST))
107820 @@ -3979,6 +3991,8 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
107821         if (ue.enqueued & UTIL_AVG_UNCHANGED)
107822                 return;
107824 +       last_enqueued_diff = ue.enqueued;
107826         /*
107827          * Reset EWMA on utilization increases, the moving average is used only
107828          * to smooth utilization decreases.
107829 @@ -3992,12 +4006,17 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
107830         }
107832         /*
107833 -        * Skip update of task's estimated utilization when its EWMA is
107834 +        * Skip update of task's estimated utilization when its members are
107835          * already ~1% close to its last activation value.
107836          */
107837         last_ewma_diff = ue.enqueued - ue.ewma;
107838 -       if (within_margin(last_ewma_diff, (SCHED_CAPACITY_SCALE / 100)))
107839 +       last_enqueued_diff -= ue.enqueued;
107840 +       if (within_margin(last_ewma_diff, UTIL_EST_MARGIN)) {
107841 +               if (!within_margin(last_enqueued_diff, UTIL_EST_MARGIN))
107842 +                       goto done;
107844                 return;
107845 +       }
107847         /*
107848          * To avoid overestimation of actual task utilization, skip updates if
107849 @@ -6098,6 +6117,24 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
107850         return -1;
107854 + * Scan the local SMT mask for idle CPUs.
107855 + */
107856 +static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
107858 +       int cpu;
107860 +       for_each_cpu(cpu, cpu_smt_mask(target)) {
107861 +               if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
107862 +                   !cpumask_test_cpu(cpu, sched_domain_span(sd)))
107863 +                       continue;
107864 +               if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
107865 +                       return cpu;
107866 +       }
107868 +       return -1;
107871  #else /* CONFIG_SCHED_SMT */
107873  static inline void set_idle_cores(int cpu, int val)
107874 @@ -6114,6 +6151,11 @@ static inline int select_idle_core(struct task_struct *p, int core, struct cpuma
107875         return __select_idle_cpu(core);
107878 +static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
107880 +       return -1;
107883  #endif /* CONFIG_SCHED_SMT */
107886 @@ -6121,11 +6163,10 @@ static inline int select_idle_core(struct task_struct *p, int core, struct cpuma
107887   * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
107888   * average idle time for this rq (as found in rq->avg_idle).
107889   */
107890 -static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
107891 +static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool has_idle_core, int target)
107893         struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
107894         int i, cpu, idle_cpu = -1, nr = INT_MAX;
107895 -       bool smt = test_idle_cores(target, false);
107896         int this = smp_processor_id();
107897         struct sched_domain *this_sd;
107898         u64 time;
107899 @@ -6136,7 +6177,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
107901         cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
107903 -       if (sched_feat(SIS_PROP) && !smt) {
107904 +       if (sched_feat(SIS_PROP) && !has_idle_core) {
107905                 u64 avg_cost, avg_idle, span_avg;
107907                 /*
107908 @@ -6156,7 +6197,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
107909         }
107911         for_each_cpu_wrap(cpu, cpus, target) {
107912 -               if (smt) {
107913 +               if (has_idle_core) {
107914                         i = select_idle_core(p, cpu, cpus, &idle_cpu);
107915                         if ((unsigned int)i < nr_cpumask_bits)
107916                                 return i;
107917 @@ -6170,10 +6211,10 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
107918                 }
107919         }
107921 -       if (smt)
107922 -               set_idle_cores(this, false);
107923 +       if (has_idle_core)
107924 +               set_idle_cores(target, false);
107926 -       if (sched_feat(SIS_PROP) && !smt) {
107927 +       if (sched_feat(SIS_PROP) && !has_idle_core) {
107928                 time = cpu_clock(this) - time;
107929                 update_avg(&this_sd->avg_scan_cost, time);
107930         }
107931 @@ -6228,6 +6269,7 @@ static inline bool asym_fits_capacity(int task_util, int cpu)
107932   */
107933  static int select_idle_sibling(struct task_struct *p, int prev, int target)
107935 +       bool has_idle_core = false;
107936         struct sched_domain *sd;
107937         unsigned long task_util;
107938         int i, recent_used_cpu;
107939 @@ -6307,7 +6349,17 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
107940         if (!sd)
107941                 return target;
107943 -       i = select_idle_cpu(p, sd, target);
107944 +       if (sched_smt_active()) {
107945 +               has_idle_core = test_idle_cores(target, false);
107947 +               if (!has_idle_core && cpus_share_cache(prev, target)) {
107948 +                       i = select_idle_smt(p, sd, prev);
107949 +                       if ((unsigned int)i < nr_cpumask_bits)
107950 +                               return i;
107951 +               }
107952 +       }
107954 +       i = select_idle_cpu(p, sd, has_idle_core, target);
107955         if ((unsigned)i < nr_cpumask_bits)
107956                 return i;
107958 @@ -6455,240 +6507,6 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
107959         return min_t(unsigned long, util, capacity_orig_of(cpu));
107963 - * Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued)
107964 - * to @dst_cpu.
107965 - */
107966 -static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu)
107968 -       struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
107969 -       unsigned long util_est, util = READ_ONCE(cfs_rq->avg.util_avg);
107971 -       /*
107972 -        * If @p migrates from @cpu to another, remove its contribution. Or,
107973 -        * if @p migrates from another CPU to @cpu, add its contribution. In
107974 -        * the other cases, @cpu is not impacted by the migration, so the
107975 -        * util_avg should already be correct.
107976 -        */
107977 -       if (task_cpu(p) == cpu && dst_cpu != cpu)
107978 -               sub_positive(&util, task_util(p));
107979 -       else if (task_cpu(p) != cpu && dst_cpu == cpu)
107980 -               util += task_util(p);
107982 -       if (sched_feat(UTIL_EST)) {
107983 -               util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued);
107985 -               /*
107986 -                * During wake-up, the task isn't enqueued yet and doesn't
107987 -                * appear in the cfs_rq->avg.util_est.enqueued of any rq,
107988 -                * so just add it (if needed) to "simulate" what will be
107989 -                * cpu_util() after the task has been enqueued.
107990 -                */
107991 -               if (dst_cpu == cpu)
107992 -                       util_est += _task_util_est(p);
107994 -               util = max(util, util_est);
107995 -       }
107997 -       return min(util, capacity_orig_of(cpu));
108001 - * compute_energy(): Estimates the energy that @pd would consume if @p was
108002 - * migrated to @dst_cpu. compute_energy() predicts what will be the utilization
108003 - * landscape of @pd's CPUs after the task migration, and uses the Energy Model
108004 - * to compute what would be the energy if we decided to actually migrate that
108005 - * task.
108006 - */
108007 -static long
108008 -compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
108010 -       struct cpumask *pd_mask = perf_domain_span(pd);
108011 -       unsigned long cpu_cap = arch_scale_cpu_capacity(cpumask_first(pd_mask));
108012 -       unsigned long max_util = 0, sum_util = 0;
108013 -       int cpu;
108015 -       /*
108016 -        * The capacity state of CPUs of the current rd can be driven by CPUs
108017 -        * of another rd if they belong to the same pd. So, account for the
108018 -        * utilization of these CPUs too by masking pd with cpu_online_mask
108019 -        * instead of the rd span.
108020 -        *
108021 -        * If an entire pd is outside of the current rd, it will not appear in
108022 -        * its pd list and will not be accounted by compute_energy().
108023 -        */
108024 -       for_each_cpu_and(cpu, pd_mask, cpu_online_mask) {
108025 -               unsigned long cpu_util, util_cfs = cpu_util_next(cpu, p, dst_cpu);
108026 -               struct task_struct *tsk = cpu == dst_cpu ? p : NULL;
108028 -               /*
108029 -                * Busy time computation: utilization clamping is not
108030 -                * required since the ratio (sum_util / cpu_capacity)
108031 -                * is already enough to scale the EM reported power
108032 -                * consumption at the (eventually clamped) cpu_capacity.
108033 -                */
108034 -               sum_util += effective_cpu_util(cpu, util_cfs, cpu_cap,
108035 -                                              ENERGY_UTIL, NULL);
108037 -               /*
108038 -                * Performance domain frequency: utilization clamping
108039 -                * must be considered since it affects the selection
108040 -                * of the performance domain frequency.
108041 -                * NOTE: in case RT tasks are running, by default the
108042 -                * FREQUENCY_UTIL's utilization can be max OPP.
108043 -                */
108044 -               cpu_util = effective_cpu_util(cpu, util_cfs, cpu_cap,
108045 -                                             FREQUENCY_UTIL, tsk);
108046 -               max_util = max(max_util, cpu_util);
108047 -       }
108049 -       return em_cpu_energy(pd->em_pd, max_util, sum_util);
108053 - * find_energy_efficient_cpu(): Find most energy-efficient target CPU for the
108054 - * waking task. find_energy_efficient_cpu() looks for the CPU with maximum
108055 - * spare capacity in each performance domain and uses it as a potential
108056 - * candidate to execute the task. Then, it uses the Energy Model to figure
108057 - * out which of the CPU candidates is the most energy-efficient.
108059 - * The rationale for this heuristic is as follows. In a performance domain,
108060 - * all the most energy efficient CPU candidates (according to the Energy
108061 - * Model) are those for which we'll request a low frequency. When there are
108062 - * several CPUs for which the frequency request will be the same, we don't
108063 - * have enough data to break the tie between them, because the Energy Model
108064 - * only includes active power costs. With this model, if we assume that
108065 - * frequency requests follow utilization (e.g. using schedutil), the CPU with
108066 - * the maximum spare capacity in a performance domain is guaranteed to be among
108067 - * the best candidates of the performance domain.
108069 - * In practice, it could be preferable from an energy standpoint to pack
108070 - * small tasks on a CPU in order to let other CPUs go in deeper idle states,
108071 - * but that could also hurt our chances to go cluster idle, and we have no
108072 - * ways to tell with the current Energy Model if this is actually a good
108073 - * idea or not. So, find_energy_efficient_cpu() basically favors
108074 - * cluster-packing, and spreading inside a cluster. That should at least be
108075 - * a good thing for latency, and this is consistent with the idea that most
108076 - * of the energy savings of EAS come from the asymmetry of the system, and
108077 - * not so much from breaking the tie between identical CPUs. That's also the
108078 - * reason why EAS is enabled in the topology code only for systems where
108079 - * SD_ASYM_CPUCAPACITY is set.
108081 - * NOTE: Forkees are not accepted in the energy-aware wake-up path because
108082 - * they don't have any useful utilization data yet and it's not possible to
108083 - * forecast their impact on energy consumption. Consequently, they will be
108084 - * placed by find_idlest_cpu() on the least loaded CPU, which might turn out
108085 - * to be energy-inefficient in some use-cases. The alternative would be to
108086 - * bias new tasks towards specific types of CPUs first, or to try to infer
108087 - * their util_avg from the parent task, but those heuristics could hurt
108088 - * other use-cases too. So, until someone finds a better way to solve this,
108089 - * let's keep things simple by re-using the existing slow path.
108090 - */
108091 -static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
108093 -       unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
108094 -       struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
108095 -       unsigned long cpu_cap, util, base_energy = 0;
108096 -       int cpu, best_energy_cpu = prev_cpu;
108097 -       struct sched_domain *sd;
108098 -       struct perf_domain *pd;
108100 -       rcu_read_lock();
108101 -       pd = rcu_dereference(rd->pd);
108102 -       if (!pd || READ_ONCE(rd->overutilized))
108103 -               goto fail;
108105 -       /*
108106 -        * Energy-aware wake-up happens on the lowest sched_domain starting
108107 -        * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu.
108108 -        */
108109 -       sd = rcu_dereference(*this_cpu_ptr(&sd_asym_cpucapacity));
108110 -       while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
108111 -               sd = sd->parent;
108112 -       if (!sd)
108113 -               goto fail;
108115 -       sync_entity_load_avg(&p->se);
108116 -       if (!task_util_est(p))
108117 -               goto unlock;
108119 -       for (; pd; pd = pd->next) {
108120 -               unsigned long cur_delta, spare_cap, max_spare_cap = 0;
108121 -               unsigned long base_energy_pd;
108122 -               int max_spare_cap_cpu = -1;
108124 -               /* Compute the 'base' energy of the pd, without @p */
108125 -               base_energy_pd = compute_energy(p, -1, pd);
108126 -               base_energy += base_energy_pd;
108128 -               for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) {
108129 -                       if (!cpumask_test_cpu(cpu, p->cpus_ptr))
108130 -                               continue;
108132 -                       util = cpu_util_next(cpu, p, cpu);
108133 -                       cpu_cap = capacity_of(cpu);
108134 -                       spare_cap = cpu_cap;
108135 -                       lsub_positive(&spare_cap, util);
108137 -                       /*
108138 -                        * Skip CPUs that cannot satisfy the capacity request.
108139 -                        * IOW, placing the task there would make the CPU
108140 -                        * overutilized. Take uclamp into account to see how
108141 -                        * much capacity we can get out of the CPU; this is
108142 -                        * aligned with sched_cpu_util().
108143 -                        */
108144 -                       util = uclamp_rq_util_with(cpu_rq(cpu), util, p);
108145 -                       if (!fits_capacity(util, cpu_cap))
108146 -                               continue;
108148 -                       /* Always use prev_cpu as a candidate. */
108149 -                       if (cpu == prev_cpu) {
108150 -                               prev_delta = compute_energy(p, prev_cpu, pd);
108151 -                               prev_delta -= base_energy_pd;
108152 -                               best_delta = min(best_delta, prev_delta);
108153 -                       }
108155 -                       /*
108156 -                        * Find the CPU with the maximum spare capacity in
108157 -                        * the performance domain
108158 -                        */
108159 -                       if (spare_cap > max_spare_cap) {
108160 -                               max_spare_cap = spare_cap;
108161 -                               max_spare_cap_cpu = cpu;
108162 -                       }
108163 -               }
108165 -               /* Evaluate the energy impact of using this CPU. */
108166 -               if (max_spare_cap_cpu >= 0 && max_spare_cap_cpu != prev_cpu) {
108167 -                       cur_delta = compute_energy(p, max_spare_cap_cpu, pd);
108168 -                       cur_delta -= base_energy_pd;
108169 -                       if (cur_delta < best_delta) {
108170 -                               best_delta = cur_delta;
108171 -                               best_energy_cpu = max_spare_cap_cpu;
108172 -                       }
108173 -               }
108174 -       }
108175 -unlock:
108176 -       rcu_read_unlock();
108178 -       /*
108179 -        * Pick the best CPU if prev_cpu cannot be used, or if it saves at
108180 -        * least 6% of the energy used by prev_cpu.
108181 -        */
108182 -       if (prev_delta == ULONG_MAX)
108183 -               return best_energy_cpu;
108185 -       if ((prev_delta - best_delta) > ((prev_delta + base_energy) >> 4))
108186 -               return best_energy_cpu;
108188 -       return prev_cpu;
108190 -fail:
108191 -       rcu_read_unlock();
108193 -       return -1;
108197   * select_task_rq_fair: Select target runqueue for the waking task in domains
108198   * that have the relevant SD flag set. In practice, this is SD_BALANCE_WAKE,
108199 @@ -6714,14 +6532,6 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
108201         if (wake_flags & WF_TTWU) {
108202                 record_wakee(p);
108204 -               if (sched_energy_enabled()) {
108205 -                       new_cpu = find_energy_efficient_cpu(p, prev_cpu);
108206 -                       if (new_cpu >= 0)
108207 -                               return new_cpu;
108208 -                       new_cpu = prev_cpu;
108209 -               }
108211                 want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
108212         }
108214 @@ -7539,6 +7349,10 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
108215         if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
108216                 return 0;
108218 +       /* Disregard pcpu kthreads; they are where they need to be. */
108219 +       if (kthread_is_per_cpu(p))
108220 +               return 0;
108222         if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
108223                 int cpu;
108225 @@ -7708,8 +7522,7 @@ static int detach_tasks(struct lb_env *env)
108226                          * scheduler fails to find a good waiting task to
108227                          * migrate.
108228                          */
108230 -                       if ((load >> env->sd->nr_balance_failed) > env->imbalance)
108231 +                       if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance)
108232                                 goto next;
108234                         env->imbalance -= load;
108235 @@ -10844,16 +10657,22 @@ static void propagate_entity_cfs_rq(struct sched_entity *se)
108237         struct cfs_rq *cfs_rq;
108239 +       list_add_leaf_cfs_rq(cfs_rq_of(se));
108241         /* Start to propagate at parent */
108242         se = se->parent;
108244         for_each_sched_entity(se) {
108245                 cfs_rq = cfs_rq_of(se);
108247 -               if (cfs_rq_throttled(cfs_rq))
108248 -                       break;
107249 +               if (!cfs_rq_throttled(cfs_rq)) {
108250 +                       update_load_avg(cfs_rq, se, UPDATE_TG);
108251 +                       list_add_leaf_cfs_rq(cfs_rq);
108252 +                       continue;
108253 +               }
108255 -               update_load_avg(cfs_rq, se, UPDATE_TG);
108256 +               if (list_add_leaf_cfs_rq(cfs_rq))
108257 +                       break;
108258         }
108260  #else
108261 diff --git a/kernel/sched/features.h b/kernel/sched/features.h
108262 index 1bc2b158fc51..e911111df83a 100644
108263 --- a/kernel/sched/features.h
108264 +++ b/kernel/sched/features.h
108265 @@ -90,3 +90,6 @@ SCHED_FEAT(WA_BIAS, true)
108266   */
108267  SCHED_FEAT(UTIL_EST, true)
108268  SCHED_FEAT(UTIL_EST_FASTUP, true)
108270 +SCHED_FEAT(ALT_PERIOD, true)
108271 +SCHED_FEAT(BASE_SLICE, true)
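ALT_PERIOD and BASE_SLICE gate the sched_slice() changes in the kernel/sched/fair.c hunk above: ALT_PERIOD sizes the period from the root cfs_rq's h_nr_running, and BASE_SLICE floors the result at sysctl_sched_min_granularity. A rough arithmetic sketch of why the floor matters for low-weight entities; the tunable defaults and nice-level weights (15 for nice +19, 1024 for nice 0) are assumed, not taken from this patch:

#include <stdio.h>

/* Assumed upstream-default tunables (ns) and weights; not in this patch. */
static const unsigned long long sysctl_sched_latency = 6000000ULL;         /* 6 ms   */
static const unsigned long long sysctl_sched_min_granularity = 750000ULL;  /* 750 us */
static const unsigned long sched_nr_latency = 8;

/* Mirrors __sched_period(): stretch the period once more tasks are
 * runnable than fit in the default latency target. */
static unsigned long long sched_period(unsigned long nr_running)
{
        if (nr_running > sched_nr_latency)
                return nr_running * sysctl_sched_min_granularity;
        return sysctl_sched_latency;
}

int main(void)
{
        /* One nice +19 task (weight 15) next to three nice 0 tasks
         * (weight 1024 each) on one runqueue: */
        unsigned long long w = 15, W = 15 + 3 * 1024;
        unsigned long long slice = sched_period(4) * w / W;

        printf("raw slice:     %llu ns\n", slice);        /* ~29000 ns */
        if (slice < sysctl_sched_min_granularity)
                slice = sysctl_sched_min_granularity;     /* BASE_SLICE floor */
        printf("floored slice: %llu ns\n", slice);        /* 750000 ns */
        return 0;
}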
108272 diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
108273 index 967732c0766c..651218ded981 100644
108274 --- a/kernel/sched/psi.c
108275 +++ b/kernel/sched/psi.c
108276 @@ -711,14 +711,15 @@ static void psi_group_change(struct psi_group *group, int cpu,
108277         for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
108278                 if (!(m & (1 << t)))
108279                         continue;
108280 -               if (groupc->tasks[t] == 0 && !psi_bug) {
108281 +               if (groupc->tasks[t]) {
108282 +                       groupc->tasks[t]--;
108283 +               } else if (!psi_bug) {
108284                         printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
108285                                         cpu, t, groupc->tasks[0],
108286                                         groupc->tasks[1], groupc->tasks[2],
108287                                         groupc->tasks[3], clear, set);
108288                         psi_bug = 1;
108289                 }
108290 -               groupc->tasks[t]--;
108291         }
108293         for (t = 0; set; set &= ~(1 << t), t++)
108294 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
108295 index 10a1522b1e30..e4e4f47cee6a 100644
108296 --- a/kernel/sched/sched.h
108297 +++ b/kernel/sched/sched.h
108298 @@ -204,6 +204,13 @@ static inline void update_avg(u64 *avg, u64 sample)
108299         *avg += diff / 8;
108303 + * Shifting a value by an exponent greater *or equal* to the size of said value
108304 + * is UB; cap at size-1.
108305 + */
108306 +#define shr_bound(val, shift)                                                  \
108307 +       (val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1))
108310   * !! For sched_setattr_nocheck() (kernel) only !!
108311   *
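shr_bound() exists because shifting by a count greater than or equal to the operand's width is undefined behavior in C, and nr_balance_failed can exceed 63. A stand-alone sketch with min_t()/BITS_PER_TYPE() expanded by hand (GNU C typeof assumed, as in the kernel):

#include <stdio.h>
#include <limits.h>

/* Clamp the shift to width-1 so the expression stays defined however
 * large 'shift' grows (sd->nr_balance_failed is unbounded in
 * detach_tasks()). */
#define BITS_PER_TYPE(t) (sizeof(t) * CHAR_BIT)
#define shr_bound(val, shift)                                          \
        ((val) >> ((shift) < BITS_PER_TYPE(typeof(val)) - 1 ?          \
                   (shift) : BITS_PER_TYPE(typeof(val)) - 1))

int main(void)
{
        unsigned long load = ~0UL;

        for (unsigned int failed = 0; failed <= 70; failed += 35)
                printf("failed=%2u -> %lu\n", failed, shr_bound(load, failed));
        /* 'load >> 70' would be UB; shr_bound() degrades it to '>> 63'. */
        return 0;
}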
108312 diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
108313 index 09d35044bd88..12f80587e127 100644
108314 --- a/kernel/sched/topology.c
108315 +++ b/kernel/sched/topology.c
108316 @@ -723,35 +723,6 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
108317         for (tmp = sd; tmp; tmp = tmp->parent)
108318                 numa_distance += !!(tmp->flags & SD_NUMA);
108320 -       /*
108321 -        * FIXME: Diameter >=3 is misrepresented.
108322 -        *
108323 -        * Smallest diameter=3 topology is:
108324 -        *
108325 -        *   node   0   1   2   3
108326 -        *     0:  10  20  30  40
108327 -        *     1:  20  10  20  30
108328 -        *     2:  30  20  10  20
108329 -        *     3:  40  30  20  10
108330 -        *
108331 -        *   0 --- 1 --- 2 --- 3
108332 -        *
108333 -        * NUMA-3       0-3             N/A             N/A             0-3
108334 -        *  groups:     {0-2},{1-3}                                     {1-3},{0-2}
108335 -        *
108336 -        * NUMA-2       0-2             0-3             0-3             1-3
108337 -        *  groups:     {0-1},{1-3}     {0-2},{2-3}     {1-3},{0-1}     {2-3},{0-2}
108338 -        *
108339 -        * NUMA-1       0-1             0-2             1-3             2-3
108340 -        *  groups:     {0},{1}         {1},{2},{0}     {2},{3},{1}     {3},{2}
108341 -        *
108342 -        * NUMA-0       0               1               2               3
108343 -        *
108344 -        * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the
108345 -        * group span isn't a subset of the domain span.
108346 -        */
108347 -       WARN_ONCE(numa_distance > 2, "Shortest NUMA path spans too many nodes\n");
108349         sched_domain_debug(sd, cpu);
108351         rq_attach_root(rq, rd);
108352 @@ -982,6 +953,31 @@ static void init_overlap_sched_group(struct sched_domain *sd,
108353         sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
108356 +static struct sched_domain *
108357 +find_descended_sibling(struct sched_domain *sd, struct sched_domain *sibling)
108359 +       /*
108360 +        * The proper descendant would be the one whose child won't span out
108361 +        * of sd
108362 +        */
108363 +       while (sibling->child &&
108364 +              !cpumask_subset(sched_domain_span(sibling->child),
108365 +                              sched_domain_span(sd)))
108366 +               sibling = sibling->child;
108368 +       /*
108369 +        * As we are referencing sgc across different topology level, we need
108370 +        * to go down to skip those sched_domains which don't contribute to
108371 +        * scheduling because they will be degenerated in cpu_attach_domain
108372 +        */
108373 +       while (sibling->child &&
108374 +              cpumask_equal(sched_domain_span(sibling->child),
108375 +                            sched_domain_span(sibling)))
108376 +               sibling = sibling->child;
108378 +       return sibling;
108381  static int
108382  build_overlap_sched_groups(struct sched_domain *sd, int cpu)
108384 @@ -1015,6 +1011,41 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
108385                 if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
108386                         continue;
108388 +               /*
108389 +                * Usually we build sched_group by sibling's child sched_domain
108390 +                * But for machines whose NUMA diameter are 3 or above, we move
108391 +                * to build sched_group by sibling's proper descendant's child
108392 +                * domain because sibling's child sched_domain will span out of
108393 +                * the sched_domain being built as below.
108394 +                *
108395 +                * Smallest diameter=3 topology is:
108396 +                *
108397 +                *   node   0   1   2   3
108398 +                *     0:  10  20  30  40
108399 +                *     1:  20  10  20  30
108400 +                *     2:  30  20  10  20
108401 +                *     3:  40  30  20  10
108402 +                *
108403 +                *   0 --- 1 --- 2 --- 3
108404 +                *
108405 +                * NUMA-3       0-3             N/A             N/A             0-3
108406 +                *  groups:     {0-2},{1-3}                                     {1-3},{0-2}
108407 +                *
108408 +                * NUMA-2       0-2             0-3             0-3             1-3
108409 +                *  groups:     {0-1},{1-3}     {0-2},{2-3}     {1-3},{0-1}     {2-3},{0-2}
108410 +                *
108411 +                * NUMA-1       0-1             0-2             1-3             2-3
108412 +                *  groups:     {0},{1}         {1},{2},{0}     {2},{3},{1}     {3},{2}
108413 +                *
108414 +                * NUMA-0       0               1               2               3
108415 +                *
108416 +                * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the
108417 +                * group span isn't a subset of the domain span.
108418 +                */
108419 +               if (sibling->child &&
108420 +                   !cpumask_subset(sched_domain_span(sibling->child), span))
108421 +                       sibling = find_descended_sibling(sd, sibling);
108423                 sg = build_group_from_child_sched_domain(sibling, cpu);
108424                 if (!sg)
108425                         goto fail;
108426 @@ -1022,7 +1053,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
108427                 sg_span = sched_group_span(sg);
108428                 cpumask_or(covered, covered, sg_span);
108430 -               init_overlap_sched_group(sd, sg);
108431 +               init_overlap_sched_group(sibling, sg);
108433                 if (!first)
108434                         first = sg;
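find_descended_sibling() walks down a sibling domain until it finds the level whose child span fits inside the domain being built. A toy model using u64 bitmasks for the spans (the values are hypothetical; the second, degenerate-level loop is omitted):

#include <stdio.h>
#include <stdint.h>

/* Toy sched_domain: each level's CPU span as a bitmask. */
struct dom {
        uint64_t span;
        struct dom *child;
};

static int subset(uint64_t a, uint64_t b)        /* a is a subset of b */
{
        return (a & ~b) == 0;
}

/* Mirrors the first loop of find_descended_sibling(): descend while the
 * sibling's child still spans CPUs outside 'sd'. */
static struct dom *descend(const struct dom *sd, struct dom *sibling)
{
        while (sibling->child && !subset(sibling->child->span, sd->span))
                sibling = sibling->child;
        return sibling;
}

int main(void)
{
        /* Node 1's levels on the 0 --- 1 --- 2 --- 3 line topology above. */
        struct dom numa0 = { 0x2, NULL };        /* {1}         */
        struct dom numa1 = { 0x7, &numa0 };      /* {0,1,2}     */
        struct dom numa2 = { 0xF, &numa1 };      /* {0,1,2,3}   */
        struct dom sd    = { 0x3, NULL };        /* building {0,1} */

        struct dom *s = descend(&sd, &numa2);
        /* Stops at NUMA-1: its child {1} is the first to fit inside sd. */
        printf("descended to span %#llx\n", (unsigned long long)s->span);
        return 0;
}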
108435 diff --git a/kernel/smp.c b/kernel/smp.c
108436 index aeb0adfa0606..c678589fbb76 100644
108437 --- a/kernel/smp.c
108438 +++ b/kernel/smp.c
108439 @@ -110,7 +110,7 @@ static DEFINE_PER_CPU(void *, cur_csd_info);
108440  static atomic_t csd_bug_count = ATOMIC_INIT(0);
108442  /* Record current CSD work for current CPU, NULL to erase. */
108443 -static void csd_lock_record(call_single_data_t *csd)
108444 +static void csd_lock_record(struct __call_single_data *csd)
108446         if (!csd) {
108447                 smp_mb(); /* NULL cur_csd after unlock. */
108448 @@ -125,7 +125,7 @@ static void csd_lock_record(call_single_data_t *csd)
108449                   /* Or before unlock, as the case may be. */
108452 -static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
108453 +static __always_inline int csd_lock_wait_getcpu(struct __call_single_data *csd)
108455         unsigned int csd_type;
108457 @@ -140,7 +140,7 @@ static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
108458   * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
108459   * so waiting on other types gets much less information.
108460   */
108461 -static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
108462 +static __always_inline bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
108464         int cpu = -1;
108465         int cpux;
108466 @@ -204,7 +204,7 @@ static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 t
108467   * previous function call. For multi-cpu calls its even more interesting
108468   * as we'll have to ensure no other cpu is observing our csd.
108469   */
108470 -static __always_inline void csd_lock_wait(call_single_data_t *csd)
108471 +static __always_inline void csd_lock_wait(struct __call_single_data *csd)
108473         int bug_id = 0;
108474         u64 ts0, ts1;
108475 @@ -219,17 +219,17 @@ static __always_inline void csd_lock_wait(call_single_data_t *csd)
108478  #else
108479 -static void csd_lock_record(call_single_data_t *csd)
108480 +static void csd_lock_record(struct __call_single_data *csd)
108484 -static __always_inline void csd_lock_wait(call_single_data_t *csd)
108485 +static __always_inline void csd_lock_wait(struct __call_single_data *csd)
108487         smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
108489  #endif
108491 -static __always_inline void csd_lock(call_single_data_t *csd)
108492 +static __always_inline void csd_lock(struct __call_single_data *csd)
108494         csd_lock_wait(csd);
108495         csd->node.u_flags |= CSD_FLAG_LOCK;
108496 @@ -242,7 +242,7 @@ static __always_inline void csd_lock(call_single_data_t *csd)
108497         smp_wmb();
108500 -static __always_inline void csd_unlock(call_single_data_t *csd)
108501 +static __always_inline void csd_unlock(struct __call_single_data *csd)
108503         WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));
108505 @@ -276,7 +276,7 @@ void __smp_call_single_queue(int cpu, struct llist_node *node)
108506   * for execution on the given CPU. data must already have
108507   * ->func, ->info, and ->flags set.
108508   */
108509 -static int generic_exec_single(int cpu, call_single_data_t *csd)
108510 +static int generic_exec_single(int cpu, struct __call_single_data *csd)
108512         if (cpu == smp_processor_id()) {
108513                 smp_call_func_t func = csd->func;
108514 @@ -542,7 +542,7 @@ EXPORT_SYMBOL(smp_call_function_single);
108515   * NOTE: Be careful, there is unfortunately no current debugging facility to
108516   * validate the correctness of this serialization.
108517   */
108518 -int smp_call_function_single_async(int cpu, call_single_data_t *csd)
108519 +int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
108521         int err = 0;
108523 diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
108524 index 19aa806890d5..1750dfc416d8 100644
108525 --- a/kernel/sys_ni.c
108526 +++ b/kernel/sys_ni.c
108527 @@ -150,6 +150,12 @@ COND_SYSCALL_COMPAT(set_robust_list);
108528  COND_SYSCALL(get_robust_list);
108529  COND_SYSCALL_COMPAT(get_robust_list);
108531 +/* kernel/futex2.c */
108532 +COND_SYSCALL(futex_wait);
108533 +COND_SYSCALL(futex_wake);
108534 +COND_SYSCALL(futex_waitv);
108535 +COND_SYSCALL(futex_requeue);
108537  /* kernel/hrtimer.c */
108539  /* kernel/itimer.c */
108540 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
108541 index 62fbd09b5dc1..caad193c931f 100644
108542 --- a/kernel/sysctl.c
108543 +++ b/kernel/sysctl.c
108544 @@ -120,9 +120,9 @@ static unsigned long long_max = LONG_MAX;
108545  static int one_hundred = 100;
108546  static int two_hundred = 200;
108547  static int one_thousand = 1000;
108548 -#ifdef CONFIG_PRINTK
108549  static int ten_thousand = 10000;
108550 -#endif
108551 +extern int hrtimer_granularity_us;
108552 +extern int hrtimeout_min_us;
108553  #ifdef CONFIG_PERF_EVENTS
108554  static int six_hundred_forty_kb = 640 * 1024;
108555  #endif
108556 @@ -200,6 +200,10 @@ static int min_extfrag_threshold;
108557  static int max_extfrag_threshold = 1000;
108558  #endif
108560 +#ifdef CONFIG_USER_NS
108561 +extern int unprivileged_userns_clone;
108562 +#endif
108564  #endif /* CONFIG_SYSCTL */
108566  #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_SYSCTL)
108567 @@ -1652,6 +1656,24 @@ int proc_do_static_key(struct ctl_table *table, int write,
108570  static struct ctl_table kern_table[] = {
108571 +       {
108572 +               .procname       = "hrtimer_granularity_us",
108573 +               .data           = &hrtimer_granularity_us,
108574 +               .maxlen         = sizeof(int),
108575 +               .mode           = 0644,
108576 +               .proc_handler   = &proc_dointvec_minmax,
108577 +               .extra1         = SYSCTL_ONE,
108578 +               .extra2         = &ten_thousand,
108579 +       },
108580 +       {
108581 +               .procname       = "hrtimeout_min_us",
108582 +               .data           = &hrtimeout_min_us,
108583 +               .maxlen         = sizeof(int),
108584 +               .mode           = 0644,
108585 +               .proc_handler   = &proc_dointvec_minmax,
108586 +               .extra1         = SYSCTL_ONE,
108587 +               .extra2         = &ten_thousand,
108588 +       },
108589         {
108590                 .procname       = "sched_child_runs_first",
108591                 .data           = &sysctl_sched_child_runs_first,
108592 @@ -1902,6 +1924,15 @@ static struct ctl_table kern_table[] = {
108593                 .proc_handler   = proc_dointvec,
108594         },
108595  #endif
108596 +#ifdef CONFIG_USER_NS
108597 +       {
108598 +               .procname       = "unprivileged_userns_clone",
108599 +               .data           = &unprivileged_userns_clone,
108600 +               .maxlen         = sizeof(int),
108601 +               .mode           = 0644,
108602 +               .proc_handler   = proc_dointvec,
108603 +       },
108604 +#endif
108605  #ifdef CONFIG_PROC_SYSCTL
108606         {
108607                 .procname       = "tainted",
108608 @@ -3093,6 +3124,20 @@ static struct ctl_table vm_table[] = {
108609                 .extra2         = SYSCTL_ONE,
108610         },
108611  #endif
108612 +       {
108613 +               .procname       = "clean_low_kbytes",
108614 +               .data           = &sysctl_clean_low_kbytes,
108615 +               .maxlen         = sizeof(sysctl_clean_low_kbytes),
108616 +               .mode           = 0644,
108617 +               .proc_handler   = proc_doulongvec_minmax,
108618 +       },
108619 +       {
108620 +               .procname       = "clean_min_kbytes",
108621 +               .data           = &sysctl_clean_min_kbytes,
108622 +               .maxlen         = sizeof(sysctl_clean_min_kbytes),
108623 +               .mode           = 0644,
108624 +               .proc_handler   = proc_doulongvec_minmax,
108625 +       },
108626         {
108627                 .procname       = "user_reserve_kbytes",
108628                 .data           = &sysctl_user_reserve_kbytes,
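Each kern_table/vm_table entry above surfaces its variable under /proc/sys using the .procname shown, e.g. /proc/sys/kernel/hrtimer_granularity_us and /proc/sys/vm/clean_low_kbytes. A minimal reader for one of them (standard procfs sysctl layout assumed):

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/kernel/hrtimer_granularity_us", "r");
        int us;

        if (f && fscanf(f, "%d", &us) == 1)
                printf("hrtimer granularity: %d us\n", us);
        if (f)
                fclose(f);
        return 0;
}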
108629 diff --git a/kernel/task_work.c b/kernel/task_work.c
108630 index 9cde961875c0..5c8dea45d4f8 100644
108631 --- a/kernel/task_work.c
108632 +++ b/kernel/task_work.c
108633 @@ -57,6 +57,7 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
108635         return 0;
108637 +EXPORT_SYMBOL(task_work_add);
108639  /**
108640   * task_work_cancel - cancel a pending work added by task_work_add()
108641 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
108642 index 4d94e2b5499d..a7924fedf479 100644
108643 --- a/kernel/time/alarmtimer.c
108644 +++ b/kernel/time/alarmtimer.c
108645 @@ -92,7 +92,7 @@ static int alarmtimer_rtc_add_device(struct device *dev,
108646         if (rtcdev)
108647                 return -EBUSY;
108649 -       if (!rtc->ops->set_alarm)
108650 +       if (!test_bit(RTC_FEATURE_ALARM, rtc->features))
108651                 return -1;
108652         if (!device_may_wakeup(rtc->dev.parent))
108653                 return -1;
108654 diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
108655 index f5490222e134..23db3c39e07a 100644
108656 --- a/kernel/time/clockevents.c
108657 +++ b/kernel/time/clockevents.c
108658 @@ -190,8 +190,9 @@ int clockevents_tick_resume(struct clock_event_device *dev)
108660  #ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST
108662 +int __read_mostly hrtimer_granularity_us = 100;
108663  /* Limit min_delta to a jiffie */
108664 -#define MIN_DELTA_LIMIT                (NSEC_PER_SEC / HZ)
108665 +#define MIN_DELTA_LIMIT                (hrtimer_granularity_us * NSEC_PER_USEC)
108667  /**
108668   * clockevents_increase_min_delta - raise minimum delta of a clock event device
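With the default hrtimer_granularity_us of 100 set just above, MIN_DELTA_LIMIT shrinks from one jiffy to 100 µs. A quick check of the numbers; the HZ value is assumed since it is config-dependent:

#include <stdio.h>

#define NSEC_PER_SEC   1000000000ULL
#define NSEC_PER_USEC  1000ULL
#define HZ             250   /* assumed; config-dependent */

int main(void)
{
        unsigned long long old_limit = NSEC_PER_SEC / HZ;    /* one jiffy        */
        unsigned long long new_limit = 100 * NSEC_PER_USEC;  /* 100 us default   */

        printf("old MIN_DELTA_LIMIT: %llu ns\n", old_limit); /* 4000000 */
        printf("new MIN_DELTA_LIMIT: %llu ns\n", new_limit); /* 100000  */
        return 0;
}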
108669 diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
108670 index 5c9d968187ae..7a3d640dc13a 100644
108671 --- a/kernel/time/hrtimer.c
108672 +++ b/kernel/time/hrtimer.c
108673 @@ -2236,3 +2236,113 @@ int __sched schedule_hrtimeout(ktime_t *expires,
108674         return schedule_hrtimeout_range(expires, 0, mode);
108676  EXPORT_SYMBOL_GPL(schedule_hrtimeout);
108679 + * As per schedule_hrtimeout but takes a millisecond value and returns how
108680 + * many milliseconds are left.
108681 + */
108682 +long __sched schedule_msec_hrtimeout(long timeout)
108684 +       struct hrtimer_sleeper t;
108685 +       int delta, jiffs;
108686 +       ktime_t expires;
108688 +       if (!timeout) {
108689 +               __set_current_state(TASK_RUNNING);
108690 +               return 0;
108691 +       }
108693 +       jiffs = msecs_to_jiffies(timeout);
108694 +       /*
108695 +        * If regular timer resolution is adequate or hrtimer resolution is not
108696 +        * (yet) better than Hz, as would occur during startup, use regular
108697 +        * timers.
108698 +        */
108699 +       if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ || pm_freezing)
108700 +               return schedule_timeout(jiffs);
108702 +       delta = (timeout % 1000) * NSEC_PER_MSEC;
108703 +       expires = ktime_set(0, delta);
108705 +       hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
108706 +       hrtimer_set_expires_range_ns(&t.timer, expires, delta);
108708 +       hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_REL);
108710 +       if (likely(t.task))
108711 +               schedule();
108713 +       hrtimer_cancel(&t.timer);
108714 +       destroy_hrtimer_on_stack(&t.timer);
108716 +       __set_current_state(TASK_RUNNING);
108718 +       expires = hrtimer_expires_remaining(&t.timer);
108719 +       timeout = ktime_to_ms(expires);
108720 +       return timeout < 0 ? 0 : timeout;
108723 +EXPORT_SYMBOL(schedule_msec_hrtimeout);
108725 +#define USECS_PER_SEC 1000000
108726 +extern int hrtimer_granularity_us;
108728 +static inline long schedule_usec_hrtimeout(long timeout)
108730 +       struct hrtimer_sleeper t;
108731 +       ktime_t expires;
108732 +       int delta;
108734 +       if (!timeout) {
108735 +               __set_current_state(TASK_RUNNING);
108736 +               return 0;
108737 +       }
108739 +       if (hrtimer_resolution >= NSEC_PER_SEC / HZ)
108740 +               return schedule_timeout(usecs_to_jiffies(timeout));
108742 +       if (timeout < hrtimer_granularity_us)
108743 +               timeout = hrtimer_granularity_us;
108744 +       delta = (timeout % USECS_PER_SEC) * NSEC_PER_USEC;
108745 +       expires = ktime_set(0, delta);
108747 +       hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
108748 +       hrtimer_set_expires_range_ns(&t.timer, expires, delta);
108750 +       hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_REL);
108752 +       if (likely(t.task))
108753 +               schedule();
108755 +       hrtimer_cancel(&t.timer);
108756 +       destroy_hrtimer_on_stack(&t.timer);
108758 +       __set_current_state(TASK_RUNNING);
108760 +       expires = hrtimer_expires_remaining(&t.timer);
108761 +       timeout = ktime_to_us(expires);
108762 +       return timeout < 0 ? 0 : timeout;
108765 +int __read_mostly hrtimeout_min_us = 500;
108767 +long __sched schedule_min_hrtimeout(void)
108769 +       return usecs_to_jiffies(schedule_usec_hrtimeout(hrtimeout_min_us));
108772 +EXPORT_SYMBOL(schedule_min_hrtimeout);
108774 +long __sched schedule_msec_hrtimeout_interruptible(long timeout)
108776 +       __set_current_state(TASK_INTERRUPTIBLE);
108777 +       return schedule_msec_hrtimeout(timeout);
108779 +EXPORT_SYMBOL(schedule_msec_hrtimeout_interruptible);
108781 +long __sched schedule_msec_hrtimeout_uninterruptible(long timeout)
108783 +       __set_current_state(TASK_UNINTERRUPTIBLE);
108784 +       return schedule_msec_hrtimeout(timeout);
108786 +EXPORT_SYMBOL(schedule_msec_hrtimeout_uninterruptible);
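A hypothetical caller for the new helpers, not part of this patch; it assumes the prototypes are made visible elsewhere (only the definitions and exports appear in this hunk). The point is polling a device roughly every 2 ms without msleep()'s jiffy rounding on low-HZ kernels:

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>

/* Prototype assumed visible; this hunk only adds definition + export. */
extern long schedule_msec_hrtimeout_interruptible(long timeout);

static int wait_for_ready(bool (*ready)(void))
{
        while (!ready()) {
                /* ~2 ms sleep via hrtimers when tick resolution is coarse. */
                schedule_msec_hrtimeout_interruptible(2);
                if (signal_pending(current))
                        return -ERESTARTSYS;
        }
        return 0;
}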
108787 diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
108788 index bf540f5a4115..dd5697d7347b 100644
108789 --- a/kernel/time/posix-timers.c
108790 +++ b/kernel/time/posix-timers.c
108791 @@ -1191,8 +1191,8 @@ SYSCALL_DEFINE2(clock_adjtime32, clockid_t, which_clock,
108793         err = do_clock_adjtime(which_clock, &ktx);
108795 -       if (err >= 0)
108796 -               err = put_old_timex32(utp, &ktx);
108797 +       if (err >= 0 && put_old_timex32(utp, &ktx))
108798 +               return -EFAULT;
108800         return err;
108802 diff --git a/kernel/time/timer.c b/kernel/time/timer.c
108803 index f475f1a027c8..8d82fe9f6fbb 100644
108804 --- a/kernel/time/timer.c
108805 +++ b/kernel/time/timer.c
108806 @@ -44,6 +44,7 @@
108807  #include <linux/slab.h>
108808  #include <linux/compat.h>
108809  #include <linux/random.h>
108810 +#include <linux/freezer.h>
108812  #include <linux/uaccess.h>
108813  #include <asm/unistd.h>
108814 @@ -1886,6 +1887,18 @@ signed long __sched schedule_timeout(signed long timeout)
108816         expire = timeout + jiffies;
108818 +#ifdef CONFIG_HIGH_RES_TIMERS
108819 +       if (timeout == 1 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
108820 +               /*
108821 +                * Special case 1 as being a request for the minimum timeout
108822 +                * and use highres timers to timeout after 1ms to workaround
108823 +                * the granularity of low Hz tick timers.
108824 +                */
108825 +               if (!schedule_min_hrtimeout())
108826 +                       return 0;
108827 +               goto out_timeout;
108828 +       }
108829 +#endif
108830         timer.task = current;
108831         timer_setup_on_stack(&timer.timer, process_timeout, 0);
108832         __mod_timer(&timer.timer, expire, MOD_TIMER_NOTPENDING);
108833 @@ -1894,10 +1907,10 @@ signed long __sched schedule_timeout(signed long timeout)
108835         /* Remove the timer from the object tracker */
108836         destroy_timer_on_stack(&timer.timer);
108838 +out_timeout:
108839         timeout = expire - jiffies;
108841 - out:
108842 +out:
108843         return timeout < 0 ? 0 : timeout;
108845  EXPORT_SYMBOL(schedule_timeout);
108846 @@ -2040,7 +2053,19 @@ void __init init_timers(void)
108847   */
108848  void msleep(unsigned int msecs)
108850 -       unsigned long timeout = msecs_to_jiffies(msecs) + 1;
108851 +       int jiffs = msecs_to_jiffies(msecs);
108852 +       unsigned long timeout;
108854 +       /*
108855 +        * Use high resolution timers where the resolution of tick based
108856 +        * timers is inadequate.
108857 +        */
108858 +       if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) {
108859 +               while (msecs)
108860 +                       msecs = schedule_msec_hrtimeout_uninterruptible(msecs);
108861 +               return;
108862 +       }
108863 +       timeout = jiffs + 1;
108865         while (timeout)
108866                 timeout = schedule_timeout_uninterruptible(timeout);
108867 @@ -2054,7 +2079,15 @@ EXPORT_SYMBOL(msleep);
108868   */
108869  unsigned long msleep_interruptible(unsigned int msecs)
108871 -       unsigned long timeout = msecs_to_jiffies(msecs) + 1;
108872 +       int jiffs = msecs_to_jiffies(msecs);
108873 +       unsigned long timeout;
108875 +       if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) {
108876 +               while (msecs && !signal_pending(current))
108877 +                       msecs = schedule_msec_hrtimeout_interruptible(msecs);
108878 +               return msecs;
108879 +       }
108880 +       timeout = jiffs + 1;
108882         while (timeout && !signal_pending(current))
108883                 timeout = schedule_timeout_interruptible(timeout);
108884 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
108885 index 3ba52d4e1314..826b88b727a6 100644
108886 --- a/kernel/trace/ftrace.c
108887 +++ b/kernel/trace/ftrace.c
108888 @@ -5631,7 +5631,10 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
108890         parser = &iter->parser;
108891         if (trace_parser_loaded(parser)) {
108892 -               ftrace_match_records(iter->hash, parser->buffer, parser->idx);
108893 +               int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
108895 +               ftrace_process_regex(iter, parser->buffer,
108896 +                                    parser->idx, enable);
108897         }
108899         trace_parser_put(parser);
108900 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
108901 index c0c9aa5cd8e2..67c01dc5cdeb 100644
108902 --- a/kernel/trace/trace.c
108903 +++ b/kernel/trace/trace.c
108904 @@ -2390,14 +2390,13 @@ static void tracing_stop_tr(struct trace_array *tr)
108906  static int trace_save_cmdline(struct task_struct *tsk)
108908 -       unsigned pid, idx;
108909 +       unsigned tpid, idx;
108911         /* treat recording of idle task as a success */
108912         if (!tsk->pid)
108913                 return 1;
108915 -       if (unlikely(tsk->pid > PID_MAX_DEFAULT))
108916 -               return 0;
108917 +       tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
108919         /*
108920          * It's not the end of the world if we don't get
108921 @@ -2408,26 +2407,15 @@ static int trace_save_cmdline(struct task_struct *tsk)
108922         if (!arch_spin_trylock(&trace_cmdline_lock))
108923                 return 0;
108925 -       idx = savedcmd->map_pid_to_cmdline[tsk->pid];
108926 +       idx = savedcmd->map_pid_to_cmdline[tpid];
108927         if (idx == NO_CMDLINE_MAP) {
108928                 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
108930 -               /*
108931 -                * Check whether the cmdline buffer at idx has a pid
108932 -                * mapped. We are going to overwrite that entry so we
108933 -                * need to clear the map_pid_to_cmdline. Otherwise we
108934 -                * would read the new comm for the old pid.
108935 -                */
108936 -               pid = savedcmd->map_cmdline_to_pid[idx];
108937 -               if (pid != NO_CMDLINE_MAP)
108938 -                       savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
108940 -               savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
108941 -               savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
108943 +               savedcmd->map_pid_to_cmdline[tpid] = idx;
108944                 savedcmd->cmdline_idx = idx;
108945         }
108947 +       savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
108948         set_cmdline(idx, tsk->comm);
108950         arch_spin_unlock(&trace_cmdline_lock);
108951 @@ -2438,6 +2426,7 @@ static int trace_save_cmdline(struct task_struct *tsk)
108952  static void __trace_find_cmdline(int pid, char comm[])
108954         unsigned map;
108955 +       int tpid;
108957         if (!pid) {
108958                 strcpy(comm, "<idle>");
108959 @@ -2449,16 +2438,16 @@ static void __trace_find_cmdline(int pid, char comm[])
108960                 return;
108961         }
108963 -       if (pid > PID_MAX_DEFAULT) {
108964 -               strcpy(comm, "<...>");
108965 -               return;
108966 +       tpid = pid & (PID_MAX_DEFAULT - 1);
108967 +       map = savedcmd->map_pid_to_cmdline[tpid];
108968 +       if (map != NO_CMDLINE_MAP) {
108969 +               tpid = savedcmd->map_cmdline_to_pid[map];
108970 +               if (tpid == pid) {
108971 +                       strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
108972 +                       return;
108973 +               }
108974         }
108976 -       map = savedcmd->map_pid_to_cmdline[pid];
108977 -       if (map != NO_CMDLINE_MAP)
108978 -               strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
108979 -       else
108980 -               strcpy(comm, "<...>");
108981 +       strcpy(comm, "<...>");
108984  void trace_find_cmdline(int pid, char comm[])
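
The rework above stops rejecting pids greater than PID_MAX_DEFAULT: every pid is
folded into the fixed-size map with a mask, and the lookup side verifies the hit
against the full pid saved alongside it. A freestanding sketch of that
collision-tolerant mapping (array names and sizes simplified for illustration;
not part of the patch):

    #define MAP_SIZE 0x8000                     /* PID_MAX_DEFAULT, a power of two */

    static unsigned int map_pid_to_idx[MAP_SIZE];
    static int map_idx_to_pid[MAP_SIZE];

    static void sketch_save(int pid, unsigned int idx)
    {
            int tpid = pid & (MAP_SIZE - 1);    /* any pid fits the table */

            map_pid_to_idx[tpid] = idx;
            map_idx_to_pid[idx] = pid;          /* remember the full pid */
    }

    static int sketch_find(int pid, unsigned int *idx)
    {
            int tpid = pid & (MAP_SIZE - 1);
            unsigned int map = map_pid_to_idx[tpid];

            /* Two pids can collide on tpid; trust the slot only when the
             * full pid stored at save time matches the one asked for. */
            if (map_idx_to_pid[map] == pid) {
                    *idx = map;
                    return 1;
            }
            return 0;                           /* caller falls back to "<...>" */
    }
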
108985 diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
108986 index aaf6793ededa..c1637f90c8a3 100644
108987 --- a/kernel/trace/trace_clock.c
108988 +++ b/kernel/trace/trace_clock.c
108989 @@ -95,33 +95,49 @@ u64 notrace trace_clock_global(void)
108991         unsigned long flags;
108992         int this_cpu;
108993 -       u64 now;
108994 +       u64 now, prev_time;
108996         raw_local_irq_save(flags);
108998         this_cpu = raw_smp_processor_id();
108999 -       now = sched_clock_cpu(this_cpu);
109001         /*
109002 -        * If in an NMI context then dont risk lockups and return the
109003 -        * cpu_clock() time:
109004 +        * The global clock "guarantees" that the events are ordered
109005 +        * between CPUs. But if two events on two different CPUs call
109006 +        * trace_clock_global at roughly the same time, it really does
109007 +        * not matter which one gets the earlier time. Just make sure
109008 +        * that the same CPU will always show a monotonic clock.
109009 +        *
109010 +        * Use a read memory barrier to get the latest written
109011 +        * time that was recorded.
109012          */
109013 -       if (unlikely(in_nmi()))
109014 -               goto out;
109015 +       smp_rmb();
109016 +       prev_time = READ_ONCE(trace_clock_struct.prev_time);
109017 +       now = sched_clock_cpu(this_cpu);
109019 -       arch_spin_lock(&trace_clock_struct.lock);
109020 +       /* Make sure that now is always greater than prev_time */
109021 +       if ((s64)(now - prev_time) < 0)
109022 +               now = prev_time + 1;
109024         /*
109025 -        * TODO: if this happens often then maybe we should reset
109026 -        * my_scd->clock to prev_time+1, to make sure
109027 -        * we start ticking with the local clock from now on?
109028 +        * If in an NMI context then don't risk lockups and simply return
109029 +        * the current time.
109030          */
109031 -       if ((s64)(now - trace_clock_struct.prev_time) < 0)
109032 -               now = trace_clock_struct.prev_time + 1;
109033 +       if (unlikely(in_nmi()))
109034 +               goto out;
109036 -       trace_clock_struct.prev_time = now;
109037 +       /* Tracing can cause strange recursion, always use a try lock */
109038 +       if (arch_spin_trylock(&trace_clock_struct.lock)) {
109039 +               /* Reread prev_time in case it was already updated */
109040 +               prev_time = READ_ONCE(trace_clock_struct.prev_time);
109041 +               if ((s64)(now - prev_time) < 0)
109042 +                       now = prev_time + 1;
109044 -       arch_spin_unlock(&trace_clock_struct.lock);
109045 +               trace_clock_struct.prev_time = now;
109047 +               /* The unlock acts as the wmb for the above rmb */
109048 +               arch_spin_unlock(&trace_clock_struct.lock);
109049 +       }
109050   out:
109051         raw_local_irq_restore(flags);
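
The trace_clock_global() rework keeps the clock monotonic without ever spinning:
every reader corrects its timestamp against the last published value, and only an
uncontended trylock publishes a newer one. The pattern in isolation (stand-in
names; illustrative only, not part of the patch):

    static u64 prev_time;
    static arch_spinlock_t clock_lock = __ARCH_SPIN_LOCK_UNLOCKED;

    static u64 monotonic_now(u64 raw)
    {
            u64 prev;

            smp_rmb();                      /* see the latest published time */
            prev = READ_ONCE(prev_time);
            if ((s64)(raw - prev) < 0)      /* never step backwards */
                    raw = prev + 1;

            /* Tracing can recurse into itself, so never spin: losing the
             * trylock just means another CPU is publishing a newer value. */
            if (arch_spin_trylock(&clock_lock)) {
                    prev = READ_ONCE(prev_time);    /* may have advanced */
                    if ((s64)(raw - prev) < 0)
                            raw = prev + 1;
                    prev_time = raw;
                    arch_spin_unlock(&clock_lock);  /* release orders the store,
                                                     * pairing with the rmb */
            }
            return raw;
    }
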
109053 diff --git a/kernel/up.c b/kernel/up.c
109054 index c6f323dcd45b..4edd5493eba2 100644
109055 --- a/kernel/up.c
109056 +++ b/kernel/up.c
109057 @@ -25,7 +25,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
109059  EXPORT_SYMBOL(smp_call_function_single);
109061 -int smp_call_function_single_async(int cpu, call_single_data_t *csd)
109062 +int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
109064         unsigned long flags;
109066 diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
109067 index 9a4b980d695b..0475d15b1c66 100644
109068 --- a/kernel/user_namespace.c
109069 +++ b/kernel/user_namespace.c
109070 @@ -21,6 +21,9 @@
109071  #include <linux/bsearch.h>
109072  #include <linux/sort.h>
109074 +/* sysctl */
109075 +int unprivileged_userns_clone = 1;
109077  static struct kmem_cache *user_ns_cachep __read_mostly;
109078  static DEFINE_MUTEX(userns_state_mutex);
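
Only the sysctl knob itself is introduced here; the fork/sysctl hunks that consume
it live elsewhere in this patch. For orientation, the gate such hardened patches
typically wire up looks like the following (the placement and exact capability
check are an assumption, shown purely for illustration):

    /* hypothetical check at user-namespace creation time */
    static int userns_create_gate(void)
    {
            if (!unprivileged_userns_clone && !capable(CAP_SYS_ADMIN))
                    return -EPERM;
            return 0;
    }
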
109080 diff --git a/kernel/watchdog.c b/kernel/watchdog.c
109081 index 107bc38b1945..8cf0678378d2 100644
109082 --- a/kernel/watchdog.c
109083 +++ b/kernel/watchdog.c
109084 @@ -154,7 +154,11 @@ static void lockup_detector_update_enable(void)
109086  #ifdef CONFIG_SOFTLOCKUP_DETECTOR
109088 -#define SOFTLOCKUP_RESET       ULONG_MAX
109089 +/*
109090 + * Delay the softlockup report when running known slow code.
109091 + * It does _not_ affect the timestamp of the last successful reschedule.
109092 + */
109093 +#define SOFTLOCKUP_DELAY_REPORT        ULONG_MAX
109095  #ifdef CONFIG_SMP
109096  int __read_mostly sysctl_softlockup_all_cpu_backtrace;
109097 @@ -169,10 +173,12 @@ unsigned int __read_mostly softlockup_panic =
109098  static bool softlockup_initialized __read_mostly;
109099  static u64 __read_mostly sample_period;
109101 +/* Timestamp taken after the last successful reschedule. */
109102  static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
109103 +/* Timestamp of the last softlockup report. */
109104 +static DEFINE_PER_CPU(unsigned long, watchdog_report_ts);
109105  static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
109106  static DEFINE_PER_CPU(bool, softlockup_touch_sync);
109107 -static DEFINE_PER_CPU(bool, soft_watchdog_warn);
109108  static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
109109  static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
109110  static unsigned long soft_lockup_nmi_warn;
109111 @@ -235,10 +241,16 @@ static void set_sample_period(void)
109112         watchdog_update_hrtimer_threshold(sample_period);
109115 +static void update_report_ts(void)
109117 +       __this_cpu_write(watchdog_report_ts, get_timestamp());
109120  /* Commands for resetting the watchdog */
109121 -static void __touch_watchdog(void)
109122 +static void update_touch_ts(void)
109124         __this_cpu_write(watchdog_touch_ts, get_timestamp());
109125 +       update_report_ts();
109128  /**
109129 @@ -252,10 +264,10 @@ static void __touch_watchdog(void)
109130  notrace void touch_softlockup_watchdog_sched(void)
109132         /*
109133 -        * Preemption can be enabled.  It doesn't matter which CPU's timestamp
109134 -        * gets zeroed here, so use the raw_ operation.
109135 +        * Preemption can be enabled.  It doesn't matter which CPU's watchdog
109136 +        * report period gets restarted here, so use the raw_ operation.
109137          */
109138 -       raw_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET);
109139 +       raw_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
109142  notrace void touch_softlockup_watchdog(void)
109143 @@ -279,7 +291,7 @@ void touch_all_softlockup_watchdogs(void)
109144          * the softlockup check.
109145          */
109146         for_each_cpu(cpu, &watchdog_allowed_mask) {
109147 -               per_cpu(watchdog_touch_ts, cpu) = SOFTLOCKUP_RESET;
109148 +               per_cpu(watchdog_report_ts, cpu) = SOFTLOCKUP_DELAY_REPORT;
109149                 wq_watchdog_touch(cpu);
109150         }
109152 @@ -287,16 +299,16 @@ void touch_all_softlockup_watchdogs(void)
109153  void touch_softlockup_watchdog_sync(void)
109155         __this_cpu_write(softlockup_touch_sync, true);
109156 -       __this_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET);
109157 +       __this_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
109160 -static int is_softlockup(unsigned long touch_ts)
109161 +static int is_softlockup(unsigned long touch_ts, unsigned long period_ts)
109163         unsigned long now = get_timestamp();
109165         if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh){
109166                 /* Warn about unreasonable delays. */
109167 -               if (time_after(now, touch_ts + get_softlockup_thresh()))
109168 +               if (time_after(now, period_ts + get_softlockup_thresh()))
109169                         return now - touch_ts;
109170         }
109171         return 0;
109172 @@ -332,7 +344,7 @@ static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);
109173   */
109174  static int softlockup_fn(void *data)
109176 -       __touch_watchdog();
109177 +       update_touch_ts();
109178         complete(this_cpu_ptr(&softlockup_completion));
109180         return 0;
109181 @@ -342,6 +354,7 @@ static int softlockup_fn(void *data)
109182  static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
109184         unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
109185 +       unsigned long period_ts = __this_cpu_read(watchdog_report_ts);
109186         struct pt_regs *regs = get_irq_regs();
109187         int duration;
109188         int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
109189 @@ -363,7 +376,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
109190         /* .. and repeat */
109191         hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
109193 -       if (touch_ts == SOFTLOCKUP_RESET) {
109194 +       /* Reset the interval when touched externally by known slow code. */
109195 +       if (period_ts == SOFTLOCKUP_DELAY_REPORT) {
109196                 if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
109197                         /*
109198                          * If the time stamp was touched atomically
109199 @@ -375,7 +389,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
109201                 /* Clear the guest paused flag on watchdog reset */
109202                 kvm_check_and_clear_guest_paused();
109203 -               __touch_watchdog();
109204 +               update_report_ts();
109206                 return HRTIMER_RESTART;
109207         }
109209 @@ -385,7 +400,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
109210          * indicate it is getting cpu time.  If it hasn't then
109211          * this is a good indication some task is hogging the cpu
109212          */
109213 -       duration = is_softlockup(touch_ts);
109214 +       duration = is_softlockup(touch_ts, period_ts);
109215         if (unlikely(duration)) {
109216                 /*
109217                  * If a virtual machine is stopped by the host it can look to
109218 @@ -395,21 +410,18 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
109219                 if (kvm_check_and_clear_guest_paused())
109220                         return HRTIMER_RESTART;
109222 -               /* only warn once */
109223 -               if (__this_cpu_read(soft_watchdog_warn) == true)
109224 -                       return HRTIMER_RESTART;
109226 +               /*
109227 +                * Prevent multiple soft-lockup reports if one cpu is already
109228 +                * engaged in dumping all cpu back traces.
109229 +                */
109230                 if (softlockup_all_cpu_backtrace) {
109231 -                       /* Prevent multiple soft-lockup reports if one cpu is already
109232 -                        * engaged in dumping cpu back traces
109233 -                        */
109234 -                       if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
109235 -                               /* Someone else will report us. Let's give up */
109236 -                               __this_cpu_write(soft_watchdog_warn, true);
109237 +                       if (test_and_set_bit_lock(0, &soft_lockup_nmi_warn))
109238                                 return HRTIMER_RESTART;
109239 -                       }
109240                 }
109242 +               /* Start period for the next softlockup warning. */
109243 +               update_report_ts();
109245                 pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
109246                         smp_processor_id(), duration,
109247                         current->comm, task_pid_nr(current));
109248 @@ -421,22 +433,14 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
109249                         dump_stack();
109251                 if (softlockup_all_cpu_backtrace) {
109252 -                       /* Avoid generating two back traces for current
109253 -                        * given that one is already made above
109254 -                        */
109255                         trigger_allbutself_cpu_backtrace();
109257 -                       clear_bit(0, &soft_lockup_nmi_warn);
109258 -                       /* Barrier to sync with other cpus */
109259 -                       smp_mb__after_atomic();
109260 +                       clear_bit_unlock(0, &soft_lockup_nmi_warn);
109261                 }
109263                 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
109264                 if (softlockup_panic)
109265                         panic("softlockup: hung tasks");
109266 -               __this_cpu_write(soft_watchdog_warn, true);
109267 -       } else
109268 -               __this_cpu_write(soft_watchdog_warn, false);
109269 +       }
109271         return HRTIMER_RESTART;
109273 @@ -461,7 +465,7 @@ static void watchdog_enable(unsigned int cpu)
109274                       HRTIMER_MODE_REL_PINNED_HARD);
109276         /* Initialize timestamp */
109277 -       __touch_watchdog();
109278 +       update_touch_ts();
109279         /* Enable the perf event */
109280         if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
109281                 watchdog_nmi_enable(cpu);
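
The per-CPU soft_watchdog_warn flag is gone; the detector now keeps two timestamps
and paces its reports off the second one, so "touching" the watchdog from known
slow code delays the report without faking a reschedule. The scheme in isolation
(per-CPU storage flattened to plain variables; illustrative only, not part of the
patch):

    #define SOFTLOCKUP_DELAY_REPORT ULONG_MAX

    static unsigned long touch_ts;          /* last successful reschedule */
    static unsigned long report_ts;         /* start of current report period */

    static int is_softlockup_sketch(unsigned long now, unsigned long thresh)
    {
            /* report when the *report* period has expired ... */
            if (time_after(now, report_ts + thresh))
                    return now - touch_ts;  /* ... but blame the true stall age */
            return 0;
    }

    static void touched_by_slow_code(void)
    {
            /* delay the next report; touch_ts stays untouched */
            report_ts = SOFTLOCKUP_DELAY_REPORT;
    }
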
109282 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
109283 index 417c3d3e521b..03d75fe17edf 100644
109284 --- a/lib/Kconfig.debug
109285 +++ b/lib/Kconfig.debug
109286 @@ -179,7 +179,7 @@ config DYNAMIC_DEBUG_CORE
109288  config SYMBOLIC_ERRNAME
109289         bool "Support symbolic error names in printf"
109290 -       default y if PRINTK
109291 +       default n
109292         help
109293           If you say Y here, the kernel's printf implementation will
109294           be able to print symbolic error names such as ENOSPC instead
109295 @@ -189,7 +189,7 @@ config SYMBOLIC_ERRNAME
109296  config DEBUG_BUGVERBOSE
109297         bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
109298         depends on BUG && (GENERIC_BUG || HAVE_DEBUG_BUGVERBOSE)
109299 -       default y
109300 +       default n
109301         help
109302           Say Y here to make BUG() panics output the file name and line number
109303           of the BUG call as well as the EIP and oops trace.  This aids
109304 diff --git a/lib/Kconfig.kfence b/lib/Kconfig.kfence
109305 index 78f50ccb3b45..e641add33947 100644
109306 --- a/lib/Kconfig.kfence
109307 +++ b/lib/Kconfig.kfence
109308 @@ -7,6 +7,7 @@ menuconfig KFENCE
109309         bool "KFENCE: low-overhead sampling-based memory safety error detector"
109310         depends on HAVE_ARCH_KFENCE && (SLAB || SLUB)
109311         select STACKTRACE
109312 +       select IRQ_WORK
109313         help
109314           KFENCE is a low-overhead sampling-based detector of heap out-of-bounds
109315           access, use-after-free, and invalid-free errors. KFENCE is designed
109316 diff --git a/lib/bug.c b/lib/bug.c
109317 index 8f9d537bfb2a..b92da1f6e21b 100644
109318 --- a/lib/bug.c
109319 +++ b/lib/bug.c
109320 @@ -155,30 +155,27 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
109322         file = NULL;
109323         line = 0;
109324 -       warning = 0;
109326 -       if (bug) {
109327  #ifdef CONFIG_DEBUG_BUGVERBOSE
109328  #ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
109329 -               file = bug->file;
109330 +       file = bug->file;
109331  #else
109332 -               file = (const char *)bug + bug->file_disp;
109333 +       file = (const char *)bug + bug->file_disp;
109334  #endif
109335 -               line = bug->line;
109336 +       line = bug->line;
109337  #endif
109338 -               warning = (bug->flags & BUGFLAG_WARNING) != 0;
109339 -               once = (bug->flags & BUGFLAG_ONCE) != 0;
109340 -               done = (bug->flags & BUGFLAG_DONE) != 0;
109342 -               if (warning && once) {
109343 -                       if (done)
109344 -                               return BUG_TRAP_TYPE_WARN;
109346 -                       /*
109347 -                        * Since this is the only store, concurrency is not an issue.
109348 -                        */
109349 -                       bug->flags |= BUGFLAG_DONE;
109350 -               }
109351 +       warning = (bug->flags & BUGFLAG_WARNING) != 0;
109352 +       once = (bug->flags & BUGFLAG_ONCE) != 0;
109353 +       done = (bug->flags & BUGFLAG_DONE) != 0;
109355 +       if (warning && once) {
109356 +               if (done)
109357 +                       return BUG_TRAP_TYPE_WARN;
109359 +               /*
109360 +                * Since this is the only store, concurrency is not an issue.
109361 +                */
109362 +               bug->flags |= BUGFLAG_DONE;
109363         }
109365         /*
109366 diff --git a/lib/crypto/poly1305-donna32.c b/lib/crypto/poly1305-donna32.c
109367 index 3cc77d94390b..7fb71845cc84 100644
109368 --- a/lib/crypto/poly1305-donna32.c
109369 +++ b/lib/crypto/poly1305-donna32.c
109370 @@ -10,7 +10,8 @@
109371  #include <asm/unaligned.h>
109372  #include <crypto/internal/poly1305.h>
109374 -void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16])
109375 +void poly1305_core_setkey(struct poly1305_core_key *key,
109376 +                         const u8 raw_key[POLY1305_BLOCK_SIZE])
109378         /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
109379         key->key.r[0] = (get_unaligned_le32(&raw_key[0])) & 0x3ffffff;
109380 diff --git a/lib/crypto/poly1305-donna64.c b/lib/crypto/poly1305-donna64.c
109381 index 6ae181bb4345..d34cf4053668 100644
109382 --- a/lib/crypto/poly1305-donna64.c
109383 +++ b/lib/crypto/poly1305-donna64.c
109384 @@ -12,7 +12,8 @@
109386  typedef __uint128_t u128;
109388 -void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16])
109389 +void poly1305_core_setkey(struct poly1305_core_key *key,
109390 +                         const u8 raw_key[POLY1305_BLOCK_SIZE])
109392         u64 t0, t1;
109394 diff --git a/lib/crypto/poly1305.c b/lib/crypto/poly1305.c
109395 index 9d2d14df0fee..26d87fc3823e 100644
109396 --- a/lib/crypto/poly1305.c
109397 +++ b/lib/crypto/poly1305.c
109398 @@ -12,7 +12,8 @@
109399  #include <linux/module.h>
109400  #include <asm/unaligned.h>
109402 -void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key)
109403 +void poly1305_init_generic(struct poly1305_desc_ctx *desc,
109404 +                          const u8 key[POLY1305_KEY_SIZE])
109406         poly1305_core_setkey(&desc->core_r, key);
109407         desc->s[0] = get_unaligned_le32(key + 16);
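
All three poly1305 hunks serve the same purpose: gcc-11's -Warray-parameter warns
when a declaration and its definition spell an array parameter's bound
differently, so the definitions are made to repeat the bound from the headers. A
minimal illustration (hypothetical function; u8 as in <linux/types.h>):

    typedef unsigned char u8;

    /* header: the bound is spelled out */
    void load_key(const u8 raw_key[16]);

    /* definition repeats it; writing plain `const u8 *raw_key` here
     * would trigger gcc-11 -Warray-parameter */
    void load_key(const u8 raw_key[16])
    {
            /* ... */
    }
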
109408 diff --git a/lib/decompress_unzstd.c b/lib/decompress_unzstd.c
109409 index 790abc472f5b..6e5ecfba0a8d 100644
109410 --- a/lib/decompress_unzstd.c
109411 +++ b/lib/decompress_unzstd.c
109412 @@ -68,11 +68,7 @@
109413  #ifdef STATIC
109414  # define UNZSTD_PREBOOT
109415  # include "xxhash.c"
109416 -# include "zstd/entropy_common.c"
109417 -# include "zstd/fse_decompress.c"
109418 -# include "zstd/huf_decompress.c"
109419 -# include "zstd/zstd_common.c"
109420 -# include "zstd/decompress.c"
109421 +# include "zstd/decompress_sources.h"
109422  #endif
109424  #include <linux/decompress/mm.h>
109425 @@ -91,11 +87,15 @@
109427  static int INIT handle_zstd_error(size_t ret, void (*error)(char *x))
109429 -       const int err = ZSTD_getErrorCode(ret);
109430 +       const zstd_error_code err = zstd_get_error_code(ret);
109432 -       if (!ZSTD_isError(ret))
109433 +       if (!zstd_is_error(ret))
109434                 return 0;
109436 +       /*
109437 +        * zstd_get_error_name() cannot be used because error takes a char *,
109438 +        * not a const char *
109439 +        */
109440         switch (err) {
109441         case ZSTD_error_memory_allocation:
109442                 error("ZSTD decompressor ran out of memory");
109443 @@ -124,28 +124,28 @@ static int INIT decompress_single(const u8 *in_buf, long in_len, u8 *out_buf,
109444                                   long out_len, long *in_pos,
109445                                   void (*error)(char *x))
109447 -       const size_t wksp_size = ZSTD_DCtxWorkspaceBound();
109448 +       const size_t wksp_size = zstd_dctx_workspace_bound();
109449         void *wksp = large_malloc(wksp_size);
109450 -       ZSTD_DCtx *dctx = ZSTD_initDCtx(wksp, wksp_size);
109451 +       zstd_dctx *dctx = zstd_init_dctx(wksp, wksp_size);
109452         int err;
109453         size_t ret;
109455         if (dctx == NULL) {
109456 -               error("Out of memory while allocating ZSTD_DCtx");
109457 +               error("Out of memory while allocating zstd_dctx");
109458                 err = -1;
109459                 goto out;
109460         }
109461         /*
109462          * Find out how large the frame actually is, there may be junk at
109463 -        * the end of the frame that ZSTD_decompressDCtx() can't handle.
109464 +        * the end of the frame that zstd_decompress_dctx() can't handle.
109465          */
109466 -       ret = ZSTD_findFrameCompressedSize(in_buf, in_len);
109467 +       ret = zstd_find_frame_compressed_size(in_buf, in_len);
109468         err = handle_zstd_error(ret, error);
109469         if (err)
109470                 goto out;
109471         in_len = (long)ret;
109473 -       ret = ZSTD_decompressDCtx(dctx, out_buf, out_len, in_buf, in_len);
109474 +       ret = zstd_decompress_dctx(dctx, out_buf, out_len, in_buf, in_len);
109475         err = handle_zstd_error(ret, error);
109476         if (err)
109477                 goto out;
109478 @@ -167,14 +167,14 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len,
109479                          long *in_pos,
109480                          void (*error)(char *x))
109482 -       ZSTD_inBuffer in;
109483 -       ZSTD_outBuffer out;
109484 -       ZSTD_frameParams params;
109485 +       zstd_in_buffer in;
109486 +       zstd_out_buffer out;
109487 +       zstd_frame_header header;
109488         void *in_allocated = NULL;
109489         void *out_allocated = NULL;
109490         void *wksp = NULL;
109491         size_t wksp_size;
109492 -       ZSTD_DStream *dstream;
109493 +       zstd_dstream *dstream;
109494         int err;
109495         size_t ret;
109497 @@ -238,13 +238,13 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len,
109498         out.size = out_len;
109500         /*
109501 -        * We need to know the window size to allocate the ZSTD_DStream.
109502 +        * We need to know the window size to allocate the zstd_dstream.
109503          * Since we are streaming, we need to allocate a buffer for the sliding
109504          * window. The window size varies from 1 KB to ZSTD_WINDOWSIZE_MAX
109505          * (8 MB), so it is important to use the actual value so as not to
109506          * waste memory when it is smaller.
109507          */
109508 -       ret = ZSTD_getFrameParams(&params, in.src, in.size);
109509 +       ret = zstd_get_frame_header(&header, in.src, in.size);
109510         err = handle_zstd_error(ret, error);
109511         if (err)
109512                 goto out;
109513 @@ -253,19 +253,19 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len,
109514                 err = -1;
109515                 goto out;
109516         }
109517 -       if (params.windowSize > ZSTD_WINDOWSIZE_MAX) {
109518 +       if (header.windowSize > ZSTD_WINDOWSIZE_MAX) {
109519                 error("ZSTD-compressed data has too large a window size");
109520                 err = -1;
109521                 goto out;
109522         }
109524         /*
109525 -        * Allocate the ZSTD_DStream now that we know how much memory is
109526 +        * Allocate the zstd_dstream now that we know how much memory is
109527          * required.
109528          */
109529 -       wksp_size = ZSTD_DStreamWorkspaceBound(params.windowSize);
109530 +       wksp_size = zstd_dstream_workspace_bound(header.windowSize);
109531         wksp = large_malloc(wksp_size);
109532 -       dstream = ZSTD_initDStream(params.windowSize, wksp, wksp_size);
109533 +       dstream = zstd_init_dstream(header.windowSize, wksp, wksp_size);
109534         if (dstream == NULL) {
109535                 error("Out of memory while allocating ZSTD_DStream");
109536                 err = -1;
109537 @@ -298,7 +298,7 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len,
109538                         in.size = in_len;
109539                 }
109540                 /* Returns zero when the frame is complete. */
109541 -               ret = ZSTD_decompressStream(dstream, &out, &in);
109542 +               ret = zstd_decompress_stream(dstream, &out, &in);
109543                 err = handle_zstd_error(ret, error);
109544                 if (err)
109545                         goto out;
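
With the rename, the in-kernel zstd API is snake_case and works on caller-provided
workspaces. A trimmed sketch of the single-frame path shown above (error reporting
and bounds checks dropped; large_malloc/large_free are the preboot helpers from
<linux/decompress/mm.h>; illustrative only, not part of the patch):

    static int unzstd_one_frame(const u8 *in, size_t in_len,
                                u8 *out, size_t out_len)
    {
            size_t wksp_size = zstd_dctx_workspace_bound();
            void *wksp = large_malloc(wksp_size);
            zstd_dctx *dctx = zstd_init_dctx(wksp, wksp_size);
            size_t ret;
            int err = -1;

            if (!dctx)
                    goto out;

            /* there may be junk after the frame: shrink in_len to exactly
             * one compressed frame first */
            ret = zstd_find_frame_compressed_size(in, in_len);
            if (zstd_is_error(ret))
                    goto out;
            in_len = ret;

            ret = zstd_decompress_dctx(dctx, out, out_len, in, in_len);
            if (!zstd_is_error(ret))
                    err = 0;
    out:
            if (wksp)
                    large_free(wksp);
            return err;
    }
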
109546 diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
109547 index c70d6347afa2..921d0a654243 100644
109548 --- a/lib/dynamic_debug.c
109549 +++ b/lib/dynamic_debug.c
109550 @@ -396,7 +396,7 @@ static int ddebug_parse_query(char *words[], int nwords,
109551                         /* tail :$info is function or line-range */
109552                         fline = strchr(query->filename, ':');
109553                         if (!fline)
109554 -                               break;
109555 +                               continue;
109556                         *fline++ = '\0';
109557                         if (isalpha(*fline) || *fline == '*' || *fline == '?') {
109558                                 /* take as function name */
109559 diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
109560 index 7998affa45d4..c87d5b6a8a55 100644
109561 --- a/lib/kobject_uevent.c
109562 +++ b/lib/kobject_uevent.c
109563 @@ -251,12 +251,13 @@ static int kobj_usermode_filter(struct kobject *kobj)
109565  static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem)
109567 +       int buffer_size = sizeof(env->buf) - env->buflen;
109568         int len;
109570 -       len = strlcpy(&env->buf[env->buflen], subsystem,
109571 -                     sizeof(env->buf) - env->buflen);
109572 -       if (len >= (sizeof(env->buf) - env->buflen)) {
109573 -               WARN(1, KERN_ERR "init_uevent_argv: buffer size too small\n");
109574 +       len = strlcpy(&env->buf[env->buflen], subsystem, buffer_size);
109575 +       if (len >= buffer_size) {
109576 +               pr_warn("init_uevent_argv: buffer size of %d too small, needed %d\n",
109577 +                       buffer_size, len);
109578                 return -ENOMEM;
109579         }
109581 diff --git a/lib/nlattr.c b/lib/nlattr.c
109582 index 5b6116e81f9f..1d051ef66afe 100644
109583 --- a/lib/nlattr.c
109584 +++ b/lib/nlattr.c
109585 @@ -828,7 +828,7 @@ int nla_strcmp(const struct nlattr *nla, const char *str)
109586         int attrlen = nla_len(nla);
109587         int d;
109589 -       if (attrlen > 0 && buf[attrlen - 1] == '\0')
109590 +       while (attrlen > 0 && buf[attrlen - 1] == '\0')
109591                 attrlen--;
109593         d = attrlen - len;
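
The one-word change above matters because a netlink string attribute can arrive
with more than one trailing NUL; with `if`, only a single NUL was stripped, so
"foo\0\0" still compared unequal to "foo". The loop in isolation (illustrative
only):

    static int trimmed_len(const char *buf, int attrlen)
    {
            /* strip ALL trailing NULs, not just the last one */
            while (attrlen > 0 && buf[attrlen - 1] == '\0')
                    attrlen--;
            return attrlen;         /* 5-byte "foo\0\0" trims to 3 */
    }
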
109594 diff --git a/lib/stackdepot.c b/lib/stackdepot.c
109595 index 49f67a0c6e5d..df9179f4f441 100644
109596 --- a/lib/stackdepot.c
109597 +++ b/lib/stackdepot.c
109598 @@ -71,7 +71,7 @@ static void *stack_slabs[STACK_ALLOC_MAX_SLABS];
109599  static int depot_index;
109600  static int next_slab_inited;
109601  static size_t depot_offset;
109602 -static DEFINE_SPINLOCK(depot_lock);
109603 +static DEFINE_RAW_SPINLOCK(depot_lock);
109605  static bool init_stack_slab(void **prealloc)
109607 @@ -305,7 +305,7 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
109608                         prealloc = page_address(page);
109609         }
109611 -       spin_lock_irqsave(&depot_lock, flags);
109612 +       raw_spin_lock_irqsave(&depot_lock, flags);
109614         found = find_stack(*bucket, entries, nr_entries, hash);
109615         if (!found) {
109616 @@ -329,7 +329,7 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
109617                 WARN_ON(!init_stack_slab(&prealloc));
109618         }
109620 -       spin_unlock_irqrestore(&depot_lock, flags);
109621 +       raw_spin_unlock_irqrestore(&depot_lock, flags);
109622  exit:
109623         if (prealloc) {
109624                 /* Nobody used this memory, ok to free it. */
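
On PREEMPT_RT a spinlock_t becomes a sleeping lock, but the stack depot is fed
from allocation and fault paths that must not sleep, hence the switch to
raw_spinlock_t, which stays a true spinning lock on every configuration. The
pattern (illustrative only, not part of the patch):

    static DEFINE_RAW_SPINLOCK(depot_lock);

    static void depot_insert_sketch(void)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&depot_lock, flags);
            /* ... find or insert the stack record ... */
            raw_spin_unlock_irqrestore(&depot_lock, flags);
    }
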
109625 diff --git a/lib/test_kasan.c b/lib/test_kasan.c
109626 index e5647d147b35..be69c3aa615a 100644
109627 --- a/lib/test_kasan.c
109628 +++ b/lib/test_kasan.c
109629 @@ -646,8 +646,20 @@ static char global_array[10];
109631  static void kasan_global_oob(struct kunit *test)
109633 -       volatile int i = 3;
109634 -       char *p = &global_array[ARRAY_SIZE(global_array) + i];
109635 +       /*
109636 +        * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
109637 +        * from failing here and panicking the kernel, access the array via a
109638 +        * volatile pointer, which will prevent the compiler from being able to
109639 +        * determine the array bounds.
109640 +        *
109641 +        * This access uses a volatile pointer to char (char *volatile) rather
109642 +        * than the more conventional pointer to volatile char (volatile char *)
109643 +        * because we want to prevent the compiler from making inferences about
109644 +        * the pointer itself (i.e. its array bounds), not the data that it
109645 +        * refers to.
109646 +        */
109647 +       char *volatile array = global_array;
109648 +       char *p = &array[ARRAY_SIZE(global_array) + 3];
109650         /* Only generic mode instruments globals. */
109651         KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
109652 @@ -695,8 +707,9 @@ static void ksize_uaf(struct kunit *test)
109653  static void kasan_stack_oob(struct kunit *test)
109655         char stack_array[10];
109656 -       volatile int i = OOB_TAG_OFF;
109657 -       char *p = &stack_array[ARRAY_SIZE(stack_array) + i];
109658 +       /* See comment in kasan_global_oob. */
109659 +       char *volatile array = stack_array;
109660 +       char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];
109662         KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
109664 @@ -707,7 +720,9 @@ static void kasan_alloca_oob_left(struct kunit *test)
109666         volatile int i = 10;
109667         char alloca_array[i];
109668 -       char *p = alloca_array - 1;
109669 +       /* See comment in kasan_global_oob. */
109670 +       char *volatile array = alloca_array;
109671 +       char *p = array - 1;
109673         /* Only generic mode instruments dynamic allocas. */
109674         KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
109675 @@ -720,7 +735,9 @@ static void kasan_alloca_oob_right(struct kunit *test)
109677         volatile int i = 10;
109678         char alloca_array[i];
109679 -       char *p = alloca_array + i;
109680 +       /* See comment in kasan_global_oob. */
109681 +       char *volatile array = alloca_array;
109682 +       char *p = array + i;
109684         /* Only generic mode instruments dynamic allocas. */
109685         KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
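
The same `char *volatile` trick is applied in all four KASAN tests above, and the
placement of the qualifier is the whole point (illustrative only, not part of the
patch):

    char buf[10];

    volatile char *p1 = buf;    /* volatile DATA: the compiler still knows
                                 * p1 points into buf and can prove bounds */
    char *volatile p2 = buf;    /* volatile POINTER: every use reloads p2,
                                 * so the compiler can no longer tell which
                                 * array it targets, and UBSAN_LOCAL_BOUNDS
                                 * cannot reject the OOB access at build time */
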
109686 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
109687 index 41ddc353ebb8..39ef2e314da5 100644
109688 --- a/lib/vsprintf.c
109689 +++ b/lib/vsprintf.c
109690 @@ -3135,8 +3135,6 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
109691                         switch (*fmt) {
109692                         case 'S':
109693                         case 's':
109694 -                       case 'F':
109695 -                       case 'f':
109696                         case 'x':
109697                         case 'K':
109698                         case 'e':
109699 diff --git a/lib/zstd/Makefile b/lib/zstd/Makefile
109700 index f5d778e7e5c7..19485e3cc7c9 100644
109701 --- a/lib/zstd/Makefile
109702 +++ b/lib/zstd/Makefile
109703 @@ -1,10 +1,46 @@
109704  # SPDX-License-Identifier: GPL-2.0-only
109705 +# ################################################################
109706 +# Copyright (c) Facebook, Inc.
109707 +# All rights reserved.
109709 +# This source code is licensed under both the BSD-style license (found in the
109710 +# LICENSE file in the root directory of this source tree) and the GPLv2 (found
109711 +# in the COPYING file in the root directory of this source tree).
109712 +# You may select, at your option, one of the above-listed licenses.
109713 +# ################################################################
109714  obj-$(CONFIG_ZSTD_COMPRESS) += zstd_compress.o
109715  obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o
109717  ccflags-y += -O3
109719 -zstd_compress-y := fse_compress.o huf_compress.o compress.o \
109720 -                  entropy_common.o fse_decompress.o zstd_common.o
109721 -zstd_decompress-y := huf_decompress.o decompress.o \
109722 -                    entropy_common.o fse_decompress.o zstd_common.o
109723 +zstd_compress-y := \
109724 +               zstd_compress_module.o \
109725 +               common/debug.o \
109726 +               common/entropy_common.o \
109727 +               common/error_private.o \
109728 +               common/fse_decompress.o \
109729 +               common/zstd_common.o \
109730 +               compress/fse_compress.o \
109731 +               compress/hist.o \
109732 +               compress/huf_compress.o \
109733 +               compress/zstd_compress.o \
109734 +               compress/zstd_compress_literals.o \
109735 +               compress/zstd_compress_sequences.o \
109736 +               compress/zstd_compress_superblock.o \
109737 +               compress/zstd_double_fast.o \
109738 +               compress/zstd_fast.o \
109739 +               compress/zstd_lazy.o \
109740 +               compress/zstd_ldm.o \
109741 +               compress/zstd_opt.o \
109743 +zstd_decompress-y := \
109744 +               zstd_decompress_module.o \
109745 +               common/debug.o \
109746 +               common/entropy_common.o \
109747 +               common/error_private.o \
109748 +               common/fse_decompress.o \
109749 +               common/zstd_common.o \
109750 +               decompress/huf_decompress.o \
109751 +               decompress/zstd_ddict.o \
109752 +               decompress/zstd_decompress.o \
109753 +               decompress/zstd_decompress_block.o \
109754 diff --git a/lib/zstd/bitstream.h b/lib/zstd/bitstream.h
109755 deleted file mode 100644
109756 index 5d6343c1a909..000000000000
109757 --- a/lib/zstd/bitstream.h
109758 +++ /dev/null
109759 @@ -1,380 +0,0 @@
109761 - * bitstream
109762 - * Part of FSE library
109763 - * header file (to include)
109764 - * Copyright (C) 2013-2016, Yann Collet.
109766 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
109768 - * Redistribution and use in source and binary forms, with or without
109769 - * modification, are permitted provided that the following conditions are
109770 - * met:
109772 - *   * Redistributions of source code must retain the above copyright
109773 - * notice, this list of conditions and the following disclaimer.
109774 - *   * Redistributions in binary form must reproduce the above
109775 - * copyright notice, this list of conditions and the following disclaimer
109776 - * in the documentation and/or other materials provided with the
109777 - * distribution.
109779 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
109780 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
109781 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
109782 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
109783 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
109784 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
109785 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
109786 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
109787 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
109788 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
109789 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
109791 - * This program is free software; you can redistribute it and/or modify it under
109792 - * the terms of the GNU General Public License version 2 as published by the
109793 - * Free Software Foundation. This program is dual-licensed; you may select
109794 - * either version 2 of the GNU General Public License ("GPL") or BSD license
109795 - * ("BSD").
109797 - * You can contact the author at :
109798 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
109799 - */
109800 -#ifndef BITSTREAM_H_MODULE
109801 -#define BITSTREAM_H_MODULE
109804 -*  This API consists of small unitary functions, which must be inlined for best performance.
109805 -*  Since link-time-optimization is not available for all compilers,
109806 -*  these functions are defined into a .h to be included.
109809 -/*-****************************************
109810 -*  Dependencies
109811 -******************************************/
109812 -#include "error_private.h" /* error codes and messages */
109813 -#include "mem.h"          /* unaligned access routines */
109815 -/*=========================================
109816 -*  Target specific
109817 -=========================================*/
109818 -#define STREAM_ACCUMULATOR_MIN_32 25
109819 -#define STREAM_ACCUMULATOR_MIN_64 57
109820 -#define STREAM_ACCUMULATOR_MIN ((U32)(ZSTD_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))
109822 -/*-******************************************
109823 -*  bitStream encoding API (write forward)
109824 -********************************************/
109825 -/* bitStream can mix input from multiple sources.
109826 -*  A critical property of these streams is that they encode and decode in **reverse** direction.
109827 -*  So the first bit sequence you add will be the last to be read, like a LIFO stack.
109829 -typedef struct {
109830 -       size_t bitContainer;
109831 -       int bitPos;
109832 -       char *startPtr;
109833 -       char *ptr;
109834 -       char *endPtr;
109835 -} BIT_CStream_t;
109837 -ZSTD_STATIC size_t BIT_initCStream(BIT_CStream_t *bitC, void *dstBuffer, size_t dstCapacity);
109838 -ZSTD_STATIC void BIT_addBits(BIT_CStream_t *bitC, size_t value, unsigned nbBits);
109839 -ZSTD_STATIC void BIT_flushBits(BIT_CStream_t *bitC);
109840 -ZSTD_STATIC size_t BIT_closeCStream(BIT_CStream_t *bitC);
109842 -/* Start with initCStream, providing the size of buffer to write into.
109843 -*  bitStream will never write outside of this buffer.
109844 -*  `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code.
109846 -*  bits are first added to a local register.
109847 -*  Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
109848 -*  Writing data into memory is an explicit operation, performed by the flushBits function.
109849 -*  Hence keep track how many bits are potentially stored into local register to avoid register overflow.
109850 -*  After a flushBits, a maximum of 7 bits might still be stored into local register.
109852 -*  Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers.
109854 -*  Last operation is to close the bitStream.
109855 -*  The function returns the final size of CStream in bytes.
109856 -*  If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable)
109859 -/*-********************************************
109860 -*  bitStream decoding API (read backward)
109861 -**********************************************/
109862 -typedef struct {
109863 -       size_t bitContainer;
109864 -       unsigned bitsConsumed;
109865 -       const char *ptr;
109866 -       const char *start;
109867 -} BIT_DStream_t;
109869 -typedef enum {
109870 -       BIT_DStream_unfinished = 0,
109871 -       BIT_DStream_endOfBuffer = 1,
109872 -       BIT_DStream_completed = 2,
109873 -       BIT_DStream_overflow = 3
109874 -} BIT_DStream_status; /* result of BIT_reloadDStream() */
109875 -/* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
109877 -ZSTD_STATIC size_t BIT_initDStream(BIT_DStream_t *bitD, const void *srcBuffer, size_t srcSize);
109878 -ZSTD_STATIC size_t BIT_readBits(BIT_DStream_t *bitD, unsigned nbBits);
109879 -ZSTD_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t *bitD);
109880 -ZSTD_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t *bitD);
109882 -/* Start by invoking BIT_initDStream().
109883 -*  A chunk of the bitStream is then stored into a local register.
109884 -*  Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
109885 -*  You can then retrieve bitFields stored into the local register, **in reverse order**.
109886 -*  Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
109887 -*  A reload guarantee a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
109888 -*  Otherwise, it can be less than that, so proceed accordingly.
109889 -*  Checking if DStream has reached its end can be performed with BIT_endOfDStream().
109892 -/*-****************************************
109893 -*  unsafe API
109894 -******************************************/
109895 -ZSTD_STATIC void BIT_addBitsFast(BIT_CStream_t *bitC, size_t value, unsigned nbBits);
109896 -/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */
109898 -ZSTD_STATIC void BIT_flushBitsFast(BIT_CStream_t *bitC);
109899 -/* unsafe version; does not check buffer overflow */
109901 -ZSTD_STATIC size_t BIT_readBitsFast(BIT_DStream_t *bitD, unsigned nbBits);
109902 -/* faster, but works only if nbBits >= 1 */
109904 -/*-**************************************************************
109905 -*  Internal functions
109906 -****************************************************************/
109907 -ZSTD_STATIC unsigned BIT_highbit32(register U32 val) { return 31 - __builtin_clz(val); }
109909 -/*=====    Local Constants   =====*/
109910 -static const unsigned BIT_mask[] = {0,       1,       3,       7,      0xF,      0x1F,     0x3F,     0x7F,      0xFF,
109911 -                                   0x1FF,   0x3FF,   0x7FF,   0xFFF,    0x1FFF,   0x3FFF,   0x7FFF,   0xFFFF,    0x1FFFF,
109912 -                                   0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF, 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF}; /* up to 26 bits */
109914 -/*-**************************************************************
109915 -*  bitStream encoding
109916 -****************************************************************/
109917 -/*! BIT_initCStream() :
109918 - *  `dstCapacity` must be > sizeof(void*)
109919 - *  @return : 0 if success,
109920 -                         otherwise an error code (can be tested using ERR_isError() ) */
109921 -ZSTD_STATIC size_t BIT_initCStream(BIT_CStream_t *bitC, void *startPtr, size_t dstCapacity)
109923 -       bitC->bitContainer = 0;
109924 -       bitC->bitPos = 0;
109925 -       bitC->startPtr = (char *)startPtr;
109926 -       bitC->ptr = bitC->startPtr;
109927 -       bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->ptr);
109928 -       if (dstCapacity <= sizeof(bitC->ptr))
109929 -               return ERROR(dstSize_tooSmall);
109930 -       return 0;
109933 -/*! BIT_addBits() :
109934 -       can add up to 26 bits into `bitC`.
109935 -       Does not check for register overflow ! */
109936 -ZSTD_STATIC void BIT_addBits(BIT_CStream_t *bitC, size_t value, unsigned nbBits)
109938 -       bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
109939 -       bitC->bitPos += nbBits;
109942 -/*! BIT_addBitsFast() :
109943 - *  works only if `value` is _clean_, meaning all high bits above nbBits are 0 */
109944 -ZSTD_STATIC void BIT_addBitsFast(BIT_CStream_t *bitC, size_t value, unsigned nbBits)
109946 -       bitC->bitContainer |= value << bitC->bitPos;
109947 -       bitC->bitPos += nbBits;
109950 -/*! BIT_flushBitsFast() :
109951 - *  unsafe version; does not check buffer overflow */
109952 -ZSTD_STATIC void BIT_flushBitsFast(BIT_CStream_t *bitC)
109954 -       size_t const nbBytes = bitC->bitPos >> 3;
109955 -       ZSTD_writeLEST(bitC->ptr, bitC->bitContainer);
109956 -       bitC->ptr += nbBytes;
109957 -       bitC->bitPos &= 7;
109958 -       bitC->bitContainer >>= nbBytes * 8; /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */
109961 -/*! BIT_flushBits() :
109962 - *  safe version; check for buffer overflow, and prevents it.
109963 - *  note : does not signal buffer overflow. This will be revealed later on using BIT_closeCStream() */
109964 -ZSTD_STATIC void BIT_flushBits(BIT_CStream_t *bitC)
109966 -       size_t const nbBytes = bitC->bitPos >> 3;
109967 -       ZSTD_writeLEST(bitC->ptr, bitC->bitContainer);
109968 -       bitC->ptr += nbBytes;
109969 -       if (bitC->ptr > bitC->endPtr)
109970 -               bitC->ptr = bitC->endPtr;
109971 -       bitC->bitPos &= 7;
109972 -       bitC->bitContainer >>= nbBytes * 8; /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */
109975 -/*! BIT_closeCStream() :
109976 - *  @return : size of CStream, in bytes,
109977 -                         or 0 if it could not fit into dstBuffer */
109978 -ZSTD_STATIC size_t BIT_closeCStream(BIT_CStream_t *bitC)
109980 -       BIT_addBitsFast(bitC, 1, 1); /* endMark */
109981 -       BIT_flushBits(bitC);
109983 -       if (bitC->ptr >= bitC->endPtr)
109984 -               return 0; /* doesn't fit within authorized budget : cancel */
109986 -       return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
109989 -/*-********************************************************
109990 -* bitStream decoding
109991 -**********************************************************/
109992 -/*! BIT_initDStream() :
109993 -*   Initialize a BIT_DStream_t.
109994 -*   `bitD` : a pointer to an already allocated BIT_DStream_t structure.
109995 -*   `srcSize` must be the *exact* size of the bitStream, in bytes.
109996 -*   @return : size of stream (== srcSize) or an errorCode if a problem is detected
109998 -ZSTD_STATIC size_t BIT_initDStream(BIT_DStream_t *bitD, const void *srcBuffer, size_t srcSize)
110000 -       if (srcSize < 1) {
110001 -               memset(bitD, 0, sizeof(*bitD));
110002 -               return ERROR(srcSize_wrong);
110003 -       }
110005 -       if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */
110006 -               bitD->start = (const char *)srcBuffer;
110007 -               bitD->ptr = (const char *)srcBuffer + srcSize - sizeof(bitD->bitContainer);
110008 -               bitD->bitContainer = ZSTD_readLEST(bitD->ptr);
110009 -               {
110010 -                       BYTE const lastByte = ((const BYTE *)srcBuffer)[srcSize - 1];
110011 -                       bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */
110012 -                       if (lastByte == 0)
110013 -                               return ERROR(GENERIC); /* endMark not present */
110014 -               }
110015 -       } else {
110016 -               bitD->start = (const char *)srcBuffer;
110017 -               bitD->ptr = bitD->start;
110018 -               bitD->bitContainer = *(const BYTE *)(bitD->start);
110019 -               switch (srcSize) {
110020 -               case 7: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[6]) << (sizeof(bitD->bitContainer) * 8 - 16);
110021 -                       fallthrough;
110022 -               case 6: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[5]) << (sizeof(bitD->bitContainer) * 8 - 24);
110023 -                       fallthrough;
110024 -               case 5: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[4]) << (sizeof(bitD->bitContainer) * 8 - 32);
110025 -                       fallthrough;
110026 -               case 4: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[3]) << 24;
110027 -                       fallthrough;
110028 -               case 3: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[2]) << 16;
110029 -                       fallthrough;
110030 -               case 2: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[1]) << 8;
110031 -                       fallthrough;
110032 -               default:;
110033 -               }
110034 -               {
110035 -                       BYTE const lastByte = ((const BYTE *)srcBuffer)[srcSize - 1];
110036 -                       bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
110037 -                       if (lastByte == 0)
110038 -                               return ERROR(GENERIC); /* endMark not present */
110039 -               }
110040 -               bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize) * 8;
110041 -       }
110043 -       return srcSize;
110046 -ZSTD_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start) { return bitContainer >> start; }
110048 -ZSTD_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits) { return (bitContainer >> start) & BIT_mask[nbBits]; }
110050 -ZSTD_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) { return bitContainer & BIT_mask[nbBits]; }
110052 -/*! BIT_lookBits() :
110053 - *  Provides next n bits from local register.
110054 - *  local register is not modified.
110055 - *  On 32-bits, maxNbBits==24.
110056 - *  On 64-bits, maxNbBits==56.
110057 - *  @return : value extracted
110058 - */
110059 -ZSTD_STATIC size_t BIT_lookBits(const BIT_DStream_t *bitD, U32 nbBits)
110061 -       U32 const bitMask = sizeof(bitD->bitContainer) * 8 - 1;
110062 -       return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask - nbBits) & bitMask);
110065 -/*! BIT_lookBitsFast() :
110066 -*   unsafe version; only works only if nbBits >= 1 */
110067 -ZSTD_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t *bitD, U32 nbBits)
110069 -       U32 const bitMask = sizeof(bitD->bitContainer) * 8 - 1;
110070 -       return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask + 1) - nbBits) & bitMask);
110073 -ZSTD_STATIC void BIT_skipBits(BIT_DStream_t *bitD, U32 nbBits) { bitD->bitsConsumed += nbBits; }
110075 -/*! BIT_readBits() :
110076 - *  Read (consume) next n bits from local register and update.
110077 - *  Pay attention to not read more than nbBits contained into local register.
110078 - *  @return : extracted value.
110079 - */
110080 -ZSTD_STATIC size_t BIT_readBits(BIT_DStream_t *bitD, U32 nbBits)
110082 -       size_t const value = BIT_lookBits(bitD, nbBits);
110083 -       BIT_skipBits(bitD, nbBits);
110084 -       return value;
110087 -/*! BIT_readBitsFast() :
110088 -*   unsafe version; only works only if nbBits >= 1 */
110089 -ZSTD_STATIC size_t BIT_readBitsFast(BIT_DStream_t *bitD, U32 nbBits)
110091 -       size_t const value = BIT_lookBitsFast(bitD, nbBits);
110092 -       BIT_skipBits(bitD, nbBits);
110093 -       return value;
110096 -/*! BIT_reloadDStream() :
110097 -*   Refill `bitD` from buffer previously set in BIT_initDStream() .
110098 -*   This function is safe, it guarantees it will not read beyond src buffer.
110099 -*   @return : status of `BIT_DStream_t` internal register.
110100 -                         if status == BIT_DStream_unfinished, internal register is filled with >= (sizeof(bitD->bitContainer)*8 - 7) bits */
110101 -ZSTD_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t *bitD)
110103 -       if (bitD->bitsConsumed > (sizeof(bitD->bitContainer) * 8)) /* should not happen => corruption detected */
110104 -               return BIT_DStream_overflow;
110106 -       if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
110107 -               bitD->ptr -= bitD->bitsConsumed >> 3;
110108 -               bitD->bitsConsumed &= 7;
110109 -               bitD->bitContainer = ZSTD_readLEST(bitD->ptr);
110110 -               return BIT_DStream_unfinished;
110111 -       }
110112 -       if (bitD->ptr == bitD->start) {
110113 -               if (bitD->bitsConsumed < sizeof(bitD->bitContainer) * 8)
110114 -                       return BIT_DStream_endOfBuffer;
110115 -               return BIT_DStream_completed;
110116 -       }
110117 -       {
110118 -               U32 nbBytes = bitD->bitsConsumed >> 3;
110119 -               BIT_DStream_status result = BIT_DStream_unfinished;
110120 -               if (bitD->ptr - nbBytes < bitD->start) {
110121 -                       nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
110122 -                       result = BIT_DStream_endOfBuffer;
110123 -               }
110124 -               bitD->ptr -= nbBytes;
110125 -               bitD->bitsConsumed -= nbBytes * 8;
110126 -               bitD->bitContainer = ZSTD_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */
110127 -               return result;
110128 -       }
110131 -/*! BIT_endOfDStream() :
110132 -*   @return Tells if DStream has exactly reached its end (all bits consumed).
110134 -ZSTD_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t *DStream)
110136 -       return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer) * 8));
110139 -#endif /* BITSTREAM_H_MODULE */
110140 diff --git a/lib/zstd/common/bitstream.h b/lib/zstd/common/bitstream.h
110141 new file mode 100644
110142 index 000000000000..2d6c95b4f40c
110143 --- /dev/null
110144 +++ b/lib/zstd/common/bitstream.h
110145 @@ -0,0 +1,437 @@
110146 +/* ******************************************************************
110147 + * bitstream
110148 + * Part of FSE library
110149 + * Copyright (c) Yann Collet, Facebook, Inc.
110151 + * You can contact the author at :
110152 + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
110154 + * This source code is licensed under both the BSD-style license (found in the
110155 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
110156 + * in the COPYING file in the root directory of this source tree).
110157 + * You may select, at your option, one of the above-listed licenses.
110158 +****************************************************************** */
110159 +#ifndef BITSTREAM_H_MODULE
110160 +#define BITSTREAM_H_MODULE
110163 +*  This API consists of small unitary functions, which must be inlined for best performance.
110164 +*  Since link-time-optimization is not available for all compilers,
110165 +*  these functions are defined into a .h to be included.
110168 +/*-****************************************
110169 +*  Dependencies
110170 +******************************************/
110171 +#include "mem.h"            /* unaligned access routines */
110172 +#include "compiler.h"       /* UNLIKELY() */
110173 +#include "debug.h"          /* assert(), DEBUGLOG(), RAWLOG() */
110174 +#include "error_private.h"  /* error codes and messages */
110177 +/*=========================================
110178 +*  Target specific
110179 +=========================================*/
110181 +#define STREAM_ACCUMULATOR_MIN_32  25
110182 +#define STREAM_ACCUMULATOR_MIN_64  57
110183 +#define STREAM_ACCUMULATOR_MIN    ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))
110186 +/*-******************************************
110187 +*  bitStream encoding API (write forward)
110188 +********************************************/
110189 +/* bitStream can mix input from multiple sources.
110190 + * A critical property of these streams is that they encode and decode in **reverse** direction.
110191 + * So the first bit sequence you add will be the last to be read, like a LIFO stack.
110192 + */
110193 +typedef struct {
110194 +    size_t bitContainer;
110195 +    unsigned bitPos;
110196 +    char*  startPtr;
110197 +    char*  ptr;
110198 +    char*  endPtr;
110199 +} BIT_CStream_t;
110201 +MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity);
110202 +MEM_STATIC void   BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
110203 +MEM_STATIC void   BIT_flushBits(BIT_CStream_t* bitC);
110204 +MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);
110206 +/* Start with initCStream, providing the size of buffer to write into.
110207 +*  bitStream will never write outside of this buffer.
110208 +*  `dstCapacity` must be >= sizeof(bitC->bitContainer), otherwise @return will be an error code.
110210 +*  bits are first added to a local register.
110211 +*  Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
110212 +*  Writing data into memory is an explicit operation, performed by the flushBits function.
110213 +*  Hence keep track of how many bits are potentially stored in the local register to avoid register overflow.
110214 +*  After a flushBits, a maximum of 7 bits might still be stored into local register.
110216 +*  Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers.
110218 +*  Last operation is to close the bitStream.
110219 +*  The function returns the final size of CStream in bytes.
110220 +*  If data couldn't fit into `dstBuffer`, it will return 0 (== not storable)
110221 +*/
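/* A minimal write-side sketch of the API above (illustrative only; the
 * example_* name, dst buffer and field widths are assumptions, not part of
 * the patch). Values must fit their advertised widths, and flushes must be
 * frequent enough that the local register never overflows. */
MEM_STATIC size_t example_writeTwoFields(void* dst, size_t dstSize)
{
    BIT_CStream_t cstream;
    size_t const initErr = BIT_initCStream(&cstream, dst, dstSize);
    if (ERR_isError(initErr)) return initErr;
    BIT_addBits(&cstream, 13, 5);   /* value 13 on 5 bits */
    BIT_addBits(&cstream, 2, 3);    /* value 2 on 3 bits */
    BIT_flushBits(&cstream);        /* commit the local register to memory */
    return BIT_closeCStream(&cstream);  /* byte size of stream, or 0 on overflow */
}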
110224 +/*-********************************************
110225 +*  bitStream decoding API (read backward)
110226 +**********************************************/
110227 +typedef struct {
110228 +    size_t   bitContainer;
110229 +    unsigned bitsConsumed;
110230 +    const char* ptr;
110231 +    const char* start;
110232 +    const char* limitPtr;
110233 +} BIT_DStream_t;
110235 +typedef enum { BIT_DStream_unfinished = 0,
110236 +               BIT_DStream_endOfBuffer = 1,
110237 +               BIT_DStream_completed = 2,
110238 +               BIT_DStream_overflow = 3 } BIT_DStream_status;  /* result of BIT_reloadDStream() */
110239 +               /* 1,2,4,8 would be better for bitmap combinations, but it slows down performance a bit ... :( */
110241 +MEM_STATIC size_t   BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
110242 +MEM_STATIC size_t   BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
110243 +MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
110244 +MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
110247 +/* Start by invoking BIT_initDStream().
110248 +*  A chunk of the bitStream is then stored into a local register.
110249 +*  Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
110250 +*  You can then retrieve bitFields stored into the local register, **in reverse order**.
110251 +*  Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
110252 +*  A reload guarantees a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
110253 +*  Otherwise, it can be less than that, so proceed accordingly.
110254 +*  Checking if DStream has reached its end can be performed with BIT_endOfDStream().
110255 +*/
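/* The matching read-side sketch (illustrative only; assumes src/srcSize hold
 * a stream produced as in the write sketch above). Note the LIFO order:
 * the last field written is the first one read. */
MEM_STATIC size_t example_readTwoFields(const void* src, size_t srcSize)
{
    BIT_DStream_t dstream;
    size_t const initResult = BIT_initDStream(&dstream, src, srcSize);
    if (ERR_isError(initResult)) return initResult;
    {   size_t const second = BIT_readBits(&dstream, 3);   /* reads back 2 */
        size_t const first  = BIT_readBits(&dstream, 5);   /* reads back 13 */
        (void)first; (void)second;
    }
    /* For longer payloads, call BIT_reloadDStream() between reads to refill
     * the register once it runs low. */
    return BIT_endOfDStream(&dstream) ? 0 : ERROR(corruption_detected);
}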
110258 +/*-****************************************
110259 +*  unsafe API
110260 +******************************************/
110261 +MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
110262 +/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */
110264 +MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC);
110265 +/* unsafe version; does not check buffer overflow */
110267 +MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
110268 +/* faster, but works only if nbBits >= 1 */
110272 +/*-**************************************************************
110273 +*  Internal functions
110274 +****************************************************************/
110275 +MEM_STATIC unsigned BIT_highbit32 (U32 val)
110276 +{
110277 +    assert(val != 0);
110278 +    {
110279 +#   if (__GNUC__ >= 3)   /* Use GCC Intrinsic */
110280 +        return __builtin_clz (val) ^ 31;
110281 +#   else   /* Software version */
110282 +        static const unsigned DeBruijnClz[32] = { 0,  9,  1, 10, 13, 21,  2, 29,
110283 +                                                 11, 14, 16, 18, 22, 25,  3, 30,
110284 +                                                  8, 12, 20, 28, 15, 17, 24,  7,
110285 +                                                 19, 27, 23,  6, 26,  5,  4, 31 };
110286 +        U32 v = val;
110287 +        v |= v >> 1;
110288 +        v |= v >> 2;
110289 +        v |= v >> 4;
110290 +        v |= v >> 8;
110291 +        v |= v >> 16;
110292 +        return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
110293 +#   endif
110294 +    }
110295 +}
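/* Worked examples for BIT_highbit32() (illustrative): BIT_highbit32(1) == 0
 * and BIT_highbit32(0x80000000) == 31, i.e. the zero-based position of the
 * highest set bit (31 - clz). The fallback first smears the top bit down so
 * v becomes 2^(n+1)-1, then uses the De Bruijn multiply as a perfect hash
 * into the 32-entry table. */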
110297 +/*=====    Local Constants   =====*/
110298 +static const unsigned BIT_mask[] = {
110299 +    0,          1,         3,         7,         0xF,       0x1F,
110300 +    0x3F,       0x7F,      0xFF,      0x1FF,     0x3FF,     0x7FF,
110301 +    0xFFF,      0x1FFF,    0x3FFF,    0x7FFF,    0xFFFF,    0x1FFFF,
110302 +    0x3FFFF,    0x7FFFF,   0xFFFFF,   0x1FFFFF,  0x3FFFFF,  0x7FFFFF,
110303 +    0xFFFFFF,   0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF,
110304 +    0x3FFFFFFF, 0x7FFFFFFF}; /* up to 31 bits */
110305 +#define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0]))
110307 +/*-**************************************************************
110308 +*  bitStream encoding
110309 +****************************************************************/
110310 +/*! BIT_initCStream() :
110311 + *  `dstCapacity` must be > sizeof(size_t)
110312 + *  @return : 0 if success,
110313 + *            otherwise an error code (can be tested using ERR_isError()) */
110314 +MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
110315 +                                  void* startPtr, size_t dstCapacity)
110316 +{
110317 +    bitC->bitContainer = 0;
110318 +    bitC->bitPos = 0;
110319 +    bitC->startPtr = (char*)startPtr;
110320 +    bitC->ptr = bitC->startPtr;
110321 +    bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer);
110322 +    if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall);
110323 +    return 0;
110324 +}
110326 +/*! BIT_addBits() :
110327 + *  can add up to 31 bits into `bitC`.
110328 + *  Note : does not check for register overflow ! */
110329 +MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
110330 +                            size_t value, unsigned nbBits)
110331 +{
110332 +    DEBUG_STATIC_ASSERT(BIT_MASK_SIZE == 32);
110333 +    assert(nbBits < BIT_MASK_SIZE);
110334 +    assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
110335 +    bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
110336 +    bitC->bitPos += nbBits;
110337 +}
110339 +/*! BIT_addBitsFast() :
110340 + *  works only if `value` is _clean_,
110341 + *  meaning all high bits above nbBits are 0 */
110342 +MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,
110343 +                                size_t value, unsigned nbBits)
110344 +{
110345 +    assert((value>>nbBits) == 0);
110346 +    assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
110347 +    bitC->bitContainer |= value << bitC->bitPos;
110348 +    bitC->bitPos += nbBits;
110349 +}
110351 +/*! BIT_flushBitsFast() :
110352 + *  assumption : bitContainer has not overflowed
110353 + *  unsafe version; does not check buffer overflow */
110354 +MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
110355 +{
110356 +    size_t const nbBytes = bitC->bitPos >> 3;
110357 +    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
110358 +    assert(bitC->ptr <= bitC->endPtr);
110359 +    MEM_writeLEST(bitC->ptr, bitC->bitContainer);
110360 +    bitC->ptr += nbBytes;
110361 +    bitC->bitPos &= 7;
110362 +    bitC->bitContainer >>= nbBytes*8;
110363 +}
110365 +/*! BIT_flushBits() :
110366 + *  assumption : bitContainer has not overflowed
110367 + *  safe version; check for buffer overflow, and prevents it.
110368 + *  note : does not signal buffer overflow.
110369 + *  overflow will be revealed later on using BIT_closeCStream() */
110370 +MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
110371 +{
110372 +    size_t const nbBytes = bitC->bitPos >> 3;
110373 +    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
110374 +    assert(bitC->ptr <= bitC->endPtr);
110375 +    MEM_writeLEST(bitC->ptr, bitC->bitContainer);
110376 +    bitC->ptr += nbBytes;
110377 +    if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
110378 +    bitC->bitPos &= 7;
110379 +    bitC->bitContainer >>= nbBytes*8;
110380 +}
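/* Worked example for the flush logic above (illustrative): with bitPos == 23,
 * nbBytes == 2, so two whole bytes are written out; bitPos drops to 7
 * (23 & 7) and the container shifts right by 16, re-aligning the 7 leftover
 * bits at position 0 for the next BIT_addBits(). */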
110382 +/*! BIT_closeCStream() :
110383 + *  @return : size of CStream, in bytes,
110384 + *            or 0 if it could not fit into dstBuffer */
110385 +MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
110386 +{
110387 +    BIT_addBitsFast(bitC, 1, 1);   /* endMark */
110388 +    BIT_flushBits(bitC);
110389 +    if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
110390 +    return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
110391 +}
110394 +/*-********************************************************
110395 +*  bitStream decoding
110396 +**********************************************************/
110397 +/*! BIT_initDStream() :
110398 + *  Initialize a BIT_DStream_t.
110399 + * `bitD` : a pointer to an already allocated BIT_DStream_t structure.
110400 + * `srcSize` must be the *exact* size of the bitStream, in bytes.
110401 + * @return : size of stream (== srcSize), or an errorCode if a problem is detected
110402 + */
110403 +MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
110404 +{
110405 +    if (srcSize < 1) { ZSTD_memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
110407 +    bitD->start = (const char*)srcBuffer;
110408 +    bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer);
110410 +    if (srcSize >=  sizeof(bitD->bitContainer)) {  /* normal case */
110411 +        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
110412 +        bitD->bitContainer = MEM_readLEST(bitD->ptr);
110413 +        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
110414 +          bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;  /* ensures bitsConsumed is always set */
110415 +          if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
110416 +    } else {
110417 +        bitD->ptr   = bitD->start;
110418 +        bitD->bitContainer = *(const BYTE*)(bitD->start);
110419 +        switch(srcSize)
110420 +        {
110421 +        case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
110422 +                /* fall-through */
110424 +        case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
110425 +                /* fall-through */
110427 +        case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
110428 +                /* fall-through */
110430 +        case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
110431 +                /* fall-through */
110433 +        case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
110434 +                /* fall-through */
110436 +        case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) <<  8;
110437 +                /* fall-through */
110439 +        default: break;
110440 +        }
110441 +        {   BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
110442 +            bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
110443 +            if (lastByte == 0) return ERROR(corruption_detected);  /* endMark not present */
110444 +        }
110445 +        bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
110446 +    }
110448 +    return srcSize;
110449 +}
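/* Worked example for the normal case above (illustrative): the last byte
 * always carries the endMark written by BIT_closeCStream(). If
 * lastByte == 0x35 (0b00110101), BIT_highbit32 returns 5, so decoding starts
 * with bitsConsumed == 3: the two padding zeros plus the endMark bit itself
 * are skipped before the first real field is read. */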
110451 +MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getUpperBits(size_t bitContainer, U32 const start)
110452 +{
110453 +    return bitContainer >> start;
110454 +}
110456 +MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
110457 +{
110458 +    U32 const regMask = sizeof(bitContainer)*8 - 1;
110459 +    /* if start > regMask, bitstream is corrupted, and result is undefined */
110460 +    assert(nbBits < BIT_MASK_SIZE);
110461 +    return (bitContainer >> (start & regMask)) & BIT_mask[nbBits];
110462 +}
110464 +MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
110465 +{
110466 +    assert(nbBits < BIT_MASK_SIZE);
110467 +    return bitContainer & BIT_mask[nbBits];
110468 +}
110470 +/*! BIT_lookBits() :
110471 + *  Provides next n bits from local register.
110472 + *  local register is not modified.
110473 + *  On 32-bits, maxNbBits==24.
110474 + *  On 64-bits, maxNbBits==56.
110475 + * @return : value extracted */
110476 +MEM_STATIC  FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t*  bitD, U32 nbBits)
110477 +{
110478 +    /* arbitrate between double-shift and shift+mask */
110479 +#if 1
110480 +    /* if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8,
110481 +     * bitstream is likely corrupted, and result is undefined */
110482 +    return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits);
110483 +#else
110484 +    /* this code path is slower on my os-x laptop */
110485 +    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
110486 +    return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask);
110487 +#endif
110488 +}
110490 +/*! BIT_lookBitsFast() :
110491 + *  unsafe version; only works if nbBits >= 1 */
110492 +MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
110493 +{
110494 +    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
110495 +    assert(nbBits >= 1);
110496 +    return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask);
110497 +}
110499 +MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
110500 +{
110501 +    bitD->bitsConsumed += nbBits;
110502 +}
110504 +/*! BIT_readBits() :
110505 + *  Read (consume) next n bits from local register and update.
110506 + *  Pay attention not to read more bits than the local register contains.
110507 + * @return : extracted value. */
110508 +MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
110509 +{
110510 +    size_t const value = BIT_lookBits(bitD, nbBits);
110511 +    BIT_skipBits(bitD, nbBits);
110512 +    return value;
110513 +}
110515 +/*! BIT_readBitsFast() :
110516 + *  unsafe version; only works if nbBits >= 1 */
110517 +MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
110518 +{
110519 +    size_t const value = BIT_lookBitsFast(bitD, nbBits);
110520 +    assert(nbBits >= 1);
110521 +    BIT_skipBits(bitD, nbBits);
110522 +    return value;
110523 +}
110525 +/*! BIT_reloadDStreamFast() :
110526 + *  Similar to BIT_reloadDStream(), but with two differences:
110527 + *  1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold!
110528 + *  2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this
110529 + *     point you must use BIT_reloadDStream() to reload.
110530 + */
110531 +MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD)
110532 +{
110533 +    if (UNLIKELY(bitD->ptr < bitD->limitPtr))
110534 +        return BIT_DStream_overflow;
110535 +    assert(bitD->bitsConsumed <= sizeof(bitD->bitContainer)*8);
110536 +    bitD->ptr -= bitD->bitsConsumed >> 3;
110537 +    bitD->bitsConsumed &= 7;
110538 +    bitD->bitContainer = MEM_readLEST(bitD->ptr);
110539 +    return BIT_DStream_unfinished;
110540 +}
110542 +/*! BIT_reloadDStream() :
110543 + *  Refill `bitD` from the buffer previously set in BIT_initDStream().
110544 + *  This function is safe, it guarantees it will not read beyond src buffer.
110545 + * @return : status of `BIT_DStream_t` internal register.
110546 + *           when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
110547 +MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
110548 +{
110549 +    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* overflow detected, like end of stream */
110550 +        return BIT_DStream_overflow;
110552 +    if (bitD->ptr >= bitD->limitPtr) {
110553 +        return BIT_reloadDStreamFast(bitD);
110554 +    }
110555 +    if (bitD->ptr == bitD->start) {
110556 +        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
110557 +        return BIT_DStream_completed;
110558 +    }
110559 +    /* start < ptr < limitPtr */
110560 +    {   U32 nbBytes = bitD->bitsConsumed >> 3;
110561 +        BIT_DStream_status result = BIT_DStream_unfinished;
110562 +        if (bitD->ptr - nbBytes < bitD->start) {
110563 +            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
110564 +            result = BIT_DStream_endOfBuffer;
110565 +        }
110566 +        bitD->ptr -= nbBytes;
110567 +        bitD->bitsConsumed -= nbBytes*8;
110568 +        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */
110569 +        return result;
110570 +    }
110571 +}
110573 +/*! BIT_endOfDStream() :
110574 + * @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
110575 + */
110576 +MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
110577 +{
110578 +    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
110579 +}
110582 +#endif /* BITSTREAM_H_MODULE */
110583 diff --git a/lib/zstd/common/compiler.h b/lib/zstd/common/compiler.h
110584 new file mode 100644
110585 index 000000000000..9269b58a93e2
110586 --- /dev/null
110587 +++ b/lib/zstd/common/compiler.h
110588 @@ -0,0 +1,151 @@
110589 +/*
110590 + * Copyright (c) Yann Collet, Facebook, Inc.
110591 + * All rights reserved.
110593 + * This source code is licensed under both the BSD-style license (found in the
110594 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
110595 + * in the COPYING file in the root directory of this source tree).
110596 + * You may select, at your option, one of the above-listed licenses.
110597 + */
110599 +#ifndef ZSTD_COMPILER_H
110600 +#define ZSTD_COMPILER_H
110602 +/*-*******************************************************
110603 +*  Compiler specifics
110604 +*********************************************************/
110605 +/* force inlining */
110607 +#if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
110608 +#  define INLINE_KEYWORD inline
110609 +#else
110610 +#  define INLINE_KEYWORD
110611 +#endif
110613 +#define FORCE_INLINE_ATTR __attribute__((always_inline))
110616 +/*
110617 +  On MSVC qsort requires that functions passed into it use the __cdecl calling convention (CC).
110618 +  This explicitly marks such functions as __cdecl so that the code will still compile
110619 +  if a CC other than __cdecl has been made the default.
110620 +*/
110621 +#define WIN_CDECL
110623 +/*
110624 + * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
110625 + * parameters. They must be inlined for the compiler to eliminate the constant
110626 + * branches.
110627 + */
110628 +#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
110629 +/*
110630 + * HINT_INLINE is used to help the compiler generate better code. It is *not*
110631 + * used for "templates", so it can be tweaked based on the compiler's
110632 + * performance.
110634 + * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the
110635 + * always_inline attribute.
110637 + * clang up to 5.0.0 (trunk) benefits tremendously from the always_inline
110638 + * attribute.
110639 + */
110640 +#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5
110641 +#  define HINT_INLINE static INLINE_KEYWORD
110642 +#else
110643 +#  define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
110644 +#endif
110646 +/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
110647 +#define UNUSED_ATTR __attribute__((unused))
110649 +/* force no inlining */
110650 +#define FORCE_NOINLINE static __attribute__((__noinline__))
110653 +/* target attribute */
110654 +#ifndef __has_attribute
110655 +  #define __has_attribute(x) 0  /* Compatibility with non-clang compilers. */
110656 +#endif
110657 +#define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
110659 +/* Enable runtime BMI2 dispatch based on the CPU.
110660 + * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.
110661 + */
110662 +#ifndef DYNAMIC_BMI2
110663 +  #if ((defined(__clang__) && __has_attribute(__target__)) \
110664 +      || (defined(__GNUC__) \
110665 +          && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
110666 +      && (defined(__x86_64__) || defined(_M_X86)) \
110667 +      && !defined(__BMI2__)
110668 +  #  define DYNAMIC_BMI2 1
110669 +  #else
110670 +  #  define DYNAMIC_BMI2 0
110671 +  #endif
110672 +#endif
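/* A condensed sketch of the dispatch idiom DYNAMIC_BMI2 enables (the real
 * instances are FSE_readNCount_bmi2() and HUF_readStats_wksp() in
 * entropy_common.c below; "work" stands in for any hot inner function):
 *
 *     FORCE_INLINE_TEMPLATE size_t work_body(...);
 *     static size_t work_default(...) { return work_body(...); }
 *     #if DYNAMIC_BMI2
 *     TARGET_ATTRIBUTE("bmi2") static size_t work_bmi2(...) { return work_body(...); }
 *     #endif
 *     size_t work(..., int bmi2) {
 *     #if DYNAMIC_BMI2
 *         if (bmi2) return work_bmi2(...);
 *     #endif
 *         (void)bmi2;
 *         return work_default(...);
 *     }
 *
 * The bmi2 flag itself comes from ZSTD_cpuid_bmi2() (see cpu.h). */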
110674 +/* prefetch
110675 + * can be disabled, by declaring NO_PREFETCH build macro */
110676 +#if ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
110677 +#  define PREFETCH_L1(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
110678 +#  define PREFETCH_L2(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */)
110679 +#elif defined(__aarch64__)
110680 +#  define PREFETCH_L1(ptr)  __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr)))
110681 +#  define PREFETCH_L2(ptr)  __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr)))
110682 +#else
110683 +#  define PREFETCH_L1(ptr) (void)(ptr)  /* disabled */
110684 +#  define PREFETCH_L2(ptr) (void)(ptr)  /* disabled */
110685 +#endif  /* NO_PREFETCH */
110687 +#define CACHELINE_SIZE 64
110689 +#define PREFETCH_AREA(p, s)  {            \
110690 +    const char* const _ptr = (const char*)(p);  \
110691 +    size_t const _size = (size_t)(s);     \
110692 +    size_t _pos;                          \
110693 +    for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) {  \
110694 +        PREFETCH_L2(_ptr + _pos);         \
110695 +    }                                     \
110696 +}
110698 +/* vectorization
110699 + * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax */
110700 +#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__)
110701 +#  if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5)
110702 +#    define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
110703 +#  else
110704 +#    define DONT_VECTORIZE _Pragma("GCC optimize(\"no-tree-vectorize\")")
110705 +#  endif
110706 +#else
110707 +#  define DONT_VECTORIZE
110708 +#endif
110710 +/* Tell the compiler that a branch is likely or unlikely.
110711 + * Only use these macros if it causes the compiler to generate better code.
110712 + * If you can remove a LIKELY/UNLIKELY annotation without speed changes in gcc
110713 + * and clang, please do.
110714 + */
110715 +#define LIKELY(x) (__builtin_expect((x), 1))
110716 +#define UNLIKELY(x) (__builtin_expect((x), 0))
110718 +/* disable warnings */
110720 +/*Like DYNAMIC_BMI2 but for compile time determination of BMI2 support*/
110723 +/* compat. with non-clang compilers */
110724 +#ifndef __has_builtin
110725 +#  define __has_builtin(x) 0
110726 +#endif
110728 +/* compat. with non-clang compilers */
110729 +#ifndef __has_feature
110730 +#  define __has_feature(x) 0
110731 +#endif
110733 +/* detects whether we are being compiled under msan */
110736 +/* detects whether we are being compiled under asan */
110739 +#endif /* ZSTD_COMPILER_H */
110740 diff --git a/lib/zstd/common/cpu.h b/lib/zstd/common/cpu.h
110741 new file mode 100644
110742 index 000000000000..0202d94076a3
110743 --- /dev/null
110744 +++ b/lib/zstd/common/cpu.h
110745 @@ -0,0 +1,194 @@
110746 +/*
110747 + * Copyright (c) Facebook, Inc.
110748 + * All rights reserved.
110750 + * This source code is licensed under both the BSD-style license (found in the
110751 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
110752 + * in the COPYING file in the root directory of this source tree).
110753 + * You may select, at your option, one of the above-listed licenses.
110754 + */
110756 +#ifndef ZSTD_COMMON_CPU_H
110757 +#define ZSTD_COMMON_CPU_H
110759 +/*
110760 + * Implementation taken from folly/CpuId.h
110761 + * https://github.com/facebook/folly/blob/master/folly/CpuId.h
110762 + */
110764 +#include "mem.h"
110767 +typedef struct {
110768 +    U32 f1c;
110769 +    U32 f1d;
110770 +    U32 f7b;
110771 +    U32 f7c;
110772 +} ZSTD_cpuid_t;
110774 +MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) {
110775 +    U32 f1c = 0;
110776 +    U32 f1d = 0;
110777 +    U32 f7b = 0;
110778 +    U32 f7c = 0;
110779 +#if defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__)
110780 +    /* The following block works like the normal cpuid branch below, but gcc
110781 +     * reserves ebx for use of its pic register so we must specially
110782 +     * handle the save and restore to avoid clobbering the register
110783 +     */
110784 +    U32 n;
110785 +    __asm__(
110786 +        "pushl %%ebx\n\t"
110787 +        "cpuid\n\t"
110788 +        "popl %%ebx\n\t"
110789 +        : "=a"(n)
110790 +        : "a"(0)
110791 +        : "ecx", "edx");
110792 +    if (n >= 1) {
110793 +      U32 f1a;
110794 +      __asm__(
110795 +          "pushl %%ebx\n\t"
110796 +          "cpuid\n\t"
110797 +          "popl %%ebx\n\t"
110798 +          : "=a"(f1a), "=c"(f1c), "=d"(f1d)
110799 +          : "a"(1));
110800 +    }
110801 +    if (n >= 7) {
110802 +      __asm__(
110803 +          "pushl %%ebx\n\t"
110804 +          "cpuid\n\t"
110805 +          "movl %%ebx, %%eax\n\t"
110806 +          "popl %%ebx"
110807 +          : "=a"(f7b), "=c"(f7c)
110808 +          : "a"(7), "c"(0)
110809 +          : "edx");
110810 +    }
110811 +#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__)
110812 +    U32 n;
110813 +    __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx");
110814 +    if (n >= 1) {
110815 +      U32 f1a;
110816 +      __asm__("cpuid" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1) : "ebx");
110817 +    }
110818 +    if (n >= 7) {
110819 +      U32 f7a;
110820 +      __asm__("cpuid"
110821 +              : "=a"(f7a), "=b"(f7b), "=c"(f7c)
110822 +              : "a"(7), "c"(0)
110823 +              : "edx");
110824 +    }
110825 +#endif
110826 +    {
110827 +        ZSTD_cpuid_t cpuid;
110828 +        cpuid.f1c = f1c;
110829 +        cpuid.f1d = f1d;
110830 +        cpuid.f7b = f7b;
110831 +        cpuid.f7c = f7c;
110832 +        return cpuid;
110833 +    }
110834 +}
110836 +#define X(name, r, bit)                                                        \
110837 +  MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) {                 \
110838 +    return ((cpuid.r) & (1U << bit)) != 0;                                     \
110839 +  }
110841 +/* cpuid(1): Processor Info and Feature Bits. */
110842 +#define C(name, bit) X(name, f1c, bit)
110843 +  C(sse3, 0)
110844 +  C(pclmuldq, 1)
110845 +  C(dtes64, 2)
110846 +  C(monitor, 3)
110847 +  C(dscpl, 4)
110848 +  C(vmx, 5)
110849 +  C(smx, 6)
110850 +  C(eist, 7)
110851 +  C(tm2, 8)
110852 +  C(ssse3, 9)
110853 +  C(cnxtid, 10)
110854 +  C(fma, 12)
110855 +  C(cx16, 13)
110856 +  C(xtpr, 14)
110857 +  C(pdcm, 15)
110858 +  C(pcid, 17)
110859 +  C(dca, 18)
110860 +  C(sse41, 19)
110861 +  C(sse42, 20)
110862 +  C(x2apic, 21)
110863 +  C(movbe, 22)
110864 +  C(popcnt, 23)
110865 +  C(tscdeadline, 24)
110866 +  C(aes, 25)
110867 +  C(xsave, 26)
110868 +  C(osxsave, 27)
110869 +  C(avx, 28)
110870 +  C(f16c, 29)
110871 +  C(rdrand, 30)
110872 +#undef C
110873 +#define D(name, bit) X(name, f1d, bit)
110874 +  D(fpu, 0)
110875 +  D(vme, 1)
110876 +  D(de, 2)
110877 +  D(pse, 3)
110878 +  D(tsc, 4)
110879 +  D(msr, 5)
110880 +  D(pae, 6)
110881 +  D(mce, 7)
110882 +  D(cx8, 8)
110883 +  D(apic, 9)
110884 +  D(sep, 11)
110885 +  D(mtrr, 12)
110886 +  D(pge, 13)
110887 +  D(mca, 14)
110888 +  D(cmov, 15)
110889 +  D(pat, 16)
110890 +  D(pse36, 17)
110891 +  D(psn, 18)
110892 +  D(clfsh, 19)
110893 +  D(ds, 21)
110894 +  D(acpi, 22)
110895 +  D(mmx, 23)
110896 +  D(fxsr, 24)
110897 +  D(sse, 25)
110898 +  D(sse2, 26)
110899 +  D(ss, 27)
110900 +  D(htt, 28)
110901 +  D(tm, 29)
110902 +  D(pbe, 31)
110903 +#undef D
110905 +/* cpuid(7): Extended Features. */
110906 +#define B(name, bit) X(name, f7b, bit)
110907 +  B(bmi1, 3)
110908 +  B(hle, 4)
110909 +  B(avx2, 5)
110910 +  B(smep, 7)
110911 +  B(bmi2, 8)
110912 +  B(erms, 9)
110913 +  B(invpcid, 10)
110914 +  B(rtm, 11)
110915 +  B(mpx, 14)
110916 +  B(avx512f, 16)
110917 +  B(avx512dq, 17)
110918 +  B(rdseed, 18)
110919 +  B(adx, 19)
110920 +  B(smap, 20)
110921 +  B(avx512ifma, 21)
110922 +  B(pcommit, 22)
110923 +  B(clflushopt, 23)
110924 +  B(clwb, 24)
110925 +  B(avx512pf, 26)
110926 +  B(avx512er, 27)
110927 +  B(avx512cd, 28)
110928 +  B(sha, 29)
110929 +  B(avx512bw, 30)
110930 +  B(avx512vl, 31)
110931 +#undef B
110932 +#define C(name, bit) X(name, f7c, bit)
110933 +  C(prefetchwt1, 0)
110934 +  C(avx512vbmi, 1)
110935 +#undef C
110937 +#undef X
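/* Usage sketch (illustrative; example_hasBMI2 is not part of the patch):
 * each X()-generated predicate tests one feature bit of a ZSTD_cpuid_t
 * snapshot, e.g. the runtime BMI2 probe behind DYNAMIC_BMI2 dispatch: */
MEM_STATIC int example_hasBMI2(void)
{
    ZSTD_cpuid_t const cid = ZSTD_cpuid();
    return ZSTD_cpuid_bmi2(cid);   /* cpuid(7).EBX bit 8 */
}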
110939 +#endif /* ZSTD_COMMON_CPU_H */
110940 diff --git a/lib/zstd/common/debug.c b/lib/zstd/common/debug.c
110941 new file mode 100644
110942 index 000000000000..bb863c9ea616
110943 --- /dev/null
110944 +++ b/lib/zstd/common/debug.c
110945 @@ -0,0 +1,24 @@
110946 +/* ******************************************************************
110947 + * debug
110948 + * Part of FSE library
110949 + * Copyright (c) Yann Collet, Facebook, Inc.
110951 + * You can contact the author at :
110952 + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
110954 + * This source code is licensed under both the BSD-style license (found in the
110955 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
110956 + * in the COPYING file in the root directory of this source tree).
110957 + * You may select, at your option, one of the above-listed licenses.
110958 +****************************************************************** */
110961 +/*
110962 + * This module only hosts one global variable
110963 + * which can be used to dynamically influence the verbosity of traces,
110964 + * such as DEBUGLOG and RAWLOG
110965 + */
110967 +#include "debug.h"
110969 +int g_debuglevel = DEBUGLEVEL;
110970 diff --git a/lib/zstd/common/debug.h b/lib/zstd/common/debug.h
110971 new file mode 100644
110972 index 000000000000..6dd88d1fbd02
110973 --- /dev/null
110974 +++ b/lib/zstd/common/debug.h
110975 @@ -0,0 +1,101 @@
110976 +/* ******************************************************************
110977 + * debug
110978 + * Part of FSE library
110979 + * Copyright (c) Yann Collet, Facebook, Inc.
110981 + * You can contact the author at :
110982 + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
110984 + * This source code is licensed under both the BSD-style license (found in the
110985 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
110986 + * in the COPYING file in the root directory of this source tree).
110987 + * You may select, at your option, one of the above-listed licenses.
110988 +****************************************************************** */
110991 +/*
110992 + * The purpose of this header is to enable debug functions.
110993 + * They regroup assert(), DEBUGLOG() and RAWLOG() for run-time,
110994 + * and DEBUG_STATIC_ASSERT() for compile-time.
110996 + * By default, DEBUGLEVEL==0, which means run-time debug is disabled.
110998 + * Level 1 enables assert() only.
110999 + * Starting level 2, traces can be generated and pushed to stderr.
111000 + * The higher the level, the more verbose the traces.
111002 + * It's possible to dynamically adjust the level using the variable g_debuglevel,
111003 + * which is only declared if DEBUGLEVEL>=2,
111004 + * and is a global variable, not multi-thread protected (use with care)
111005 + */
111007 +#ifndef DEBUG_H_12987983217
111008 +#define DEBUG_H_12987983217
111012 +/* static assert is triggered at compile time, leaving no runtime artefact.
111013 + * static assert only works with compile-time constants.
111014 + * Also, this variant can only be used inside a function. */
111015 +#define DEBUG_STATIC_ASSERT(c) (void)sizeof(char[(c) ? 1 : -1])
111018 +/* DEBUGLEVEL is expected to be defined externally,
111019 + * typically through compiler command line.
111020 + * Value must be a number. */
111021 +#ifndef DEBUGLEVEL
111022 +#  define DEBUGLEVEL 0
111023 +#endif
111026 +/* recommended values for DEBUGLEVEL :
111027 + * 0 : release mode, no debug, all run-time checks disabled
111028 + * 1 : enables assert() only, no display
111029 + * 2 : reserved, for currently active debug path
111030 + * 3 : events once per object lifetime (CCtx, CDict, etc.)
111031 + * 4 : events once per frame
111032 + * 5 : events once per block
111033 + * 6 : events once per sequence (verbose)
111034 + * 7+: events at every position (*very* verbose)
111036 + * It's generally inconvenient to output traces > 5;
111037 + * in that case, it's possible to selectively trigger high verbosity levels
111038 + * by modifying g_debuglevel.
111039 + */
111041 +#if (DEBUGLEVEL>=1)
111042 +#  define ZSTD_DEPS_NEED_ASSERT
111043 +#  include "zstd_deps.h"
111044 +#else
111045 +#  ifndef assert   /* assert may be already defined, due to prior #include <assert.h> */
111046 +#    define assert(condition) ((void)0)   /* disable assert (default) */
111047 +#  endif
111048 +#endif
111050 +#if (DEBUGLEVEL>=2)
111051 +#  define ZSTD_DEPS_NEED_IO
111052 +#  include "zstd_deps.h"
111053 +extern int g_debuglevel; /* the variable is only declared,
111054 +                            it actually lives in debug.c,
111055 +                            and is shared by the whole process.
111056 +                            It's not thread-safe.
111057 +                            It's useful when enabling very verbose levels
111058 +                            on selective conditions (such as position in src) */
111060 +#  define RAWLOG(l, ...) {                                       \
111061 +                if (l<=g_debuglevel) {                           \
111062 +                    ZSTD_DEBUG_PRINT(__VA_ARGS__);               \
111063 +            }   }
111064 +#  define DEBUGLOG(l, ...) {                                     \
111065 +                if (l<=g_debuglevel) {                           \
111066 +                    ZSTD_DEBUG_PRINT(__FILE__ ": " __VA_ARGS__); \
111067 +                    ZSTD_DEBUG_PRINT(" \n");                     \
111068 +            }   }
111069 +#else
111070 +#  define RAWLOG(l, ...)      {}    /* disabled */
111071 +#  define DEBUGLOG(l, ...)    {}    /* disabled */
111072 +#endif
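/* Usage sketch (illustrative; frameNb and pos are placeholders): build with
 * DEBUGLEVEL defined to 2 or more on the compiler command line, then emit
 * traces whose first argument is at or below the active level:
 *
 *     DEBUGLOG(4, "decoding frame %u", frameNb);   -- prefixed with file name
 *     RAWLOG(5, "pos=%zu ", pos);                  -- raw, no prefix or newline
 *
 * g_debuglevel can also be raised at runtime to open up deeper levels. */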
111076 +#endif /* DEBUG_H_12987983217 */
111077 diff --git a/lib/zstd/common/entropy_common.c b/lib/zstd/common/entropy_common.c
111078 new file mode 100644
111079 index 000000000000..53b47a2b52ff
111080 --- /dev/null
111081 +++ b/lib/zstd/common/entropy_common.c
111082 @@ -0,0 +1,357 @@
111083 +/* ******************************************************************
111084 + * Common functions of New Generation Entropy library
111085 + * Copyright (c) Yann Collet, Facebook, Inc.
111087 + *  You can contact the author at :
111088 + *  - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
111089 + *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
111091 + * This source code is licensed under both the BSD-style license (found in the
111092 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
111093 + * in the COPYING file in the root directory of this source tree).
111094 + * You may select, at your option, one of the above-listed licenses.
111095 +****************************************************************** */
111097 +/* *************************************
111098 +*  Dependencies
111099 +***************************************/
111100 +#include "mem.h"
111101 +#include "error_private.h"       /* ERR_*, ERROR */
111102 +#define FSE_STATIC_LINKING_ONLY  /* FSE_MIN_TABLELOG */
111103 +#include "fse.h"
111104 +#define HUF_STATIC_LINKING_ONLY  /* HUF_TABLELOG_ABSOLUTEMAX */
111105 +#include "huf.h"
111108 +/*===   Version   ===*/
111109 +unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
111112 +/*===   Error Management   ===*/
111113 +unsigned FSE_isError(size_t code) { return ERR_isError(code); }
111114 +const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }
111116 +unsigned HUF_isError(size_t code) { return ERR_isError(code); }
111117 +const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }
111120 +/*-**************************************************************
111121 +*  FSE NCount encoding-decoding
111122 +****************************************************************/
111123 +static U32 FSE_ctz(U32 val)
111124 +{
111125 +    assert(val != 0);
111126 +    {
111127 +#   if (__GNUC__ >= 3)   /* GCC Intrinsic */
111128 +        return __builtin_ctz(val);
111129 +#   else   /* Software version */
111130 +        U32 count = 0;
111131 +        while ((val & 1) == 0) {
111132 +            val >>= 1;
111133 +            ++count;
111134 +        }
111135 +        return count;
111136 +#   endif
111137 +    }
111138 +}
111140 +FORCE_INLINE_TEMPLATE
111141 +size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
111142 +                           const void* headerBuffer, size_t hbSize)
111143 +{
111144 +    const BYTE* const istart = (const BYTE*) headerBuffer;
111145 +    const BYTE* const iend = istart + hbSize;
111146 +    const BYTE* ip = istart;
111147 +    int nbBits;
111148 +    int remaining;
111149 +    int threshold;
111150 +    U32 bitStream;
111151 +    int bitCount;
111152 +    unsigned charnum = 0;
111153 +    unsigned const maxSV1 = *maxSVPtr + 1;
111154 +    int previous0 = 0;
111156 +    if (hbSize < 8) {
111157 +        /* This function only works when hbSize >= 8 */
111158 +        char buffer[8] = {0};
111159 +        ZSTD_memcpy(buffer, headerBuffer, hbSize);
111160 +        {   size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr,
111161 +                                                    buffer, sizeof(buffer));
111162 +            if (FSE_isError(countSize)) return countSize;
111163 +            if (countSize > hbSize) return ERROR(corruption_detected);
111164 +            return countSize;
111165 +    }   }
111166 +    assert(hbSize >= 8);
111168 +    /* init */
111169 +    ZSTD_memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0]));   /* all symbols not present in NCount have a frequency of 0 */
111170 +    bitStream = MEM_readLE32(ip);
111171 +    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
111172 +    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
111173 +    bitStream >>= 4;
111174 +    bitCount = 4;
111175 +    *tableLogPtr = nbBits;
111176 +    remaining = (1<<nbBits)+1;
111177 +    threshold = 1<<nbBits;
111178 +    nbBits++;
111180 +    for (;;) {
111181 +        if (previous0) {
111182 +            /* Count the number of repeats. Each time the
111183 +             * 2-bit repeat code is 0b11 there is another
111184 +             * repeat.
111185 +             * Avoid UB by setting the high bit to 1.
111186 +             */
111187 +            int repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
111188 +            while (repeats >= 12) {
111189 +                charnum += 3 * 12;
111190 +                if (LIKELY(ip <= iend-7)) {
111191 +                    ip += 3;
111192 +                } else {
111193 +                    bitCount -= (int)(8 * (iend - 7 - ip));
111194 +                    bitCount &= 31;
111195 +                    ip = iend - 4;
111196 +                }
111197 +                bitStream = MEM_readLE32(ip) >> bitCount;
111198 +                repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
111199 +            }
111200 +            charnum += 3 * repeats;
111201 +            bitStream >>= 2 * repeats;
111202 +            bitCount += 2 * repeats;
111204 +            /* Add the final repeat which isn't 0b11. */
111205 +            assert((bitStream & 3) < 3);
111206 +            charnum += bitStream & 3;
111207 +            bitCount += 2;
111209 +            /* This is an error, but break and return an error
111210 +             * at the end, because returning out of a loop makes
111211 +             * it harder for the compiler to optimize.
111212 +             */
111213 +            if (charnum >= maxSV1) break;
111215 +            /* We don't need to set the normalized count to 0
111216 +             * because we already memset the whole buffer to 0.
111217 +             */
111219 +            if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
111220 +                assert((bitCount >> 3) <= 3); /* For first condition to work */
111221 +                ip += bitCount>>3;
111222 +                bitCount &= 7;
111223 +            } else {
111224 +                bitCount -= (int)(8 * (iend - 4 - ip));
111225 +                bitCount &= 31;
111226 +                ip = iend - 4;
111227 +            }
111228 +            bitStream = MEM_readLE32(ip) >> bitCount;
111229 +        }
111230 +        {
111231 +            int const max = (2*threshold-1) - remaining;
111232 +            int count;
111234 +            if ((bitStream & (threshold-1)) < (U32)max) {
111235 +                count = bitStream & (threshold-1);
111236 +                bitCount += nbBits-1;
111237 +            } else {
111238 +                count = bitStream & (2*threshold-1);
111239 +                if (count >= threshold) count -= max;
111240 +                bitCount += nbBits;
111241 +            }
111243 +            count--;   /* extra accuracy */
111244 +            /* When it matters (small blocks), this is a
111245 +             * predictable branch, because we don't use -1.
111246 +             */
111247 +            if (count >= 0) {
111248 +                remaining -= count;
111249 +            } else {
111250 +                assert(count == -1);
111251 +                remaining += count;
111252 +            }
111253 +            normalizedCounter[charnum++] = (short)count;
111254 +            previous0 = !count;
111256 +            assert(threshold > 1);
111257 +            if (remaining < threshold) {
111258 +                /* This branch can be folded into the
111259 +                 * threshold update condition because we
111260 +                 * know that threshold > 1.
111261 +                 */
111262 +                if (remaining <= 1) break;
111263 +                nbBits = BIT_highbit32(remaining) + 1;
111264 +                threshold = 1 << (nbBits - 1);
111265 +            }
111266 +            if (charnum >= maxSV1) break;
111268 +            if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
111269 +                ip += bitCount>>3;
111270 +                bitCount &= 7;
111271 +            } else {
111272 +                bitCount -= (int)(8 * (iend - 4 - ip));
111273 +                bitCount &= 31;
111274 +                ip = iend - 4;
111275 +            }
111276 +            bitStream = MEM_readLE32(ip) >> bitCount;
111277 +    }   }
111278 +    if (remaining != 1) return ERROR(corruption_detected);
111279 +    /* Only possible when there are too many zeros. */
111280 +    if (charnum > maxSV1) return ERROR(maxSymbolValue_tooSmall);
111281 +    if (bitCount > 32) return ERROR(corruption_detected);
111282 +    *maxSVPtr = charnum-1;
111284 +    ip += (bitCount+7)>>3;
111285 +    return ip-istart;
111286 +}
111288 +/* Avoids the FORCE_INLINE of the _body() function. */
111289 +static size_t FSE_readNCount_body_default(
111290 +        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
111291 +        const void* headerBuffer, size_t hbSize)
111292 +{
111293 +    return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
111294 +}
111296 +#if DYNAMIC_BMI2
111297 +TARGET_ATTRIBUTE("bmi2") static size_t FSE_readNCount_body_bmi2(
111298 +        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
111299 +        const void* headerBuffer, size_t hbSize)
111300 +{
111301 +    return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
111302 +}
111303 +#endif
111305 +size_t FSE_readNCount_bmi2(
111306 +        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
111307 +        const void* headerBuffer, size_t hbSize, int bmi2)
111308 +{
111309 +#if DYNAMIC_BMI2
111310 +    if (bmi2) {
111311 +        return FSE_readNCount_body_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
111312 +    }
111313 +#endif
111314 +    (void)bmi2;
111315 +    return FSE_readNCount_body_default(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
111316 +}
111318 +size_t FSE_readNCount(
111319 +        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
111320 +        const void* headerBuffer, size_t hbSize)
111321 +{
111322 +    return FSE_readNCount_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize, /* bmi2 */ 0);
111323 +}
111326 +/*! HUF_readStats() :
111327 +    Read compact Huffman tree, saved by HUF_writeCTable().
111328 +    `huffWeight` is destination buffer.
111329 +    `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
111330 +    @return : size read from `src`, or an error code.
111331 +    Note : Needed by HUF_readCTable() and HUF_readDTableX?().
111332 +*/
111333 +size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
111334 +                     U32* nbSymbolsPtr, U32* tableLogPtr,
111335 +                     const void* src, size_t srcSize)
111336 +{
111337 +    U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
111338 +    return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* bmi2 */ 0);
111339 +}
111341 +FORCE_INLINE_TEMPLATE size_t
111342 +HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats,
111343 +                   U32* nbSymbolsPtr, U32* tableLogPtr,
111344 +                   const void* src, size_t srcSize,
111345 +                   void* workSpace, size_t wkspSize,
111346 +                   int bmi2)
111347 +{
111348 +    U32 weightTotal;
111349 +    const BYTE* ip = (const BYTE*) src;
111350 +    size_t iSize;
111351 +    size_t oSize;
111353 +    if (!srcSize) return ERROR(srcSize_wrong);
111354 +    iSize = ip[0];
111355 +    /* ZSTD_memset(huffWeight, 0, hwSize);   *//* is not necessary, even though some analyzers complain ... */
111357 +    if (iSize >= 128) {  /* special header */
111358 +        oSize = iSize - 127;
111359 +        iSize = ((oSize+1)/2);
111360 +        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
111361 +        if (oSize >= hwSize) return ERROR(corruption_detected);
111362 +        ip += 1;
111363 +        {   U32 n;
111364 +            for (n=0; n<oSize; n+=2) {
111365 +                huffWeight[n]   = ip[n/2] >> 4;
111366 +                huffWeight[n+1] = ip[n/2] & 15;
111367 +    }   }   }
111368 +    else  {   /* header compressed with FSE (normal case) */
111369 +        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
111370 +        /* max (hwSize-1) values decoded, as last one is implied */
111371 +        oSize = FSE_decompress_wksp_bmi2(huffWeight, hwSize-1, ip+1, iSize, 6, workSpace, wkspSize, bmi2);
111372 +        if (FSE_isError(oSize)) return oSize;
111373 +    }
111375 +    /* collect weight stats */
111376 +    ZSTD_memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
111377 +    weightTotal = 0;
111378 +    {   U32 n; for (n=0; n<oSize; n++) {
111379 +            if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected);
111380 +            rankStats[huffWeight[n]]++;
111381 +            weightTotal += (1 << huffWeight[n]) >> 1;
111382 +    }   }
111383 +    if (weightTotal == 0) return ERROR(corruption_detected);
111385 +    /* get last non-null symbol weight (implied, total must be 2^n) */
111386 +    {   U32 const tableLog = BIT_highbit32(weightTotal) + 1;
111387 +        if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
111388 +        *tableLogPtr = tableLog;
111389 +        /* determine last weight */
111390 +        {   U32 const total = 1 << tableLog;
111391 +            U32 const rest = total - weightTotal;
111392 +            U32 const verif = 1 << BIT_highbit32(rest);
111393 +            U32 const lastWeight = BIT_highbit32(rest) + 1;
111394 +            if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
111395 +            huffWeight[oSize] = (BYTE)lastWeight;
111396 +            rankStats[lastWeight]++;
111397 +    }   }
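    /* Worked example for the implied last weight (illustrative): explicit
     * weights {1,1} each contribute (1<<1)>>1 == 1, so weightTotal == 2 and
     * tableLog == BIT_highbit32(2)+1 == 2; total == 4, rest == 2 is a clean
     * power of 2, and the implied last symbol gets lastWeight == 2. */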
111399 +    /* check tree construction validity */
111400 +    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */
111402 +    /* results */
111403 +    *nbSymbolsPtr = (U32)(oSize+1);
111404 +    return iSize+1;
111405 +}
111407 +/* Avoids the FORCE_INLINE of the _body() function. */
111408 +static size_t HUF_readStats_body_default(BYTE* huffWeight, size_t hwSize, U32* rankStats,
111409 +                     U32* nbSymbolsPtr, U32* tableLogPtr,
111410 +                     const void* src, size_t srcSize,
111411 +                     void* workSpace, size_t wkspSize)
111412 +{
111413 +    return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 0);
111414 +}
111416 +#if DYNAMIC_BMI2
111417 +static TARGET_ATTRIBUTE("bmi2") size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats,
111418 +                     U32* nbSymbolsPtr, U32* tableLogPtr,
111419 +                     const void* src, size_t srcSize,
111420 +                     void* workSpace, size_t wkspSize)
111421 +{
111422 +    return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 1);
111423 +}
111424 +#endif
111426 +size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats,
111427 +                     U32* nbSymbolsPtr, U32* tableLogPtr,
111428 +                     const void* src, size_t srcSize,
111429 +                     void* workSpace, size_t wkspSize,
111430 +                     int bmi2)
111431 +{
111432 +#if DYNAMIC_BMI2
111433 +    if (bmi2) {
111434 +        return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
111435 +    }
111436 +#endif
111437 +    (void)bmi2;
111438 +    return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
111439 +}
111440 diff --git a/lib/zstd/common/error_private.c b/lib/zstd/common/error_private.c
111441 new file mode 100644
111442 index 000000000000..6d1135f8c373
111443 --- /dev/null
111444 +++ b/lib/zstd/common/error_private.c
111445 @@ -0,0 +1,56 @@
111446 +/*
111447 + * Copyright (c) Yann Collet, Facebook, Inc.
111448 + * All rights reserved.
111450 + * This source code is licensed under both the BSD-style license (found in the
111451 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
111452 + * in the COPYING file in the root directory of this source tree).
111453 + * You may select, at your option, one of the above-listed licenses.
111454 + */
111456 +/* The purpose of this file is to have a single list of error strings embedded in binary */
111458 +#include "error_private.h"
111460 +const char* ERR_getErrorString(ERR_enum code)
111461 +{
111462 +#ifdef ZSTD_STRIP_ERROR_STRINGS
111463 +    (void)code;
111464 +    return "Error strings stripped";
111465 +#else
111466 +    static const char* const notErrorCode = "Unspecified error code";
111467 +    switch( code )
111468 +    {
111469 +    case PREFIX(no_error): return "No error detected";
111470 +    case PREFIX(GENERIC):  return "Error (generic)";
111471 +    case PREFIX(prefix_unknown): return "Unknown frame descriptor";
111472 +    case PREFIX(version_unsupported): return "Version not supported";
111473 +    case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter";
111474 +    case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding";
111475 +    case PREFIX(corruption_detected): return "Corrupted block detected";
111476 +    case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
111477 +    case PREFIX(parameter_unsupported): return "Unsupported parameter";
111478 +    case PREFIX(parameter_outOfBound): return "Parameter is out of bound";
111479 +    case PREFIX(init_missing): return "Context should be init first";
111480 +    case PREFIX(memory_allocation): return "Allocation error : not enough memory";
111481 +    case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough";
111482 +    case PREFIX(stage_wrong): return "Operation not authorized at current processing stage";
111483 +    case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
111484 +    case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large";
111485 +    case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small";
111486 +    case PREFIX(dictionary_corrupted): return "Dictionary is corrupted";
111487 +    case PREFIX(dictionary_wrong): return "Dictionary mismatch";
111488 +    case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples";
111489 +    case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
111490 +    case PREFIX(srcSize_wrong): return "Src size is incorrect";
111491 +    case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer";
111492 +        /* following error codes are not stable and may be removed or changed in a future version */
111493 +    case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
111494 +    case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
111495 +    case PREFIX(dstBuffer_wrong): return "Destination buffer is wrong";
111496 +    case PREFIX(srcBuffer_wrong): return "Source buffer is wrong";
111497 +    case PREFIX(maxCode):
111498 +    default: return notErrorCode;
111499 +    }
111500 +#endif
111501 +}
111502 diff --git a/lib/zstd/common/error_private.h b/lib/zstd/common/error_private.h
111503 new file mode 100644
111504 index 000000000000..d14e686adf95
111505 --- /dev/null
111506 +++ b/lib/zstd/common/error_private.h
111507 @@ -0,0 +1,66 @@
111508 +/*
111509 + * Copyright (c) Yann Collet, Facebook, Inc.
111510 + * All rights reserved.
111512 + * This source code is licensed under both the BSD-style license (found in the
111513 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
111514 + * in the COPYING file in the root directory of this source tree).
111515 + * You may select, at your option, one of the above-listed licenses.
111516 + */
111518 +/* Note : this module is expected to remain private, do not expose it */
111520 +#ifndef ERROR_H_MODULE
111521 +#define ERROR_H_MODULE
111525 +/* ****************************************
111526 +*  Dependencies
111527 +******************************************/
111528 +#include "zstd_deps.h"    /* size_t */
111529 +#include <linux/zstd_errors.h>  /* enum list */
111532 +/* ****************************************
111533 +*  Compiler-specific
111534 +******************************************/
111535 +#define ERR_STATIC static __attribute__((unused))
111538 +/*-****************************************
111539 +*  Customization (error_public.h)
111540 +******************************************/
111541 +typedef ZSTD_ErrorCode ERR_enum;
111542 +#define PREFIX(name) ZSTD_error_##name
111545 +/*-****************************************
111546 +*  Error codes handling
111547 +******************************************/
111548 +#undef ERROR   /* already defined on Visual Studio */
111549 +#define ERROR(name) ZSTD_ERROR(name)
111550 +#define ZSTD_ERROR(name) ((size_t)-PREFIX(name))
111552 +ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
111554 +ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); }
111556 +/* check and forward error code */
111557 +#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
111558 +#define CHECK_F(f)   { CHECK_V_F(_var_err__, f); }
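+
+/* Illustrative sketch (not part of the original header) : a caller built on the
+ * forwarding macros above could look like :
+ *
+ *     static size_t doStage(void* dst, size_t dstCapacity)
+ *     {
+ *         CHECK_V_F(written, someStage(dst, dstCapacity));  // early-returns on error
+ *         return written;
+ *     }
+ *
+ * where someStage() is a hypothetical helper returning either a byte count or
+ * an error code encoded as a size_t. */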
111561 +/*-****************************************
111562 +*  Error Strings
111563 +******************************************/
111565 +const char* ERR_getErrorString(ERR_enum code);   /* error_private.c */
111567 +ERR_STATIC const char* ERR_getErrorName(size_t code)
111569 +    return ERR_getErrorString(ERR_getErrorCode(code));
111573 +#endif /* ERROR_H_MODULE */
111574 diff --git a/lib/zstd/common/fse.h b/lib/zstd/common/fse.h
111575 new file mode 100644
111576 index 000000000000..477e642ffb41
111577 --- /dev/null
111578 +++ b/lib/zstd/common/fse.h
111579 @@ -0,0 +1,708 @@
111580 +/* ******************************************************************
111581 + * FSE : Finite State Entropy codec
111582 + * Public Prototypes declaration
111583 + * Copyright (c) Yann Collet, Facebook, Inc.
111585 + * You can contact the author at :
111586 + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
111588 + * This source code is licensed under both the BSD-style license (found in the
111589 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
111590 + * in the COPYING file in the root directory of this source tree).
111591 + * You may select, at your option, one of the above-listed licenses.
111592 +****************************************************************** */
111595 +#ifndef FSE_H
111596 +#define FSE_H
111599 +/*-*****************************************
111600 +*  Dependencies
111601 +******************************************/
111602 +#include "zstd_deps.h"    /* size_t, ptrdiff_t */
111605 +/*-*****************************************
111606 +*  FSE_PUBLIC_API : control library symbols visibility
111607 +******************************************/
111608 +#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
111609 +#  define FSE_PUBLIC_API __attribute__ ((visibility ("default")))
111610 +#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */
111611 +#  define FSE_PUBLIC_API __declspec(dllexport)
111612 +#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
111613 +#  define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required, but allows generating better code, saving a function pointer load from the IAT and an indirect jump. */
111614 +#else
111615 +#  define FSE_PUBLIC_API
111616 +#endif
111618 +/*------   Version   ------*/
111619 +#define FSE_VERSION_MAJOR    0
111620 +#define FSE_VERSION_MINOR    9
111621 +#define FSE_VERSION_RELEASE  0
111623 +#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
111624 +#define FSE_QUOTE(str) #str
111625 +#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
111626 +#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)
111628 +#define FSE_VERSION_NUMBER  (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE)
111629 +FSE_PUBLIC_API unsigned FSE_versionNumber(void);   /**< library version number; to be used when checking dll version */
111632 +/*-****************************************
111633 +*  FSE simple functions
111634 +******************************************/
111635 +/*! FSE_compress() :
111636 +    Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'.
111637 +    'dst' buffer must be already allocated. Compression runs faster if dstCapacity >= FSE_compressBound(srcSize).
111638 +    @return : size of compressed data (<= dstCapacity).
111639 +    Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
111640 +                     if return == 1, srcData is a single byte value repeated srcSize times. Use RLE compression instead.
111641 +                     if FSE_isError(return), compression failed (more details using FSE_getErrorName())
111643 +FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity,
111644 +                             const void* src, size_t srcSize);
111646 +/*! FSE_decompress():
111647 +    Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
111648 +    into already allocated destination buffer 'dst', of size 'dstCapacity'.
111649 +    @return : size of regenerated data (<= dstCapacity),
111650 +              or an error code, which can be tested using FSE_isError() .
111652 +    ** Important ** : FSE_decompress() does not decompress non-compressible or RLE data !!!
111653 +    Why ? : making this distinction requires a header.
111654 +    Header management is intentionally delegated to the user layer, which can better manage special cases.
111656 +FSE_PUBLIC_API size_t FSE_decompress(void* dst,  size_t dstCapacity,
111657 +                               const void* cSrc, size_t cSrcSize);
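+
+/* Illustrative sketch of the simple API (buffer names and sizing are placeholders
+ * left to the caller) :
+ *     size_t const cSize = FSE_compress(cBuf, FSE_compressBound(srcSize), src, srcSize);
+ *     if (!FSE_isError(cSize) && (cSize > 1))
+ *         FSE_decompress(out, srcSize, cBuf, cSize);   // caller must remember srcSize
+ *     // cSize==0 => store src raw ; cSize==1 => use RLE instead (see notes above)
+ */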
111660 +/*-*****************************************
111661 +*  Tool functions
111662 +******************************************/
111663 +FSE_PUBLIC_API size_t FSE_compressBound(size_t size);       /* maximum compressed size */
111665 +/* Error Management */
111666 +FSE_PUBLIC_API unsigned    FSE_isError(size_t code);        /* tells if a return value is an error code */
111667 +FSE_PUBLIC_API const char* FSE_getErrorName(size_t code);   /* provides error code string (useful for debugging) */
111670 +/*-*****************************************
111671 +*  FSE advanced functions
111672 +******************************************/
111673 +/*! FSE_compress2() :
111674 +    Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog'
111675 +    Both parameters can be defined as '0' to mean : use default value
111676 +    @return : size of compressed data
111677 +    Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
111678 +                     if return == 1, srcData is a single byte value repeated srcSize times. Use RLE compression.
111679 +                     if FSE_isError(return), it's an error code.
111681 +FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
111684 +/*-*****************************************
111685 +*  FSE detailed API
111686 +******************************************/
111688 +FSE_compress() does the following:
111689 +1. count symbol occurrence from source[] into table count[] (see hist.h)
111690 +2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
111691 +3. save normalized counters to memory buffer using writeNCount()
111692 +4. build encoding table 'CTable' from normalized counters
111693 +5. encode the data stream using encoding table 'CTable'
111695 +FSE_decompress() does the following:
111696 +1. read normalized counters with readNCount()
111697 +2. build decoding table 'DTable' from normalized counters
111698 +3. decode the data stream using decoding table 'DTable'
111700 +The following API allows targeting specific sub-functions for advanced tasks.
111701 +For example, it's possible to compress several blocks using the same 'CTable',
111702 +or to save and provide the normalized distribution using an external method.
111705 +/* *** COMPRESSION *** */
111707 +/*! FSE_optimalTableLog():
111708 +    dynamically downsize 'tableLog' when conditions are met.
111709 +    It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
111710 +    @return : recommended tableLog (necessarily <= 'maxTableLog') */
111711 +FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
111713 +/*! FSE_normalizeCount():
111714 +    normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
111715 +    'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
111716 +    useLowProbCount is a boolean parameter which trades off compressed size for
111717 +    faster header decoding. When it is set to 1, the compressed data will be slightly
111718 +    smaller. And when it is set to 0, FSE_readNCount() and FSE_buildDTable() will be
111719 +    faster. If you are compressing a small amount of data (< 2 KB) then useLowProbCount=0
111720 +    is a good default, since header deserialization makes a big speed difference.
111721 +    Otherwise, useLowProbCount=1 is a good default, since the speed difference is small.
111722 +    @return : tableLog,
111723 +              or an errorCode, which can be tested using FSE_isError() */
111724 +FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog,
111725 +                    const unsigned* count, size_t srcSize, unsigned maxSymbolValue, unsigned useLowProbCount);
111727 +/*! FSE_NCountWriteBound():
111728 +    Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
111729 +    Typically useful for allocation purposes. */
111730 +FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);
111732 +/*! FSE_writeNCount():
111733 +    Compactly save 'normalizedCounter' into 'buffer'.
111734 +    @return : size of the compressed table,
111735 +              or an errorCode, which can be tested using FSE_isError(). */
111736 +FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize,
111737 +                                 const short* normalizedCounter,
111738 +                                 unsigned maxSymbolValue, unsigned tableLog);
111740 +/*! Constructor and Destructor of FSE_CTable.
111741 +    Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
111742 +typedef unsigned FSE_CTable;   /* don't allocate that. It's only meant to be more restrictive than void* */
111743 +FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog);
111744 +FSE_PUBLIC_API void        FSE_freeCTable (FSE_CTable* ct);
111746 +/*! FSE_buildCTable():
111747 +    Builds `ct`, which must be already allocated, using FSE_createCTable().
111748 +    @return : 0, or an errorCode, which can be tested using FSE_isError() */
111749 +FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
111751 +/*! FSE_compress_usingCTable():
111752 +    Compress `src` using `ct` into `dst` which must be already allocated.
111753 +    @return : size of compressed data (<= `dstCapacity`),
111754 +              or 0 if compressed data could not fit into `dst`,
111755 +              or an errorCode, which can be tested using FSE_isError() */
111756 +FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct);
111759 +Tutorial :
111760 +----------
111761 +The first step is to count all symbols. FSE_count() does this job very fast.
111762 +Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.
111763 +'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0]
111764 +maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value)
111765 +FSE_count() will return the number of occurrences of the most frequent symbol.
111766 +This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.
111767 +If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
111769 +The next step is to normalize the frequencies.
111770 +FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'.
111771 +It also guarantees a minimum of 1 to any Symbol with frequency >= 1.
111772 +You can use 'tableLog'==0 to mean "use default tableLog value".
111773 +If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),
111774 +which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").
111776 +The result of FSE_normalizeCount() will be saved into a table,
111777 +called 'normalizedCounter', which is a table of signed short.
111778 +'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.
111779 +The return value is tableLog if everything proceeded as expected.
111780 +It is 0 if there is a single symbol within the distribution.
111781 +If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).
111783 +'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().
111784 +'buffer' must be already allocated.
111785 +For guaranteed success, buffer size must be at least FSE_NCountWriteBound().
111786 +The result of the function is the number of bytes written into 'buffer'.
111787 +If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).
111789 +'normalizedCounter' can then be used to create the compression table 'CTable'.
111790 +The space required by 'CTable' must be already allocated, using FSE_createCTable().
111791 +You can then use FSE_buildCTable() to fill 'CTable'.
111792 +If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).
111794 +'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().
111795 +Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize'
111796 +The function returns the size of compressed data (without header), necessarily <= `dstCapacity`.
111797 +If it returns '0', compressed data could not fit into 'dst'.
111798 +If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
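+
+Putting it together, a compact sketch (symbol counting and buffer sizing are left
+to the caller; 'count', 'normalizedCounter', 'buffer' and 'ct' are placeholders) :
+    unsigned tableLog = FSE_optimalTableLog(0, srcSize, maxSymbolValue);
+    FSE_normalizeCount(normalizedCounter, tableLog, count, srcSize, maxSymbolValue, 1);
+    size_t const hSize = FSE_writeNCount(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog);
+    FSE_buildCTable(ct, normalizedCounter, maxSymbolValue, tableLog);
+    size_t const cSize = FSE_compress_usingCTable(dst, dstCapacity, src, srcSize, ct);
+Each return value above should be tested with FSE_isError() before proceeding.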
111802 +/* *** DECOMPRESSION *** */
111804 +/*! FSE_readNCount():
111805 +    Read compactly saved 'normalizedCounter' from 'rBuffer'.
111806 +    @return : size read from 'rBuffer',
111807 +              or an errorCode, which can be tested using FSE_isError().
111808 +              maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
111809 +FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter,
111810 +                           unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
111811 +                           const void* rBuffer, size_t rBuffSize);
111813 +/*! FSE_readNCount_bmi2():
111814 + * Same as FSE_readNCount() but pass bmi2=1 when your CPU supports BMI2 and 0 otherwise.
111815 + */
111816 +FSE_PUBLIC_API size_t FSE_readNCount_bmi2(short* normalizedCounter,
111817 +                           unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
111818 +                           const void* rBuffer, size_t rBuffSize, int bmi2);
111820 +/*! Constructor and Destructor of FSE_DTable.
111821 +    Note that its size depends on 'tableLog' */
111822 +typedef unsigned FSE_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
111823 +FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog);
111824 +FSE_PUBLIC_API void        FSE_freeDTable(FSE_DTable* dt);
111826 +/*! FSE_buildDTable():
111827 +    Builds 'dt', which must be already allocated, using FSE_createDTable().
111828 +    @return : 0, or an errorCode, which can be tested using FSE_isError() */
111829 +FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
111831 +/*! FSE_decompress_usingDTable():
111832 +    Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
111833 +    into `dst` which must be already allocated.
111834 +    @return : size of regenerated data (necessarily <= `dstCapacity`),
111835 +              or an errorCode, which can be tested using FSE_isError() */
111836 +FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);
111839 +Tutorial :
111840 +----------
111841 +(Note : these functions only decompress FSE-compressed blocks.
111842 + If block is uncompressed, use memcpy() instead.
111843 + If block is a single repeated byte, use memset() instead.)
111845 +The first step is to obtain the normalized frequencies of symbols.
111846 +This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
111847 +'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
111848 +In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
111849 +or size the table to handle worst case situations (typically 256).
111850 +FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
111851 +The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
111852 +Note that 'rBuffSize' must be at least 4 bytes, even if useful information is less than that.
111853 +If there is an error, the function will return an error code, which can be tested using FSE_isError().
111855 +The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
111856 +This is performed by the function FSE_buildDTable().
111857 +The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
111858 +If there is an error, the function will return an error code, which can be tested using FSE_isError().
111860 +`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().
111861 +`cSrcSize` must be strictly correct, otherwise decompression will fail.
111862 +FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
111863 +If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
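+
+Putting it together, a compact sketch ('dt' allocated via FSE_createDTable(), and
+'cSrc' assumed to point just past the serialized table read from 'rBuffer') :
+    size_t const hSize = FSE_readNCount(normalizedCounter, &maxSymbolValue, &tableLog, rBuffer, rBuffSize);
+    FSE_buildDTable(dt, normalizedCounter, maxSymbolValue, tableLog);
+    FSE_decompress_usingDTable(dst, dstCapacity, cSrc, cSrcSize, dt);
+As above, every return value should be tested with FSE_isError().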
111866 +#endif  /* FSE_H */
111868 +#if !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
111869 +#define FSE_H_FSE_STATIC_LINKING_ONLY
111871 +/* *** Dependency *** */
111872 +#include "bitstream.h"
111875 +/* *****************************************
111876 +*  Static allocation
111877 +*******************************************/
111878 +/* FSE buffer bounds */
111879 +#define FSE_NCOUNTBOUND 512
111880 +#define FSE_BLOCKBOUND(size) ((size) + ((size)>>7) + 4 /* fse states */ + sizeof(size_t) /* bitContainer */)
111881 +#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
111883 +/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using the macros below */
111884 +#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<((maxTableLog)-1)) + (((maxSymbolValue)+1)*2))
111885 +#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<(maxTableLog)))
111887 +/* or use the size to malloc() space directly. Pay attention to alignment restrictions though */
111888 +#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue)   (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable))
111889 +#define FSE_DTABLE_SIZE(maxTableLog)                   (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable))
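+
+/* e.g., an illustrative static allocation for tableLog <= 12 and byte symbols :
+ *     FSE_CTable ct[FSE_CTABLE_SIZE_U32(12, 255)];
+ *     FSE_DTable dt[FSE_DTABLE_SIZE_U32(12)];
+ */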
111892 +/* *****************************************
111893 + *  FSE advanced API
111894 + ***************************************** */
111896 +unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
111897 +/**< same as FSE_optimalTableLog(), which uses `minus==2` */
111899 +/* FSE_compress_wksp() :
111900 + * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
111901 + * FSE_COMPRESS_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable.
111902 + */
111903 +#define FSE_COMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue)   ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) )
111904 +size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
111906 +size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits);
111907 +/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */
111909 +size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
111910 +/**< build a fake FSE_CTable, designed to compress always the same symbolValue */
111912 +/* FSE_buildCTable_wksp() :
111913 + * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
111914 + * `wkspSize` must be >= `FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog)` of `unsigned`.
111915 + */
111916 +#define FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog) (maxSymbolValue + 2 + (1ull << (tableLog - 2)))
111917 +#define FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) (sizeof(unsigned) * FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog))
111918 +size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
111920 +#define FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) (sizeof(short) * (maxSymbolValue + 1) + (1ULL << maxTableLog) + 8)
111921 +#define FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ((FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) + sizeof(unsigned) - 1) / sizeof(unsigned))
111922 +FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
111923 +/**< Same as FSE_buildDTable(), using an externally allocated `workSpace` sized with `FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue)` */
111925 +size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
111926 +/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */
111928 +size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
111929 +/**< build a fake FSE_DTable, designed to always generate the same symbolValue */
111931 +#define FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) (FSE_DTABLE_SIZE_U32(maxTableLog) + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) + (FSE_MAX_SYMBOL_VALUE + 1) / 2 + 1)
111932 +#define FSE_DECOMPRESS_WKSP_SIZE(maxTableLog, maxSymbolValue) (FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(unsigned))
111933 +size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize);
111934 +/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue)` */
111936 +size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2);
111937 +/**< Same as FSE_decompress_wksp() but with dynamic BMI2 support. Pass 1 if your CPU supports BMI2 or 0 if it doesn't. */
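+
+/* Illustrative sketch of workspace-based decompression (sizes chosen here for a
+ * 12-bit table and byte symbols; all buffer names are placeholders) :
+ *     unsigned wksp[FSE_DECOMPRESS_WKSP_SIZE_U32(12, 255)];
+ *     size_t const r = FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, 12, wksp, sizeof(wksp));
+ */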
111939 +typedef enum {
111940 +   FSE_repeat_none,  /**< Cannot use the previous table */
111941 +   FSE_repeat_check, /**< Can use the previous table but it must be checked */
111942 +   FSE_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
111943 + } FSE_repeat;
111945 +/* *****************************************
111946 +*  FSE symbol compression API
111947 +*******************************************/
111949 +   This API consists of small unitary functions, which highly benefit from being inlined.
111950 +   Hence their bodies are included in the next section.
111952 +typedef struct {
111953 +    ptrdiff_t   value;
111954 +    const void* stateTable;
111955 +    const void* symbolTT;
111956 +    unsigned    stateLog;
111957 +} FSE_CState_t;
111959 +static void FSE_initCState(FSE_CState_t* CStatePtr, const FSE_CTable* ct);
111961 +static void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* CStatePtr, unsigned symbol);
111963 +static void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* CStatePtr);
111965 +/**<
111966 +These functions are inner components of FSE_compress_usingCTable().
111967 +They allow the creation of custom streams, mixing multiple tables and bit sources.
111969 +A key property to keep in mind is that encoding and decoding are done **in reverse direction**.
111970 +So the first symbol you will encode is the last you will decode, like a LIFO stack.
111972 +You will need a few variables to track your CStream. They are :
111974 +FSE_CTable    ct;         // Provided by FSE_buildCTable()
111975 +BIT_CStream_t bitStream;  // bitStream tracking structure
111976 +FSE_CState_t  state;      // State tracking structure (can have several)
111979 +The first thing to do is to init bitStream and state.
111980 +    size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
111981 +    FSE_initCState(&state, ct);
111983 +Note that BIT_initCStream() can produce an error code, so its result should be tested, using FSE_isError();
111984 +You can then encode your input data, byte after byte.
111985 +FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.
111986 +Remember decoding will be done in reverse direction.
111987 +    FSE_encodeSymbol(&bitStream, &state, symbol);
111989 +At any time, you can also add any bit sequence.
111990 +Note : maximum allowed nbBits is 25, for compatibility with 32-bit decoders
111991 +    BIT_addBits(&bitStream, bitField, nbBits);
111993 +The above methods don't commit data to memory, they just store it into a local register, for speed.
111994 +The local register size is 64 bits on 64-bit systems, 32 bits on 32-bit systems (size_t).
111995 +Writing data to memory is a manual operation, performed by the flushBits function.
111996 +    BIT_flushBits(&bitStream);
111998 +Your last FSE encoding operation shall be to flush your last state value(s).
111999 +    FSE_flushCState(&bitStream, &state);
112001 +Finally, you must close the bitStream.
112002 +The function returns the size of CStream in bytes.
112003 +If data couldn't fit into dstBuffer, it will return a 0 ( == not compressible)
112004 +If there is an error, it returns an errorCode (which can be tested using FSE_isError()).
112005 +    size_t size = BIT_closeCStream(&bitStream);
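+
+A compact sketch of the whole sequence, encoding backwards so that decoding
+regenerates symbols in forward order (all names are placeholders) :
+    BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
+    FSE_initCState(&state, ct);
+    for (i = srcSize; i > 0; i--) {
+        FSE_encodeSymbol(&bitStream, &state, src[i-1]);
+        BIT_flushBits(&bitStream);
+    }
+    FSE_flushCState(&bitStream, &state);
+    size_t const cSize = BIT_closeCStream(&bitStream);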
112009 +/* *****************************************
112010 +*  FSE symbol decompression API
112011 +*******************************************/
112012 +typedef struct {
112013 +    size_t      state;
112014 +    const void* table;   /* precise table may vary, depending on U16 */
112015 +} FSE_DState_t;
112018 +static void     FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
112020 +static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
112022 +static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
112024 +/**<
112025 +Let's now decompose FSE_decompress_usingDTable() into its unitary components.
112026 +You will decode FSE-encoded symbols from the bitStream,
112027 +and also any other bitFields you put in, **in reverse order**.
112029 +You will need a few variables to track your bitStream. They are :
112031 +BIT_DStream_t DStream;    // Stream context
112032 +FSE_DState_t  DState;     // State context. Multiple ones are possible
112033 +FSE_DTable*   DTablePtr;  // Decoding table, provided by FSE_buildDTable()
112035 +The first thing to do is to init the bitStream.
112036 +    errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);
112038 +You should then retrieve your initial state(s)
112039 +(in reverse flushing order if you have several ones) :
112040 +    errorCode = FSE_initDState(&DState, &DStream, DTablePtr);
112042 +You can then decode your data, symbol after symbol.
112043 +For information the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.
112044 +Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).
112045 +    unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);
112047 +You can retrieve any bitField you may have stored into the bitStream (in reverse order)
112048 +Note : maximum allowed nbBits is 25, for 32-bit compatibility
112049 +    size_t bitField = BIT_readBits(&DStream, nbBits);
112051 +All above operations only read from local register (which size depends on size_t).
112052 +Refueling the register from memory is manually performed by the reload method.
112053 +    endSignal = BIT_reloadDStream(&DStream);
112055 +The BIT_reloadDStream() result tells if there is still more data to read from the DStream.
112056 +BIT_DStream_unfinished : there is still some data left in the DStream.
112057 +BIT_DStream_endOfBuffer : DStream reached the end of its buffer. Its container may no longer be completely filled.
112058 +BIT_DStream_completed : DStream reached its exact end, corresponding in general to decompression being completed.
112059 +BIT_DStream_tooFar : DStream went too far. Decompression result is corrupted.
112061 +When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop,
112062 +to properly detect the exact end of stream.
112063 +After each decoded symbol, check if DStream is fully consumed using this simple test :
112064 +    BIT_reloadDStream(&DStream) >= BIT_DStream_completed
112066 +When it's done, verify decompression is fully completed, by checking both DStream and the relevant states.
112067 +Checking if DStream has reached its end is performed by :
112068 +    BIT_endOfDStream(&DStream);
112069 +Also check the states : there might be some symbols left there, if some high-probability ones (>50%) are possible.
112070 +    FSE_endOfDState(&DState);
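+
+A compact sketch of the decoding loop, for the common case where the regenerated
+size is known in advance (names are placeholders; reload frequency is simplified) :
+    BIT_initDStream(&DStream, srcBuffer, srcSize);
+    FSE_initDState(&DState, &DStream, DTablePtr);
+    for (i = 0; i < dstSize; i++) {
+        dst[i] = FSE_decodeSymbol(&DState, &DStream);
+        BIT_reloadDStream(&DStream);
+    }
+    assert(BIT_endOfDStream(&DStream) && FSE_endOfDState(&DState));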
112074 +/* *****************************************
112075 +*  FSE unsafe API
112076 +*******************************************/
112077 +static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
112078 +/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
112081 +/* *****************************************
112082 +*  Implementation of inlined functions
112083 +*******************************************/
112084 +typedef struct {
112085 +    int deltaFindState;
112086 +    U32 deltaNbBits;
112087 +} FSE_symbolCompressionTransform; /* total 8 bytes */
112089 +MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct)
112091 +    const void* ptr = ct;
112092 +    const U16* u16ptr = (const U16*) ptr;
112093 +    const U32 tableLog = MEM_read16(ptr);
112094 +    statePtr->value = (ptrdiff_t)1<<tableLog;
112095 +    statePtr->stateTable = u16ptr+2;
112096 +    statePtr->symbolTT = ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1);
112097 +    statePtr->stateLog = tableLog;
112101 +/*! FSE_initCState2() :
112102 +*   Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
112103 +*   uses the smallest state value possible, saving the cost of this symbol */
112104 +MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol)
112106 +    FSE_initCState(statePtr, ct);
112107 +    {   const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
112108 +        const U16* stateTable = (const U16*)(statePtr->stateTable);
112109 +        U32 nbBitsOut  = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16);
112110 +        statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
112111 +        statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
112112 +    }
112115 +MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, unsigned symbol)
112117 +    FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
112118 +    const U16* const stateTable = (const U16*)(statePtr->stateTable);
112119 +    U32 const nbBitsOut  = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
112120 +    BIT_addBits(bitC, statePtr->value, nbBitsOut);
112121 +    statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
112124 +MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr)
112126 +    BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
112127 +    BIT_flushBits(bitC);
112131 +/* FSE_getMaxNbBits() :
112132 + * Approximate maximum cost of a symbol, in bits.
112133 + * Fractional values get rounded up (i.e. a symbol with a normalized frequency of 3 gives the same result as a frequency of 2)
112134 + * note 1 : assume symbolValue is valid (<= maxSymbolValue)
112135 + * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
112136 +MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue)
112138 +    const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
112139 +    return (symbolTT[symbolValue].deltaNbBits + ((1<<16)-1)) >> 16;
112142 +/* FSE_bitCost() :
112143 + * Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits)
112144 + * note 1 : assume symbolValue is valid (<= maxSymbolValue)
112145 + * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
112146 +MEM_STATIC U32 FSE_bitCost(const void* symbolTTPtr, U32 tableLog, U32 symbolValue, U32 accuracyLog)
112148 +    const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
112149 +    U32 const minNbBits = symbolTT[symbolValue].deltaNbBits >> 16;
112150 +    U32 const threshold = (minNbBits+1) << 16;
112151 +    assert(tableLog < 16);
112152 +    assert(accuracyLog < 31-tableLog);  /* ensure enough room for renormalization double shift */
112153 +    {   U32 const tableSize = 1 << tableLog;
112154 +        U32 const deltaFromThreshold = threshold - (symbolTT[symbolValue].deltaNbBits + tableSize);
112155 +        U32 const normalizedDeltaFromThreshold = (deltaFromThreshold << accuracyLog) >> tableLog;   /* linear interpolation (very approximate) */
112156 +        U32 const bitMultiplier = 1 << accuracyLog;
112157 +        assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold);
112158 +        assert(normalizedDeltaFromThreshold <= bitMultiplier);
112159 +        return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold;
112160 +    }
112164 +/* ======    Decompression    ====== */
112166 +typedef struct {
112167 +    U16 tableLog;
112168 +    U16 fastMode;
112169 +} FSE_DTableHeader;   /* sizeof U32 */
112171 +typedef struct
112173 +    unsigned short newState;
112174 +    unsigned char  symbol;
112175 +    unsigned char  nbBits;
112176 +} FSE_decode_t;   /* size == U32 */
112178 +MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
112180 +    const void* ptr = dt;
112181 +    const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr;
112182 +    DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
112183 +    BIT_reloadDStream(bitD);
112184 +    DStatePtr->table = dt + 1;
112187 +MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr)
112189 +    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
112190 +    return DInfo.symbol;
112193 +MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
112195 +    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
112196 +    U32 const nbBits = DInfo.nbBits;
112197 +    size_t const lowBits = BIT_readBits(bitD, nbBits);
112198 +    DStatePtr->state = DInfo.newState + lowBits;
112201 +MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
112203 +    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
112204 +    U32 const nbBits = DInfo.nbBits;
112205 +    BYTE const symbol = DInfo.symbol;
112206 +    size_t const lowBits = BIT_readBits(bitD, nbBits);
112208 +    DStatePtr->state = DInfo.newState + lowBits;
112209 +    return symbol;
112212 +/*! FSE_decodeSymbolFast() :
112213 +    unsafe, only works if no symbol has a probability > 50% */
112214 +MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
112216 +    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
112217 +    U32 const nbBits = DInfo.nbBits;
112218 +    BYTE const symbol = DInfo.symbol;
112219 +    size_t const lowBits = BIT_readBitsFast(bitD, nbBits);
112221 +    DStatePtr->state = DInfo.newState + lowBits;
112222 +    return symbol;
112225 +MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
112227 +    return DStatePtr->state == 0;
112232 +#ifndef FSE_COMMONDEFS_ONLY
112234 +/* **************************************************************
112235 +*  Tuning parameters
112236 +****************************************************************/
112237 +/*!MEMORY_USAGE :
112238 +*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
112239 +*  Increasing memory usage improves compression ratio
112240 +*  Reduced memory usage can improve speed, due to cache effect
112241 +*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
112242 +#ifndef FSE_MAX_MEMORY_USAGE
112243 +#  define FSE_MAX_MEMORY_USAGE 14
112244 +#endif
112245 +#ifndef FSE_DEFAULT_MEMORY_USAGE
112246 +#  define FSE_DEFAULT_MEMORY_USAGE 13
112247 +#endif
112248 +#if (FSE_DEFAULT_MEMORY_USAGE > FSE_MAX_MEMORY_USAGE)
112249 +#  error "FSE_DEFAULT_MEMORY_USAGE must be <= FSE_MAX_MEMORY_USAGE"
112250 +#endif
112252 +/*!FSE_MAX_SYMBOL_VALUE :
112253 +*  Maximum symbol value authorized.
112254 +*  Required for proper stack allocation */
112255 +#ifndef FSE_MAX_SYMBOL_VALUE
112256 +#  define FSE_MAX_SYMBOL_VALUE 255
112257 +#endif
112259 +/* **************************************************************
112260 +*  template functions type & suffix
112261 +****************************************************************/
112262 +#define FSE_FUNCTION_TYPE BYTE
112263 +#define FSE_FUNCTION_EXTENSION
112264 +#define FSE_DECODE_TYPE FSE_decode_t
112267 +#endif   /* !FSE_COMMONDEFS_ONLY */
112270 +/* ***************************************************************
112271 +*  Constants
112272 +*****************************************************************/
112273 +#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)
112274 +#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
112275 +#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
112276 +#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
112277 +#define FSE_MIN_TABLELOG 5
112279 +#define FSE_TABLELOG_ABSOLUTE_MAX 15
112280 +#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
112281 +#  error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
112282 +#endif
112284 +#define FSE_TABLESTEP(tableSize) (((tableSize)>>1) + ((tableSize)>>3) + 3)
112287 +#endif /* FSE_H_FSE_STATIC_LINKING_ONLY */
112288 diff --git a/lib/zstd/common/fse_decompress.c b/lib/zstd/common/fse_decompress.c
112289 new file mode 100644
112290 index 000000000000..2c8bbe3e4c14
112291 --- /dev/null
112292 +++ b/lib/zstd/common/fse_decompress.c
112293 @@ -0,0 +1,390 @@
112294 +/* ******************************************************************
112295 + * FSE : Finite State Entropy decoder
112296 + * Copyright (c) Yann Collet, Facebook, Inc.
112298 + *  You can contact the author at :
112299 + *  - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
112300 + *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
112302 + * This source code is licensed under both the BSD-style license (found in the
112303 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
112304 + * in the COPYING file in the root directory of this source tree).
112305 + * You may select, at your option, one of the above-listed licenses.
112306 +****************************************************************** */
112309 +/* **************************************************************
112310 +*  Includes
112311 +****************************************************************/
112312 +#include "debug.h"      /* assert */
112313 +#include "bitstream.h"
112314 +#include "compiler.h"
112315 +#define FSE_STATIC_LINKING_ONLY
112316 +#include "fse.h"
112317 +#include "error_private.h"
112318 +#define ZSTD_DEPS_NEED_MALLOC
112319 +#include "zstd_deps.h"
112322 +/* **************************************************************
112323 +*  Error Management
112324 +****************************************************************/
112325 +#define FSE_isError ERR_isError
112326 +#define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)   /* use only *after* variable declarations */
112329 +/* **************************************************************
112330 +*  Templates
112331 +****************************************************************/
112333 +  designed to be included
112334 +  for type-specific functions (template emulation in C)
112335 +  The objective is to write these functions only once, for improved maintainability
112338 +/* safety checks */
112339 +#ifndef FSE_FUNCTION_EXTENSION
112340 +#  error "FSE_FUNCTION_EXTENSION must be defined"
112341 +#endif
112342 +#ifndef FSE_FUNCTION_TYPE
112343 +#  error "FSE_FUNCTION_TYPE must be defined"
112344 +#endif
112346 +/* Function names */
112347 +#define FSE_CAT(X,Y) X##Y
112348 +#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
112349 +#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
112352 +/* Function templates */
112353 +FSE_DTable* FSE_createDTable (unsigned tableLog)
112355 +    if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
112356 +    return (FSE_DTable*)ZSTD_malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
112359 +void FSE_freeDTable (FSE_DTable* dt)
112361 +    ZSTD_free(dt);
112364 +static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
112366 +    void* const tdPtr = dt+1;   /* because *dt is unsigned, 32-bit aligned on 32-bit boundaries */
112367 +    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
112368 +    U16* symbolNext = (U16*)workSpace;
112369 +    BYTE* spread = (BYTE*)(symbolNext + maxSymbolValue + 1);
112371 +    U32 const maxSV1 = maxSymbolValue + 1;
112372 +    U32 const tableSize = 1 << tableLog;
112373 +    U32 highThreshold = tableSize-1;
112375 +    /* Sanity Checks */
112376 +    if (FSE_BUILD_DTABLE_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(maxSymbolValue_tooLarge);
112377 +    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
112378 +    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
112380 +    /* Init, lay down lowprob symbols */
112381 +    {   FSE_DTableHeader DTableH;
112382 +        DTableH.tableLog = (U16)tableLog;
112383 +        DTableH.fastMode = 1;
112384 +        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
112385 +            U32 s;
112386 +            for (s=0; s<maxSV1; s++) {
112387 +                if (normalizedCounter[s]==-1) {
112388 +                    tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
112389 +                    symbolNext[s] = 1;
112390 +                } else {
112391 +                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
112392 +                    symbolNext[s] = normalizedCounter[s];
112393 +        }   }   }
112394 +        ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
112395 +    }
112397 +    /* Spread symbols */
112398 +    if (highThreshold == tableSize - 1) {
112399 +        size_t const tableMask = tableSize-1;
112400 +        size_t const step = FSE_TABLESTEP(tableSize);
112401 +        /* First lay down the symbols in order.
112402 +         * We use a uint64_t to lay down 8 bytes at a time. This reduces branch
112403 +         * misses since small blocks generally have small table logs, so nearly
112404 +         * all symbols have counts <= 8. We ensure we have 8 bytes at the end of
112405 +         * our buffer to handle the over-write.
112406 +         */
112407 +        {
112408 +            U64 const add = 0x0101010101010101ull;
112409 +            size_t pos = 0;
112410 +            U64 sv = 0;
112411 +            U32 s;
112412 +            for (s=0; s<maxSV1; ++s, sv += add) {
112413 +                int i;
112414 +                int const n = normalizedCounter[s];
112415 +                MEM_write64(spread + pos, sv);
112416 +                for (i = 8; i < n; i += 8) {
112417 +                    MEM_write64(spread + pos + i, sv);
112418 +                }
112419 +                pos += n;
112420 +            }
112421 +        }
112422 +        /* Now we spread those positions across the table.
112423 +         * The benefit of doing it in two stages is that we avoid the
112424 +         * variable-size inner loop, which caused lots of branch misses.
112425 +         * Now we can run through all the positions without any branch misses.
112426 +         * We unroll the loop twice, since that is what empirically worked best.
112427 +         */
112428 +        {
112429 +            size_t position = 0;
112430 +            size_t s;
112431 +            size_t const unroll = 2;
112432 +            assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
112433 +            for (s = 0; s < (size_t)tableSize; s += unroll) {
112434 +                size_t u;
112435 +                for (u = 0; u < unroll; ++u) {
112436 +                    size_t const uPosition = (position + (u * step)) & tableMask;
112437 +                    tableDecode[uPosition].symbol = spread[s + u];
112438 +                }
112439 +                position = (position + (unroll * step)) & tableMask;
112440 +            }
112441 +            assert(position == 0);
112442 +        }
112443 +    } else {
112444 +        U32 const tableMask = tableSize-1;
112445 +        U32 const step = FSE_TABLESTEP(tableSize);
112446 +        U32 s, position = 0;
112447 +        for (s=0; s<maxSV1; s++) {
112448 +            int i;
112449 +            for (i=0; i<normalizedCounter[s]; i++) {
112450 +                tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
112451 +                position = (position + step) & tableMask;
112452 +                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
112453 +        }   }
112454 +        if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
112455 +    }
112457 +    /* Build Decoding table */
112458 +    {   U32 u;
112459 +        for (u=0; u<tableSize; u++) {
112460 +            FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
112461 +            U32 const nextState = symbolNext[symbol]++;
112462 +            tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
112463 +            tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
112464 +    }   }
112466 +    return 0;
112469 +size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
112471 +    return FSE_buildDTable_internal(dt, normalizedCounter, maxSymbolValue, tableLog, workSpace, wkspSize);
112475 +#ifndef FSE_COMMONDEFS_ONLY
112477 +/*-*******************************************************
112478 +*  Decompression (Byte symbols)
112479 +*********************************************************/
112480 +size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
112482 +    void* ptr = dt;
112483 +    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
112484 +    void* dPtr = dt + 1;
112485 +    FSE_decode_t* const cell = (FSE_decode_t*)dPtr;
112487 +    DTableH->tableLog = 0;
112488 +    DTableH->fastMode = 0;
112490 +    cell->newState = 0;
112491 +    cell->symbol = symbolValue;
112492 +    cell->nbBits = 0;
112494 +    return 0;
112498 +size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
112500 +    void* ptr = dt;
112501 +    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
112502 +    void* dPtr = dt + 1;
112503 +    FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;
112504 +    const unsigned tableSize = 1 << nbBits;
112505 +    const unsigned tableMask = tableSize - 1;
112506 +    const unsigned maxSV1 = tableMask+1;
112507 +    unsigned s;
112509 +    /* Sanity checks */
112510 +    if (nbBits < 1) return ERROR(GENERIC);         /* min size */
112512 +    /* Build Decoding Table */
112513 +    DTableH->tableLog = (U16)nbBits;
112514 +    DTableH->fastMode = 1;
112515 +    for (s=0; s<maxSV1; s++) {
112516 +        dinfo[s].newState = 0;
112517 +        dinfo[s].symbol = (BYTE)s;
112518 +        dinfo[s].nbBits = (BYTE)nbBits;
112519 +    }
112521 +    return 0;
112524 +FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
112525 +          void* dst, size_t maxDstSize,
112526 +    const void* cSrc, size_t cSrcSize,
112527 +    const FSE_DTable* dt, const unsigned fast)
112529 +    BYTE* const ostart = (BYTE*) dst;
112530 +    BYTE* op = ostart;
112531 +    BYTE* const omax = op + maxDstSize;
112532 +    BYTE* const olimit = omax-3;
112534 +    BIT_DStream_t bitD;
112535 +    FSE_DState_t state1;
112536 +    FSE_DState_t state2;
112538 +    /* Init */
112539 +    CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));
112541 +    FSE_initDState(&state1, &bitD, dt);
112542 +    FSE_initDState(&state2, &bitD, dt);
112544 +#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
112546 +    /* 4 symbols per loop */
112547 +    for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) & (op<olimit) ; op+=4) {
112548 +        op[0] = FSE_GETSYMBOL(&state1);
112550 +        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
112551 +            BIT_reloadDStream(&bitD);
112553 +        op[1] = FSE_GETSYMBOL(&state2);
112555 +        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
112556 +            { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
112558 +        op[2] = FSE_GETSYMBOL(&state1);
112560 +        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
112561 +            BIT_reloadDStream(&bitD);
112563 +        op[3] = FSE_GETSYMBOL(&state2);
112564 +    }
112566 +    /* tail */
112567 +    /* note : BIT_reloadDStream(&bitD) >= BIT_DStream_endOfBuffer; ends at exactly BIT_DStream_completed */
112568 +    while (1) {
112569 +        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
112570 +        *op++ = FSE_GETSYMBOL(&state1);
112571 +        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
112572 +            *op++ = FSE_GETSYMBOL(&state2);
112573 +            break;
112574 +        }
112576 +        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
112577 +        *op++ = FSE_GETSYMBOL(&state2);
112578 +        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
112579 +            *op++ = FSE_GETSYMBOL(&state1);
112580 +            break;
112581 +    }   }
112583 +    return op-ostart;
112587 +size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
112588 +                            const void* cSrc, size_t cSrcSize,
112589 +                            const FSE_DTable* dt)
112591 +    const void* ptr = dt;
112592 +    const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
112593 +    const U32 fastMode = DTableH->fastMode;
112595 +    /* select fast mode (static) */
112596 +    if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
112597 +    return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
112601 +size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
112603 +    return FSE_decompress_wksp_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, /* bmi2 */ 0);
112606 +typedef struct {
112607 +    short ncount[FSE_MAX_SYMBOL_VALUE + 1];
112608 +    FSE_DTable dtable[1]; /* Dynamically sized */
112609 +} FSE_DecompressWksp;
112612 +FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body(
112613 +        void* dst, size_t dstCapacity,
112614 +        const void* cSrc, size_t cSrcSize,
112615 +        unsigned maxLog, void* workSpace, size_t wkspSize,
112616 +        int bmi2)
112618 +    const BYTE* const istart = (const BYTE*)cSrc;
112619 +    const BYTE* ip = istart;
112620 +    unsigned tableLog;
112621 +    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
112622 +    FSE_DecompressWksp* const wksp = (FSE_DecompressWksp*)workSpace;
112624 +    DEBUG_STATIC_ASSERT((FSE_MAX_SYMBOL_VALUE + 1) % 2 == 0);
112625 +    if (wkspSize < sizeof(*wksp)) return ERROR(GENERIC);
112627 +    /* normal FSE decoding mode */
112628 +    {
112629 +        size_t const NCountLength = FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2);
112630 +        if (FSE_isError(NCountLength)) return NCountLength;
112631 +        if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
112632 +        assert(NCountLength <= cSrcSize);
112633 +        ip += NCountLength;
112634 +        cSrcSize -= NCountLength;
112635 +    }
112637 +    if (FSE_DECOMPRESS_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(tableLog_tooLarge);
112638 +    workSpace = wksp->dtable + FSE_DTABLE_SIZE_U32(tableLog);
112639 +    wkspSize -= sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);
112641 +    CHECK_F( FSE_buildDTable_internal(wksp->dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) );
112643 +    {
112644 +        const void* ptr = wksp->dtable;
112645 +        const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
112646 +        const U32 fastMode = DTableH->fastMode;
112648 +        /* select fast mode (static) */
112649 +        if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 1);
112650 +        return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 0);
112651 +    }
112654 +/* Avoids the FORCE_INLINE of the _body() function. */
112655 +static size_t FSE_decompress_wksp_body_default(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
112657 +    return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 0);
112660 +#if DYNAMIC_BMI2
112661 +TARGET_ATTRIBUTE("bmi2") static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
112663 +    return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 1);
112665 +#endif
112667 +size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2)
112669 +#if DYNAMIC_BMI2
112670 +    if (bmi2) {
112671 +        return FSE_decompress_wksp_body_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
112672 +    }
112673 +#endif
112674 +    (void)bmi2;
112675 +    return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
112679 +typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
112683 +#endif   /* FSE_COMMONDEFS_ONLY */
112684 diff --git a/lib/zstd/common/huf.h b/lib/zstd/common/huf.h
112685 new file mode 100644
112686 index 000000000000..b5dbd386c5e6
112687 --- /dev/null
112688 +++ b/lib/zstd/common/huf.h
112689 @@ -0,0 +1,355 @@
112690 +/* ******************************************************************
112691 + * huff0 huffman codec,
112692 + * part of Finite State Entropy library
112693 + * Copyright (c) Yann Collet, Facebook, Inc.
112695 + * You can contact the author at :
112696 + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
112698 + * This source code is licensed under both the BSD-style license (found in the
112699 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
112700 + * in the COPYING file in the root directory of this source tree).
112701 + * You may select, at your option, one of the above-listed licenses.
112702 +****************************************************************** */
112705 +#ifndef HUF_H_298734234
112706 +#define HUF_H_298734234
112708 +/* *** Dependencies *** */
112709 +#include "zstd_deps.h"    /* size_t */
112712 +/* *** library symbols visibility *** */
112713 +/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual,
112714 + *        HUF symbols remain "private" (internal symbols for library only).
112715 + *        Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */
112716 +#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
112717 +#  define HUF_PUBLIC_API __attribute__ ((visibility ("default")))
112718 +#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */
112719 +#  define HUF_PUBLIC_API __declspec(dllexport)
112720 +#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
112721 +#  define HUF_PUBLIC_API __declspec(dllimport)  /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */
112722 +#else
112723 +#  define HUF_PUBLIC_API
112724 +#endif
112727 +/* ========================== */
112728 +/* ***  simple functions  *** */
112729 +/* ========================== */
112731 +/** HUF_compress() :
112732 + *  Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'.
112733 + * 'dst' buffer must be already allocated.
112734 + *  Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize).
112735 + * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB.
112736 + * @return : size of compressed data (<= `dstCapacity`).
112737 + *  Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
112738 + *                   if HUF_isError(return), compression failed (more details using HUF_getErrorName())
112739 + */
112740 +HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity,
112741 +                             const void* src, size_t srcSize);
112743 +/** HUF_decompress() :
112744 + *  Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
112745 + *  into already allocated buffer 'dst', of minimum size 'dstSize'.
112746 + * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data.
112747 + *  Note : in contrast with FSE, HUF_decompress can regenerate
112748 + *         RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
112749 + *         because it knows the size to regenerate (originalSize).
112750 + * @return : size of regenerated data (== originalSize),
112751 + *           or an error code, which can be tested using HUF_isError()
112752 + */
112753 +HUF_PUBLIC_API size_t HUF_decompress(void* dst,  size_t originalSize,
112754 +                               const void* cSrc, size_t cSrcSize);
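As a usage sketch of the two simple entry points (helper name huf_roundtrip invented, not part of the patch): the caller must treat a 0 return from HUF_compress() as "nothing stored", and must hand HUF_decompress() the exact original size.

/* one-block round trip through the simple HUF API */
size_t huf_roundtrip(void* scratch, size_t scratchCapacity,
                     void* regen, const void* src, size_t srcSize)
{
    size_t const cSize = HUF_compress(scratch, scratchCapacity, src, srcSize);
    if (HUF_isError(cSize)) return cSize;  /* compression error */
    if (cSize == 0) return 0;              /* not compressible: dst holds nothing */
    return HUF_decompress(regen, srcSize /* exact originalSize */, scratch, cSize);
}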
112757 +/* ***   Tool functions *** */
112758 +#define HUF_BLOCKSIZE_MAX (128 * 1024)                  /**< maximum input size for a single block compressed with HUF_compress */
112759 +HUF_PUBLIC_API size_t HUF_compressBound(size_t size);   /**< maximum compressed size (worst case) */
112761 +/* Error Management */
112762 +HUF_PUBLIC_API unsigned    HUF_isError(size_t code);       /**< tells if a return value is an error code */
112763 +HUF_PUBLIC_API const char* HUF_getErrorName(size_t code);  /**< provides error code string (useful for debugging) */
112766 +/* ***   Advanced function   *** */
112768 +/** HUF_compress2() :
112769 + *  Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`.
112770 + * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX .
112771 + * `tableLog` must be `<= HUF_TABLELOG_MAX` . */
112772 +HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity,
112773 +                               const void* src, size_t srcSize,
112774 +                               unsigned maxSymbolValue, unsigned tableLog);
112776 +/** HUF_compress4X_wksp() :
112777 + *  Same as HUF_compress2(), but uses externally allocated `workSpace`.
112778 + * `workSpace` must have minimum alignment of 4, and be at least as large as HUF_WORKSPACE_SIZE */
112779 +#define HUF_WORKSPACE_SIZE ((6 << 10) + 256)
112780 +#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32))
112781 +HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
112782 +                                     const void* src, size_t srcSize,
112783 +                                     unsigned maxSymbolValue, unsigned tableLog,
112784 +                                     void* workSpace, size_t wkspSize);
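A sketch of the workspace variant (helper name invented): an array of HUF_WORKSPACE_SIZE_U32 unsigneds satisfies both the size requirement and, by its element type, the 4-byte alignment.

size_t huf_compress_noalloc(void* dst, size_t dstCapacity,
                            const void* src, size_t srcSize)
{
    unsigned wksp[HUF_WORKSPACE_SIZE_U32];   /* aligned and large enough by construction */
    return HUF_compress4X_wksp(dst, dstCapacity, src, srcSize,
                               255 /* = HUF_SYMBOLVALUE_MAX, defined below */,
                               11  /* = HUF_TABLELOG_DEFAULT, defined below */,
                               wksp, sizeof(wksp));
}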
112786 +#endif   /* HUF_H_298734234 */
112788 +/* ******************************************************************
112789 + *  WARNING !!
112790 + *  The following section contains advanced and experimental definitions
112791 + *  which shall never be used in the context of a dynamic library,
112792 + *  because they are not guaranteed to remain stable in the future.
112793 + *  Only consider them in association with static linking.
112794 + * *****************************************************************/
112795 +#if !defined(HUF_H_HUF_STATIC_LINKING_ONLY)
112796 +#define HUF_H_HUF_STATIC_LINKING_ONLY
112798 +/* *** Dependencies *** */
112799 +#include "mem.h"   /* U32 */
112800 +#define FSE_STATIC_LINKING_ONLY
112801 +#include "fse.h"
112804 +/* *** Constants *** */
112805 +#define HUF_TABLELOG_MAX      12      /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
112806 +#define HUF_TABLELOG_DEFAULT  11      /* default tableLog value when none specified */
112807 +#define HUF_SYMBOLVALUE_MAX  255
112809 +#define HUF_TABLELOG_ABSOLUTEMAX  15  /* absolute limit of HUF_TABLELOG_MAX. Beyond that value, code does not work */
112810 +#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
112811 +#  error "HUF_TABLELOG_MAX is too large !"
112812 +#endif
112815 +/* ****************************************
112816 +*  Static allocation
112817 +******************************************/
112818 +/* HUF buffer bounds */
112819 +#define HUF_CTABLEBOUND 129
112820 +#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true when incompressible is pre-filtered with fast heuristic */
112821 +#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
112823 +/* static allocation of HUF's Compression Table */
112824 +/* this is a private definition, just exposed for allocation and strict aliasing purpose. never EVER access its members directly */
112825 +struct HUF_CElt_s {
112826 +  U16  val;
112827 +  BYTE nbBits;
112828 +};   /* typedef'd to HUF_CElt */
112829 +typedef struct HUF_CElt_s HUF_CElt;   /* consider it an incomplete type */
112830 +#define HUF_CTABLE_SIZE_U32(maxSymbolValue)   ((maxSymbolValue)+1)   /* Use tables of U32, for proper alignment */
112831 +#define HUF_CTABLE_SIZE(maxSymbolValue)       (HUF_CTABLE_SIZE_U32(maxSymbolValue) * sizeof(U32))
112832 +#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
112833 +    HUF_CElt name[HUF_CTABLE_SIZE_U32(maxSymbolValue)] /* no final ; */
112835 +/* static allocation of HUF's DTable */
112836 +typedef U32 HUF_DTable;
112837 +#define HUF_DTABLE_SIZE(maxTableLog)   (1 + (1<<(maxTableLog)))
112838 +#define HUF_CREATE_STATIC_DTABLEX1(DTable, maxTableLog) \
112839 +        HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) }
112840 +#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
112841 +        HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) }
112844 +/* ****************************************
112845 +*  Advanced decompression functions
112846 +******************************************/
112847 +size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
112848 +#ifndef HUF_FORCE_DECOMPRESS_X1
112849 +size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
112850 +#endif
112852 +size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< decodes RLE and uncompressed */
112853 +size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */
112854 +size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */
112855 +size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
112856 +size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
112857 +#ifndef HUF_FORCE_DECOMPRESS_X1
112858 +size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
112859 +size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */
112860 +#endif
112863 +/* ****************************************
112864 + *  HUF detailed API
112865 + * ****************************************/
112867 +/*! HUF_compress() does the following:
112868 + *  1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h")
112869 + *  2. (optional) refine tableLog using HUF_optimalTableLog()
112870 + *  3. build Huffman table from count using HUF_buildCTable()
112871 + *  4. save Huffman table to memory buffer using HUF_writeCTable()
112872 + *  5. encode the data stream using HUF_compress4X_usingCTable()
112874 + *  The following API allows targeting specific sub-functions for advanced tasks.
112875 + *  For example, it's possible to compress several blocks using the same 'CTable',
112876 + *  or to save and regenerate 'CTable' using external methods.
112877 + */
112878 +unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
112879 +size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits);   /* @return : maxNbBits; CTable and count can overlap, in which case CTable will overwrite count's content */
112880 +size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
112881 +size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize);
112882 +size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
112883 +size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
112884 +int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
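A sketch wiring steps 2-5 of the pipeline together (helper name invented; the count[] histogram is assumed to come from step 1, FSE_count() exposed within "fse.h"):

size_t huf_compress_detailed(void* dst, size_t dstCapacity,
                             const void* src, size_t srcSize,
                             const unsigned* count, unsigned maxSymbolValue)
{
    HUF_CREATE_STATIC_CTABLE(ctable, HUF_SYMBOLVALUE_MAX);
    BYTE* const op = (BYTE*)dst;
    unsigned const tableLog = HUF_optimalTableLog(HUF_TABLELOG_DEFAULT, srcSize, maxSymbolValue);  /* step 2 */
    size_t const maxNbBits = HUF_buildCTable(ctable, count, maxSymbolValue, tableLog);             /* step 3 */
    size_t hSize, cSize;
    if (HUF_isError(maxNbBits)) return maxNbBits;
    hSize = HUF_writeCTable(op, dstCapacity, ctable, maxSymbolValue, (unsigned)maxNbBits);         /* step 4 */
    if (HUF_isError(hSize)) return hSize;
    cSize = HUF_compress4X_usingCTable(op + hSize, dstCapacity - hSize, src, srcSize, ctable);     /* step 5 */
    if (HUF_isError(cSize)) return cSize;
    return hSize + cSize;
}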
112886 +typedef enum {
112887 +   HUF_repeat_none,  /**< Cannot use the previous table */
112888 +   HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
112889 +   HUF_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
112890 + } HUF_repeat;
112891 +/** HUF_compress4X_repeat() :
112892 + *  Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
112893 + *  If it uses hufTable it does not modify hufTable or repeat.
112894 + *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
112895 + *  If preferRepeat then the old table will always be used if valid. */
112896 +size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
112897 +                       const void* src, size_t srcSize,
112898 +                       unsigned maxSymbolValue, unsigned tableLog,
112899 +                       void* workSpace, size_t wkspSize,    /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
112900 +                       HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
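Multi-block sketch (names invented; framing of per-block compressed sizes omitted): the CTable built for one block is reused for the next whenever repeat permits, amortizing the table header and build cost across blocks.

size_t huf_compress_blocks(void* dst, size_t dstCapacity,
                           const void* const* blocks, const size_t* blockSizes, size_t nbBlocks)
{
    HUF_CREATE_STATIC_CTABLE(hufTable, HUF_SYMBOLVALUE_MAX);
    unsigned wksp[HUF_WORKSPACE_SIZE_U32];
    HUF_repeat repeat = HUF_repeat_none;   /* no previous table yet */
    BYTE* op = (BYTE*)dst;
    size_t n;
    for (n = 0; n < nbBlocks; n++) {
        size_t const cSize = HUF_compress4X_repeat(
                op, dstCapacity - (size_t)(op - (BYTE*)dst),
                blocks[n], blockSizes[n],
                HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT,
                wksp, sizeof(wksp),
                hufTable, &repeat, 0 /* preferRepeat */, 0 /* bmi2 */);
        if (HUF_isError(cSize)) return cSize;
        op += cSize;
    }
    return (size_t)(op - (BYTE*)dst);
}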
112902 +/** HUF_buildCTable_wksp() :
112903 + *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
112904 + * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE.
112905 + */
112906 +#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1)
112907 +#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
112908 +size_t HUF_buildCTable_wksp (HUF_CElt* tree,
112909 +                       const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,
112910 +                             void* workSpace, size_t wkspSize);
112912 +/*! HUF_readStats() :
112913 + *  Read compact Huffman tree, saved by HUF_writeCTable().
112914 + * `huffWeight` is destination buffer.
112915 + * @return : size read from `src` , or an error code.
112916 + *  Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */
112917 +size_t HUF_readStats(BYTE* huffWeight, size_t hwSize,
112918 +                     U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
112919 +                     const void* src, size_t srcSize);
112921 +/*! HUF_readStats_wksp() :
112922 + * Same as HUF_readStats() but takes an external workspace which must be
112923 + * 4-byte aligned and its size must be >= HUF_READ_STATS_WORKSPACE_SIZE.
112924 + * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
112925 + */
112926 +#define HUF_READ_STATS_WORKSPACE_SIZE_U32 FSE_DECOMPRESS_WKSP_SIZE_U32(6, HUF_TABLELOG_MAX-1)
112927 +#define HUF_READ_STATS_WORKSPACE_SIZE (HUF_READ_STATS_WORKSPACE_SIZE_U32 * sizeof(unsigned))
112928 +size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize,
112929 +                          U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
112930 +                          const void* src, size_t srcSize,
112931 +                          void* workspace, size_t wkspSize,
112932 +                          int bmi2);
112934 +/** HUF_readCTable() :
112935 + *  Loading a CTable saved with HUF_writeCTable() */
112936 +size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights);
112938 +/** HUF_getNbBits() :
112939 + *  Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX
112940 + *  Note 1 : is not inlined, as HUF_CElt definition is private
112941 + *  Note 2 : const void* used, so that it can provide a statically allocated table as argument (which uses type U32) */
112942 +U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue);
112945 + * HUF_decompress() does the following:
112946 + * 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics
112947 + * 2. build Huffman table from the saved representation, using HUF_readDTableX?()
112948 + * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable()
112949 + */
112951 +/** HUF_selectDecoder() :
112952 + *  Tells which decoder is likely to decode faster,
112953 + *  based on a set of pre-computed metrics.
112954 + * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
112955 + *  Assumption : 0 < dstSize <= 128 KB */
112956 +U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);
112959 + *  The minimum workspace size for the `workSpace` used in
112960 + *  HUF_readDTableX1_wksp() and HUF_readDTableX2_wksp().
112962 + *  The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when
112963 + *  HUF_TABLELOG_MAX=12 to ~1850 bytes when HUF_TABLELOG_MAX=15.
112964 + *  Buffer overflow errors may potentially occur if code modifications result in
112965 + *  a required workspace size greater than that specified in the following
112966 + *  macro.
112967 + */
112968 +#define HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9))
112969 +#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
112971 +#ifndef HUF_FORCE_DECOMPRESS_X2
112972 +size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize);
112973 +size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
112974 +#endif
112975 +#ifndef HUF_FORCE_DECOMPRESS_X1
112976 +size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
112977 +size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
112978 +#endif
112980 +size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
112981 +#ifndef HUF_FORCE_DECOMPRESS_X2
112982 +size_t HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
112983 +#endif
112984 +#ifndef HUF_FORCE_DECOMPRESS_X1
112985 +size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
112986 +#endif
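Sketch of the documented decode flow (helper name invented; assumes neither HUF_FORCE_DECOMPRESS_X* macro is defined): let the heuristic pick a decoder, then have the _DCtx_wksp variant read the table description and decode in one call.

size_t huf_decompress_auto(void* dst, size_t dstSize,
                           const void* cSrc, size_t cSrcSize)
{
    HUF_CREATE_STATIC_DTABLEX2(dtable, HUF_TABLELOG_MAX);   /* X2 sizing also fits X1 */
    U32 wksp[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
    U32 const algo = HUF_selectDecoder(dstSize, cSrcSize);  /* 0 => X1, 1 => X2 */
    return algo
        ? HUF_decompress4X2_DCtx_wksp(dtable, dst, dstSize, cSrc, cSrcSize, wksp, sizeof(wksp))
        : HUF_decompress4X1_DCtx_wksp(dtable, dst, dstSize, cSrc, cSrcSize, wksp, sizeof(wksp));
}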
112989 +/* ====================== */
112990 +/* single stream variants */
112991 +/* ====================== */
112993 +size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
112994 +size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);  /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
112995 +size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
112996 +/** HUF_compress1X_repeat() :
112997 + *  Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
112998 + *  If it uses hufTable it does not modify hufTable or repeat.
112999 + *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
113000 + *  If preferRepeat then the old table will always be used if valid. */
113001 +size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
113002 +                       const void* src, size_t srcSize,
113003 +                       unsigned maxSymbolValue, unsigned tableLog,
113004 +                       void* workSpace, size_t wkspSize,   /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
113005 +                       HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
113007 +size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
113008 +#ifndef HUF_FORCE_DECOMPRESS_X1
113009 +size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */
113010 +#endif
113012 +size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
113013 +size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);
113014 +#ifndef HUF_FORCE_DECOMPRESS_X2
113015 +size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
113016 +size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
113017 +#endif
113018 +#ifndef HUF_FORCE_DECOMPRESS_X1
113019 +size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
113020 +size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */
113021 +#endif
113023 +size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);   /**< automatic selection of single or double symbol decoder, based on DTable */
113024 +#ifndef HUF_FORCE_DECOMPRESS_X2
113025 +size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
113026 +#endif
113027 +#ifndef HUF_FORCE_DECOMPRESS_X1
113028 +size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
113029 +#endif
113031 +/* BMI2 variants.
113032 + * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
113033 + */
113034 +size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
113035 +#ifndef HUF_FORCE_DECOMPRESS_X2
113036 +size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
113037 +#endif
113038 +size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
113039 +size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
113040 +#ifndef HUF_FORCE_DECOMPRESS_X2
113041 +size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2);
113042 +#endif
113044 +#endif /* HUF_H_HUF_STATIC_LINKING_ONLY */
113045 diff --git a/lib/zstd/common/mem.h b/lib/zstd/common/mem.h
113046 new file mode 100644
113047 index 000000000000..4b5db5756a6f
113048 --- /dev/null
113049 +++ b/lib/zstd/common/mem.h
113050 @@ -0,0 +1,259 @@
113051 +/* SPDX-License-Identifier: GPL-2.0-only */
113053 + * Copyright (c) Yann Collet, Facebook, Inc.
113054 + * All rights reserved.
113056 + * This source code is licensed under both the BSD-style license (found in the
113057 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
113058 + * in the COPYING file in the root directory of this source tree).
113059 + * You may select, at your option, one of the above-listed licenses.
113060 + */
113062 +#ifndef MEM_H_MODULE
113063 +#define MEM_H_MODULE
113065 +/*-****************************************
113066 +*  Dependencies
113067 +******************************************/
113068 +#include <asm/unaligned.h>  /* get_unaligned, put_unaligned* */
113069 +#include <linux/compiler.h>  /* inline */
113070 +#include <linux/swab.h>  /* swab32, swab64 */
113071 +#include <linux/types.h>  /* size_t, ptrdiff_t */
113072 +#include "debug.h"  /* DEBUG_STATIC_ASSERT */
113074 +/*-****************************************
113075 +*  Compiler specifics
113076 +******************************************/
113077 +#define MEM_STATIC static inline
113079 +/*-**************************************************************
113080 +*  Basic Types
113081 +*****************************************************************/
113082 +typedef uint8_t  BYTE;
113083 +typedef uint16_t U16;
113084 +typedef int16_t  S16;
113085 +typedef uint32_t U32;
113086 +typedef int32_t  S32;
113087 +typedef uint64_t U64;
113088 +typedef int64_t  S64;
113090 +/*-**************************************************************
113091 +*  Memory I/O API
113092 +*****************************************************************/
113093 +/*=== Static platform detection ===*/
113094 +MEM_STATIC unsigned MEM_32bits(void);
113095 +MEM_STATIC unsigned MEM_64bits(void);
113096 +MEM_STATIC unsigned MEM_isLittleEndian(void);
113098 +/*=== Native unaligned read/write ===*/
113099 +MEM_STATIC U16 MEM_read16(const void* memPtr);
113100 +MEM_STATIC U32 MEM_read32(const void* memPtr);
113101 +MEM_STATIC U64 MEM_read64(const void* memPtr);
113102 +MEM_STATIC size_t MEM_readST(const void* memPtr);
113104 +MEM_STATIC void MEM_write16(void* memPtr, U16 value);
113105 +MEM_STATIC void MEM_write32(void* memPtr, U32 value);
113106 +MEM_STATIC void MEM_write64(void* memPtr, U64 value);
113108 +/*=== Little endian unaligned read/write ===*/
113109 +MEM_STATIC U16 MEM_readLE16(const void* memPtr);
113110 +MEM_STATIC U32 MEM_readLE24(const void* memPtr);
113111 +MEM_STATIC U32 MEM_readLE32(const void* memPtr);
113112 +MEM_STATIC U64 MEM_readLE64(const void* memPtr);
113113 +MEM_STATIC size_t MEM_readLEST(const void* memPtr);
113115 +MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val);
113116 +MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val);
113117 +MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32);
113118 +MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64);
113119 +MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val);
113121 +/*=== Big endian unaligned read/write ===*/
113122 +MEM_STATIC U32 MEM_readBE32(const void* memPtr);
113123 +MEM_STATIC U64 MEM_readBE64(const void* memPtr);
113124 +MEM_STATIC size_t MEM_readBEST(const void* memPtr);
113126 +MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32);
113127 +MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64);
113128 +MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val);
113130 +/*=== Byteswap ===*/
113131 +MEM_STATIC U32 MEM_swap32(U32 in);
113132 +MEM_STATIC U64 MEM_swap64(U64 in);
113133 +MEM_STATIC size_t MEM_swapST(size_t in);
113135 +/*-**************************************************************
113136 +*  Memory I/O Implementation
113137 +*****************************************************************/
113138 +MEM_STATIC unsigned MEM_32bits(void)
113140 +    return sizeof(size_t) == 4;
113143 +MEM_STATIC unsigned MEM_64bits(void)
113145 +    return sizeof(size_t) == 8;
113148 +#if defined(__LITTLE_ENDIAN)
113149 +#define MEM_LITTLE_ENDIAN 1
113150 +#else
113151 +#define MEM_LITTLE_ENDIAN 0
113152 +#endif
113154 +MEM_STATIC unsigned MEM_isLittleEndian(void)
113156 +    return MEM_LITTLE_ENDIAN;
113159 +MEM_STATIC U16 MEM_read16(const void *memPtr)
113161 +    return get_unaligned((const U16 *)memPtr);
113164 +MEM_STATIC U32 MEM_read32(const void *memPtr)
113166 +    return get_unaligned((const U32 *)memPtr);
113169 +MEM_STATIC U64 MEM_read64(const void *memPtr)
113171 +    return get_unaligned((const U64 *)memPtr);
113174 +MEM_STATIC size_t MEM_readST(const void *memPtr)
113176 +    return get_unaligned((const size_t *)memPtr);
113179 +MEM_STATIC void MEM_write16(void *memPtr, U16 value)
113181 +    put_unaligned(value, (U16 *)memPtr);
113184 +MEM_STATIC void MEM_write32(void *memPtr, U32 value)
113186 +    put_unaligned(value, (U32 *)memPtr);
113189 +MEM_STATIC void MEM_write64(void *memPtr, U64 value)
113191 +    put_unaligned(value, (U64 *)memPtr);
113194 +/*=== Little endian r/w ===*/
113196 +MEM_STATIC U16 MEM_readLE16(const void *memPtr)
113198 +    return get_unaligned_le16(memPtr);
113201 +MEM_STATIC void MEM_writeLE16(void *memPtr, U16 val)
113203 +    put_unaligned_le16(val, memPtr);
113206 +MEM_STATIC U32 MEM_readLE24(const void *memPtr)
113208 +    return MEM_readLE16(memPtr) + (((const BYTE *)memPtr)[2] << 16);
113211 +MEM_STATIC void MEM_writeLE24(void *memPtr, U32 val)
113213 +       MEM_writeLE16(memPtr, (U16)val);
113214 +       ((BYTE *)memPtr)[2] = (BYTE)(val >> 16);
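A concrete trace of the 24-bit pair above (values invented): MEM_writeLE24 stores the low 16 bits little-endian, then the top byte third.

BYTE buf[3];
MEM_writeLE24(buf, 0xABCDEF);   /* buf == { 0xEF, 0xCD, 0xAB } */
/* MEM_readLE24(buf) == 0xCDEF + (0xAB << 16) == 0xABCDEF */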
113217 +MEM_STATIC U32 MEM_readLE32(const void *memPtr)
113219 +    return get_unaligned_le32(memPtr);
113222 +MEM_STATIC void MEM_writeLE32(void *memPtr, U32 val32)
113224 +    put_unaligned_le32(val32, memPtr);
113227 +MEM_STATIC U64 MEM_readLE64(const void *memPtr)
113229 +    return get_unaligned_le64(memPtr);
113232 +MEM_STATIC void MEM_writeLE64(void *memPtr, U64 val64)
113234 +    put_unaligned_le64(val64, memPtr);
113237 +MEM_STATIC size_t MEM_readLEST(const void *memPtr)
113239 +       if (MEM_32bits())
113240 +               return (size_t)MEM_readLE32(memPtr);
113241 +       else
113242 +               return (size_t)MEM_readLE64(memPtr);
113245 +MEM_STATIC void MEM_writeLEST(void *memPtr, size_t val)
113247 +       if (MEM_32bits())
113248 +               MEM_writeLE32(memPtr, (U32)val);
113249 +       else
113250 +               MEM_writeLE64(memPtr, (U64)val);
113253 +/*=== Big endian r/w ===*/
113255 +MEM_STATIC U32 MEM_readBE32(const void *memPtr)
113257 +    return get_unaligned_be32(memPtr);
113260 +MEM_STATIC void MEM_writeBE32(void *memPtr, U32 val32)
113262 +    put_unaligned_be32(val32, memPtr);
113265 +MEM_STATIC U64 MEM_readBE64(const void *memPtr)
113267 +    return get_unaligned_be64(memPtr);
113270 +MEM_STATIC void MEM_writeBE64(void *memPtr, U64 val64)
113272 +    put_unaligned_be64(val64, memPtr);
113275 +MEM_STATIC size_t MEM_readBEST(const void *memPtr)
113277 +       if (MEM_32bits())
113278 +               return (size_t)MEM_readBE32(memPtr);
113279 +       else
113280 +               return (size_t)MEM_readBE64(memPtr);
113283 +MEM_STATIC void MEM_writeBEST(void *memPtr, size_t val)
113285 +       if (MEM_32bits())
113286 +               MEM_writeBE32(memPtr, (U32)val);
113287 +       else
113288 +               MEM_writeBE64(memPtr, (U64)val);
113291 +MEM_STATIC U32 MEM_swap32(U32 in)
113293 +    return swab32(in);
113296 +MEM_STATIC U64 MEM_swap64(U64 in)
113298 +    return swab64(in);
113301 +MEM_STATIC size_t MEM_swapST(size_t in)
113303 +    if (MEM_32bits())
113304 +        return (size_t)MEM_swap32((U32)in);
113305 +    else
113306 +        return (size_t)MEM_swap64((U64)in);
113309 +#endif /* MEM_H_MODULE */
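A small illustration of the size_t-width helpers (assumes a 64-bit build; not part of the patch): the ST variants always move sizeof(size_t) bytes, selecting the 32- or 64-bit routine at compile time via MEM_32bits().

BYTE buf[8];
size_t const v = (size_t)0x0102030405060708ULL;
MEM_writeLEST(buf, v);   /* least significant byte first: buf[0] == 0x08 */
/* MEM_readLEST(buf) == v; MEM_swapST(v) byte-reverses it */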
113310 diff --git a/lib/zstd/common/zstd_common.c b/lib/zstd/common/zstd_common.c
113311 new file mode 100644
113312 index 000000000000..3d7e35b309b5
113313 --- /dev/null
113314 +++ b/lib/zstd/common/zstd_common.c
113315 @@ -0,0 +1,83 @@
113317 + * Copyright (c) Yann Collet, Facebook, Inc.
113318 + * All rights reserved.
113320 + * This source code is licensed under both the BSD-style license (found in the
113321 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
113322 + * in the COPYING file in the root directory of this source tree).
113323 + * You may select, at your option, one of the above-listed licenses.
113324 + */
113328 +/*-*************************************
113329 +*  Dependencies
113330 +***************************************/
113331 +#define ZSTD_DEPS_NEED_MALLOC
113332 +#include "zstd_deps.h"   /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */
113333 +#include "error_private.h"
113334 +#include "zstd_internal.h"
113337 +/*-****************************************
113338 +*  Version
113339 +******************************************/
113340 +unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; }
113342 +const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; }
113345 +/*-****************************************
113346 +*  ZSTD Error Management
113347 +******************************************/
113348 +#undef ZSTD_isError   /* defined within zstd_internal.h */
113349 +/*! ZSTD_isError() :
113350 + *  tells if a return value is an error code
113351 + *  symbol is required for external callers */
113352 +unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
113354 +/*! ZSTD_getErrorName() :
113355 + *  provides error code string from function result (useful for debugging) */
113356 +const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); }
113358 +/*! ZSTD_getError() :
113359 + *  convert a `size_t` function result into a proper ZSTD_errorCode enum */
113360 +ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }
113362 +/*! ZSTD_getErrorString() :
113363 + *  provides error code string from enum */
113364 +const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }
113368 +/*=**************************************************************
113369 +*  Custom allocator
113370 +****************************************************************/
113371 +void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem)
113373 +    if (customMem.customAlloc)
113374 +        return customMem.customAlloc(customMem.opaque, size);
113375 +    return ZSTD_malloc(size);
113378 +void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem)
113380 +    if (customMem.customAlloc) {
113381 +        /* calloc implemented as malloc+memset;
113382 +         * not as efficient as calloc, but next best guess for custom malloc */
113383 +        void* const ptr = customMem.customAlloc(customMem.opaque, size);
113384 +        ZSTD_memset(ptr, 0, size);
113385 +        return ptr;
113386 +    }
113387 +    return ZSTD_calloc(1, size);
113390 +void ZSTD_customFree(void* ptr, ZSTD_customMem customMem)
113392 +    if (ptr!=NULL) {
113393 +        if (customMem.customFree)
113394 +            customMem.customFree(customMem.opaque, ptr);
113395 +        else
113396 +            ZSTD_free(ptr);
113397 +    }
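A hypothetical kernel-side ZSTD_customMem routing these hooks to vmalloc() (my_alloc/my_free invented; assumes the upstream field order customAlloc, customFree, opaque):

#include <linux/vmalloc.h>

static void* my_alloc(void* opaque, size_t size) { (void)opaque; return vmalloc(size); }
static void  my_free (void* opaque, void* addr)  { (void)opaque; vfree(addr); }

static const ZSTD_customMem my_mem = { my_alloc, my_free, NULL };
/* ZSTD_customMalloc(size, my_mem) now forwards to vmalloc();
 * with customAlloc == NULL it would fall back to ZSTD_malloc(). */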
113399 diff --git a/lib/zstd/common/zstd_deps.h b/lib/zstd/common/zstd_deps.h
113400 new file mode 100644
113401 index 000000000000..853b72426215
113402 --- /dev/null
113403 +++ b/lib/zstd/common/zstd_deps.h
113404 @@ -0,0 +1,125 @@
113405 +/* SPDX-License-Identifier: GPL-2.0-only */
113407 + * Copyright (c) Facebook, Inc.
113408 + * All rights reserved.
113410 + * This source code is licensed under both the BSD-style license (found in the
113411 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
113412 + * in the COPYING file in the root directory of this source tree).
113413 + * You may select, at your option, one of the above-listed licenses.
113414 + */
113417 + * This file provides common libc dependencies that zstd requires.
113418 + * The purpose is to allow replacing this file with a custom implementation
113419 + * to compile zstd without libc support.
113420 + */
113422 +/* Need:
113423 + * NULL
113424 + * INT_MAX
113425 + * UINT_MAX
113426 + * ZSTD_memcpy()
113427 + * ZSTD_memset()
113428 + * ZSTD_memmove()
113429 + */
113430 +#ifndef ZSTD_DEPS_COMMON
113431 +#define ZSTD_DEPS_COMMON
113433 +#include <linux/limits.h>
113434 +#include <linux/stddef.h>
113436 +#define ZSTD_memcpy(d,s,n) __builtin_memcpy((d),(s),(n))
113437 +#define ZSTD_memmove(d,s,n) __builtin_memmove((d),(s),(n))
113438 +#define ZSTD_memset(d,s,n) __builtin_memset((d),(s),(n))
113440 +#endif /* ZSTD_DEPS_COMMON */
113443 + * Define malloc as always failing. That means the user must
113444 + * either use ZSTD_customMem or statically allocate memory.
113445 + * Need:
113446 + * ZSTD_malloc()
113447 + * ZSTD_free()
113448 + * ZSTD_calloc()
113449 + */
113450 +#ifdef ZSTD_DEPS_NEED_MALLOC
113451 +#ifndef ZSTD_DEPS_MALLOC
113452 +#define ZSTD_DEPS_MALLOC
113454 +#define ZSTD_malloc(s) ({ (void)(s); NULL; })
113455 +#define ZSTD_free(p) ((void)(p))
113456 +#define ZSTD_calloc(n,s) ({ (void)(n); (void)(s); NULL; })
113458 +#endif /* ZSTD_DEPS_MALLOC */
113459 +#endif /* ZSTD_DEPS_NEED_MALLOC */
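Because ZSTD_malloc()/ZSTD_calloc() are stubbed to return NULL, any zstd call that would heap-allocate fails by design; callers pre-size and own the memory instead. A hedged sketch, assuming the static-init entry points of upstream zstd (ZSTD_estimateCCtxSize(), ZSTD_initStaticCCtx()) are exposed through linux/zstd.h in this port:

size_t const wkspSize = ZSTD_estimateCCtxSize(3 /* compression level */);
void* const wksp = vmalloc(wkspSize);   /* caller-owned workspace */
ZSTD_CCtx* const cctx = wksp ? ZSTD_initStaticCCtx(wksp, wkspSize) : NULL;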
113462 + * Provides 64-bit math support.
113463 + * Need:
113464 + * U64 ZSTD_div64(U64 dividend, U32 divisor)
113465 + */
113466 +#ifdef ZSTD_DEPS_NEED_MATH64
113467 +#ifndef ZSTD_DEPS_MATH64
113468 +#define ZSTD_DEPS_MATH64
113470 +#include <linux/math64.h>
113472 +static uint64_t ZSTD_div64(uint64_t dividend, uint32_t divisor) {
113473 +  return div_u64(dividend, divisor);
113476 +#endif /* ZSTD_DEPS_MATH64 */
113477 +#endif /* ZSTD_DEPS_NEED_MATH64 */
113480 + * This is only requested when DEBUGLEVEL >= 1, meaning
113481 + * it is disabled in production.
113482 + * Need:
113483 + * assert()
113484 + */
113485 +#ifdef ZSTD_DEPS_NEED_ASSERT
113486 +#ifndef ZSTD_DEPS_ASSERT
113487 +#define ZSTD_DEPS_ASSERT
113489 +#include <linux/kernel.h>
113491 +#define assert(x) WARN_ON(!(x))   /* assert(x) must fire when x is false */
113493 +#endif /* ZSTD_DEPS_ASSERT */
113494 +#endif /* ZSTD_DEPS_NEED_ASSERT */
113497 + * This is only requested when DEBUGLEVEL >= 2, meaning
113498 + * it is disabled in production.
113499 + * Need:
113500 + * ZSTD_DEBUG_PRINT()
113501 + */
113502 +#ifdef ZSTD_DEPS_NEED_IO
113503 +#ifndef ZSTD_DEPS_IO
113504 +#define ZSTD_DEPS_IO
113506 +#include <linux/printk.h>
113508 +#define ZSTD_DEBUG_PRINT(...) pr_debug(__VA_ARGS__)
113510 +#endif /* ZSTD_DEPS_IO */
113511 +#endif /* ZSTD_DEPS_NEED_IO */
113514 + * Only requested when MSAN is enabled.
113515 + * Need:
113516 + * intptr_t
113517 + */
113518 +#ifdef ZSTD_DEPS_NEED_STDINT
113519 +#ifndef ZSTD_DEPS_STDINT
113520 +#define ZSTD_DEPS_STDINT
113523 + * The Linux Kernel doesn't provide intptr_t, only uintptr_t, which
113524 + * is an unsigned long.
113525 + */
113526 +typedef long intptr_t;
113528 +#endif /* ZSTD_DEPS_STDINT */
113529 +#endif /* ZSTD_DEPS_NEED_STDINT */
113530 diff --git a/lib/zstd/common/zstd_internal.h b/lib/zstd/common/zstd_internal.h
113531 new file mode 100644
113532 index 000000000000..1f939cbe05ed
113533 --- /dev/null
113534 +++ b/lib/zstd/common/zstd_internal.h
113535 @@ -0,0 +1,450 @@
113537 + * Copyright (c) Yann Collet, Facebook, Inc.
113538 + * All rights reserved.
113540 + * This source code is licensed under both the BSD-style license (found in the
113541 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
113542 + * in the COPYING file in the root directory of this source tree).
113543 + * You may select, at your option, one of the above-listed licenses.
113544 + */
113546 +#ifndef ZSTD_CCOMMON_H_MODULE
113547 +#define ZSTD_CCOMMON_H_MODULE
113549 +/* this module contains definitions which must be identical
113550 + * across compression, decompression and dictBuilder.
113551 + * It also contains a few functions useful to at least 2 of them
113552 + * and which benefit from being inlined */
113554 +/*-*************************************
113555 +*  Dependencies
113556 +***************************************/
113557 +#include "compiler.h"
113558 +#include "mem.h"
113559 +#include "debug.h"                 /* assert, DEBUGLOG, RAWLOG, g_debuglevel */
113560 +#include "error_private.h"
113561 +#define ZSTD_STATIC_LINKING_ONLY
113562 +#include <linux/zstd.h>
113563 +#define FSE_STATIC_LINKING_ONLY
113564 +#include "fse.h"
113565 +#define HUF_STATIC_LINKING_ONLY
113566 +#include "huf.h"
113567 +#include <linux/xxhash.h>                /* XXH_reset, update, digest */
113568 +#define ZSTD_TRACE 0
113571 +/* ---- static assert (debug) --- */
113572 +#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)
113573 +#define ZSTD_isError ERR_isError   /* for inlining */
113574 +#define FSE_isError  ERR_isError
113575 +#define HUF_isError  ERR_isError
113578 +/*-*************************************
113579 +*  shared macros
113580 +***************************************/
113581 +#undef MIN
113582 +#undef MAX
113583 +#define MIN(a,b) ((a)<(b) ? (a) : (b))
113584 +#define MAX(a,b) ((a)>(b) ? (a) : (b))
113587 + * Ignore: this is an internal helper.
113589 + * This is a helper function to help force C99-correctness during compilation.
113590 + * Under strict compilation modes, variadic macro arguments can't be empty.
113591 + * However, variadic function arguments can be. Using a function therefore lets
113592 + * us statically check that at least one (string) argument was passed,
113593 + * independent of the compilation flags.
113594 + */
113595 +static INLINE_KEYWORD UNUSED_ATTR
113596 +void _force_has_format_string(const char *format, ...) {
113597 +  (void)format;
113601 + * Ignore: this is an internal helper.
113603 + * We want to force this function invocation to be syntactically correct, but
113604 + * we don't want to force runtime evaluation of its arguments.
113605 + */
113606 +#define _FORCE_HAS_FORMAT_STRING(...) \
113607 +  if (0) { \
113608 +    _force_has_format_string(__VA_ARGS__); \
113609 +  }
113612 + * Return the specified error if the condition evaluates to true.
113614 + * In debug modes, prints additional information.
113615 + * In order to do that (particularly, printing the conditional that failed),
113616 + * this can't just wrap RETURN_ERROR().
113617 + */
113618 +#define RETURN_ERROR_IF(cond, err, ...) \
113619 +  if (cond) { \
113620 +    RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \
113621 +           __FILE__, __LINE__, ZSTD_QUOTE(cond), ZSTD_QUOTE(ERROR(err))); \
113622 +    _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
113623 +    RAWLOG(3, ": " __VA_ARGS__); \
113624 +    RAWLOG(3, "\n"); \
113625 +    return ERROR(err); \
113626 +  }
113629 + * Unconditionally return the specified error.
113631 + * In debug modes, prints additional information.
113632 + */
113633 +#define RETURN_ERROR(err, ...) \
113634 +  do { \
113635 +    RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \
113636 +           __FILE__, __LINE__, ZSTD_QUOTE(ERROR(err))); \
113637 +    _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
113638 +    RAWLOG(3, ": " __VA_ARGS__); \
113639 +    RAWLOG(3, "\n"); \
113640 +    return ERROR(err); \
113641 +  } while(0)
113644 + * If the provided expression evaluates to an error code, returns that error code.
113646 + * In debug modes, prints additional information.
113647 + */
113648 +#define FORWARD_IF_ERROR(err, ...) \
113649 +  do { \
113650 +    size_t const err_code = (err); \
113651 +    if (ERR_isError(err_code)) { \
113652 +      RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \
113653 +             __FILE__, __LINE__, ZSTD_QUOTE(err), ERR_getErrorName(err_code)); \
113654 +      _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
113655 +      RAWLOG(3, ": " __VA_ARGS__); \
113656 +      RAWLOG(3, "\n"); \
113657 +      return err_code; \
113658 +    } \
113659 +  } while(0)
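Usage sketch (helper and message invented; dstSize_tooSmall is one of the zstd error codes from error_private.h):

static size_t copy_checked(void* dst, size_t dstCapacity,
                           const void* src, size_t srcSize)
{
    RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall,
                    "need %u bytes, have only %u",
                    (unsigned)srcSize, (unsigned)dstCapacity);
    ZSTD_memcpy(dst, src, srcSize);
    return srcSize;
}

/* a caller propagates any failure unchanged:
 *   FORWARD_IF_ERROR(copy_checked(dst, cap, src, len), "copy failed");
 */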
113662 +/*-*************************************
113663 +*  Common constants
113664 +***************************************/
113665 +#define ZSTD_OPT_NUM    (1<<12)
113667 +#define ZSTD_REP_NUM      3                 /* number of repcodes */
113668 +#define ZSTD_REP_MOVE     (ZSTD_REP_NUM-1)
113669 +static UNUSED_ATTR const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };
113671 +#define KB *(1 <<10)
113672 +#define MB *(1 <<20)
113673 +#define GB *(1U<<30)
113675 +#define BIT7 128
113676 +#define BIT6  64
113677 +#define BIT5  32
113678 +#define BIT4  16
113679 +#define BIT1   2
113680 +#define BIT0   1
113682 +#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
113683 +static UNUSED_ATTR const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
113684 +static UNUSED_ATTR const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };
113686 +#define ZSTD_FRAMEIDSIZE 4   /* magic number size */
113688 +#define ZSTD_BLOCKHEADERSIZE 3   /* C standard doesn't allow `static const` variable to be init using another `static const` variable */
113689 +static UNUSED_ATTR const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
113690 +typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
113692 +#define ZSTD_FRAMECHECKSUMSIZE 4
113694 +#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
113695 +#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */)   /* for a non-null block */
113697 +#define HufLog 12
113698 +typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;
113700 +#define LONGNBSEQ 0x7F00
113702 +#define MINMATCH 3
113704 +#define Litbits  8
113705 +#define MaxLit ((1<<Litbits) - 1)
113706 +#define MaxML   52
113707 +#define MaxLL   35
113708 +#define DefaultMaxOff 28
113709 +#define MaxOff  31
113710 +#define MaxSeq MAX(MaxLL, MaxML)   /* Assumption : MaxOff < MaxLL,MaxML */
113711 +#define MLFSELog    9
113712 +#define LLFSELog    9
113713 +#define OffFSELog   8
113714 +#define MaxFSELog  MAX(MAX(MLFSELog, LLFSELog), OffFSELog)
113716 +#define ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */
113717 +/* Each table cannot take more than #symbols * FSELog bits */
113718 +#define ZSTD_MAX_FSE_HEADERS_SIZE (((MaxML + 1) * MLFSELog + (MaxLL + 1) * LLFSELog + (MaxOff + 1) * OffFSELog + 7) / 8)
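Worked out with the constants above: ((52+1)*9 + (35+1)*9 + (31+1)*8 + 7)/8 = (477 + 324 + 256 + 7)/8 = 133, so ZSTD_MAX_FSE_HEADERS_SIZE is 133 bytes.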
113720 +static UNUSED_ATTR const U32 LL_bits[MaxLL+1] = {
113721 +     0, 0, 0, 0, 0, 0, 0, 0,
113722 +     0, 0, 0, 0, 0, 0, 0, 0,
113723 +     1, 1, 1, 1, 2, 2, 3, 3,
113724 +     4, 6, 7, 8, 9,10,11,12,
113725 +    13,14,15,16
113727 +static UNUSED_ATTR const S16 LL_defaultNorm[MaxLL+1] = {
113728 +     4, 3, 2, 2, 2, 2, 2, 2,
113729 +     2, 2, 2, 2, 2, 1, 1, 1,
113730 +     2, 2, 2, 2, 2, 2, 2, 2,
113731 +     2, 3, 2, 1, 1, 1, 1, 1,
113732 +    -1,-1,-1,-1
113734 +#define LL_DEFAULTNORMLOG 6  /* for static allocation */
113735 +static UNUSED_ATTR const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;
113737 +static UNUSED_ATTR const U32 ML_bits[MaxML+1] = {
113738 +     0, 0, 0, 0, 0, 0, 0, 0,
113739 +     0, 0, 0, 0, 0, 0, 0, 0,
113740 +     0, 0, 0, 0, 0, 0, 0, 0,
113741 +     0, 0, 0, 0, 0, 0, 0, 0,
113742 +     1, 1, 1, 1, 2, 2, 3, 3,
113743 +     4, 4, 5, 7, 8, 9,10,11,
113744 +    12,13,14,15,16
113746 +static UNUSED_ATTR const S16 ML_defaultNorm[MaxML+1] = {
113747 +     1, 4, 3, 2, 2, 2, 2, 2,
113748 +     2, 1, 1, 1, 1, 1, 1, 1,
113749 +     1, 1, 1, 1, 1, 1, 1, 1,
113750 +     1, 1, 1, 1, 1, 1, 1, 1,
113751 +     1, 1, 1, 1, 1, 1, 1, 1,
113752 +     1, 1, 1, 1, 1, 1,-1,-1,
113753 +    -1,-1,-1,-1,-1
113755 +#define ML_DEFAULTNORMLOG 6  /* for static allocation */
113756 +static UNUSED_ATTR const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
113758 +static UNUSED_ATTR const S16 OF_defaultNorm[DefaultMaxOff+1] = {
113759 +     1, 1, 1, 1, 1, 1, 2, 2,
113760 +     2, 1, 1, 1, 1, 1, 1, 1,
113761 +     1, 1, 1, 1, 1, 1, 1, 1,
113762 +    -1,-1,-1,-1,-1
113764 +#define OF_DEFAULTNORMLOG 5  /* for static allocation */
113765 +static UNUSED_ATTR const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
113768 +/*-*******************************************
113769 +*  Shared functions to include for inlining
113770 +*********************************************/
113771 +static void ZSTD_copy8(void* dst, const void* src) {
113772 +    ZSTD_memcpy(dst, src, 8);
113775 +#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
113776 +static void ZSTD_copy16(void* dst, const void* src) {
113777 +    ZSTD_memcpy(dst, src, 16);
113779 +#define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; }
113781 +#define WILDCOPY_OVERLENGTH 32
113782 +#define WILDCOPY_VECLEN 16
113784 +typedef enum {
113785 +    ZSTD_no_overlap,
113786 +    ZSTD_overlap_src_before_dst
113787 +    /*  ZSTD_overlap_dst_before_src, */
113788 +} ZSTD_overlap_e;
113790 +/*! ZSTD_wildcopy() :
113791 + *  Custom version of ZSTD_memcpy(), can over read/write up to WILDCOPY_OVERLENGTH bytes (if length==0)
113792 + *  @param ovtype controls the overlap detection
113793 + *         - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
113794 + *         - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart.
113795 + *           The src buffer must be before the dst buffer.
113796 + */
113797 +MEM_STATIC FORCE_INLINE_ATTR
113798 +void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype)
113800 +    ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
113801 +    const BYTE* ip = (const BYTE*)src;
113802 +    BYTE* op = (BYTE*)dst;
113803 +    BYTE* const oend = op + length;
113805 +    assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff <= -WILDCOPY_VECLEN));
113807 +    if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) {
113808 +        /* Handle short offset copies. */
113809 +        do {
113810 +            COPY8(op, ip)
113811 +        } while (op < oend);
113812 +    } else {
113813 +        assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN);
113814 +        /* Separate out the first COPY16() call because the copy length is
113815 +         * almost certain to be short, so the branches have different
113816 +         * probabilities. Since it is almost certain to be short, only do
113817 +         * one COPY16() in the first call. Then, do two calls per loop since
113818 +         * at that point it is more likely to have a high trip count.
113819 +         */
113820 +#ifdef __aarch64__
113821 +        do {
113822 +            COPY16(op, ip);
113823 +        }
113824 +        while (op < oend);
113825 +#else
113826 +        ZSTD_copy16(op, ip);
113827 +        if (16 >= length) return;
113828 +        op += 16;
113829 +        ip += 16;
113830 +        do {
113831 +            COPY16(op, ip);
113832 +            COPY16(op, ip);
113833 +        }
113834 +        while (op < oend);
113835 +#endif
113836 +    }
113839 +MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
113841 +    size_t const length = MIN(dstCapacity, srcSize);
113842 +    if (length > 0) {
113843 +        ZSTD_memcpy(dst, src, length);
113844 +    }
113845 +    return length;
113848 +/* define "workspace is too large" as this number of times larger than needed */
113849 +#define ZSTD_WORKSPACETOOLARGE_FACTOR 3
113851 +/* when workspace is continuously too large
113852 + * for at least this number of invocations,
113853 + * the context's memory usage is considered wasteful,
113854 + * because it is sized for a worst-case scenario which rarely happens;
113855 + * in that case, resize it down to free some memory */
113856 +#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128
113858 +/* Controls whether the input/output buffer is buffered or stable. */
113859 +typedef enum {
113860 +    ZSTD_bm_buffered = 0,  /* Buffer the input/output */
113861 +    ZSTD_bm_stable = 1     /* ZSTD_inBuffer/ZSTD_outBuffer is stable */
113862 +} ZSTD_bufferMode_e;
113865 +/*-*******************************************
113866 +*  Private declarations
113867 +*********************************************/
113868 +typedef struct seqDef_s {
113869 +    U32 offset;         /* Offset code of the sequence */
113870 +    U16 litLength;
113871 +    U16 matchLength;
113872 +} seqDef;
113874 +typedef struct {
113875 +    seqDef* sequencesStart;
113876 +    seqDef* sequences;      /* ptr to end of sequences */
113877 +    BYTE* litStart;
113878 +    BYTE* lit;              /* ptr to end of literals */
113879 +    BYTE* llCode;
113880 +    BYTE* mlCode;
113881 +    BYTE* ofCode;
113882 +    size_t maxNbSeq;
113883 +    size_t maxNbLit;
113885 +    /* longLengthPos and longLengthID to allow us to represent either a single litLength or matchLength
113886 +     * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment
113887 +     * the existing value of the litLength or matchLength by 0x10000.
113888 +     */
113889 +    U32   longLengthID;   /* 0 == no longLength; 1 == Represent the long literal; 2 == Represent the long match; */
113890 +    U32   longLengthPos;  /* Index of the sequence to apply long length modification to */
113891 +} seqStore_t;
113893 +typedef struct {
113894 +    U32 litLength;
113895 +    U32 matchLength;
113896 +} ZSTD_sequenceLength;
113899 + * Returns the ZSTD_sequenceLength for the given sequence. It handles the decoding of long sequences
113900 + * indicated by longLengthPos and longLengthID, and adds MINMATCH back to matchLength.
113901 + */
113902 +MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore, seqDef const* seq)
113904 +    ZSTD_sequenceLength seqLen;
113905 +    seqLen.litLength = seq->litLength;
113906 +    seqLen.matchLength = seq->matchLength + MINMATCH;
113907 +    if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) {
113908 +        if (seqStore->longLengthID == 1) {
113909 +            seqLen.litLength += 0xFFFF;
113910 +        }
113911 +        if (seqStore->longLengthID == 2) {
113912 +            seqLen.matchLength += 0xFFFF;
113913 +        }
113914 +    }
113915 +    return seqLen;
113919 + * Contains the compressed frame size and an upper-bound for the decompressed frame size.
113920 + * Note: before using `compressedSize`, check for errors using ZSTD_isError().
113921 + *       similarly, before using `decompressedBound`, check for errors using:
113922 + *          `decompressedBound != ZSTD_CONTENTSIZE_ERROR`
113923 + */
113924 +typedef struct {
113925 +    size_t compressedSize;
113926 +    unsigned long long decompressedBound;
113927 +} ZSTD_frameSizeInfo;   /* decompress & legacy */
113929 +const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx);   /* compress & dictBuilder */
113930 +void ZSTD_seqToCodes(const seqStore_t* seqStorePtr);   /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
113932 +/* custom memory allocation functions */
113933 +void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem);
113934 +void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem);
113935 +void ZSTD_customFree(void* ptr, ZSTD_customMem customMem);
113938 +MEM_STATIC U32 ZSTD_highbit32(U32 val)   /* compress, dictBuilder, decodeCorpus */
113940 +    assert(val != 0);
113941 +    {
113942 +#   if (__GNUC__ >= 3)   /* GCC Intrinsic */
113943 +        return __builtin_clz (val) ^ 31;
113944 +#   else   /* Software version */
113945 +        static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
113946 +        U32 v = val;
113947 +        v |= v >> 1;
113948 +        v |= v >> 2;
113949 +        v |= v >> 4;
113950 +        v |= v >> 8;
113951 +        v |= v >> 16;
113952 +        return DeBruijnClz[(v * 0x07C4ACDDU) >> 27];
113953 +#   endif
113954 +    }
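For example, ZSTD_highbit32(1) == 0, ZSTD_highbit32(32) == 5, and ZSTD_highbit32(0x80000000U) == 31: both branches compute floor(log2(val)), the intrinsic path as 31 ^ clz(val), the fallback by smearing the top bit down to form 2^(k+1)-1 and indexing the De Bruijn table.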
113958 +/* ZSTD_invalidateRepCodes() :
113959 + * ensures next compression will not use repcodes from previous block.
113960 + * Note : only works with regular variant;
113961 + *        do not use with extDict variant ! */
113962 +void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx);   /* zstdmt, adaptive_compression (shouldn't get this definition from here) */
113965 +typedef struct {
113966 +    blockType_e blockType;
113967 +    U32 lastBlock;
113968 +    U32 origSize;
113969 +} blockProperties_t;   /* declared here for decompress and fullbench */
113971 +/*! ZSTD_getcBlockSize() :
113972 + *  Provides the size of compressed block from block header `src` */
113973 +/* Used by: decompress, fullbench (does not get its definition from here) */
113974 +size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
113975 +                          blockProperties_t* bpPtr);
113977 +/*! ZSTD_decodeSeqHeaders() :
113978 + *  decode sequence header from src */
113979 +/* Used by: decompress, fullbench (does not get its definition from here) */
113980 +size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
113981 +                       const void* src, size_t srcSize);
113985 +#endif   /* ZSTD_CCOMMON_H_MODULE */
113986 diff --git a/lib/zstd/compress.c b/lib/zstd/compress.c
113987 deleted file mode 100644
113988 index b080264ed3ad..000000000000
113989 --- a/lib/zstd/compress.c
113990 +++ /dev/null
113991 @@ -1,3485 +0,0 @@
113993 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
113994 - * All rights reserved.
113996 - * This source code is licensed under the BSD-style license found in the
113997 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
113998 - * An additional grant of patent rights can be found in the PATENTS file in the
113999 - * same directory.
114001 - * This program is free software; you can redistribute it and/or modify it under
114002 - * the terms of the GNU General Public License version 2 as published by the
114003 - * Free Software Foundation. This program is dual-licensed; you may select
114004 - * either version 2 of the GNU General Public License ("GPL") or BSD license
114005 - * ("BSD").
114006 - */
114008 -/*-*************************************
114009 -*  Dependencies
114010 -***************************************/
114011 -#include "fse.h"
114012 -#include "huf.h"
114013 -#include "mem.h"
114014 -#include "zstd_internal.h" /* includes zstd.h */
114015 -#include <linux/kernel.h>
114016 -#include <linux/module.h>
114017 -#include <linux/string.h> /* memset */
114019 -/*-*************************************
114020 -*  Constants
114021 -***************************************/
114022 -static const U32 g_searchStrength = 8; /* control skip over incompressible data */
114023 -#define HASH_READ_SIZE 8
114024 -typedef enum { ZSTDcs_created = 0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
114026 -/*-*************************************
114027 -*  Helper functions
114028 -***************************************/
114029 -size_t ZSTD_compressBound(size_t srcSize) { return FSE_compressBound(srcSize) + 12; }
114031 -/*-*************************************
114032 -*  Sequence storage
114033 -***************************************/
114034 -static void ZSTD_resetSeqStore(seqStore_t *ssPtr)
114035 -{
114036 -       ssPtr->lit = ssPtr->litStart;
114037 -       ssPtr->sequences = ssPtr->sequencesStart;
114038 -       ssPtr->longLengthID = 0;
114039 -}
114041 -/*-*************************************
114042 -*  Context memory management
114043 -***************************************/
114044 -struct ZSTD_CCtx_s {
114045 -       const BYTE *nextSrc;  /* next block here to continue on curr prefix */
114046 -       const BYTE *base;     /* All regular indexes relative to this position */
114047 -       const BYTE *dictBase; /* extDict indexes relative to this position */
114048 -       U32 dictLimit;  /* below that point, need extDict */
114049 -       U32 lowLimit;    /* below that point, no more data */
114050 -       U32 nextToUpdate;     /* index from which to continue dictionary update */
114051 -       U32 nextToUpdate3;    /* index from which to continue dictionary update */
114052 -       U32 hashLog3;    /* dispatch table : larger == faster, more memory */
114053 -       U32 loadedDictEnd;    /* index of end of dictionary */
114054 -       U32 forceWindow;      /* force back-references to respect limit of 1<<wLog, even for dictionary */
114055 -       U32 forceRawDict;     /* Force loading dictionary in "content-only" mode (no header analysis) */
114056 -       ZSTD_compressionStage_e stage;
114057 -       U32 rep[ZSTD_REP_NUM];
114058 -       U32 repToConfirm[ZSTD_REP_NUM];
114059 -       U32 dictID;
114060 -       ZSTD_parameters params;
114061 -       void *workSpace;
114062 -       size_t workSpaceSize;
114063 -       size_t blockSize;
114064 -       U64 frameContentSize;
114065 -       struct xxh64_state xxhState;
114066 -       ZSTD_customMem customMem;
114068 -       seqStore_t seqStore; /* sequences storage ptrs */
114069 -       U32 *hashTable;
114070 -       U32 *hashTable3;
114071 -       U32 *chainTable;
114072 -       HUF_CElt *hufTable;
114073 -       U32 flagStaticTables;
114074 -       HUF_repeat flagStaticHufTable;
114075 -       FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
114076 -       FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
114077 -       FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
114078 -       unsigned tmpCounters[HUF_COMPRESS_WORKSPACE_SIZE_U32];
114079 -};
114081 -size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams)
114082 -{
114083 -       size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << cParams.windowLog);
114084 -       U32 const divider = (cParams.searchLength == 3) ? 3 : 4;
114085 -       size_t const maxNbSeq = blockSize / divider;
114086 -       size_t const tokenSpace = blockSize + 11 * maxNbSeq;
114087 -       size_t const chainSize = (cParams.strategy == ZSTD_fast) ? 0 : (1 << cParams.chainLog);
114088 -       size_t const hSize = ((size_t)1) << cParams.hashLog;
114089 -       U32 const hashLog3 = (cParams.searchLength > 3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog);
114090 -       size_t const h3Size = ((size_t)1) << hashLog3;
114091 -       size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
114092 -       size_t const optSpace =
114093 -           ((MaxML + 1) + (MaxLL + 1) + (MaxOff + 1) + (1 << Litbits)) * sizeof(U32) + (ZSTD_OPT_NUM + 1) * (sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
114094 -       size_t const workspaceSize = tableSpace + (256 * sizeof(U32)) /* huffTable */ + tokenSpace +
114095 -                                    (((cParams.strategy == ZSTD_btopt) || (cParams.strategy == ZSTD_btopt2)) ? optSpace : 0);
114097 -       return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_CCtx)) + ZSTD_ALIGN(workspaceSize);
114098 -}
114100 -static ZSTD_CCtx *ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
114101 -{
114102 -       ZSTD_CCtx *cctx;
114103 -       if (!customMem.customAlloc || !customMem.customFree)
114104 -               return NULL;
114105 -       cctx = (ZSTD_CCtx *)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem);
114106 -       if (!cctx)
114107 -               return NULL;
114108 -       memset(cctx, 0, sizeof(ZSTD_CCtx));
114109 -       cctx->customMem = customMem;
114110 -       return cctx;
114111 -}
114113 -ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize)
114114 -{
114115 -       ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
114116 -       ZSTD_CCtx *cctx = ZSTD_createCCtx_advanced(stackMem);
114117 -       if (cctx) {
114118 -               cctx->workSpace = ZSTD_stackAllocAll(cctx->customMem.opaque, &cctx->workSpaceSize);
114119 -       }
114120 -       return cctx;
114121 -}
114123 -size_t ZSTD_freeCCtx(ZSTD_CCtx *cctx)
114124 -{
114125 -       if (cctx == NULL)
114126 -               return 0; /* support free on NULL */
114127 -       ZSTD_free(cctx->workSpace, cctx->customMem);
114128 -       ZSTD_free(cctx, cctx->customMem);
114129 -       return 0; /* reserved as a potential error code in the future */
114130 -}
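Together, the create/init/free trio above implements the workspace-based lifecycle the old in-kernel API exposed: the caller sizes a workspace, hands it to ZSTD_initCCtx(), and compresses one-shot. A sketch of such a caller, assuming the pre-5.16 include/linux/zstd.h declarations (ZSTD_getParams, ZSTD_CCtxWorkspaceBound, ZSTD_compressCCtx); the exact signatures are from memory and should be treated as illustrative:

    #include <linux/vmalloc.h>
    #include <linux/zstd.h>

    static size_t demo_compress(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
    {
            ZSTD_parameters const params = ZSTD_getParams(3, srcSize, 0);
            size_t const wkspSize = ZSTD_CCtxWorkspaceBound(params.cParams);
            void *wksp = vmalloc(wkspSize);
            size_t cSize = 0;

            if (wksp) {
                    ZSTD_CCtx *cctx = ZSTD_initCCtx(wksp, wkspSize); /* no allocation inside */
                    if (cctx)
                            cSize = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, params);
                    vfree(wksp);
            }
            return cSize; /* 0 on allocation failure, else a size or ZSTD error code */
    }

Because ZSTD_initCCtx() carves the context out of the caller's workspace, freeing the workspace releases everything at once; ZSTD_freeCCtx() only matters on the customMem allocation path.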
114132 -const seqStore_t *ZSTD_getSeqStore(const ZSTD_CCtx *ctx) /* hidden interface */ { return &(ctx->seqStore); }
114134 -static ZSTD_parameters ZSTD_getParamsFromCCtx(const ZSTD_CCtx *cctx) { return cctx->params; }
114136 -/** ZSTD_checkParams() :
114137 -       ensure param values remain within authorized range.
114138 -       @return : 0, or an error code if one value is beyond authorized range */
114139 -size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
114140 -{
114141 -#define CLAMPCHECK(val, min, max)                                       \
114142 -       {                                                               \
114143 -               if ((val < min) | (val > max))                          \
114144 -                       return ERROR(compressionParameter_unsupported); \
114145 -       }
114146 -       CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
114147 -       CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
114148 -       CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
114149 -       CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
114150 -       CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
114151 -       CLAMPCHECK(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX);
114152 -       if ((U32)(cParams.strategy) > (U32)ZSTD_btopt2)
114153 -               return ERROR(compressionParameter_unsupported);
114154 -       return 0;
114155 -}
114157 -/** ZSTD_cycleLog() :
114158 - *  condition for correct operation : hashLog > 1 */
114159 -static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
114160 -{
114161 -       U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
114162 -       return hashLog - btScale;
114163 -}
114165 -/** ZSTD_adjustCParams() :
114166 -       optimize `cPar` for a given input (`srcSize` and `dictSize`).
114167 -       mostly downsizing to reduce memory consumption and initialization.
114168 -       Both `srcSize` and `dictSize` are optional (use 0 if unknown),
114169 -       but if both are 0, no optimization can be done.
114170 -       Note : cPar is considered validated at this stage. Use ZSTD_checkParams() to ensure that. */
114171 -ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
114172 -{
114173 -       if (srcSize + dictSize == 0)
114174 -               return cPar; /* no size information available : no adjustment */
114176 -       /* resize params, to use less memory when necessary */
114177 -       {
114178 -               U32 const minSrcSize = (srcSize == 0) ? 500 : 0;
114179 -               U64 const rSize = srcSize + dictSize + minSrcSize;
114180 -               if (rSize < ((U64)1 << ZSTD_WINDOWLOG_MAX)) {
114181 -                       U32 const srcLog = MAX(ZSTD_HASHLOG_MIN, ZSTD_highbit32((U32)(rSize)-1) + 1);
114182 -                       if (cPar.windowLog > srcLog)
114183 -                               cPar.windowLog = srcLog;
114184 -               }
114185 -       }
114186 -       if (cPar.hashLog > cPar.windowLog)
114187 -               cPar.hashLog = cPar.windowLog;
114188 -       {
114189 -               U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
114190 -               if (cycleLog > cPar.windowLog)
114191 -                       cPar.chainLog -= (cycleLog - cPar.windowLog);
114192 -       }
114194 -       if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
114195 -               cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* required for frame header */
114197 -       return cPar;
114198 -}
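To make the downsizing concrete: compressing a hypothetical 16 KiB input with no dictionary gives rSize = 16384, so srcLog = ZSTD_highbit32(16383) + 1 = 13 + 1 = 14, and any level whose default windowLog is larger gets clamped to 14 (hashLog and chainLog then shrink via the checks that follow). The same arithmetic, standalone:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long const rSize = 16384;   /* 16 KiB src, no dict */
            unsigned srcLog = 0;
            for (unsigned long long v = rSize - 1; v; v >>= 1)
                    srcLog++;                         /* == ZSTD_highbit32(rSize - 1) + 1 */
            printf("windowLog clamped to %u\n", srcLog);   /* prints 14 */
            return 0;
    }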
114200 -static U32 ZSTD_equivalentParams(ZSTD_parameters param1, ZSTD_parameters param2)
114201 -{
114202 -       return (param1.cParams.hashLog == param2.cParams.hashLog) & (param1.cParams.chainLog == param2.cParams.chainLog) &
114203 -              (param1.cParams.strategy == param2.cParams.strategy) & ((param1.cParams.searchLength == 3) == (param2.cParams.searchLength == 3));
114204 -}
114206 -/*! ZSTD_continueCCtx() :
114207 -       reuse CCtx without reset (note : requires no dictionary) */
114208 -static size_t ZSTD_continueCCtx(ZSTD_CCtx *cctx, ZSTD_parameters params, U64 frameContentSize)
114209 -{
114210 -       U32 const end = (U32)(cctx->nextSrc - cctx->base);
114211 -       cctx->params = params;
114212 -       cctx->frameContentSize = frameContentSize;
114213 -       cctx->lowLimit = end;
114214 -       cctx->dictLimit = end;
114215 -       cctx->nextToUpdate = end + 1;
114216 -       cctx->stage = ZSTDcs_init;
114217 -       cctx->dictID = 0;
114218 -       cctx->loadedDictEnd = 0;
114219 -       {
114220 -               int i;
114221 -               for (i = 0; i < ZSTD_REP_NUM; i++)
114222 -                       cctx->rep[i] = repStartValue[i];
114223 -       }
114224 -       cctx->seqStore.litLengthSum = 0; /* force reset of btopt stats */
114225 -       xxh64_reset(&cctx->xxhState, 0);
114226 -       return 0;
114227 -}
114229 -typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset, ZSTDcrp_fullReset } ZSTD_compResetPolicy_e;
114231 -/*! ZSTD_resetCCtx_advanced() :
114232 -       note : `params` must be validated */
114233 -static size_t ZSTD_resetCCtx_advanced(ZSTD_CCtx *zc, ZSTD_parameters params, U64 frameContentSize, ZSTD_compResetPolicy_e const crp)
114234 -{
114235 -       if (crp == ZSTDcrp_continue)
114236 -               if (ZSTD_equivalentParams(params, zc->params)) {
114237 -                       zc->flagStaticTables = 0;
114238 -                       zc->flagStaticHufTable = HUF_repeat_none;
114239 -                       return ZSTD_continueCCtx(zc, params, frameContentSize);
114240 -               }
114242 -       {
114243 -               size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog);
114244 -               U32 const divider = (params.cParams.searchLength == 3) ? 3 : 4;
114245 -               size_t const maxNbSeq = blockSize / divider;
114246 -               size_t const tokenSpace = blockSize + 11 * maxNbSeq;
114247 -               size_t const chainSize = (params.cParams.strategy == ZSTD_fast) ? 0 : (1 << params.cParams.chainLog);
114248 -               size_t const hSize = ((size_t)1) << params.cParams.hashLog;
114249 -               U32 const hashLog3 = (params.cParams.searchLength > 3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, params.cParams.windowLog);
114250 -               size_t const h3Size = ((size_t)1) << hashLog3;
114251 -               size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
114252 -               void *ptr;
114254 -               /* Check if workSpace is large enough, alloc a new one if needed */
114255 -               {
114256 -                       size_t const optSpace = ((MaxML + 1) + (MaxLL + 1) + (MaxOff + 1) + (1 << Litbits)) * sizeof(U32) +
114257 -                                               (ZSTD_OPT_NUM + 1) * (sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
114258 -                       size_t const neededSpace = tableSpace + (256 * sizeof(U32)) /* huffTable */ + tokenSpace +
114259 -                                                  (((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) ? optSpace : 0);
114260 -                       if (zc->workSpaceSize < neededSpace) {
114261 -                               ZSTD_free(zc->workSpace, zc->customMem);
114262 -                               zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
114263 -                               if (zc->workSpace == NULL)
114264 -                                       return ERROR(memory_allocation);
114265 -                               zc->workSpaceSize = neededSpace;
114266 -                       }
114267 -               }
114269 -               if (crp != ZSTDcrp_noMemset)
114270 -                       memset(zc->workSpace, 0, tableSpace); /* reset tables only */
114271 -               xxh64_reset(&zc->xxhState, 0);
114272 -               zc->hashLog3 = hashLog3;
114273 -               zc->hashTable = (U32 *)(zc->workSpace);
114274 -               zc->chainTable = zc->hashTable + hSize;
114275 -               zc->hashTable3 = zc->chainTable + chainSize;
114276 -               ptr = zc->hashTable3 + h3Size;
114277 -               zc->hufTable = (HUF_CElt *)ptr;
114278 -               zc->flagStaticTables = 0;
114279 -               zc->flagStaticHufTable = HUF_repeat_none;
114280 -               ptr = ((U32 *)ptr) + 256; /* note : HUF_CElt* is incomplete type, size is simulated using U32 */
114282 -               zc->nextToUpdate = 1;
114283 -               zc->nextSrc = NULL;
114284 -               zc->base = NULL;
114285 -               zc->dictBase = NULL;
114286 -               zc->dictLimit = 0;
114287 -               zc->lowLimit = 0;
114288 -               zc->params = params;
114289 -               zc->blockSize = blockSize;
114290 -               zc->frameContentSize = frameContentSize;
114291 -               {
114292 -                       int i;
114293 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
114294 -                               zc->rep[i] = repStartValue[i];
114295 -               }
114297 -               if ((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) {
114298 -                       zc->seqStore.litFreq = (U32 *)ptr;
114299 -                       zc->seqStore.litLengthFreq = zc->seqStore.litFreq + (1 << Litbits);
114300 -                       zc->seqStore.matchLengthFreq = zc->seqStore.litLengthFreq + (MaxLL + 1);
114301 -                       zc->seqStore.offCodeFreq = zc->seqStore.matchLengthFreq + (MaxML + 1);
114302 -                       ptr = zc->seqStore.offCodeFreq + (MaxOff + 1);
114303 -                       zc->seqStore.matchTable = (ZSTD_match_t *)ptr;
114304 -                       ptr = zc->seqStore.matchTable + ZSTD_OPT_NUM + 1;
114305 -                       zc->seqStore.priceTable = (ZSTD_optimal_t *)ptr;
114306 -                       ptr = zc->seqStore.priceTable + ZSTD_OPT_NUM + 1;
114307 -                       zc->seqStore.litLengthSum = 0;
114308 -               }
114309 -               zc->seqStore.sequencesStart = (seqDef *)ptr;
114310 -               ptr = zc->seqStore.sequencesStart + maxNbSeq;
114311 -               zc->seqStore.llCode = (BYTE *)ptr;
114312 -               zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
114313 -               zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
114314 -               zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
114316 -               zc->stage = ZSTDcs_init;
114317 -               zc->dictID = 0;
114318 -               zc->loadedDictEnd = 0;
114320 -               return 0;
114321 -       }
114322 -}
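The reset path above never allocates per table: it carves the hash table, chain table, 3-byte hash, Huffman table, optional btopt statistics, and the seqStore arrays out of one workspace by bumping a cursor, so reuse with equivalent parameters costs only a memset. A simplified sketch of the same carving pattern, with made-up sizes:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            size_t const hSize = 1 << 12, chainSize = 1 << 13, h3Size = 1 << 10;
            uint32_t *wksp = calloc(hSize + chainSize + h3Size, sizeof(uint32_t));
            if (!wksp)
                    return 1;
            {
                    uint32_t *hashTable  = wksp;                   /* slice [0, hSize) */
                    uint32_t *chainTable = hashTable + hSize;      /* next slice */
                    uint32_t *hashTable3 = chainTable + chainSize; /* and the next */
                    printf("slices start at +%zu, +%zu, +%zu u32s\n",
                           (size_t)(hashTable - wksp), (size_t)(chainTable - wksp),
                           (size_t)(hashTable3 - wksp));
            }
            free(wksp);
            return 0;
    }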
114324 -/* ZSTD_invalidateRepCodes() :
114325 - * ensures next compression will not use repcodes from previous block.
114326 - * Note : only works with regular variant;
114327 - *        do not use with extDict variant ! */
114328 -void ZSTD_invalidateRepCodes(ZSTD_CCtx *cctx)
114329 -{
114330 -       int i;
114331 -       for (i = 0; i < ZSTD_REP_NUM; i++)
114332 -               cctx->rep[i] = 0;
114333 -}
114335 -/*! ZSTD_copyCCtx() :
114336 -*   Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
114337 -*   Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
114338 -*   @return : 0, or an error code */
114339 -size_t ZSTD_copyCCtx(ZSTD_CCtx *dstCCtx, const ZSTD_CCtx *srcCCtx, unsigned long long pledgedSrcSize)
114340 -{
114341 -       if (srcCCtx->stage != ZSTDcs_init)
114342 -               return ERROR(stage_wrong);
114344 -       memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
114345 -       {
114346 -               ZSTD_parameters params = srcCCtx->params;
114347 -               params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
114348 -               ZSTD_resetCCtx_advanced(dstCCtx, params, pledgedSrcSize, ZSTDcrp_noMemset);
114349 -       }
114351 -       /* copy tables */
114352 -       {
114353 -               size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->params.cParams.chainLog);
114354 -               size_t const hSize = ((size_t)1) << srcCCtx->params.cParams.hashLog;
114355 -               size_t const h3Size = (size_t)1 << srcCCtx->hashLog3;
114356 -               size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
114357 -               memcpy(dstCCtx->workSpace, srcCCtx->workSpace, tableSpace);
114358 -       }
114360 -       /* copy dictionary offsets */
114361 -       dstCCtx->nextToUpdate = srcCCtx->nextToUpdate;
114362 -       dstCCtx->nextToUpdate3 = srcCCtx->nextToUpdate3;
114363 -       dstCCtx->nextSrc = srcCCtx->nextSrc;
114364 -       dstCCtx->base = srcCCtx->base;
114365 -       dstCCtx->dictBase = srcCCtx->dictBase;
114366 -       dstCCtx->dictLimit = srcCCtx->dictLimit;
114367 -       dstCCtx->lowLimit = srcCCtx->lowLimit;
114368 -       dstCCtx->loadedDictEnd = srcCCtx->loadedDictEnd;
114369 -       dstCCtx->dictID = srcCCtx->dictID;
114371 -       /* copy entropy tables */
114372 -       dstCCtx->flagStaticTables = srcCCtx->flagStaticTables;
114373 -       dstCCtx->flagStaticHufTable = srcCCtx->flagStaticHufTable;
114374 -       if (srcCCtx->flagStaticTables) {
114375 -               memcpy(dstCCtx->litlengthCTable, srcCCtx->litlengthCTable, sizeof(dstCCtx->litlengthCTable));
114376 -               memcpy(dstCCtx->matchlengthCTable, srcCCtx->matchlengthCTable, sizeof(dstCCtx->matchlengthCTable));
114377 -               memcpy(dstCCtx->offcodeCTable, srcCCtx->offcodeCTable, sizeof(dstCCtx->offcodeCTable));
114378 -       }
114379 -       if (srcCCtx->flagStaticHufTable) {
114380 -               memcpy(dstCCtx->hufTable, srcCCtx->hufTable, 256 * 4);
114381 -       }
114383 -       return 0;
114384 -}
114386 -/*! ZSTD_reduceTable() :
114387 -*   reduce table indexes by `reducerValue` */
114388 -static void ZSTD_reduceTable(U32 *const table, U32 const size, U32 const reducerValue)
114389 -{
114390 -       U32 u;
114391 -       for (u = 0; u < size; u++) {
114392 -               if (table[u] < reducerValue)
114393 -                       table[u] = 0;
114394 -               else
114395 -                       table[u] -= reducerValue;
114396 -       }
114397 -}
114399 -/*! ZSTD_reduceIndex() :
114400 -*   rescale all indexes to avoid future overflow (indexes are U32) */
114401 -static void ZSTD_reduceIndex(ZSTD_CCtx *zc, const U32 reducerValue)
114402 -{
114403 -       {
114404 -               U32 const hSize = 1 << zc->params.cParams.hashLog;
114405 -               ZSTD_reduceTable(zc->hashTable, hSize, reducerValue);
114406 -       }
114408 -       {
114409 -               U32 const chainSize = (zc->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << zc->params.cParams.chainLog);
114410 -               ZSTD_reduceTable(zc->chainTable, chainSize, reducerValue);
114411 -       }
114413 -       {
114414 -               U32 const h3Size = (zc->hashLog3) ? 1 << zc->hashLog3 : 0;
114415 -               ZSTD_reduceTable(zc->hashTable3, h3Size, reducerValue);
114416 -       }
114417 -}
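A standalone sketch of the rebasing above: all three tables hold U32 positions relative to `base`, so before a position could wrap, every entry is lowered by a shared reducerValue (entries older than the reducer clamp to 0, i.e. "no usable match") while the caller advances `base` by the same amount:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t table[4] = { 100, 5000, 7, 4294963200u };  /* hypothetical entries */
            uint32_t const reducer = 4096;
            for (unsigned u = 0; u < 4; u++)
                    table[u] = (table[u] < reducer) ? 0 : table[u] - reducer;
            printf("%u %u %u %u\n", table[0], table[1], table[2], table[3]);
            return 0;   /* first and third entries clamp to 0 */
    }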
114419 -/*-*******************************************************
114420 -*  Block entropic compression
114421 -*********************************************************/
114423 -/* See doc/zstd_compression_format.md for detailed format description */
114425 -size_t ZSTD_noCompressBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
114426 -{
114427 -       if (srcSize + ZSTD_blockHeaderSize > dstCapacity)
114428 -               return ERROR(dstSize_tooSmall);
114429 -       memcpy((BYTE *)dst + ZSTD_blockHeaderSize, src, srcSize);
114430 -       ZSTD_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw);
114431 -       return ZSTD_blockHeaderSize + srcSize;
114432 -}
114434 -static size_t ZSTD_noCompressLiterals(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
114435 -{
114436 -       BYTE *const ostart = (BYTE * const)dst;
114437 -       U32 const flSize = 1 + (srcSize > 31) + (srcSize > 4095);
114439 -       if (srcSize + flSize > dstCapacity)
114440 -               return ERROR(dstSize_tooSmall);
114442 -       switch (flSize) {
114443 -       case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_basic + (srcSize << 3)); break;
114444 -       case 2: /* 2 - 2 - 12 */ ZSTD_writeLE16(ostart, (U16)((U32)set_basic + (1 << 2) + (srcSize << 4))); break;
114445 -       default: /*note : should not be necessary : flSize is within {1,2,3} */
114446 -       case 3: /* 2 - 2 - 20 */ ZSTD_writeLE32(ostart, (U32)((U32)set_basic + (3 << 2) + (srcSize << 4))); break;
114447 -       }
114449 -       memcpy(ostart + flSize, src, srcSize);
114450 -       return srcSize + flSize;
114451 -}
114453 -static size_t ZSTD_compressRleLiteralsBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
114454 -{
114455 -       BYTE *const ostart = (BYTE * const)dst;
114456 -       U32 const flSize = 1 + (srcSize > 31) + (srcSize > 4095);
114458 -       (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */
114460 -       switch (flSize) {
114461 -       case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_rle + (srcSize << 3)); break;
114462 -       case 2: /* 2 - 2 - 12 */ ZSTD_writeLE16(ostart, (U16)((U32)set_rle + (1 << 2) + (srcSize << 4))); break;
114463 -       default: /*note : should not be necessary : flSize is necessarily within {1,2,3} */
114464 -       case 3: /* 2 - 2 - 20 */ ZSTD_writeLE32(ostart, (U32)((U32)set_rle + (3 << 2) + (srcSize << 4))); break;
114465 -       }
114467 -       ostart[flSize] = *(const BYTE *)src;
114468 -       return flSize + 1;
114469 -}
114471 -static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; }
114473 -static size_t ZSTD_compressLiterals(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
114474 -{
114475 -       size_t const minGain = ZSTD_minGain(srcSize);
114476 -       size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
114477 -       BYTE *const ostart = (BYTE *)dst;
114478 -       U32 singleStream = srcSize < 256;
114479 -       symbolEncodingType_e hType = set_compressed;
114480 -       size_t cLitSize;
114482 -/* small ? don't even attempt compression (speed opt) */
114483 -#define LITERAL_NOENTROPY 63
114484 -       {
114485 -               size_t const minLitSize = zc->flagStaticHufTable == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY;
114486 -               if (srcSize <= minLitSize)
114487 -                       return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
114488 -       }
114490 -       if (dstCapacity < lhSize + 1)
114491 -               return ERROR(dstSize_tooSmall); /* not enough space for compression */
114492 -       {
114493 -               HUF_repeat repeat = zc->flagStaticHufTable;
114494 -               int const preferRepeat = zc->params.cParams.strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
114495 -               if (repeat == HUF_repeat_valid && lhSize == 3)
114496 -                       singleStream = 1;
114497 -               cLitSize = singleStream ? HUF_compress1X_repeat(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, zc->tmpCounters,
114498 -                                                               sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat)
114499 -                                       : HUF_compress4X_repeat(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, zc->tmpCounters,
114500 -                                                               sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat);
114501 -               if (repeat != HUF_repeat_none) {
114502 -                       hType = set_repeat;
114503 -               } /* reused the existing table */
114504 -               else {
114505 -                       zc->flagStaticHufTable = HUF_repeat_check;
114506 -               } /* now have a table to reuse */
114507 -       }
114509 -       if ((cLitSize == 0) | (cLitSize >= srcSize - minGain)) {
114510 -               zc->flagStaticHufTable = HUF_repeat_none;
114511 -               return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
114512 -       }
114513 -       if (cLitSize == 1) {
114514 -               zc->flagStaticHufTable = HUF_repeat_none;
114515 -               return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
114516 -       }
114518 -       /* Build header */
114519 -       switch (lhSize) {
114520 -       case 3: /* 2 - 2 - 10 - 10 */
114521 -       {
114522 -               U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 14);
114523 -               ZSTD_writeLE24(ostart, lhc);
114524 -               break;
114525 -       }
114526 -       case 4: /* 2 - 2 - 14 - 14 */
114527 -       {
114528 -               U32 const lhc = hType + (2 << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 18);
114529 -               ZSTD_writeLE32(ostart, lhc);
114530 -               break;
114531 -       }
114532 -       default: /* should not be necessary, lhSize is only {3,4,5} */
114533 -       case 5:  /* 2 - 2 - 18 - 18 */
114534 -       {
114535 -               U32 const lhc = hType + (3 << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 22);
114536 -               ZSTD_writeLE32(ostart, lhc);
114537 -               ostart[4] = (BYTE)(cLitSize >> 10);
114538 -               break;
114539 -       }
114540 -       }
114541 -       return lhSize + cLitSize;
114542 -}
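A sketch of the lhSize == 3 literals header assembled above: 2 bits of symbolEncodingType_e, the !singleStream bit inside the 2-bit size-format field, then 10 bits each of regenerated and compressed size, written little-endian across 3 bytes. The sizes below are made up:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t const hType = 2;                      /* set_compressed */
            uint32_t const srcSize = 600, cLitSize = 300;  /* both < 1024: fit in 10 bits */
            uint32_t const singleStream = 0;
            uint32_t const lhc = hType + ((!singleStream) << 2) + (srcSize << 4) + (cLitSize << 14);
            printf("3-byte header = 0x%06x\n", (unsigned)lhc);  /* low 24 bits go to dst */
            return 0;
    }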
114544 -static const BYTE LL_Code[64] = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 16, 17, 17, 18, 18,
114545 -                                19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23,
114546 -                                23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24};
114548 -static const BYTE ML_Code[128] = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
114549 -                                 26, 27, 28, 29, 30, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38, 38, 38,
114550 -                                 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
114551 -                                 40, 40, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 42, 42, 42, 42, 42, 42, 42, 42,
114552 -                                 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42};
114554 -void ZSTD_seqToCodes(const seqStore_t *seqStorePtr)
114555 -{
114556 -       BYTE const LL_deltaCode = 19;
114557 -       BYTE const ML_deltaCode = 36;
114558 -       const seqDef *const sequences = seqStorePtr->sequencesStart;
114559 -       BYTE *const llCodeTable = seqStorePtr->llCode;
114560 -       BYTE *const ofCodeTable = seqStorePtr->ofCode;
114561 -       BYTE *const mlCodeTable = seqStorePtr->mlCode;
114562 -       U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
114563 -       U32 u;
114564 -       for (u = 0; u < nbSeq; u++) {
114565 -               U32 const llv = sequences[u].litLength;
114566 -               U32 const mlv = sequences[u].matchLength;
114567 -               llCodeTable[u] = (llv > 63) ? (BYTE)ZSTD_highbit32(llv) + LL_deltaCode : LL_Code[llv];
114568 -               ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
114569 -               mlCodeTable[u] = (mlv > 127) ? (BYTE)ZSTD_highbit32(mlv) + ML_deltaCode : ML_Code[mlv];
114570 -       }
114571 -       if (seqStorePtr->longLengthID == 1)
114572 -               llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
114573 -       if (seqStorePtr->longLengthID == 2)
114574 -               mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
114575 -}
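The table/computed split above is consistent at its boundaries: LL_Code[] covers literal lengths 0-63 and ends in code 24, while longer lengths take ZSTD_highbit32(llv) + 19, so llv = 64 yields 6 + 19 = 25, the next code up; match lengths behave the same way around ML_Code[127] == 42. A tiny self-check with a local highbit helper:

    #include <assert.h>
    #include <stdio.h>

    static unsigned highbit(unsigned v) { unsigned n = 0; while (v >>= 1) n++; return n; }

    int main(void)
    {
            assert(highbit(64) + 19 == 25);   /* first computed LL code after LL_Code[63] == 24 */
            assert(highbit(128) + 36 == 43);  /* first computed ML code after ML_Code[127] == 42 */
            printf("length-code boundaries consistent\n");
            return 0;
    }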
114577 -ZSTD_STATIC size_t ZSTD_compressSequences_internal(ZSTD_CCtx *zc, void *dst, size_t dstCapacity)
114578 -{
114579 -       const int longOffsets = zc->params.cParams.windowLog > STREAM_ACCUMULATOR_MIN;
114580 -       const seqStore_t *seqStorePtr = &(zc->seqStore);
114581 -       FSE_CTable *CTable_LitLength = zc->litlengthCTable;
114582 -       FSE_CTable *CTable_OffsetBits = zc->offcodeCTable;
114583 -       FSE_CTable *CTable_MatchLength = zc->matchlengthCTable;
114584 -       U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */
114585 -       const seqDef *const sequences = seqStorePtr->sequencesStart;
114586 -       const BYTE *const ofCodeTable = seqStorePtr->ofCode;
114587 -       const BYTE *const llCodeTable = seqStorePtr->llCode;
114588 -       const BYTE *const mlCodeTable = seqStorePtr->mlCode;
114589 -       BYTE *const ostart = (BYTE *)dst;
114590 -       BYTE *const oend = ostart + dstCapacity;
114591 -       BYTE *op = ostart;
114592 -       size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
114593 -       BYTE *seqHead;
114595 -       U32 *count;
114596 -       S16 *norm;
114597 -       U32 *workspace;
114598 -       size_t workspaceSize = sizeof(zc->tmpCounters);
114599 -       {
114600 -               size_t spaceUsed32 = 0;
114601 -               count = (U32 *)zc->tmpCounters + spaceUsed32;
114602 -               spaceUsed32 += MaxSeq + 1;
114603 -               norm = (S16 *)((U32 *)zc->tmpCounters + spaceUsed32);
114604 -               spaceUsed32 += ALIGN(sizeof(S16) * (MaxSeq + 1), sizeof(U32)) >> 2;
114606 -               workspace = (U32 *)zc->tmpCounters + spaceUsed32;
114607 -               workspaceSize -= (spaceUsed32 << 2);
114608 -       }
114610 -       /* Compress literals */
114611 -       {
114612 -               const BYTE *const literals = seqStorePtr->litStart;
114613 -               size_t const litSize = seqStorePtr->lit - literals;
114614 -               size_t const cSize = ZSTD_compressLiterals(zc, op, dstCapacity, literals, litSize);
114615 -               if (ZSTD_isError(cSize))
114616 -                       return cSize;
114617 -               op += cSize;
114618 -       }
114620 -       /* Sequences Header */
114621 -       if ((oend - op) < 3 /*max nbSeq Size*/ + 1 /*seqHead */)
114622 -               return ERROR(dstSize_tooSmall);
114623 -       if (nbSeq < 0x7F)
114624 -               *op++ = (BYTE)nbSeq;
114625 -       else if (nbSeq < LONGNBSEQ)
114626 -               op[0] = (BYTE)((nbSeq >> 8) + 0x80), op[1] = (BYTE)nbSeq, op += 2;
114627 -       else
114628 -               op[0] = 0xFF, ZSTD_writeLE16(op + 1, (U16)(nbSeq - LONGNBSEQ)), op += 3;
114629 -       if (nbSeq == 0)
114630 -               return op - ostart;
114632 -       /* seqHead : flags for FSE encoding type */
114633 -       seqHead = op++;
114635 -#define MIN_SEQ_FOR_DYNAMIC_FSE 64
114636 -#define MAX_SEQ_FOR_STATIC_FSE 1000
114638 -       /* convert length/distances into codes */
114639 -       ZSTD_seqToCodes(seqStorePtr);
114641 -       /* CTable for Literal Lengths */
114642 -       {
114643 -               U32 max = MaxLL;
114644 -               size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace);
114645 -               if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
114646 -                       *op++ = llCodeTable[0];
114647 -                       FSE_buildCTable_rle(CTable_LitLength, (BYTE)max);
114648 -                       LLtype = set_rle;
114649 -               } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
114650 -                       LLtype = set_repeat;
114651 -               } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (LL_defaultNormLog - 1)))) {
114652 -                       FSE_buildCTable_wksp(CTable_LitLength, LL_defaultNorm, MaxLL, LL_defaultNormLog, workspace, workspaceSize);
114653 -                       LLtype = set_basic;
114654 -               } else {
114655 -                       size_t nbSeq_1 = nbSeq;
114656 -                       const U32 tableLog = FSE_optimalTableLog(LLFSELog, nbSeq, max);
114657 -                       if (count[llCodeTable[nbSeq - 1]] > 1) {
114658 -                               count[llCodeTable[nbSeq - 1]]--;
114659 -                               nbSeq_1--;
114660 -                       }
114661 -                       FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
114662 -                       {
114663 -                               size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
114664 -                               if (FSE_isError(NCountSize))
114665 -                                       return NCountSize;
114666 -                               op += NCountSize;
114667 -                       }
114668 -                       FSE_buildCTable_wksp(CTable_LitLength, norm, max, tableLog, workspace, workspaceSize);
114669 -                       LLtype = set_compressed;
114670 -               }
114671 -       }
114673 -       /* CTable for Offsets */
114674 -       {
114675 -               U32 max = MaxOff;
114676 -               size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace);
114677 -               if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
114678 -                       *op++ = ofCodeTable[0];
114679 -                       FSE_buildCTable_rle(CTable_OffsetBits, (BYTE)max);
114680 -                       Offtype = set_rle;
114681 -               } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
114682 -                       Offtype = set_repeat;
114683 -               } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (OF_defaultNormLog - 1)))) {
114684 -                       FSE_buildCTable_wksp(CTable_OffsetBits, OF_defaultNorm, MaxOff, OF_defaultNormLog, workspace, workspaceSize);
114685 -                       Offtype = set_basic;
114686 -               } else {
114687 -                       size_t nbSeq_1 = nbSeq;
114688 -                       const U32 tableLog = FSE_optimalTableLog(OffFSELog, nbSeq, max);
114689 -                       if (count[ofCodeTable[nbSeq - 1]] > 1) {
114690 -                               count[ofCodeTable[nbSeq - 1]]--;
114691 -                               nbSeq_1--;
114692 -                       }
114693 -                       FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
114694 -                       {
114695 -                               size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
114696 -                               if (FSE_isError(NCountSize))
114697 -                                       return NCountSize;
114698 -                               op += NCountSize;
114699 -                       }
114700 -                       FSE_buildCTable_wksp(CTable_OffsetBits, norm, max, tableLog, workspace, workspaceSize);
114701 -                       Offtype = set_compressed;
114702 -               }
114703 -       }
114705 -       /* CTable for MatchLengths */
114706 -       {
114707 -               U32 max = MaxML;
114708 -               size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace);
114709 -               if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
114710 -                       *op++ = *mlCodeTable;
114711 -                       FSE_buildCTable_rle(CTable_MatchLength, (BYTE)max);
114712 -                       MLtype = set_rle;
114713 -               } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
114714 -                       MLtype = set_repeat;
114715 -               } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (ML_defaultNormLog - 1)))) {
114716 -                       FSE_buildCTable_wksp(CTable_MatchLength, ML_defaultNorm, MaxML, ML_defaultNormLog, workspace, workspaceSize);
114717 -                       MLtype = set_basic;
114718 -               } else {
114719 -                       size_t nbSeq_1 = nbSeq;
114720 -                       const U32 tableLog = FSE_optimalTableLog(MLFSELog, nbSeq, max);
114721 -                       if (count[mlCodeTable[nbSeq - 1]] > 1) {
114722 -                               count[mlCodeTable[nbSeq - 1]]--;
114723 -                               nbSeq_1--;
114724 -                       }
114725 -                       FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
114726 -                       {
114727 -                               size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
114728 -                               if (FSE_isError(NCountSize))
114729 -                                       return NCountSize;
114730 -                               op += NCountSize;
114731 -                       }
114732 -                       FSE_buildCTable_wksp(CTable_MatchLength, norm, max, tableLog, workspace, workspaceSize);
114733 -                       MLtype = set_compressed;
114734 -               }
114735 -       }
114737 -       *seqHead = (BYTE)((LLtype << 6) + (Offtype << 4) + (MLtype << 2));
114738 -       zc->flagStaticTables = 0;
114740 -       /* Encoding Sequences */
114741 -       {
114742 -               BIT_CStream_t blockStream;
114743 -               FSE_CState_t stateMatchLength;
114744 -               FSE_CState_t stateOffsetBits;
114745 -               FSE_CState_t stateLitLength;
114747 -               CHECK_E(BIT_initCStream(&blockStream, op, oend - op), dstSize_tooSmall); /* not enough space remaining */
114749 -               /* first symbols */
114750 -               FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq - 1]);
114751 -               FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq - 1]);
114752 -               FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq - 1]);
114753 -               BIT_addBits(&blockStream, sequences[nbSeq - 1].litLength, LL_bits[llCodeTable[nbSeq - 1]]);
114754 -               if (ZSTD_32bits())
114755 -                       BIT_flushBits(&blockStream);
114756 -               BIT_addBits(&blockStream, sequences[nbSeq - 1].matchLength, ML_bits[mlCodeTable[nbSeq - 1]]);
114757 -               if (ZSTD_32bits())
114758 -                       BIT_flushBits(&blockStream);
114759 -               if (longOffsets) {
114760 -                       U32 const ofBits = ofCodeTable[nbSeq - 1];
114761 -                       int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN - 1);
114762 -                       if (extraBits) {
114763 -                               BIT_addBits(&blockStream, sequences[nbSeq - 1].offset, extraBits);
114764 -                               BIT_flushBits(&blockStream);
114765 -                       }
114766 -                       BIT_addBits(&blockStream, sequences[nbSeq - 1].offset >> extraBits, ofBits - extraBits);
114767 -               } else {
114768 -                       BIT_addBits(&blockStream, sequences[nbSeq - 1].offset, ofCodeTable[nbSeq - 1]);
114769 -               }
114770 -               BIT_flushBits(&blockStream);
114772 -               {
114773 -                       size_t n;
114774 -                       for (n = nbSeq - 2; n < nbSeq; n--) { /* intentional underflow */
114775 -                               BYTE const llCode = llCodeTable[n];
114776 -                               BYTE const ofCode = ofCodeTable[n];
114777 -                               BYTE const mlCode = mlCodeTable[n];
114778 -                               U32 const llBits = LL_bits[llCode];
114779 -                               U32 const ofBits = ofCode; /* 32b*/ /* 64b*/
114780 -                               U32 const mlBits = ML_bits[mlCode];
114781 -                               /* (7)*/                                                            /* (7)*/
114782 -                               FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */  /* 15 */
114783 -                               FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */
114784 -                               if (ZSTD_32bits())
114785 -                                       BIT_flushBits(&blockStream);                              /* (7)*/
114786 -                               FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */
114787 -                               if (ZSTD_32bits() || (ofBits + mlBits + llBits >= 64 - 7 - (LLFSELog + MLFSELog + OffFSELog)))
114788 -                                       BIT_flushBits(&blockStream); /* (7)*/
114789 -                               BIT_addBits(&blockStream, sequences[n].litLength, llBits);
114790 -                               if (ZSTD_32bits() && ((llBits + mlBits) > 24))
114791 -                                       BIT_flushBits(&blockStream);
114792 -                               BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
114793 -                               if (ZSTD_32bits())
114794 -                                       BIT_flushBits(&blockStream); /* (7)*/
114795 -                               if (longOffsets) {
114796 -                                       int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN - 1);
114797 -                                       if (extraBits) {
114798 -                                               BIT_addBits(&blockStream, sequences[n].offset, extraBits);
114799 -                                               BIT_flushBits(&blockStream); /* (7)*/
114800 -                                       }
114801 -                                       BIT_addBits(&blockStream, sequences[n].offset >> extraBits, ofBits - extraBits); /* 31 */
114802 -                               } else {
114803 -                                       BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */
114804 -                               }
114805 -                               BIT_flushBits(&blockStream); /* (7)*/
114806 -                       }
114807 -               }
114809 -               FSE_flushCState(&blockStream, &stateMatchLength);
114810 -               FSE_flushCState(&blockStream, &stateOffsetBits);
114811 -               FSE_flushCState(&blockStream, &stateLitLength);
114813 -               {
114814 -                       size_t const streamSize = BIT_closeCStream(&blockStream);
114815 -                       if (streamSize == 0)
114816 -                               return ERROR(dstSize_tooSmall); /* not enough space */
114817 -                       op += streamSize;
114818 -               }
114819 -       }
114820 -       return op - ostart;
114821 -}
114823 -ZSTD_STATIC size_t ZSTD_compressSequences(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, size_t srcSize)
114824 -{
114825 -       size_t const cSize = ZSTD_compressSequences_internal(zc, dst, dstCapacity);
114826 -       size_t const minGain = ZSTD_minGain(srcSize);
114827 -       size_t const maxCSize = srcSize - minGain;
114828 -       /* If the srcSize <= dstCapacity, then there is enough space to write a
114829 -        * raw uncompressed block. Since we ran out of space, the block must not
114830 -        * be compressible, so fall back to a raw uncompressed block.
114831 -        */
114832 -       int const uncompressibleError = cSize == ERROR(dstSize_tooSmall) && srcSize <= dstCapacity;
114833 -       int i;
114835 -       if (ZSTD_isError(cSize) && !uncompressibleError)
114836 -               return cSize;
114837 -       if (cSize >= maxCSize || uncompressibleError) {
114838 -               zc->flagStaticHufTable = HUF_repeat_none;
114839 -               return 0;
114840 -       }
114841 -       /* confirm repcodes */
114842 -       for (i = 0; i < ZSTD_REP_NUM; i++)
114843 -               zc->rep[i] = zc->repToConfirm[i];
114844 -       return cSize;
114845 -}
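Concretely, for a hypothetical 64 KiB block the gain test above works out as: minGain = (65536 >> 6) + 2 = 1026, so the entropy-coded result is kept only when cSize < 65536 - 1026 = 64510 bytes; anything less compressible returns 0 here, the caller emits a raw block instead, and the repcodes are left unconfirmed.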
114847 -/*! ZSTD_storeSeq() :
114848 -       Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
114849 -       `offsetCode` : distance to match, or 0 == repCode.
114850 -       `matchCode` : matchLength - MINMATCH
114851 -*/
114852 -ZSTD_STATIC void ZSTD_storeSeq(seqStore_t *seqStorePtr, size_t litLength, const void *literals, U32 offsetCode, size_t matchCode)
114853 -{
114854 -       /* copy Literals */
114855 -       ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
114856 -       seqStorePtr->lit += litLength;
114858 -       /* literal Length */
114859 -       if (litLength > 0xFFFF) {
114860 -               seqStorePtr->longLengthID = 1;
114861 -               seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
114862 -       }
114863 -       seqStorePtr->sequences[0].litLength = (U16)litLength;
114865 -       /* match offset */
114866 -       seqStorePtr->sequences[0].offset = offsetCode + 1;
114868 -       /* match Length */
114869 -       if (matchCode > 0xFFFF) {
114870 -               seqStorePtr->longLengthID = 2;
114871 -               seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
114872 -       }
114873 -       seqStorePtr->sequences[0].matchLength = (U16)matchCode;
114875 -       seqStorePtr->sequences++;
114876 -}
114878 -/*-*************************************
114879 -*  Match length counter
114880 -***************************************/
114881 -static unsigned ZSTD_NbCommonBytes(register size_t val)
114882 -{
114883 -       if (ZSTD_isLittleEndian()) {
114884 -               if (ZSTD_64bits()) {
114885 -                       return (__builtin_ctzll((U64)val) >> 3);
114886 -               } else { /* 32 bits */
114887 -                       return (__builtin_ctz((U32)val) >> 3);
114888 -               }
114889 -       } else { /* Big Endian CPU */
114890 -               if (ZSTD_64bits()) {
114891 -                       return (__builtin_clzll(val) >> 3);
114892 -               } else { /* 32 bits */
114893 -                       return (__builtin_clz((U32)val) >> 3);
114894 -               }
114895 -       }
114896 -}
114898 -static size_t ZSTD_count(const BYTE *pIn, const BYTE *pMatch, const BYTE *const pInLimit)
114899 -{
114900 -       const BYTE *const pStart = pIn;
114901 -       const BYTE *const pInLoopLimit = pInLimit - (sizeof(size_t) - 1);
114903 -       while (pIn < pInLoopLimit) {
114904 -               size_t const diff = ZSTD_readST(pMatch) ^ ZSTD_readST(pIn);
114905 -               if (!diff) {
114906 -                       pIn += sizeof(size_t);
114907 -                       pMatch += sizeof(size_t);
114908 -                       continue;
114909 -               }
114910 -               pIn += ZSTD_NbCommonBytes(diff);
114911 -               return (size_t)(pIn - pStart);
114912 -       }
114913 -       if (ZSTD_64bits())
114914 -               if ((pIn < (pInLimit - 3)) && (ZSTD_read32(pMatch) == ZSTD_read32(pIn))) {
114915 -                       pIn += 4;
114916 -                       pMatch += 4;
114917 -               }
114918 -       if ((pIn < (pInLimit - 1)) && (ZSTD_read16(pMatch) == ZSTD_read16(pIn))) {
114919 -               pIn += 2;
114920 -               pMatch += 2;
114921 -       }
114922 -       if ((pIn < pInLimit) && (*pMatch == *pIn))
114923 -               pIn++;
114924 -       return (size_t)(pIn - pStart);
114925 -}
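A standalone sketch of the word-at-a-time counter above, assuming a little-endian target so the first mismatching byte falls out of a count-trailing-zeros on the XOR of two word loads (the big-endian branch of ZSTD_NbCommonBytes uses CLZ instead); __builtin_ctzll is a GCC/Clang builtin:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static size_t count_match(const uint8_t *a, const uint8_t *b, const uint8_t *const end)
    {
            const uint8_t *const start = a;
            while (a + sizeof(size_t) <= end) {
                    size_t va, vb;
                    memcpy(&va, a, sizeof va);   /* safe unaligned reads */
                    memcpy(&vb, b, sizeof vb);
                    if (va != vb)                /* trailing zero bits / 8 = first differing byte */
                            return (size_t)(a - start) + (__builtin_ctzll(va ^ vb) >> 3);
                    a += sizeof(size_t);
                    b += sizeof(size_t);
            }
            while (a < end && *a == *b) { a++; b++; }   /* byte-by-byte tail */
            return (size_t)(a - start);
    }

    int main(void)
    {
            const uint8_t x[] = "abcdefghijXYZ", y[] = "abcdefghijQRS";
            printf("%zu\n", count_match(x, y, x + 13));   /* prints 10 */
            return 0;
    }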
114927 -/** ZSTD_count_2segments() :
114928 -*   can count match length with `ip` & `match` in 2 different segments.
114929 -*   convention : on reaching mEnd, match count continue starting from iStart
114930 -*/
114931 -static size_t ZSTD_count_2segments(const BYTE *ip, const BYTE *match, const BYTE *iEnd, const BYTE *mEnd, const BYTE *iStart)
114932 -{
114933 -       const BYTE *const vEnd = MIN(ip + (mEnd - match), iEnd);
114934 -       size_t const matchLength = ZSTD_count(ip, match, vEnd);
114935 -       if (match + matchLength != mEnd)
114936 -               return matchLength;
114937 -       return matchLength + ZSTD_count(ip + matchLength, iStart, iEnd);
114938 -}
114940 -/*-*************************************
114941 -*  Hashes
114942 -***************************************/
114943 -static const U32 prime3bytes = 506832829U;
114944 -static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32 - 24)) * prime3bytes) >> (32 - h); }
114945 -ZSTD_STATIC size_t ZSTD_hash3Ptr(const void *ptr, U32 h) { return ZSTD_hash3(ZSTD_readLE32(ptr), h); } /* only in zstd_opt.h */
114947 -static const U32 prime4bytes = 2654435761U;
114948 -static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32 - h); }
114949 -static size_t ZSTD_hash4Ptr(const void *ptr, U32 h) { return ZSTD_hash4(ZSTD_read32(ptr), h); }
114951 -static const U64 prime5bytes = 889523592379ULL;
114952 -static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64 - 40)) * prime5bytes) >> (64 - h)); }
114953 -static size_t ZSTD_hash5Ptr(const void *p, U32 h) { return ZSTD_hash5(ZSTD_readLE64(p), h); }
114955 -static const U64 prime6bytes = 227718039650203ULL;
114956 -static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64 - 48)) * prime6bytes) >> (64 - h)); }
114957 -static size_t ZSTD_hash6Ptr(const void *p, U32 h) { return ZSTD_hash6(ZSTD_readLE64(p), h); }
114959 -static const U64 prime7bytes = 58295818150454627ULL;
114960 -static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64 - 56)) * prime7bytes) >> (64 - h)); }
114961 -static size_t ZSTD_hash7Ptr(const void *p, U32 h) { return ZSTD_hash7(ZSTD_readLE64(p), h); }
114963 -static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
114964 -static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u)*prime8bytes) >> (64 - h)); }
114965 -static size_t ZSTD_hash8Ptr(const void *p, U32 h) { return ZSTD_hash8(ZSTD_readLE64(p), h); }
114967 -static size_t ZSTD_hashPtr(const void *p, U32 hBits, U32 mls)
114968 -{
114969 -       switch (mls) {
114970 -       // case 3: return ZSTD_hash3Ptr(p, hBits);
114971 -       default:
114972 -       case 4: return ZSTD_hash4Ptr(p, hBits);
114973 -       case 5: return ZSTD_hash5Ptr(p, hBits);
114974 -       case 6: return ZSTD_hash6Ptr(p, hBits);
114975 -       case 7: return ZSTD_hash7Ptr(p, hBits);
114976 -       case 8: return ZSTD_hash8Ptr(p, hBits);
114977 -       }
114978 -}
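Each width above is a multiplicative (Fibonacci-style) hash: multiply the first mls bytes by a large odd constant and keep the top hBits bits; ZSTD_hash4's 2654435761 is the classic Knuth constant, roughly 2^32 divided by the golden ratio. A standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t hash4(uint32_t u, unsigned hBits)
    {
            return (u * 2654435761u) >> (32 - hBits);   /* top bits are the well-mixed ones */
    }

    int main(void)
    {
            for (uint32_t u = 0; u < 4; u++)
                    printf("hash4(%u, 16) = %u\n", (unsigned)u, (unsigned)hash4(u, 16));
            return 0;
    }

Consecutive inputs land far apart in the table, which is exactly what the hash lookups in the fast-scan loop below want.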
114980 -/*-*************************************
114981 -*  Fast Scan
114982 -***************************************/
114983 -static void ZSTD_fillHashTable(ZSTD_CCtx *zc, const void *end, const U32 mls)
114984 -{
114985 -       U32 *const hashTable = zc->hashTable;
114986 -       U32 const hBits = zc->params.cParams.hashLog;
114987 -       const BYTE *const base = zc->base;
114988 -       const BYTE *ip = base + zc->nextToUpdate;
114989 -       const BYTE *const iend = ((const BYTE *)end) - HASH_READ_SIZE;
114990 -       const size_t fastHashFillStep = 3;
114992 -       while (ip <= iend) {
114993 -               hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
114994 -               ip += fastHashFillStep;
114995 -       }
114996 -}
114998 -FORCE_INLINE
114999 -void ZSTD_compressBlock_fast_generic(ZSTD_CCtx *cctx, const void *src, size_t srcSize, const U32 mls)
115000 -{
115001 -       U32 *const hashTable = cctx->hashTable;
115002 -       U32 const hBits = cctx->params.cParams.hashLog;
115003 -       seqStore_t *seqStorePtr = &(cctx->seqStore);
115004 -       const BYTE *const base = cctx->base;
115005 -       const BYTE *const istart = (const BYTE *)src;
115006 -       const BYTE *ip = istart;
115007 -       const BYTE *anchor = istart;
115008 -       const U32 lowestIndex = cctx->dictLimit;
115009 -       const BYTE *const lowest = base + lowestIndex;
115010 -       const BYTE *const iend = istart + srcSize;
115011 -       const BYTE *const ilimit = iend - HASH_READ_SIZE;
115012 -       U32 offset_1 = cctx->rep[0], offset_2 = cctx->rep[1];
115013 -       U32 offsetSaved = 0;
115015 -       /* init */
115016 -       ip += (ip == lowest);
115017 -       {
115018 -               U32 const maxRep = (U32)(ip - lowest);
115019 -               if (offset_2 > maxRep)
115020 -                       offsetSaved = offset_2, offset_2 = 0;
115021 -               if (offset_1 > maxRep)
115022 -                       offsetSaved = offset_1, offset_1 = 0;
115023 -       }
115025 -       /* Main Search Loop */
115026 -       while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
115027 -               size_t mLength;
115028 -               size_t const h = ZSTD_hashPtr(ip, hBits, mls);
115029 -               U32 const curr = (U32)(ip - base);
115030 -               U32 const matchIndex = hashTable[h];
115031 -               const BYTE *match = base + matchIndex;
115032 -               hashTable[h] = curr; /* update hash table */
115034 -               if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) {
115035 -                       mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4;
115036 -                       ip++;
115037 -                       ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
115038 -               } else {
115039 -                       U32 offset;
115040 -                       if ((matchIndex <= lowestIndex) || (ZSTD_read32(match) != ZSTD_read32(ip))) {
115041 -                               ip += ((ip - anchor) >> g_searchStrength) + 1;
115042 -                               continue;
115043 -                       }
115044 -                       mLength = ZSTD_count(ip + 4, match + 4, iend) + 4;
115045 -                       offset = (U32)(ip - match);
115046 -                       while (((ip > anchor) & (match > lowest)) && (ip[-1] == match[-1])) {
115047 -                               ip--;
115048 -                               match--;
115049 -                               mLength++;
115050 -                       } /* catch up */
115051 -                       offset_2 = offset_1;
115052 -                       offset_1 = offset;
115054 -                       ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
115055 -               }
115057 -               /* match found */
115058 -               ip += mLength;
115059 -               anchor = ip;
115061 -               if (ip <= ilimit) {
115062 -                       /* Fill Table */
115063 -                       hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2; /* here because curr+2 could be > iend-8 */
115064 -                       hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base);
115065 -                       /* check immediate repcode */
115066 -                       while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
115067 -                               /* store sequence */
115068 -                               size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4;
115069 -                               {
115070 -                                       U32 const tmpOff = offset_2;
115071 -                                       offset_2 = offset_1;
115072 -                                       offset_1 = tmpOff;
115073 -                               } /* swap offset_2 <=> offset_1 */
115074 -                               hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
115075 -                               ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength - MINMATCH);
115076 -                               ip += rLength;
115077 -                               anchor = ip;
115078 -                               continue; /* faster when present ... (?) */
115079 -                       }
115080 -               }
115081 -       }
115083 -       /* save reps for next block */
115084 -       cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
115085 -       cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
115087 -       /* Last Literals */
115088 -       {
115089 -               size_t const lastLLSize = iend - anchor;
115090 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
115091 -               seqStorePtr->lit += lastLLSize;
115092 -       }
115095 -static void ZSTD_compressBlock_fast(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
115097 -       const U32 mls = ctx->params.cParams.searchLength;
115098 -       switch (mls) {
115099 -       default: /* includes case 3 */
115100 -       case 4: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4); return;
115101 -       case 5: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 5); return;
115102 -       case 6: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 6); return;
115103 -       case 7: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 7); return;
115104 -       }
115107 -static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 mls)
115109 -       U32 *hashTable = ctx->hashTable;
115110 -       const U32 hBits = ctx->params.cParams.hashLog;
115111 -       seqStore_t *seqStorePtr = &(ctx->seqStore);
115112 -       const BYTE *const base = ctx->base;
115113 -       const BYTE *const dictBase = ctx->dictBase;
115114 -       const BYTE *const istart = (const BYTE *)src;
115115 -       const BYTE *ip = istart;
115116 -       const BYTE *anchor = istart;
115117 -       const U32 lowestIndex = ctx->lowLimit;
115118 -       const BYTE *const dictStart = dictBase + lowestIndex;
115119 -       const U32 dictLimit = ctx->dictLimit;
115120 -       const BYTE *const lowPrefixPtr = base + dictLimit;
115121 -       const BYTE *const dictEnd = dictBase + dictLimit;
115122 -       const BYTE *const iend = istart + srcSize;
115123 -       const BYTE *const ilimit = iend - 8;
115124 -       U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
115126 -       /* Search Loop */
115127 -       while (ip < ilimit) { /* < instead of <=, because (ip+1) */
115128 -               const size_t h = ZSTD_hashPtr(ip, hBits, mls);
115129 -               const U32 matchIndex = hashTable[h];
115130 -               const BYTE *matchBase = matchIndex < dictLimit ? dictBase : base;
115131 -               const BYTE *match = matchBase + matchIndex;
115132 -               const U32 curr = (U32)(ip - base);
115133 -               const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
115134 -               const BYTE *repBase = repIndex < dictLimit ? dictBase : base;
115135 -               const BYTE *repMatch = repBase + repIndex;
115136 -               size_t mLength;
115137 -               hashTable[h] = curr; /* update hash table */
115139 -               if ((((U32)((dictLimit - 1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) &&
115140 -                   (ZSTD_read32(repMatch) == ZSTD_read32(ip + 1))) {
115141 -                       const BYTE *repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
115142 -                       mLength = ZSTD_count_2segments(ip + 1 + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repMatchEnd, lowPrefixPtr) + EQUAL_READ32;
115143 -                       ip++;
115144 -                       ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
115145 -               } else {
115146 -                       if ((matchIndex < lowestIndex) || (ZSTD_read32(match) != ZSTD_read32(ip))) {
115147 -                               ip += ((ip - anchor) >> g_searchStrength) + 1;
115148 -                               continue;
115149 -                       }
115150 -                       {
115151 -                               const BYTE *matchEnd = matchIndex < dictLimit ? dictEnd : iend;
115152 -                               const BYTE *lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
115153 -                               U32 offset;
115154 -                               mLength = ZSTD_count_2segments(ip + EQUAL_READ32, match + EQUAL_READ32, iend, matchEnd, lowPrefixPtr) + EQUAL_READ32;
115155 -                               while (((ip > anchor) & (match > lowMatchPtr)) && (ip[-1] == match[-1])) {
115156 -                                       ip--;
115157 -                                       match--;
115158 -                                       mLength++;
115159 -                               } /* catch up */
115160 -                               offset = curr - matchIndex;
115161 -                               offset_2 = offset_1;
115162 -                               offset_1 = offset;
115163 -                               ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
115164 -                       }
115165 -               }
115167 -               /* found a match : store it */
115168 -               ip += mLength;
115169 -               anchor = ip;
115171 -               if (ip <= ilimit) {
115172 -                       /* Fill Table */
115173 -                       hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2;
115174 -                       hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base);
115175 -                       /* check immediate repcode */
115176 -                       while (ip <= ilimit) {
115177 -                               U32 const curr2 = (U32)(ip - base);
115178 -                               U32 const repIndex2 = curr2 - offset_2;
115179 -                               const BYTE *repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
115180 -                               if ((((U32)((dictLimit - 1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */
115181 -                                   && (ZSTD_read32(repMatch2) == ZSTD_read32(ip))) {
115182 -                                       const BYTE *const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
115183 -                                       size_t repLength2 =
115184 -                                           ZSTD_count_2segments(ip + EQUAL_READ32, repMatch2 + EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
115185 -                                       U32 tmpOffset = offset_2;
115186 -                                       offset_2 = offset_1;
115187 -                                       offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
115188 -                                       ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2 - MINMATCH);
115189 -                                       hashTable[ZSTD_hashPtr(ip, hBits, mls)] = curr2;
115190 -                                       ip += repLength2;
115191 -                                       anchor = ip;
115192 -                                       continue;
115193 -                               }
115194 -                               break;
115195 -                       }
115196 -               }
115197 -       }
115199 -       /* save reps for next block */
115200 -       ctx->repToConfirm[0] = offset_1;
115201 -       ctx->repToConfirm[1] = offset_2;
115203 -       /* Last Literals */
115204 -       {
115205 -               size_t const lastLLSize = iend - anchor;
115206 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
115207 -               seqStorePtr->lit += lastLLSize;
115208 -       }
115211 -static void ZSTD_compressBlock_fast_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
115213 -       U32 const mls = ctx->params.cParams.searchLength;
115214 -       switch (mls) {
115215 -       default: /* includes case 3 */
115216 -       case 4: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4); return;
115217 -       case 5: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 5); return;
115218 -       case 6: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 6); return;
115219 -       case 7: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 7); return;
115220 -       }
115223 -/*-*************************************
115224 -*  Double Fast
115225 -***************************************/
115226 -static void ZSTD_fillDoubleHashTable(ZSTD_CCtx *cctx, const void *end, const U32 mls)
115228 -       U32 *const hashLarge = cctx->hashTable;
115229 -       U32 const hBitsL = cctx->params.cParams.hashLog;
115230 -       U32 *const hashSmall = cctx->chainTable;
115231 -       U32 const hBitsS = cctx->params.cParams.chainLog;
115232 -       const BYTE *const base = cctx->base;
115233 -       const BYTE *ip = base + cctx->nextToUpdate;
115234 -       const BYTE *const iend = ((const BYTE *)end) - HASH_READ_SIZE;
115235 -       const size_t fastHashFillStep = 3;
115237 -       while (ip <= iend) {
115238 -               hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
115239 -               hashLarge[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
115240 -               ip += fastHashFillStep;
115241 -       }
115244 -FORCE_INLINE
115245 -void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx *cctx, const void *src, size_t srcSize, const U32 mls)
115247 -       U32 *const hashLong = cctx->hashTable;
115248 -       const U32 hBitsL = cctx->params.cParams.hashLog;
115249 -       U32 *const hashSmall = cctx->chainTable;
115250 -       const U32 hBitsS = cctx->params.cParams.chainLog;
115251 -       seqStore_t *seqStorePtr = &(cctx->seqStore);
115252 -       const BYTE *const base = cctx->base;
115253 -       const BYTE *const istart = (const BYTE *)src;
115254 -       const BYTE *ip = istart;
115255 -       const BYTE *anchor = istart;
115256 -       const U32 lowestIndex = cctx->dictLimit;
115257 -       const BYTE *const lowest = base + lowestIndex;
115258 -       const BYTE *const iend = istart + srcSize;
115259 -       const BYTE *const ilimit = iend - HASH_READ_SIZE;
115260 -       U32 offset_1 = cctx->rep[0], offset_2 = cctx->rep[1];
115261 -       U32 offsetSaved = 0;
115263 -       /* init */
115264 -       ip += (ip == lowest);
115265 -       {
115266 -               U32 const maxRep = (U32)(ip - lowest);
115267 -               if (offset_2 > maxRep)
115268 -                       offsetSaved = offset_2, offset_2 = 0;
115269 -               if (offset_1 > maxRep)
115270 -                       offsetSaved = offset_1, offset_1 = 0;
115271 -       }
115273 -       /* Main Search Loop */
115274 -       while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
115275 -               size_t mLength;
115276 -               size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
115277 -               size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
115278 -               U32 const curr = (U32)(ip - base);
115279 -               U32 const matchIndexL = hashLong[h2];
115280 -               U32 const matchIndexS = hashSmall[h];
115281 -               const BYTE *matchLong = base + matchIndexL;
115282 -               const BYTE *match = base + matchIndexS;
115283 -               hashLong[h2] = hashSmall[h] = curr; /* update hash tables */
115285 -               if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) { /* note : by construction, offset_1 <= curr */
115286 -                       mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4;
115287 -                       ip++;
115288 -                       ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
115289 -               } else {
115290 -                       U32 offset;
115291 -                       if ((matchIndexL > lowestIndex) && (ZSTD_read64(matchLong) == ZSTD_read64(ip))) {
115292 -                               mLength = ZSTD_count(ip + 8, matchLong + 8, iend) + 8;
115293 -                               offset = (U32)(ip - matchLong);
115294 -                               while (((ip > anchor) & (matchLong > lowest)) && (ip[-1] == matchLong[-1])) {
115295 -                                       ip--;
115296 -                                       matchLong--;
115297 -                                       mLength++;
115298 -                               } /* catch up */
115299 -                       } else if ((matchIndexS > lowestIndex) && (ZSTD_read32(match) == ZSTD_read32(ip))) {
115300 -                               size_t const h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8);
115301 -                               U32 const matchIndex3 = hashLong[h3];
115302 -                               const BYTE *match3 = base + matchIndex3;
115303 -                               hashLong[h3] = curr + 1;
115304 -                               if ((matchIndex3 > lowestIndex) && (ZSTD_read64(match3) == ZSTD_read64(ip + 1))) {
115305 -                                       mLength = ZSTD_count(ip + 9, match3 + 8, iend) + 8;
115306 -                                       ip++;
115307 -                                       offset = (U32)(ip - match3);
115308 -                                       while (((ip > anchor) & (match3 > lowest)) && (ip[-1] == match3[-1])) {
115309 -                                               ip--;
115310 -                                               match3--;
115311 -                                               mLength++;
115312 -                                       } /* catch up */
115313 -                               } else {
115314 -                                       mLength = ZSTD_count(ip + 4, match + 4, iend) + 4;
115315 -                                       offset = (U32)(ip - match);
115316 -                                       while (((ip > anchor) & (match > lowest)) && (ip[-1] == match[-1])) {
115317 -                                               ip--;
115318 -                                               match--;
115319 -                                               mLength++;
115320 -                                       } /* catch up */
115321 -                               }
115322 -                       } else {
115323 -                               ip += ((ip - anchor) >> g_searchStrength) + 1;
115324 -                               continue;
115325 -                       }
115327 -                       offset_2 = offset_1;
115328 -                       offset_1 = offset;
115330 -                       ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
115331 -               }
115333 -               /* match found */
115334 -               ip += mLength;
115335 -               anchor = ip;
115337 -               if (ip <= ilimit) {
115338 -                       /* Fill Table */
115339 -                       hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] =
115340 -                           curr + 2; /* here because curr+2 could be > iend-8 */
115341 -                       hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base);
115343 -                       /* check immediate repcode */
115344 -                       while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
115345 -                               /* store sequence */
115346 -                               size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4;
115347 -                               {
115348 -                                       U32 const tmpOff = offset_2;
115349 -                                       offset_2 = offset_1;
115350 -                                       offset_1 = tmpOff;
115351 -                               } /* swap offset_2 <=> offset_1 */
115352 -                               hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
115353 -                               hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
115354 -                               ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength - MINMATCH);
115355 -                               ip += rLength;
115356 -                               anchor = ip;
115357 -                               continue; /* faster when present ... (?) */
115358 -                       }
115359 -               }
115360 -       }
115362 -       /* save reps for next block */
115363 -       cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
115364 -       cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
115366 -       /* Last Literals */
115367 -       {
115368 -               size_t const lastLLSize = iend - anchor;
115369 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
115370 -               seqStorePtr->lit += lastLLSize;
115371 -       }
115374 -static void ZSTD_compressBlock_doubleFast(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
115376 -       const U32 mls = ctx->params.cParams.searchLength;
115377 -       switch (mls) {
115378 -       default: /* includes case 3 */
115379 -       case 4: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 4); return;
115380 -       case 5: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 5); return;
115381 -       case 6: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 6); return;
115382 -       case 7: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 7); return;
115383 -       }
115386 -static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 mls)
115388 -       U32 *const hashLong = ctx->hashTable;
115389 -       U32 const hBitsL = ctx->params.cParams.hashLog;
115390 -       U32 *const hashSmall = ctx->chainTable;
115391 -       U32 const hBitsS = ctx->params.cParams.chainLog;
115392 -       seqStore_t *seqStorePtr = &(ctx->seqStore);
115393 -       const BYTE *const base = ctx->base;
115394 -       const BYTE *const dictBase = ctx->dictBase;
115395 -       const BYTE *const istart = (const BYTE *)src;
115396 -       const BYTE *ip = istart;
115397 -       const BYTE *anchor = istart;
115398 -       const U32 lowestIndex = ctx->lowLimit;
115399 -       const BYTE *const dictStart = dictBase + lowestIndex;
115400 -       const U32 dictLimit = ctx->dictLimit;
115401 -       const BYTE *const lowPrefixPtr = base + dictLimit;
115402 -       const BYTE *const dictEnd = dictBase + dictLimit;
115403 -       const BYTE *const iend = istart + srcSize;
115404 -       const BYTE *const ilimit = iend - 8;
115405 -       U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
115407 -       /* Search Loop */
115408 -       while (ip < ilimit) { /* < instead of <=, because (ip+1) */
115409 -               const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
115410 -               const U32 matchIndex = hashSmall[hSmall];
115411 -               const BYTE *matchBase = matchIndex < dictLimit ? dictBase : base;
115412 -               const BYTE *match = matchBase + matchIndex;
115414 -               const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
115415 -               const U32 matchLongIndex = hashLong[hLong];
115416 -               const BYTE *matchLongBase = matchLongIndex < dictLimit ? dictBase : base;
115417 -               const BYTE *matchLong = matchLongBase + matchLongIndex;
115419 -               const U32 curr = (U32)(ip - base);
115420 -               const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
115421 -               const BYTE *repBase = repIndex < dictLimit ? dictBase : base;
115422 -               const BYTE *repMatch = repBase + repIndex;
115423 -               size_t mLength;
115424 -               hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */
115426 -               if ((((U32)((dictLimit - 1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) &&
115427 -                   (ZSTD_read32(repMatch) == ZSTD_read32(ip + 1))) {
115428 -                       const BYTE *repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
115429 -                       mLength = ZSTD_count_2segments(ip + 1 + 4, repMatch + 4, iend, repMatchEnd, lowPrefixPtr) + 4;
115430 -                       ip++;
115431 -                       ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
115432 -               } else {
115433 -                       if ((matchLongIndex > lowestIndex) && (ZSTD_read64(matchLong) == ZSTD_read64(ip))) {
115434 -                               const BYTE *matchEnd = matchLongIndex < dictLimit ? dictEnd : iend;
115435 -                               const BYTE *lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr;
115436 -                               U32 offset;
115437 -                               mLength = ZSTD_count_2segments(ip + 8, matchLong + 8, iend, matchEnd, lowPrefixPtr) + 8;
115438 -                               offset = curr - matchLongIndex;
115439 -                               while (((ip > anchor) & (matchLong > lowMatchPtr)) && (ip[-1] == matchLong[-1])) {
115440 -                                       ip--;
115441 -                                       matchLong--;
115442 -                                       mLength++;
115443 -                               } /* catch up */
115444 -                               offset_2 = offset_1;
115445 -                               offset_1 = offset;
115446 -                               ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
115448 -                       } else if ((matchIndex > lowestIndex) && (ZSTD_read32(match) == ZSTD_read32(ip))) {
115449 -                               size_t const h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8);
115450 -                               U32 const matchIndex3 = hashLong[h3];
115451 -                               const BYTE *const match3Base = matchIndex3 < dictLimit ? dictBase : base;
115452 -                               const BYTE *match3 = match3Base + matchIndex3;
115453 -                               U32 offset;
115454 -                               hashLong[h3] = curr + 1;
115455 -                               if ((matchIndex3 > lowestIndex) && (ZSTD_read64(match3) == ZSTD_read64(ip + 1))) {
115456 -                                       const BYTE *matchEnd = matchIndex3 < dictLimit ? dictEnd : iend;
115457 -                                       const BYTE *lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr;
115458 -                                       mLength = ZSTD_count_2segments(ip + 9, match3 + 8, iend, matchEnd, lowPrefixPtr) + 8;
115459 -                                       ip++;
115460 -                                       offset = curr + 1 - matchIndex3;
115461 -                                       while (((ip > anchor) & (match3 > lowMatchPtr)) && (ip[-1] == match3[-1])) {
115462 -                                               ip--;
115463 -                                               match3--;
115464 -                                               mLength++;
115465 -                                       } /* catch up */
115466 -                               } else {
115467 -                                       const BYTE *matchEnd = matchIndex < dictLimit ? dictEnd : iend;
115468 -                                       const BYTE *lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
115469 -                                       mLength = ZSTD_count_2segments(ip + 4, match + 4, iend, matchEnd, lowPrefixPtr) + 4;
115470 -                                       offset = curr - matchIndex;
115471 -                                       while (((ip > anchor) & (match > lowMatchPtr)) && (ip[-1] == match[-1])) {
115472 -                                               ip--;
115473 -                                               match--;
115474 -                                               mLength++;
115475 -                                       } /* catch up */
115476 -                               }
115477 -                               offset_2 = offset_1;
115478 -                               offset_1 = offset;
115479 -                               ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
115481 -                       } else {
115482 -                               ip += ((ip - anchor) >> g_searchStrength) + 1;
115483 -                               continue;
115484 -                       }
115485 -               }
115487 -               /* found a match : store it */
115488 -               ip += mLength;
115489 -               anchor = ip;
115491 -               if (ip <= ilimit) {
115492 -                       /* Fill Table */
115493 -                       hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] = curr + 2;
115494 -                       hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = curr + 2;
115495 -                       hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base);
115496 -                       hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = (U32)(ip - 2 - base);
115497 -                       /* check immediate repcode */
115498 -                       while (ip <= ilimit) {
115499 -                               U32 const curr2 = (U32)(ip - base);
115500 -                               U32 const repIndex2 = curr2 - offset_2;
115501 -                               const BYTE *repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
115502 -                               if ((((U32)((dictLimit - 1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */
115503 -                                   && (ZSTD_read32(repMatch2) == ZSTD_read32(ip))) {
115504 -                                       const BYTE *const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
115505 -                                       size_t const repLength2 =
115506 -                                           ZSTD_count_2segments(ip + EQUAL_READ32, repMatch2 + EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
115507 -                                       U32 tmpOffset = offset_2;
115508 -                                       offset_2 = offset_1;
115509 -                                       offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
115510 -                                       ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2 - MINMATCH);
115511 -                                       hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = curr2;
115512 -                                       hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = curr2;
115513 -                                       ip += repLength2;
115514 -                                       anchor = ip;
115515 -                                       continue;
115516 -                               }
115517 -                               break;
115518 -                       }
115519 -               }
115520 -       }
115522 -       /* save reps for next block */
115523 -       ctx->repToConfirm[0] = offset_1;
115524 -       ctx->repToConfirm[1] = offset_2;
115526 -       /* Last Literals */
115527 -       {
115528 -               size_t const lastLLSize = iend - anchor;
115529 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
115530 -               seqStorePtr->lit += lastLLSize;
115531 -       }
115534 -static void ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
115536 -       U32 const mls = ctx->params.cParams.searchLength;
115537 -       switch (mls) {
115538 -       default: /* includes case 3 */
115539 -       case 4: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 4); return;
115540 -       case 5: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 5); return;
115541 -       case 6: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 6); return;
115542 -       case 7: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 7); return;
115543 -       }
115546 -/*-*************************************
115547 -*  Binary Tree search
115548 -***************************************/
115549 -/** ZSTD_insertBt1() : add one or multiple positions to tree.
115550 -*   ip : assumed <= iend-8 .
115551 -*   @return : nb of positions added */
115552 -static U32 ZSTD_insertBt1(ZSTD_CCtx *zc, const BYTE *const ip, const U32 mls, const BYTE *const iend, U32 nbCompares, U32 extDict)
115554 -       U32 *const hashTable = zc->hashTable;
115555 -       U32 const hashLog = zc->params.cParams.hashLog;
115556 -       size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
115557 -       U32 *const bt = zc->chainTable;
115558 -       U32 const btLog = zc->params.cParams.chainLog - 1;
115559 -       U32 const btMask = (1 << btLog) - 1;
115560 -       U32 matchIndex = hashTable[h];
115561 -       size_t commonLengthSmaller = 0, commonLengthLarger = 0;
115562 -       const BYTE *const base = zc->base;
115563 -       const BYTE *const dictBase = zc->dictBase;
115564 -       const U32 dictLimit = zc->dictLimit;
115565 -       const BYTE *const dictEnd = dictBase + dictLimit;
115566 -       const BYTE *const prefixStart = base + dictLimit;
115567 -       const BYTE *match;
115568 -       const U32 curr = (U32)(ip - base);
115569 -       const U32 btLow = btMask >= curr ? 0 : curr - btMask;
115570 -       U32 *smallerPtr = bt + 2 * (curr & btMask);
115571 -       U32 *largerPtr = smallerPtr + 1;
115572 -       U32 dummy32; /* to be nullified at the end */
115573 -       U32 const windowLow = zc->lowLimit;
115574 -       U32 matchEndIdx = curr + 8;
115575 -       size_t bestLength = 8;
115577 -       hashTable[h] = curr; /* Update Hash Table */
115579 -       while (nbCompares-- && (matchIndex > windowLow)) {
115580 -               U32 *const nextPtr = bt + 2 * (matchIndex & btMask);
115581 -               size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
115583 -               if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
115584 -                       match = base + matchIndex;
115585 -                       if (match[matchLength] == ip[matchLength])
115586 -                               matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iend) + 1;
115587 -               } else {
115588 -                       match = dictBase + matchIndex;
115589 -                       matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart);
115590 -                       if (matchIndex + matchLength >= dictLimit)
115591 -                               match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
115592 -               }
115594 -               if (matchLength > bestLength) {
115595 -                       bestLength = matchLength;
115596 -                       if (matchLength > matchEndIdx - matchIndex)
115597 -                               matchEndIdx = matchIndex + (U32)matchLength;
115598 -               }
115600 -               if (ip + matchLength == iend) /* equal : no way to know if inf or sup */
115601 -                       break;                /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt the tree */
115603 -               if (match[matchLength] < ip[matchLength]) { /* necessarily within correct buffer */
115604 -                       /* match is smaller than curr */
115605 -                       *smallerPtr = matchIndex;         /* update smaller idx */
115606 -                       commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
115607 -                       if (matchIndex <= btLow) {
115608 -                               smallerPtr = &dummy32;
115609 -                               break;
115610 -                       }                         /* beyond tree size, stop the search */
115611 -                       smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */
115612 -                       matchIndex = nextPtr[1];  /* new matchIndex larger than previous (closer to curr) */
115613 -               } else {
115614 -                       /* match is larger than curr */
115615 -                       *largerPtr = matchIndex;
115616 -                       commonLengthLarger = matchLength;
115617 -                       if (matchIndex <= btLow) {
115618 -                               largerPtr = &dummy32;
115619 -                               break;
115620 -                       } /* beyond tree size, stop the search */
115621 -                       largerPtr = nextPtr;
115622 -                       matchIndex = nextPtr[0];
115623 -               }
115624 -       }
115626 -       *smallerPtr = *largerPtr = 0;
115627 -       if (bestLength > 384)
115628 -               return MIN(192, (U32)(bestLength - 384)); /* speed optimization */
115629 -       if (matchEndIdx > curr + 8)
115630 -               return matchEndIdx - curr - 8;
115631 -       return 1;
115634 -static size_t ZSTD_insertBtAndFindBestMatch(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, size_t *offsetPtr, U32 nbCompares, const U32 mls,
115635 -                                           U32 extDict)
115637 -       U32 *const hashTable = zc->hashTable;
115638 -       U32 const hashLog = zc->params.cParams.hashLog;
115639 -       size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
115640 -       U32 *const bt = zc->chainTable;
115641 -       U32 const btLog = zc->params.cParams.chainLog - 1;
115642 -       U32 const btMask = (1 << btLog) - 1;
115643 -       U32 matchIndex = hashTable[h];
115644 -       size_t commonLengthSmaller = 0, commonLengthLarger = 0;
115645 -       const BYTE *const base = zc->base;
115646 -       const BYTE *const dictBase = zc->dictBase;
115647 -       const U32 dictLimit = zc->dictLimit;
115648 -       const BYTE *const dictEnd = dictBase + dictLimit;
115649 -       const BYTE *const prefixStart = base + dictLimit;
115650 -       const U32 curr = (U32)(ip - base);
115651 -       const U32 btLow = btMask >= curr ? 0 : curr - btMask;
115652 -       const U32 windowLow = zc->lowLimit;
115653 -       U32 *smallerPtr = bt + 2 * (curr & btMask);
115654 -       U32 *largerPtr = bt + 2 * (curr & btMask) + 1;
115655 -       U32 matchEndIdx = curr + 8;
115656 -       U32 dummy32; /* to be nullified at the end */
115657 -       size_t bestLength = 0;
115659 -       hashTable[h] = curr; /* Update Hash Table */
115661 -       while (nbCompares-- && (matchIndex > windowLow)) {
115662 -               U32 *const nextPtr = bt + 2 * (matchIndex & btMask);
115663 -               size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
115664 -               const BYTE *match;
115666 -               if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
115667 -                       match = base + matchIndex;
115668 -                       if (match[matchLength] == ip[matchLength])
115669 -                               matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iend) + 1;
115670 -               } else {
115671 -                       match = dictBase + matchIndex;
115672 -                       matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart);
115673 -                       if (matchIndex + matchLength >= dictLimit)
115674 -                               match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
115675 -               }
115677 -               if (matchLength > bestLength) {
115678 -                       if (matchLength > matchEndIdx - matchIndex)
115679 -                               matchEndIdx = matchIndex + (U32)matchLength;
115680 -                       if ((4 * (int)(matchLength - bestLength)) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((U32)offsetPtr[0] + 1)))
115681 -                               bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
115682 -                       if (ip + matchLength == iend) /* equal : no way to know if inf or sup */
115683 -                               break;                /* drop, to guarantee consistency (miss a little bit of compression) */
115684 -               }
115686 -               if (match[matchLength] < ip[matchLength]) {
115687 -                       /* match is smaller than curr */
115688 -                       *smallerPtr = matchIndex;         /* update smaller idx */
115689 -                       commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
115690 -                       if (matchIndex <= btLow) {
115691 -                               smallerPtr = &dummy32;
115692 -                               break;
115693 -                       }                         /* beyond tree size, stop the search */
115694 -                       smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */
115695 -                       matchIndex = nextPtr[1];  /* new matchIndex larger than previous (closer to curr) */
115696 -               } else {
115697 -                       /* match is larger than curr */
115698 -                       *largerPtr = matchIndex;
115699 -                       commonLengthLarger = matchLength;
115700 -                       if (matchIndex <= btLow) {
115701 -                               largerPtr = &dummy32;
115702 -                               break;
115703 -                       } /* beyond tree size, stop the search */
115704 -                       largerPtr = nextPtr;
115705 -                       matchIndex = nextPtr[0];
115706 -               }
115707 -       }
115709 -       *smallerPtr = *largerPtr = 0;
115711 -       zc->nextToUpdate = (matchEndIdx > curr + 8) ? matchEndIdx - 8 : curr + 1;
115712 -       return bestLength;
115715 -static void ZSTD_updateTree(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, const U32 nbCompares, const U32 mls)
115717 -       const BYTE *const base = zc->base;
115718 -       const U32 target = (U32)(ip - base);
115719 -       U32 idx = zc->nextToUpdate;
115721 -       while (idx < target)
115722 -               idx += ZSTD_insertBt1(zc, base + idx, mls, iend, nbCompares, 0);
115725 -/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
115726 -static size_t ZSTD_BtFindBestMatch(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 mls)
115728 -       if (ip < zc->base + zc->nextToUpdate)
115729 -               return 0; /* skipped area */
115730 -       ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
115731 -       return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 0);
115734 -static size_t ZSTD_BtFindBestMatch_selectMLS(ZSTD_CCtx *zc, /* Index table will be updated */
115735 -                                            const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 matchLengthSearch)
115737 -       switch (matchLengthSearch) {
115738 -       default: /* includes case 3 */
115739 -       case 4: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
115740 -       case 5: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
115741 -       case 7:
115742 -       case 6: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
115743 -       }
115746 -static void ZSTD_updateTree_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, const U32 nbCompares, const U32 mls)
115748 -       const BYTE *const base = zc->base;
115749 -       const U32 target = (U32)(ip - base);
115750 -       U32 idx = zc->nextToUpdate;
115752 -       while (idx < target)
115753 -               idx += ZSTD_insertBt1(zc, base + idx, mls, iend, nbCompares, 1);
115756 -/** Tree updater, providing best match */
115757 -static size_t ZSTD_BtFindBestMatch_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
115758 -                                          const U32 mls)
115760 -       if (ip < zc->base + zc->nextToUpdate)
115761 -               return 0; /* skipped area */
115762 -       ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
115763 -       return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 1);
115766 -static size_t ZSTD_BtFindBestMatch_selectMLS_extDict(ZSTD_CCtx *zc, /* Index table will be updated */
115767 -                                                    const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
115768 -                                                    const U32 matchLengthSearch)
115770 -       switch (matchLengthSearch) {
115771 -       default: /* includes case 3 */
115772 -       case 4: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
115773 -       case 5: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
115774 -       case 7:
115775 -       case 6: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
115776 -       }
115779 -/* *********************************
115780 -*  Hash Chain
115781 -***********************************/
115782 -#define NEXT_IN_CHAIN(d, mask) chainTable[(d)&mask]
115784 -/* Update chains up to ip (excluded)
115785 -   Assumption : always within prefix (i.e. not within extDict) */
115786 -FORCE_INLINE
115787 -U32 ZSTD_insertAndFindFirstIndex(ZSTD_CCtx *zc, const BYTE *ip, U32 mls)
115789 -       U32 *const hashTable = zc->hashTable;
115790 -       const U32 hashLog = zc->params.cParams.hashLog;
115791 -       U32 *const chainTable = zc->chainTable;
115792 -       const U32 chainMask = (1 << zc->params.cParams.chainLog) - 1;
115793 -       const BYTE *const base = zc->base;
115794 -       const U32 target = (U32)(ip - base);
115795 -       U32 idx = zc->nextToUpdate;
115797 -       while (idx < target) { /* catch up */
115798 -               size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls);
115799 -               NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
115800 -               hashTable[h] = idx;
115801 -               idx++;
115802 -       }
115804 -       zc->nextToUpdate = target;
115805 -       return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
115808 -/* inlining is important to hardwire a hot branch (template emulation) */
115809 -FORCE_INLINE
115810 -size_t ZSTD_HcFindBestMatch_generic(ZSTD_CCtx *zc, /* Index table will be updated */
115811 -                                   const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 mls,
115812 -                                   const U32 extDict)
115814 -       U32 *const chainTable = zc->chainTable;
115815 -       const U32 chainSize = (1 << zc->params.cParams.chainLog);
115816 -       const U32 chainMask = chainSize - 1;
115817 -       const BYTE *const base = zc->base;
115818 -       const BYTE *const dictBase = zc->dictBase;
115819 -       const U32 dictLimit = zc->dictLimit;
115820 -       const BYTE *const prefixStart = base + dictLimit;
115821 -       const BYTE *const dictEnd = dictBase + dictLimit;
115822 -       const U32 lowLimit = zc->lowLimit;
115823 -       const U32 curr = (U32)(ip - base);
115824 -       const U32 minChain = curr > chainSize ? curr - chainSize : 0;
115825 -       int nbAttempts = maxNbAttempts;
115826 -       size_t ml = EQUAL_READ32 - 1;
115828 -       /* HC4 match finder */
115829 -       U32 matchIndex = ZSTD_insertAndFindFirstIndex(zc, ip, mls);
115831 -       for (; (matchIndex > lowLimit) & (nbAttempts > 0); nbAttempts--) {
115832 -               const BYTE *match;
115833 -               size_t currMl = 0;
115834 -               if ((!extDict) || matchIndex >= dictLimit) {
115835 -                       match = base + matchIndex;
115836 -                       if (match[ml] == ip[ml]) /* potentially better */
115837 -                               currMl = ZSTD_count(ip, match, iLimit);
115838 -               } else {
115839 -                       match = dictBase + matchIndex;
115840 -                       if (ZSTD_read32(match) == ZSTD_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
115841 -                               currMl = ZSTD_count_2segments(ip + EQUAL_READ32, match + EQUAL_READ32, iLimit, dictEnd, prefixStart) + EQUAL_READ32;
115842 -               }
115844 -               /* save best solution */
115845 -               if (currMl > ml) {
115846 -                       ml = currMl;
115847 -                       *offsetPtr = curr - matchIndex + ZSTD_REP_MOVE;
115848 -                       if (ip + currMl == iLimit)
115849 -                               break; /* best possible, and avoid read overflow*/
115850 -               }
115852 -               if (matchIndex <= minChain)
115853 -                       break;
115854 -               matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
115855 -       }
115857 -       return ml;
115860 -FORCE_INLINE size_t ZSTD_HcFindBestMatch_selectMLS(ZSTD_CCtx *zc, const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
115861 -                                                  const U32 matchLengthSearch)
115863 -       switch (matchLengthSearch) {
115864 -       default: /* includes case 3 */
115865 -       case 4: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0);
115866 -       case 5: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0);
115867 -       case 7:
115868 -       case 6: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0);
115869 -       }
115872 -FORCE_INLINE size_t ZSTD_HcFindBestMatch_extDict_selectMLS(ZSTD_CCtx *zc, const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
115873 -                                                          const U32 matchLengthSearch)
115875 -       switch (matchLengthSearch) {
115876 -       default: /* includes case 3 */
115877 -       case 4: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 1);
115878 -       case 5: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 1);
115879 -       case 7:
115880 -       case 6: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1);
115881 -       }
115884 -/* *******************************
115885 -*  Common parser - lazy strategy
115886 -*********************************/
115887 -FORCE_INLINE
115888 -void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 searchMethod, const U32 depth)
115890 -       seqStore_t *seqStorePtr = &(ctx->seqStore);
115891 -       const BYTE *const istart = (const BYTE *)src;
115892 -       const BYTE *ip = istart;
115893 -       const BYTE *anchor = istart;
115894 -       const BYTE *const iend = istart + srcSize;
115895 -       const BYTE *const ilimit = iend - 8;
115896 -       const BYTE *const base = ctx->base + ctx->dictLimit;
115898 -       U32 const maxSearches = 1 << ctx->params.cParams.searchLog;
115899 -       U32 const mls = ctx->params.cParams.searchLength;
115901 -       typedef size_t (*searchMax_f)(ZSTD_CCtx * zc, const BYTE *ip, const BYTE *iLimit, size_t *offsetPtr, U32 maxNbAttempts, U32 matchLengthSearch);
115902 -       searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
115903 -       U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1], savedOffset = 0;
115905 -       /* init */
115906 -       ip += (ip == base);
115907 -       ctx->nextToUpdate3 = ctx->nextToUpdate;
115908 -       {
115909 -               U32 const maxRep = (U32)(ip - base);
115910 -               if (offset_2 > maxRep)
115911 -                       savedOffset = offset_2, offset_2 = 0;
115912 -               if (offset_1 > maxRep)
115913 -                       savedOffset = offset_1, offset_1 = 0;
115914 -       }
115916 -       /* Match Loop */
115917 -       while (ip < ilimit) {
115918 -               size_t matchLength = 0;
115919 -               size_t offset = 0;
115920 -               const BYTE *start = ip + 1;
115922 -               /* check repCode */
115923 -               if ((offset_1 > 0) & (ZSTD_read32(ip + 1) == ZSTD_read32(ip + 1 - offset_1))) {
115924 -                       /* repcode : we take it */
115925 -                       matchLength = ZSTD_count(ip + 1 + EQUAL_READ32, ip + 1 + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
115926 -                       if (depth == 0)
115927 -                               goto _storeSequence;
115928 -               }
115930 -               /* first search (depth 0) */
115931 -               {
115932 -                       size_t offsetFound = 99999999;
115933 -                       size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
115934 -                       if (ml2 > matchLength)
115935 -                               matchLength = ml2, start = ip, offset = offsetFound;
115936 -               }
115938 -               if (matchLength < EQUAL_READ32) {
115939 -                       ip += ((ip - anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
115940 -                       continue;
115941 -               }
115943 -               /* let's try to find a better solution */
115944 -               if (depth >= 1)
115945 -                       while (ip < ilimit) {
115946 -                               ip++;
115947 -                               if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) {
115948 -                                       size_t const mlRep = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
115949 -                                       int const gain2 = (int)(mlRep * 3);
115950 -                                       int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1);
115951 -                                       if ((mlRep >= EQUAL_READ32) && (gain2 > gain1))
115952 -                                               matchLength = mlRep, offset = 0, start = ip;
115953 -                               }
115954 -                               {
115955 -                                       size_t offset2 = 99999999;
115956 -                                       size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
115957 -                                       int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
115958 -                                       int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4);
115959 -                                       if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
115960 -                                               matchLength = ml2, offset = offset2, start = ip;
115961 -                                               continue; /* search a better one */
115962 -                                       }
115963 -                               }
115965 -                               /* let's find an even better one */
115966 -                               if ((depth == 2) && (ip < ilimit)) {
115967 -                                       ip++;
115968 -                                       if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) {
115969 -                                               size_t const ml2 = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
115970 -                                               int const gain2 = (int)(ml2 * 4);
115971 -                                               int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1);
115972 -                                               if ((ml2 >= EQUAL_READ32) && (gain2 > gain1))
115973 -                                                       matchLength = ml2, offset = 0, start = ip;
115974 -                                       }
115975 -                                       {
115976 -                                               size_t offset2 = 99999999;
115977 -                                               size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
115978 -                                               int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
115979 -                                               int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 7);
115980 -                                               if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
115981 -                                                       matchLength = ml2, offset = offset2, start = ip;
115982 -                                                       continue;
115983 -                                               }
115984 -                                       }
115985 -                               }
115986 -                               break; /* nothing found : store previous solution */
115987 -                       }
115989 -               /* NOTE:
115990 -                * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior.
115991 -                * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which
115992 -                * overflows the pointer, which is undefined behavior.
115993 -                */
115994 -               /* catch up */
115995 -               if (offset) {
115996 -                       while ((start > anchor) && (start > base + offset - ZSTD_REP_MOVE) &&
115997 -                              (start[-1] == (start-offset+ZSTD_REP_MOVE)[-1])) /* only search for offset within prefix */
115998 -                       {
115999 -                               start--;
116000 -                               matchLength++;
116001 -                       }
116002 -                       offset_2 = offset_1;
116003 -                       offset_1 = (U32)(offset - ZSTD_REP_MOVE);
116004 -               }
116006 -       /* store sequence */
116007 -_storeSequence:
116008 -               {
116009 -                       size_t const litLength = start - anchor;
116010 -                       ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength - MINMATCH);
116011 -                       anchor = ip = start + matchLength;
116012 -               }
116014 -               /* check immediate repcode */
116015 -               while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
116016 -                       /* store sequence */
116017 -                       matchLength = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_2, iend) + EQUAL_READ32;
116018 -                       offset = offset_2;
116019 -                       offset_2 = offset_1;
116020 -                       offset_1 = (U32)offset; /* swap repcodes */
116021 -                       ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength - MINMATCH);
116022 -                       ip += matchLength;
116023 -                       anchor = ip;
116024 -                       continue; /* faster when present ... (?) */
116025 -               }
116026 -       }
116028 -       /* Save reps for next block */
116029 -       ctx->repToConfirm[0] = offset_1 ? offset_1 : savedOffset;
116030 -       ctx->repToConfirm[1] = offset_2 ? offset_2 : savedOffset;
116032 -       /* Last Literals */
116033 -       {
116034 -               size_t const lastLLSize = iend - anchor;
116035 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
116036 -               seqStorePtr->lit += lastLLSize;
116037 -       }
116040 -static void ZSTD_compressBlock_btlazy2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1, 2); }
116042 -static void ZSTD_compressBlock_lazy2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 2); }
116044 -static void ZSTD_compressBlock_lazy(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 1); }
116046 -static void ZSTD_compressBlock_greedy(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 0); }
116048 -FORCE_INLINE
116049 -void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 searchMethod, const U32 depth)
116051 -       seqStore_t *seqStorePtr = &(ctx->seqStore);
116052 -       const BYTE *const istart = (const BYTE *)src;
116053 -       const BYTE *ip = istart;
116054 -       const BYTE *anchor = istart;
116055 -       const BYTE *const iend = istart + srcSize;
116056 -       const BYTE *const ilimit = iend - 8;
116057 -       const BYTE *const base = ctx->base;
116058 -       const U32 dictLimit = ctx->dictLimit;
116059 -       const U32 lowestIndex = ctx->lowLimit;
116060 -       const BYTE *const prefixStart = base + dictLimit;
116061 -       const BYTE *const dictBase = ctx->dictBase;
116062 -       const BYTE *const dictEnd = dictBase + dictLimit;
116063 -       const BYTE *const dictStart = dictBase + ctx->lowLimit;
116065 -       const U32 maxSearches = 1 << ctx->params.cParams.searchLog;
116066 -       const U32 mls = ctx->params.cParams.searchLength;
116068 -       typedef size_t (*searchMax_f)(ZSTD_CCtx * zc, const BYTE *ip, const BYTE *iLimit, size_t *offsetPtr, U32 maxNbAttempts, U32 matchLengthSearch);
116069 -       searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;
116071 -       U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
116073 -       /* init */
116074 -       ctx->nextToUpdate3 = ctx->nextToUpdate;
116075 -       ip += (ip == prefixStart);
116077 -       /* Match Loop */
116078 -       while (ip < ilimit) {
116079 -               size_t matchLength = 0;
116080 -               size_t offset = 0;
116081 -               const BYTE *start = ip + 1;
116082 -               U32 curr = (U32)(ip - base);
116084 -               /* check repCode */
116085 -               {
116086 -                       const U32 repIndex = (U32)(curr + 1 - offset_1);
116087 -                       const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
116088 -                       const BYTE *const repMatch = repBase + repIndex;
116089 -                       if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
116090 -                               if (ZSTD_read32(ip + 1) == ZSTD_read32(repMatch)) {
116091 -                                       /* repcode detected, we should take it */
116092 -                                       const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
116093 -                                       matchLength =
116094 -                                           ZSTD_count_2segments(ip + 1 + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
116095 -                                       if (depth == 0)
116096 -                                               goto _storeSequence;
116097 -                               }
116098 -               }
116100 -               /* first search (depth 0) */
116101 -               {
116102 -                       size_t offsetFound = 99999999;
116103 -                       size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
116104 -                       if (ml2 > matchLength)
116105 -                               matchLength = ml2, start = ip, offset = offsetFound;
116106 -               }
116108 -               if (matchLength < EQUAL_READ32) {
116109 -                       ip += ((ip - anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
116110 -                       continue;
116111 -               }
116113 -               /* let's try to find a better solution */
116114 -               if (depth >= 1)
116115 -                       while (ip < ilimit) {
116116 -                               ip++;
116117 -                               curr++;
116118 -                               /* check repCode */
116119 -                               if (offset) {
116120 -                                       const U32 repIndex = (U32)(curr - offset_1);
116121 -                                       const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
116122 -                                       const BYTE *const repMatch = repBase + repIndex;
116123 -                                       if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
116124 -                                               if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
116125 -                                                       /* repcode detected */
116126 -                                                       const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
116127 -                                                       size_t const repLength =
116128 -                                                           ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) +
116129 -                                                           EQUAL_READ32;
116130 -                                                       int const gain2 = (int)(repLength * 3);
116131 -                                                       int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1);
116132 -                                                       if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
116133 -                                                               matchLength = repLength, offset = 0, start = ip;
116134 -                                               }
116135 -                               }
116137 -                               /* search match, depth 1 */
116138 -                               {
116139 -                                       size_t offset2 = 99999999;
116140 -                                       size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
116141 -                                       int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
116142 -                                       int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4);
116143 -                                       if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
116144 -                                               matchLength = ml2, offset = offset2, start = ip;
116145 -                                               continue; /* search a better one */
116146 -                                       }
116147 -                               }
116149 -                               /* let's find an even better one */
116150 -                               if ((depth == 2) && (ip < ilimit)) {
116151 -                                       ip++;
116152 -                                       curr++;
116153 -                                       /* check repCode */
116154 -                                       if (offset) {
116155 -                                               const U32 repIndex = (U32)(curr - offset_1);
116156 -                                               const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
116157 -                                               const BYTE *const repMatch = repBase + repIndex;
116158 -                                               if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
116159 -                                                       if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
116160 -                                                               /* repcode detected */
116161 -                                                               const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
116162 -                                                               size_t repLength = ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend,
116163 -                                                                                                       repEnd, prefixStart) +
116164 -                                                                                  EQUAL_READ32;
116165 -                                                               int gain2 = (int)(repLength * 4);
116166 -                                                               int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1);
116167 -                                                               if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
116168 -                                                                       matchLength = repLength, offset = 0, start = ip;
116169 -                                                       }
116170 -                                       }
116172 -                                       /* search match, depth 2 */
116173 -                                       {
116174 -                                               size_t offset2 = 99999999;
116175 -                                               size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
116176 -                                               int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
116177 -                                               int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 7);
116178 -                                               if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
116179 -                                                       matchLength = ml2, offset = offset2, start = ip;
116180 -                                                       continue;
116181 -                                               }
116182 -                                       }
116183 -                               }
116184 -                               break; /* nothing found : store previous solution */
116185 -                       }
116187 -               /* catch up */
116188 -               if (offset) {
116189 -                       U32 const matchIndex = (U32)((start - base) - (offset - ZSTD_REP_MOVE));
116190 -                       const BYTE *match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
116191 -                       const BYTE *const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
116192 -                       while ((start > anchor) && (match > mStart) && (start[-1] == match[-1])) {
116193 -                               start--;
116194 -                               match--;
116195 -                               matchLength++;
116196 -                       } /* catch up */
116197 -                       offset_2 = offset_1;
116198 -                       offset_1 = (U32)(offset - ZSTD_REP_MOVE);
116199 -               }
116201 -       /* store sequence */
116202 -       _storeSequence : {
116203 -               size_t const litLength = start - anchor;
116204 -               ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength - MINMATCH);
116205 -               anchor = ip = start + matchLength;
116206 -       }
116208 -               /* check immediate repcode */
116209 -               while (ip <= ilimit) {
116210 -                       const U32 repIndex = (U32)((ip - base) - offset_2);
116211 -                       const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
116212 -                       const BYTE *const repMatch = repBase + repIndex;
116213 -                       if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
116214 -                               if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
116215 -                                       /* repcode detected, we should take it */
116216 -                                       const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
116217 -                                       matchLength =
116218 -                                           ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
116219 -                                       offset = offset_2;
116220 -                                       offset_2 = offset_1;
116221 -                                       offset_1 = (U32)offset; /* swap offset history */
116222 -                                       ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength - MINMATCH);
116223 -                                       ip += matchLength;
116224 -                                       anchor = ip;
116225 -                                       continue; /* faster when present ... (?) */
116226 -                               }
116227 -                       break;
116228 -               }
116229 -       }
116231 -       /* Save reps for next block */
116232 -       ctx->repToConfirm[0] = offset_1;
116233 -       ctx->repToConfirm[1] = offset_2;
116235 -       /* Last Literals */
116236 -       {
116237 -               size_t const lastLLSize = iend - anchor;
116238 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
116239 -               seqStorePtr->lit += lastLLSize;
116240 -       }
116243 -void ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 0); }
116245 -static void ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
116247 -       ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 1);
116250 -static void ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
116252 -       ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 2);
116255 -static void ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
116257 -       ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 1, 2);
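In the extDict path above, match indices are absolute: anything below dictLimit lives in the old (dictionary) segment addressed through dictBase, anything at or above it in the current prefix addressed through base; the repEnd/dictEnd selection follows the same rule. The kernel code uses biased base pointers so one absolute index works with either segment; the toy below makes the same selection with strictly in-bounds arithmetic (buffers and names are hypothetical):

#include <stdio.h>

typedef unsigned char BYTE;

/* mirrors: repBase = repIndex < dictLimit ? dictBase : base,
 * but without the biased-pointer trick, to stay strictly in bounds */
static const BYTE *index_to_ptr(unsigned idx, unsigned dictLimit,
				const BYTE *dict, const BYTE *prefix)
{
	return (idx < dictLimit) ? dict + idx : prefix + (idx - dictLimit);
}

int main(void)
{
	BYTE const dict[] = "OLDDATA";   /* absolute indices 0..6 */
	BYTE const prefix[] = "NEWDATA"; /* absolute indices 7.. */
	unsigned const dictLimit = 7;

	printf("%c %c\n",
	       *index_to_ptr(2, dictLimit, dict, prefix),  /* 'D', old segment */
	       *index_to_ptr(8, dictLimit, dict, prefix)); /* 'E', current prefix */
	return 0;
}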
116260 -/* The optimal parser */
116261 -#include "zstd_opt.h"
116263 -static void ZSTD_compressBlock_btopt(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
116265 -#ifdef ZSTD_OPT_H_91842398743
116266 -       ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 0);
116267 -#else
116268 -       (void)ctx;
116269 -       (void)src;
116270 -       (void)srcSize;
116271 -       return;
116272 -#endif
116275 -static void ZSTD_compressBlock_btopt2(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
116277 -#ifdef ZSTD_OPT_H_91842398743
116278 -       ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 1);
116279 -#else
116280 -       (void)ctx;
116281 -       (void)src;
116282 -       (void)srcSize;
116283 -       return;
116284 -#endif
116287 -static void ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
116289 -#ifdef ZSTD_OPT_H_91842398743
116290 -       ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 0);
116291 -#else
116292 -       (void)ctx;
116293 -       (void)src;
116294 -       (void)srcSize;
116295 -       return;
116296 -#endif
116299 -static void ZSTD_compressBlock_btopt2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
116301 -#ifdef ZSTD_OPT_H_91842398743
116302 -       ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 1);
116303 -#else
116304 -       (void)ctx;
116305 -       (void)src;
116306 -       (void)srcSize;
116307 -       return;
116308 -#endif
116311 -typedef void (*ZSTD_blockCompressor)(ZSTD_CCtx *ctx, const void *src, size_t srcSize);
116313 -static ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict)
116315 -       static const ZSTD_blockCompressor blockCompressor[2][8] = {
116316 -           {ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy, ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2,
116317 -            ZSTD_compressBlock_btlazy2, ZSTD_compressBlock_btopt, ZSTD_compressBlock_btopt2},
116318 -           {ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict, ZSTD_compressBlock_lazy_extDict,
116319 -            ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict, ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btopt2_extDict}};
116321 -       return blockCompressor[extDict][(U32)strat];
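ZSTD_selectBlockCompressor is a plain two-dimensional function-pointer dispatch: one row for in-prefix matching, one for the extDict variants, indexed by strategy. A reduced sketch of the same pattern, with toy strategies and handlers rather than the kernel's:

#include <stdio.h>

typedef enum { STRAT_FAST = 0, STRAT_LAZY = 1, STRAT_MAX } strategy_e;
typedef void (*block_fn)(const void *src, size_t srcSize);

static void fast(const void *s, size_t n)         { (void)s; printf("fast %zu\n", n); }
static void lazy(const void *s, size_t n)         { (void)s; printf("lazy %zu\n", n); }
static void fast_extDict(const void *s, size_t n) { (void)s; printf("fast_ext %zu\n", n); }
static void lazy_extDict(const void *s, size_t n) { (void)s; printf("lazy_ext %zu\n", n); }

static block_fn select_block_fn(strategy_e strat, int extDict)
{
	static const block_fn table[2][STRAT_MAX] = {
		{ fast,         lazy         },  /* extDict == 0 */
		{ fast_extDict, lazy_extDict },  /* extDict == 1 */
	};
	return table[!!extDict][strat];
}

int main(void)
{
	select_block_fn(STRAT_LAZY, 1)("abc", 3); /* prints "lazy_ext 3" */
	return 0;
}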
116324 -static size_t ZSTD_compressBlock_internal(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
116326 -       ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->params.cParams.strategy, zc->lowLimit < zc->dictLimit);
116327 -       const BYTE *const base = zc->base;
116328 -       const BYTE *const istart = (const BYTE *)src;
116329 -       const U32 curr = (U32)(istart - base);
116330 -       if (srcSize < MIN_CBLOCK_SIZE + ZSTD_blockHeaderSize + 1)
116331 -               return 0; /* don't even attempt compression below a certain srcSize */
116332 -       ZSTD_resetSeqStore(&(zc->seqStore));
116333 -       if (curr > zc->nextToUpdate + 384)
116334 -               zc->nextToUpdate = curr - MIN(192, (U32)(curr - zc->nextToUpdate - 384)); /* update tree not updated after finding very long rep matches */
116335 -       blockCompressor(zc, src, srcSize);
116336 -       return ZSTD_compressSequences(zc, dst, dstCapacity, srcSize);
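The nextToUpdate clamp above bounds the catch-up work the match finder does after a long stretch of input was skipped (typically a very long rep match): if the gap exceeds 384 bytes, at most the 192 bytes immediately behind curr are re-indexed. A quick numeric check of that formula:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static unsigned clamp_next_to_update(unsigned curr, unsigned nextToUpdate)
{
	if (curr > nextToUpdate + 384)
		nextToUpdate = curr - MIN(192, curr - nextToUpdate - 384);
	return nextToUpdate;
}

int main(void)
{
	printf("%u\n", clamp_next_to_update(1000, 0));   /* gap 1000 -> resume at 808 */
	printf("%u\n", clamp_next_to_update(1000, 700)); /* gap 300 <= 384 -> unchanged */
	return 0;
}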
116339 -/*! ZSTD_compress_generic() :
116340 -*   Compress a chunk of data into one or multiple blocks.
116341 -*   All blocks will be terminated, all input will be consumed.
116342 -*   Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
116343 -*   Frame is assumed already started (header already produced)
116344 -*   @return : compressed size, or an error code
116346 -static size_t ZSTD_compress_generic(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 lastFrameChunk)
116348 -       size_t blockSize = cctx->blockSize;
116349 -       size_t remaining = srcSize;
116350 -       const BYTE *ip = (const BYTE *)src;
116351 -       BYTE *const ostart = (BYTE *)dst;
116352 -       BYTE *op = ostart;
116353 -       U32 const maxDist = 1 << cctx->params.cParams.windowLog;
116355 -       if (cctx->params.fParams.checksumFlag && srcSize)
116356 -               xxh64_update(&cctx->xxhState, src, srcSize);
116358 -       while (remaining) {
116359 -               U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
116360 -               size_t cSize;
116362 -               if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE)
116363 -                       return ERROR(dstSize_tooSmall); /* not enough space to store compressed block */
116364 -               if (remaining < blockSize)
116365 -                       blockSize = remaining;
116367 -               /* preemptive overflow correction */
116368 -               if (cctx->lowLimit > (3U << 29)) {
116369 -                       U32 const cycleMask = (1 << ZSTD_cycleLog(cctx->params.cParams.hashLog, cctx->params.cParams.strategy)) - 1;
116370 -                       U32 const curr = (U32)(ip - cctx->base);
116371 -                       U32 const newCurr = (curr & cycleMask) + (1 << cctx->params.cParams.windowLog);
116372 -                       U32 const correction = curr - newCurr;
116373 -                       ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_64 <= 30);
116374 -                       ZSTD_reduceIndex(cctx, correction);
116375 -                       cctx->base += correction;
116376 -                       cctx->dictBase += correction;
116377 -                       cctx->lowLimit -= correction;
116378 -                       cctx->dictLimit -= correction;
116379 -                       if (cctx->nextToUpdate < correction)
116380 -                               cctx->nextToUpdate = 0;
116381 -                       else
116382 -                               cctx->nextToUpdate -= correction;
116383 -               }
116385 -               if ((U32)(ip + blockSize - cctx->base) > cctx->loadedDictEnd + maxDist) {
116386 -                       /* enforce maxDist */
116387 -                       U32 const newLowLimit = (U32)(ip + blockSize - cctx->base) - maxDist;
116388 -                       if (cctx->lowLimit < newLowLimit)
116389 -                               cctx->lowLimit = newLowLimit;
116390 -                       if (cctx->dictLimit < cctx->lowLimit)
116391 -                               cctx->dictLimit = cctx->lowLimit;
116392 -               }
116394 -               cSize = ZSTD_compressBlock_internal(cctx, op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize, ip, blockSize);
116395 -               if (ZSTD_isError(cSize))
116396 -                       return cSize;
116398 -               if (cSize == 0) { /* block is not compressible */
116399 -                       U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw) << 1) + (U32)(blockSize << 3);
116400 -                       if (blockSize + ZSTD_blockHeaderSize > dstCapacity)
116401 -                               return ERROR(dstSize_tooSmall);
116402 -                       ZSTD_writeLE32(op, cBlockHeader24); /* no problem: the 4th byte will be overwritten */
116403 -                       memcpy(op + ZSTD_blockHeaderSize, ip, blockSize);
116404 -                       cSize = ZSTD_blockHeaderSize + blockSize;
116405 -               } else {
116406 -                       U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed) << 1) + (U32)(cSize << 3);
116407 -                       ZSTD_writeLE24(op, cBlockHeader24);
116408 -                       cSize += ZSTD_blockHeaderSize;
116409 -               }
116411 -               remaining -= blockSize;
116412 -               dstCapacity -= cSize;
116413 -               ip += blockSize;
116414 -               op += cSize;
116415 -       }
116417 -       if (lastFrameChunk && (op > ostart))
116418 -               cctx->stage = ZSTDcs_ending;
116419 -       return op - ostart;
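Each block is fronted by a 3-byte little-endian header packing, from bit 0 upward: the last-block flag (1 bit), the block type (2 bits, the bt_raw/bt_compressed values used above), and the block size in the remaining 21 bits, which is why the code shifts the size left by 3. A self-contained round-trip of that packing (the enum values follow the zstd frame format; the helper names are ours):

#include <stdio.h>

/* block types per the zstd frame format (bt_rle omitted above) */
enum { bt_raw = 0, bt_rle = 1, bt_compressed = 2 };

static unsigned pack_block_header(unsigned lastBlock, unsigned type, unsigned size)
{
	return lastBlock + (type << 1) + (size << 3); /* 1 + 2 + 21 bits */
}

static void write_le24(unsigned char *p, unsigned v)
{
	p[0] = (unsigned char)v;
	p[1] = (unsigned char)(v >> 8);
	p[2] = (unsigned char)(v >> 16);
}

int main(void)
{
	unsigned char hdr[3];
	unsigned const h = pack_block_header(1, bt_compressed, 12345);
	write_le24(hdr, h);

	/* decode it back */
	{
		unsigned const v = hdr[0] | (hdr[1] << 8) | ((unsigned)hdr[2] << 16);
		printf("last=%u type=%u size=%u\n", v & 1, (v >> 1) & 3, v >> 3);
	}
	return 0;
}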
116422 -static size_t ZSTD_writeFrameHeader(void *dst, size_t dstCapacity, ZSTD_parameters params, U64 pledgedSrcSize, U32 dictID)
116424 -       BYTE *const op = (BYTE *)dst;
116425 -       U32 const dictIDSizeCode = (dictID > 0) + (dictID >= 256) + (dictID >= 65536); /* 0-3 */
116426 -       U32 const checksumFlag = params.fParams.checksumFlag > 0;
116427 -       U32 const windowSize = 1U << params.cParams.windowLog;
116428 -       U32 const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
116429 -       BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
116430 -       U32 const fcsCode =
116431 -           params.fParams.contentSizeFlag ? (pledgedSrcSize >= 256) + (pledgedSrcSize >= 65536 + 256) + (pledgedSrcSize >= 0xFFFFFFFFU) : 0; /* 0-3 */
116432 -       BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag << 2) + (singleSegment << 5) + (fcsCode << 6));
116433 -       size_t pos;
116435 -       if (dstCapacity < ZSTD_frameHeaderSize_max)
116436 -               return ERROR(dstSize_tooSmall);
116438 -       ZSTD_writeLE32(dst, ZSTD_MAGICNUMBER);
116439 -       op[4] = frameHeaderDescriptionByte;
116440 -       pos = 5;
116441 -       if (!singleSegment)
116442 -               op[pos++] = windowLogByte;
116443 -       switch (dictIDSizeCode) {
116444 -       default: /* impossible */
116445 -       case 0: break;
116446 -       case 1:
116447 -               op[pos] = (BYTE)(dictID);
116448 -               pos++;
116449 -               break;
116450 -       case 2:
116451 -               ZSTD_writeLE16(op + pos, (U16)dictID);
116452 -               pos += 2;
116453 -               break;
116454 -       case 3:
116455 -               ZSTD_writeLE32(op + pos, dictID);
116456 -               pos += 4;
116457 -               break;
116458 -       }
116459 -       switch (fcsCode) {
116460 -       default: /* impossible */
116461 -       case 0:
116462 -               if (singleSegment)
116463 -                       op[pos++] = (BYTE)(pledgedSrcSize);
116464 -               break;
116465 -       case 1:
116466 -               ZSTD_writeLE16(op + pos, (U16)(pledgedSrcSize - 256));
116467 -               pos += 2;
116468 -               break;
116469 -       case 2:
116470 -               ZSTD_writeLE32(op + pos, (U32)(pledgedSrcSize));
116471 -               pos += 4;
116472 -               break;
116473 -       case 3:
116474 -               ZSTD_writeLE64(op + pos, (U64)(pledgedSrcSize));
116475 -               pos += 8;
116476 -               break;
116477 -       }
116478 -       return pos;
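The descriptor byte written at op[4] packs four fields, and the layout follows directly from the expression above: bits 0-1 carry dictIDSizeCode, bit 2 the checksumFlag, bit 5 singleSegment, and bits 6-7 the fcsCode (bits 3-4 are reserved). A small sketch that unpacks it (the helper name is ours):

#include <stdio.h>

static void decode_fhd_byte(unsigned char b)
{
	unsigned const dictIDSizeCode = b & 3;        /* bits 0-1: 0/1/2/4-byte dictID */
	unsigned const checksumFlag   = (b >> 2) & 1; /* bit 2 */
	unsigned const singleSegment  = (b >> 5) & 1; /* bit 5: no window byte follows */
	unsigned const fcsCode        = b >> 6;       /* bits 6-7: content-size field width */
	printf("dictID=%u checksum=%u single=%u fcs=%u\n",
	       dictIDSizeCode, checksumFlag, singleSegment, fcsCode);
}

int main(void)
{
	/* descriptor for: 2-byte dictID, checksum on, windowed frame, 4-byte fcs */
	decode_fhd_byte((unsigned char)(2 + (1 << 2) + (0 << 5) + (2 << 6)));
	return 0;
}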
116481 -static size_t ZSTD_compressContinue_internal(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 frame, U32 lastFrameChunk)
116483 -       const BYTE *const ip = (const BYTE *)src;
116484 -       size_t fhSize = 0;
116486 -       if (cctx->stage == ZSTDcs_created)
116487 -               return ERROR(stage_wrong); /* missing init (ZSTD_compressBegin) */
116489 -       if (frame && (cctx->stage == ZSTDcs_init)) {
116490 -               fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, cctx->frameContentSize, cctx->dictID);
116491 -               if (ZSTD_isError(fhSize))
116492 -                       return fhSize;
116493 -               dstCapacity -= fhSize;
116494 -               dst = (char *)dst + fhSize;
116495 -               cctx->stage = ZSTDcs_ongoing;
116496 -       }
116498 -       /* Check if blocks follow each other */
116499 -       if (src != cctx->nextSrc) {
116500 -               /* not contiguous */
116501 -               ptrdiff_t const delta = cctx->nextSrc - ip;
116502 -               cctx->lowLimit = cctx->dictLimit;
116503 -               cctx->dictLimit = (U32)(cctx->nextSrc - cctx->base);
116504 -               cctx->dictBase = cctx->base;
116505 -               cctx->base -= delta;
116506 -               cctx->nextToUpdate = cctx->dictLimit;
116507 -               if (cctx->dictLimit - cctx->lowLimit < HASH_READ_SIZE)
116508 -                       cctx->lowLimit = cctx->dictLimit; /* too small extDict */
116509 -       }
116511 -       /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
116512 -       if ((ip + srcSize > cctx->dictBase + cctx->lowLimit) & (ip < cctx->dictBase + cctx->dictLimit)) {
116513 -               ptrdiff_t const highInputIdx = (ip + srcSize) - cctx->dictBase;
116514 -               U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)cctx->dictLimit) ? cctx->dictLimit : (U32)highInputIdx;
116515 -               cctx->lowLimit = lowLimitMax;
116516 -       }
116518 -       cctx->nextSrc = ip + srcSize;
116520 -       if (srcSize) {
116521 -               size_t const cSize = frame ? ZSTD_compress_generic(cctx, dst, dstCapacity, src, srcSize, lastFrameChunk)
116522 -                                          : ZSTD_compressBlock_internal(cctx, dst, dstCapacity, src, srcSize);
116523 -               if (ZSTD_isError(cSize))
116524 -                       return cSize;
116525 -               return cSize + fhSize;
116526 -       } else
116527 -               return fhSize;
116530 -size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
116532 -       return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 0);
116535 -size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx) { return MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, 1 << cctx->params.cParams.windowLog); }
116537 -size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
116539 -       size_t const blockSizeMax = ZSTD_getBlockSizeMax(cctx);
116540 -       if (srcSize > blockSizeMax)
116541 -               return ERROR(srcSize_wrong);
116542 -       return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0, 0);
116545 -/*! ZSTD_loadDictionaryContent() :
116546 - *  @return : 0, or an error code
116547 - */
116548 -static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx *zc, const void *src, size_t srcSize)
116550 -       const BYTE *const ip = (const BYTE *)src;
116551 -       const BYTE *const iend = ip + srcSize;
116553 -       /* input becomes the current prefix */
116554 -       zc->lowLimit = zc->dictLimit;
116555 -       zc->dictLimit = (U32)(zc->nextSrc - zc->base);
116556 -       zc->dictBase = zc->base;
116557 -       zc->base += ip - zc->nextSrc;
116558 -       zc->nextToUpdate = zc->dictLimit;
116559 -       zc->loadedDictEnd = zc->forceWindow ? 0 : (U32)(iend - zc->base);
116561 -       zc->nextSrc = iend;
116562 -       if (srcSize <= HASH_READ_SIZE)
116563 -               return 0;
116565 -       switch (zc->params.cParams.strategy) {
116566 -       case ZSTD_fast: ZSTD_fillHashTable(zc, iend, zc->params.cParams.searchLength); break;
116568 -       case ZSTD_dfast: ZSTD_fillDoubleHashTable(zc, iend, zc->params.cParams.searchLength); break;
116570 -       case ZSTD_greedy:
116571 -       case ZSTD_lazy:
116572 -       case ZSTD_lazy2:
116573 -               if (srcSize >= HASH_READ_SIZE)
116574 -                       ZSTD_insertAndFindFirstIndex(zc, iend - HASH_READ_SIZE, zc->params.cParams.searchLength);
116575 -               break;
116577 -       case ZSTD_btlazy2:
116578 -       case ZSTD_btopt:
116579 -       case ZSTD_btopt2:
116580 -               if (srcSize >= HASH_READ_SIZE)
116581 -                       ZSTD_updateTree(zc, iend - HASH_READ_SIZE, iend, 1 << zc->params.cParams.searchLog, zc->params.cParams.searchLength);
116582 -               break;
116584 -       default:
116585 -               return ERROR(GENERIC); /* strategy doesn't exist; impossible */
116586 -       }
116588 -       zc->nextToUpdate = (U32)(iend - zc->base);
116589 -       return 0;
116592 -/* Dictionaries that assign zero probability to symbols that do show up cause problems
116593 -   during FSE encoding. Refuse dictionaries that assign zero probability to symbols
116594 -   that we may encounter during compression.
116595 -   NOTE: This behavior is not standard and could be improved in the future. */
116596 -static size_t ZSTD_checkDictNCount(short *normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
116598 -       U32 s;
116599 -       if (dictMaxSymbolValue < maxSymbolValue)
116600 -               return ERROR(dictionary_corrupted);
116601 -       for (s = 0; s <= maxSymbolValue; ++s) {
116602 -               if (normalizedCounter[s] == 0)
116603 -                       return ERROR(dictionary_corrupted);
116604 -       }
116605 -       return 0;
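ZSTD_checkDictNCount rejects any normalized table that leaves a reachable symbol with zero probability, because FSE cannot allocate a state to a symbol whose normalized count is zero. A toy run of the same validation rule (names are ours):

#include <stdio.h>

/* same rule as ZSTD_checkDictNCount: every symbol we may emit needs
 * a non-zero normalized count, or FSE encoding has no state for it */
static int dict_ncount_ok(const short *ncount, unsigned dictMax, unsigned needMax)
{
	unsigned s;
	if (dictMax < needMax)
		return 0;
	for (s = 0; s <= needMax; s++)
		if (ncount[s] == 0)
			return 0;
	return 1;
}

int main(void)
{
	short const ok[4]  = { 4, 3, 2, 1 };
	short const bad[4] = { 4, 0, 2, 1 }; /* symbol 1 has zero probability -> reject */
	printf("%d %d\n", dict_ncount_ok(ok, 3, 3), dict_ncount_ok(bad, 3, 3));
	return 0;
}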
116608 -/* Dictionary format :
116609 - * See :
116610 - * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
116611 - */
116612 -/*! ZSTD_loadZstdDictionary() :
116613 - * @return : 0, or an error code
116614 - *  assumptions : magic number has already been checked,
116615 - *                dictSize > 8
116616 - */
116617 -static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx *cctx, const void *dict, size_t dictSize)
116619 -       const BYTE *dictPtr = (const BYTE *)dict;
116620 -       const BYTE *const dictEnd = dictPtr + dictSize;
116621 -       short offcodeNCount[MaxOff + 1];
116622 -       unsigned offcodeMaxValue = MaxOff;
116624 -       dictPtr += 4; /* skip magic number */
116625 -       cctx->dictID = cctx->params.fParams.noDictIDFlag ? 0 : ZSTD_readLE32(dictPtr);
116626 -       dictPtr += 4;
116628 -       {
116629 -               size_t const hufHeaderSize = HUF_readCTable_wksp(cctx->hufTable, 255, dictPtr, dictEnd - dictPtr, cctx->tmpCounters, sizeof(cctx->tmpCounters));
116630 -               if (HUF_isError(hufHeaderSize))
116631 -                       return ERROR(dictionary_corrupted);
116632 -               dictPtr += hufHeaderSize;
116633 -       }
116635 -       {
116636 -               unsigned offcodeLog;
116637 -               size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd - dictPtr);
116638 -               if (FSE_isError(offcodeHeaderSize))
116639 -                       return ERROR(dictionary_corrupted);
116640 -               if (offcodeLog > OffFSELog)
116641 -                       return ERROR(dictionary_corrupted);
116642 -               /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
116643 -               CHECK_E(FSE_buildCTable_wksp(cctx->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
116644 -                       dictionary_corrupted);
116645 -               dictPtr += offcodeHeaderSize;
116646 -       }
116648 -       {
116649 -               short matchlengthNCount[MaxML + 1];
116650 -               unsigned matchlengthMaxValue = MaxML, matchlengthLog;
116651 -               size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd - dictPtr);
116652 -               if (FSE_isError(matchlengthHeaderSize))
116653 -                       return ERROR(dictionary_corrupted);
116654 -               if (matchlengthLog > MLFSELog)
116655 -                       return ERROR(dictionary_corrupted);
116656 -               /* Every match length code must have non-zero probability */
116657 -               CHECK_F(ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
116658 -               CHECK_E(
116659 -                   FSE_buildCTable_wksp(cctx->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
116660 -                   dictionary_corrupted);
116661 -               dictPtr += matchlengthHeaderSize;
116662 -       }
116664 -       {
116665 -               short litlengthNCount[MaxLL + 1];
116666 -               unsigned litlengthMaxValue = MaxLL, litlengthLog;
116667 -               size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd - dictPtr);
116668 -               if (FSE_isError(litlengthHeaderSize))
116669 -                       return ERROR(dictionary_corrupted);
116670 -               if (litlengthLog > LLFSELog)
116671 -                       return ERROR(dictionary_corrupted);
116672 -               /* Every literal length code must have non-zero probability */
116673 -               CHECK_F(ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
116674 -               CHECK_E(FSE_buildCTable_wksp(cctx->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
116675 -                       dictionary_corrupted);
116676 -               dictPtr += litlengthHeaderSize;
116677 -       }
116679 -       if (dictPtr + 12 > dictEnd)
116680 -               return ERROR(dictionary_corrupted);
116681 -       cctx->rep[0] = ZSTD_readLE32(dictPtr + 0);
116682 -       cctx->rep[1] = ZSTD_readLE32(dictPtr + 4);
116683 -       cctx->rep[2] = ZSTD_readLE32(dictPtr + 8);
116684 -       dictPtr += 12;
116686 -       {
116687 -               size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
116688 -               U32 offcodeMax = MaxOff;
116689 -               if (dictContentSize <= ((U32)-1) - 128 KB) {
116690 -                       U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
116691 -                       offcodeMax = ZSTD_highbit32(maxOffset);              /* Calculate minimum offset code required to represent maxOffset */
116692 -               }
116693 -               /* All offset values <= dictContentSize + 128 KB must be representable */
116694 -               CHECK_F(ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
116695 -               /* All repCodes must be <= dictContentSize and != 0*/
116696 -               {
116697 -                       U32 u;
116698 -                       for (u = 0; u < 3; u++) {
116699 -                               if (cctx->rep[u] == 0)
116700 -                                       return ERROR(dictionary_corrupted);
116701 -                               if (cctx->rep[u] > dictContentSize)
116702 -                                       return ERROR(dictionary_corrupted);
116703 -                       }
116704 -               }
116706 -               cctx->flagStaticTables = 1;
116707 -               cctx->flagStaticHufTable = HUF_repeat_valid;
116708 -               return ZSTD_loadDictionaryContent(cctx, dictPtr, dictContentSize);
116709 -       }
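Putting ZSTD_loadZstdDictionary's parsing steps together, a zstd dictionary is laid out as: 4-byte magic, 4-byte dictID, Huffman literals table, FSE offset-code table, FSE match-length table, FSE literal-length table, three 4-byte repcodes, then raw content. A sketch that walks only the fixed-size fields (little-endian reads; the entropy tables are elided because parsing them needs the HUF/FSE decoders):

#include <stdio.h>

static unsigned read_le32(const unsigned char *p)
{
	return p[0] | (p[1] << 8) | ((unsigned)p[2] << 16) | ((unsigned)p[3] << 24);
}

int main(void)
{
	/* minimal fake header: magic then dictID (entropy tables would follow) */
	unsigned char const dict[8] = { 0x37, 0xa4, 0x30, 0xec,   /* ZSTD_DICT_MAGIC 0xEC30A437 */
					0x2a, 0x00, 0x00, 0x00 }; /* dictID = 42 */
	if (read_le32(dict) != 0xEC30A437u) {
		puts("not a zstd dictionary: treat as raw content");
		return 1;
	}
	printf("dictID = %u\n", read_le32(dict + 4));
	return 0;
}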
116712 -/** ZSTD_compress_insertDictionary() :
116713 -*   @return : 0, or an error code */
116714 -static size_t ZSTD_compress_insertDictionary(ZSTD_CCtx *cctx, const void *dict, size_t dictSize)
116716 -       if ((dict == NULL) || (dictSize <= 8))
116717 -               return 0;
116719 -       /* dict as pure content */
116720 -       if ((ZSTD_readLE32(dict) != ZSTD_DICT_MAGIC) || (cctx->forceRawDict))
116721 -               return ZSTD_loadDictionaryContent(cctx, dict, dictSize);
116723 -       /* dict as zstd dictionary */
116724 -       return ZSTD_loadZstdDictionary(cctx, dict, dictSize);
116727 -/*! ZSTD_compressBegin_internal() :
116728 -*   @return : 0, or an error code */
116729 -static size_t ZSTD_compressBegin_internal(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, U64 pledgedSrcSize)
116731 -       ZSTD_compResetPolicy_e const crp = dictSize ? ZSTDcrp_fullReset : ZSTDcrp_continue;
116732 -       CHECK_F(ZSTD_resetCCtx_advanced(cctx, params, pledgedSrcSize, crp));
116733 -       return ZSTD_compress_insertDictionary(cctx, dict, dictSize);
116736 -/*! ZSTD_compressBegin_advanced() :
116737 -*   @return : 0, or an error code */
116738 -size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize)
116740 -       /* compression parameters verification and optimization */
116741 -       CHECK_F(ZSTD_checkCParams(params.cParams));
116742 -       return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, pledgedSrcSize);
116745 -size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, int compressionLevel)
116747 -       ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
116748 -       return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, 0);
116751 -size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel) { return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel); }
116753 -/*! ZSTD_writeEpilogue() :
116754 -*   Ends a frame.
116755 -*   @return : number of bytes written into dst (or an error code) */
116756 -static size_t ZSTD_writeEpilogue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity)
116758 -       BYTE *const ostart = (BYTE *)dst;
116759 -       BYTE *op = ostart;
116760 -       size_t fhSize = 0;
116762 -       if (cctx->stage == ZSTDcs_created)
116763 -               return ERROR(stage_wrong); /* init missing */
116765 -       /* special case : empty frame */
116766 -       if (cctx->stage == ZSTDcs_init) {
116767 -               fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, 0, 0);
116768 -               if (ZSTD_isError(fhSize))
116769 -                       return fhSize;
116770 -               dstCapacity -= fhSize;
116771 -               op += fhSize;
116772 -               cctx->stage = ZSTDcs_ongoing;
116773 -       }
116775 -       if (cctx->stage != ZSTDcs_ending) {
116776 -               /* write one last empty block, make it the "last" block */
116777 -               U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw) << 1) + 0;
116778 -               if (dstCapacity < 4)
116779 -                       return ERROR(dstSize_tooSmall);
116780 -               ZSTD_writeLE32(op, cBlockHeader24);
116781 -               op += ZSTD_blockHeaderSize;
116782 -               dstCapacity -= ZSTD_blockHeaderSize;
116783 -       }
116785 -       if (cctx->params.fParams.checksumFlag) {
116786 -               U32 const checksum = (U32)xxh64_digest(&cctx->xxhState);
116787 -               if (dstCapacity < 4)
116788 -                       return ERROR(dstSize_tooSmall);
116789 -               ZSTD_writeLE32(op, checksum);
116790 -               op += 4;
116791 -       }
116793 -       cctx->stage = ZSTDcs_created; /* return to "created but no init" status */
116794 -       return op - ostart;
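So a finished frame always ends with the 3-byte header of an empty raw block whose last-block bit is set, optionally followed by the low 32 bits of the XXH64 digest. A tiny encoder for just that tail (helper names are ours):

#include <stdio.h>

/* sketch of ZSTD_writeEpilogue's tail bytes: an empty, "last" raw block,
 * then (optionally) a 4-byte checksum */
static size_t write_epilogue(unsigned char *op, unsigned checksumFlag, unsigned checksum)
{
	unsigned const hdr = 1 /* last block */ + (0 /* bt_raw */ << 1) + (0 /* size */ << 3);
	size_t pos = 0;
	op[pos++] = (unsigned char)hdr;
	op[pos++] = (unsigned char)(hdr >> 8);
	op[pos++] = (unsigned char)(hdr >> 16);
	if (checksumFlag) {
		unsigned i;
		for (i = 0; i < 4; i++)
			op[pos++] = (unsigned char)(checksum >> (8 * i));
	}
	return pos; /* 3 or 7 bytes */
}

int main(void)
{
	unsigned char buf[7];
	printf("%zu\n", write_epilogue(buf, 1, 0xdeadbeefu));
	return 0;
}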
116797 -size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
116799 -       size_t endResult;
116800 -       size_t const cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 1);
116801 -       if (ZSTD_isError(cSize))
116802 -               return cSize;
116803 -       endResult = ZSTD_writeEpilogue(cctx, (char *)dst + cSize, dstCapacity - cSize);
116804 -       if (ZSTD_isError(endResult))
116805 -               return endResult;
116806 -       return cSize + endResult;
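The three entry points above compose into the chunk-by-chunk pattern: one ZSTD_compressBegin, any number of ZSTD_compressContinue calls over contiguous input, then a final ZSTD_compressEnd that appends the epilogue. A hedged sketch of that call sequence, assuming a cctx that was already created and sized elsewhere (setup and error-code decoding are elided):

#include <linux/zstd.h>

/* sketch: frame = header + N blocks + epilogue, driven chunk by chunk.
 * cctx creation/workspace sizing is elided; chunks must be contiguous
 * (see the src != cctx->nextSrc handling above). */
static size_t compress_chunks(ZSTD_CCtx *cctx, void *dst, size_t dstCap,
			      const void *src, size_t srcSize, size_t chunk)
{
	const char *ip = src;
	char *op = dst;
	size_t remaining = srcSize;
	size_t r = ZSTD_compressBegin(cctx, 3 /* compression level */);

	if (ZSTD_isError(r))
		return r;
	while (remaining > chunk) {
		size_t const c = ZSTD_compressContinue(cctx, op, dstCap, ip, chunk);
		if (ZSTD_isError(c))
			return c;
		op += c; dstCap -= c; ip += chunk; remaining -= chunk;
	}
	/* last chunk: ZSTD_compressEnd also writes the epilogue */
	{
		size_t const c = ZSTD_compressEnd(cctx, op, dstCap, ip, remaining);
		if (ZSTD_isError(c))
			return c;
		op += c;
	}
	return (size_t)(op - (char *)dst);
}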
116809 -static size_t ZSTD_compress_internal(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
116810 -                                    ZSTD_parameters params)
116812 -       CHECK_F(ZSTD_compressBegin_internal(cctx, dict, dictSize, params, srcSize));
116813 -       return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
116816 -size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
116817 -                              ZSTD_parameters params)
116819 -       return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
116822 -size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, ZSTD_parameters params)
116824 -       return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, NULL, 0, params);
116827 -/* =====  Dictionary API  ===== */
116829 -struct ZSTD_CDict_s {
116830 -       void *dictBuffer;
116831 -       const void *dictContent;
116832 -       size_t dictContentSize;
116833 -       ZSTD_CCtx *refContext;
116834 -}; /* typedef'd to ZSTD_CDict within "zstd.h" */
116836 -size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams) { return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CDict)); }
116838 -static ZSTD_CDict *ZSTD_createCDict_advanced(const void *dictBuffer, size_t dictSize, unsigned byReference, ZSTD_parameters params, ZSTD_customMem customMem)
116840 -       if (!customMem.customAlloc || !customMem.customFree)
116841 -               return NULL;
116843 -       {
116844 -               ZSTD_CDict *const cdict = (ZSTD_CDict *)ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
116845 -               ZSTD_CCtx *const cctx = ZSTD_createCCtx_advanced(customMem);
116847 -               if (!cdict || !cctx) {
116848 -                       ZSTD_free(cdict, customMem);
116849 -                       ZSTD_freeCCtx(cctx);
116850 -                       return NULL;
116851 -               }
116853 -               if ((byReference) || (!dictBuffer) || (!dictSize)) {
116854 -                       cdict->dictBuffer = NULL;
116855 -                       cdict->dictContent = dictBuffer;
116856 -               } else {
116857 -                       void *const internalBuffer = ZSTD_malloc(dictSize, customMem);
116858 -                       if (!internalBuffer) {
116859 -                               ZSTD_free(cctx, customMem);
116860 -                               ZSTD_free(cdict, customMem);
116861 -                               return NULL;
116862 -                       }
116863 -                       memcpy(internalBuffer, dictBuffer, dictSize);
116864 -                       cdict->dictBuffer = internalBuffer;
116865 -                       cdict->dictContent = internalBuffer;
116866 -               }
116868 -               {
116869 -                       size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0);
116870 -                       if (ZSTD_isError(errorCode)) {
116871 -                               ZSTD_free(cdict->dictBuffer, customMem);
116872 -                               ZSTD_free(cdict, customMem);
116873 -                               ZSTD_freeCCtx(cctx);
116874 -                               return NULL;
116875 -                       }
116876 -               }
116878 -               cdict->refContext = cctx;
116879 -               cdict->dictContentSize = dictSize;
116880 -               return cdict;
116881 -       }
116884 -ZSTD_CDict *ZSTD_initCDict(const void *dict, size_t dictSize, ZSTD_parameters params, void *workspace, size_t workspaceSize)
116886 -       ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
116887 -       return ZSTD_createCDict_advanced(dict, dictSize, 1, params, stackMem);
116890 -size_t ZSTD_freeCDict(ZSTD_CDict *cdict)
116892 -       if (cdict == NULL)
116893 -               return 0; /* support free on NULL */
116894 -       {
116895 -               ZSTD_customMem const cMem = cdict->refContext->customMem;
116896 -               ZSTD_freeCCtx(cdict->refContext);
116897 -               ZSTD_free(cdict->dictBuffer, cMem);
116898 -               ZSTD_free(cdict, cMem);
116899 -               return 0;
116900 -       }
116903 -static ZSTD_parameters ZSTD_getParamsFromCDict(const ZSTD_CDict *cdict) { return ZSTD_getParamsFromCCtx(cdict->refContext); }
116905 -size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict, unsigned long long pledgedSrcSize)
116907 -       if (cdict->dictContentSize)
116908 -               CHECK_F(ZSTD_copyCCtx(cctx, cdict->refContext, pledgedSrcSize))
116909 -       else {
116910 -               ZSTD_parameters params = cdict->refContext->params;
116911 -               params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
116912 -               CHECK_F(ZSTD_compressBegin_advanced(cctx, NULL, 0, params, pledgedSrcSize));
116913 -       }
116914 -       return 0;
116917 -/*! ZSTD_compress_usingCDict() :
116918 -*   Compression using a digested Dictionary.
116919 -*   Faster startup than ZSTD_compress_usingDict(), recommended when the same dictionary is used multiple times.
116920 -*   Note that compression level is decided during dictionary creation */
116921 -size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_CDict *cdict)
116923 -       CHECK_F(ZSTD_compressBegin_usingCDict(cctx, cdict, srcSize));
116925 -       if (cdict->refContext->params.fParams.contentSizeFlag == 1) {
116926 -               cctx->params.fParams.contentSizeFlag = 1;
116927 -               cctx->frameContentSize = srcSize;
116928 -       } else {
116929 -               cctx->params.fParams.contentSizeFlag = 0;
116930 -       }
116932 -       return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
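ZSTD_initCDict and ZSTD_compress_usingCDict pair up when the same dictionary is reused across many small inputs: digest it once, then skip the per-call table building. A hedged sketch using only the entry points defined above (the workspace is caller-supplied and its sizing is schematic here):

#include <linux/zstd.h>

/* sketch: digest a dictionary once, reuse it for many inputs */
static size_t compress_with_dict(ZSTD_CCtx *cctx, void *dst, size_t dstCap,
				 const void *src, size_t srcSize,
				 const void *dict, size_t dictSize,
				 void *wksp, size_t wkspSize)
{
	ZSTD_parameters const params = ZSTD_getParams(3, srcSize, dictSize);
	ZSTD_CDict *const cdict = ZSTD_initCDict(dict, dictSize, params, wksp, wkspSize);

	if (!cdict)
		return (size_t)-1; /* schematic error handling */
	/* note: the compression level was fixed at CDict creation time */
	return ZSTD_compress_usingCDict(cctx, dst, dstCap, src, srcSize, cdict);
}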
116935 -/* ******************************************************************
116936 -*  Streaming
116937 -********************************************************************/
116939 -typedef enum { zcss_init, zcss_load, zcss_flush, zcss_final } ZSTD_cStreamStage;
116941 -struct ZSTD_CStream_s {
116942 -       ZSTD_CCtx *cctx;
116943 -       ZSTD_CDict *cdictLocal;
116944 -       const ZSTD_CDict *cdict;
116945 -       char *inBuff;
116946 -       size_t inBuffSize;
116947 -       size_t inToCompress;
116948 -       size_t inBuffPos;
116949 -       size_t inBuffTarget;
116950 -       size_t blockSize;
116951 -       char *outBuff;
116952 -       size_t outBuffSize;
116953 -       size_t outBuffContentSize;
116954 -       size_t outBuffFlushedSize;
116955 -       ZSTD_cStreamStage stage;
116956 -       U32 checksum;
116957 -       U32 frameEnded;
116958 -       U64 pledgedSrcSize;
116959 -       U64 inputProcessed;
116960 -       ZSTD_parameters params;
116961 -       ZSTD_customMem customMem;
116962 -}; /* typedef'd to ZSTD_CStream within "zstd.h" */
116964 -size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams)
116966 -       size_t const inBuffSize = (size_t)1 << cParams.windowLog;
116967 -       size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, inBuffSize);
116968 -       size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
116970 -       return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize);
116973 -ZSTD_CStream *ZSTD_createCStream_advanced(ZSTD_customMem customMem)
116975 -       ZSTD_CStream *zcs;
116977 -       if (!customMem.customAlloc || !customMem.customFree)
116978 -               return NULL;
116980 -       zcs = (ZSTD_CStream *)ZSTD_malloc(sizeof(ZSTD_CStream), customMem);
116981 -       if (zcs == NULL)
116982 -               return NULL;
116983 -       memset(zcs, 0, sizeof(ZSTD_CStream));
116984 -       memcpy(&zcs->customMem, &customMem, sizeof(ZSTD_customMem));
116985 -       zcs->cctx = ZSTD_createCCtx_advanced(customMem);
116986 -       if (zcs->cctx == NULL) {
116987 -               ZSTD_freeCStream(zcs);
116988 -               return NULL;
116989 -       }
116990 -       return zcs;
116993 -size_t ZSTD_freeCStream(ZSTD_CStream *zcs)
116995 -       if (zcs == NULL)
116996 -               return 0; /* support free on NULL */
116997 -       {
116998 -               ZSTD_customMem const cMem = zcs->customMem;
116999 -               ZSTD_freeCCtx(zcs->cctx);
117000 -               zcs->cctx = NULL;
117001 -               ZSTD_freeCDict(zcs->cdictLocal);
117002 -               zcs->cdictLocal = NULL;
117003 -               ZSTD_free(zcs->inBuff, cMem);
117004 -               zcs->inBuff = NULL;
117005 -               ZSTD_free(zcs->outBuff, cMem);
117006 -               zcs->outBuff = NULL;
117007 -               ZSTD_free(zcs, cMem);
117008 -               return 0;
117009 -       }
117012 -/*======   Initialization   ======*/
117014 -size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; }
117015 -size_t ZSTD_CStreamOutSize(void) { return ZSTD_compressBound(ZSTD_BLOCKSIZE_ABSOLUTEMAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */; }
117017 -static size_t ZSTD_resetCStream_internal(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize)
117019 -       if (zcs->inBuffSize == 0)
117020 -               return ERROR(stage_wrong); /* zcs has not been initialized at least once => can't reset */
117022 -       if (zcs->cdict)
117023 -               CHECK_F(ZSTD_compressBegin_usingCDict(zcs->cctx, zcs->cdict, pledgedSrcSize))
117024 -       else
117025 -               CHECK_F(ZSTD_compressBegin_advanced(zcs->cctx, NULL, 0, zcs->params, pledgedSrcSize));
117027 -       zcs->inToCompress = 0;
117028 -       zcs->inBuffPos = 0;
117029 -       zcs->inBuffTarget = zcs->blockSize;
117030 -       zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
117031 -       zcs->stage = zcss_load;
117032 -       zcs->frameEnded = 0;
117033 -       zcs->pledgedSrcSize = pledgedSrcSize;
117034 -       zcs->inputProcessed = 0;
117035 -       return 0; /* ready to go */
117038 -size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize)
117041 -       zcs->params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
117043 -       return ZSTD_resetCStream_internal(zcs, pledgedSrcSize);
117046 -static size_t ZSTD_initCStream_advanced(ZSTD_CStream *zcs, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize)
117048 -       /* allocate buffers */
117049 -       {
117050 -               size_t const neededInBuffSize = (size_t)1 << params.cParams.windowLog;
117051 -               if (zcs->inBuffSize < neededInBuffSize) {
117052 -                       zcs->inBuffSize = neededInBuffSize;
117053 -                       ZSTD_free(zcs->inBuff, zcs->customMem);
117054 -                       zcs->inBuff = (char *)ZSTD_malloc(neededInBuffSize, zcs->customMem);
117055 -                       if (zcs->inBuff == NULL)
117056 -                               return ERROR(memory_allocation);
117057 -               }
117058 -               zcs->blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, neededInBuffSize);
117059 -       }
117060 -       if (zcs->outBuffSize < ZSTD_compressBound(zcs->blockSize) + 1) {
117061 -               zcs->outBuffSize = ZSTD_compressBound(zcs->blockSize) + 1;
117062 -               ZSTD_free(zcs->outBuff, zcs->customMem);
117063 -               zcs->outBuff = (char *)ZSTD_malloc(zcs->outBuffSize, zcs->customMem);
117064 -               if (zcs->outBuff == NULL)
117065 -                       return ERROR(memory_allocation);
117066 -       }
117068 -       if (dict && dictSize >= 8) {
117069 -               ZSTD_freeCDict(zcs->cdictLocal);
117070 -               zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, 0, params, zcs->customMem);
117071 -               if (zcs->cdictLocal == NULL)
117072 -                       return ERROR(memory_allocation);
117073 -               zcs->cdict = zcs->cdictLocal;
117074 -       } else
117075 -               zcs->cdict = NULL;
117077 -       zcs->checksum = params.fParams.checksumFlag > 0;
117078 -       zcs->params = params;
117080 -       return ZSTD_resetCStream_internal(zcs, pledgedSrcSize);
117083 -ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params, unsigned long long pledgedSrcSize, void *workspace, size_t workspaceSize)
117085 -       ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
117086 -       ZSTD_CStream *const zcs = ZSTD_createCStream_advanced(stackMem);
117087 -       if (zcs) {
117088 -               size_t const code = ZSTD_initCStream_advanced(zcs, NULL, 0, params, pledgedSrcSize);
117089 -               if (ZSTD_isError(code)) {
117090 -                       return NULL;
117091 -               }
117092 -       }
117093 -       return zcs;
117096 -ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict, unsigned long long pledgedSrcSize, void *workspace, size_t workspaceSize)
117098 -       ZSTD_parameters const params = ZSTD_getParamsFromCDict(cdict);
117099 -       ZSTD_CStream *const zcs = ZSTD_initCStream(params, pledgedSrcSize, workspace, workspaceSize);
117100 -       if (zcs) {
117101 -               zcs->cdict = cdict;
117102 -               if (ZSTD_isError(ZSTD_resetCStream_internal(zcs, pledgedSrcSize))) {
117103 -                       return NULL;
117104 -               }
117105 -       }
117106 -       return zcs;
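Because ZSTD_initCStream carves everything (stream object, its CCtx, the in/out buffers and any local CDict) out of one caller-supplied workspace, sizing that workspace with ZSTD_CStreamWorkspaceBound is the first step. A hedged sketch of the setup sequence (the allocator is passed in to keep the sketch environment-neutral; error handling is schematic):

#include <linux/zstd.h>

/* sketch: size the single workspace, then build the stream out of it */
static ZSTD_CStream *open_cstream(unsigned long long pledgedSrcSize,
				  void *(*alloc)(size_t))
{
	ZSTD_parameters const params = ZSTD_getParams(3, pledgedSrcSize, 0);
	size_t const wkspSize = ZSTD_CStreamWorkspaceBound(params.cParams);
	void *const wksp = alloc(wkspSize);

	if (!wksp)
		return NULL;
	/* zcs, its CCtx and both buffers all live inside wksp */
	return ZSTD_initCStream(params, pledgedSrcSize, wksp, wkspSize);
}

The caller retains ownership of wksp and releases it once the stream is done; the ZSTD_free* calls above only return memory to that same stack allocator.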
117109 -/*======   Compression   ======*/
117111 -typedef enum { zsf_gather, zsf_flush, zsf_end } ZSTD_flush_e;
117113 -ZSTD_STATIC size_t ZSTD_limitCopy(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
117115 -       size_t const length = MIN(dstCapacity, srcSize);
117116 -       memcpy(dst, src, length);
117117 -       return length;
117120 -static size_t ZSTD_compressStream_generic(ZSTD_CStream *zcs, void *dst, size_t *dstCapacityPtr, const void *src, size_t *srcSizePtr, ZSTD_flush_e const flush)
117122 -       U32 someMoreWork = 1;
117123 -       const char *const istart = (const char *)src;
117124 -       const char *const iend = istart + *srcSizePtr;
117125 -       const char *ip = istart;
117126 -       char *const ostart = (char *)dst;
117127 -       char *const oend = ostart + *dstCapacityPtr;
117128 -       char *op = ostart;
117130 -       while (someMoreWork) {
117131 -               switch (zcs->stage) {
117132 -               case zcss_init:
117133 -                       return ERROR(init_missing); /* call ZSTD_initCStream() first! */
117135 -               case zcss_load:
117136 -                       /* complete inBuffer */
117137 -                       {
117138 -                               size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
117139 -                               size_t const loaded = ZSTD_limitCopy(zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend - ip);
117140 -                               zcs->inBuffPos += loaded;
117141 -                               ip += loaded;
117142 -                               if ((zcs->inBuffPos == zcs->inToCompress) || (!flush && (toLoad != loaded))) {
117143 -                                       someMoreWork = 0;
117144 -                                       break; /* not enough input to get a full block : stop there, wait for more */
117145 -                               }
117146 -                       }
117147 -                       /* compress current block (note: this stage cannot be stopped in the middle) */
117148 -                       {
117149 -                               void *cDst;
117150 -                               size_t cSize;
117151 -                               size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
117152 -                               size_t oSize = oend - op;
117153 -                               if (oSize >= ZSTD_compressBound(iSize))
117154 -                                       cDst = op; /* compress directly into output buffer (avoid flush stage) */
117155 -                               else
117156 -                                       cDst = zcs->outBuff, oSize = zcs->outBuffSize;
117157 -                               cSize = (flush == zsf_end) ? ZSTD_compressEnd(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize)
117158 -                                                          : ZSTD_compressContinue(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize);
117159 -                               if (ZSTD_isError(cSize))
117160 -                                       return cSize;
117161 -                               if (flush == zsf_end)
117162 -                                       zcs->frameEnded = 1;
117163 -                               /* prepare next block */
117164 -                               zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
117165 -                               if (zcs->inBuffTarget > zcs->inBuffSize)
117166 -                                       zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize; /* note : inBuffSize >= blockSize */
117167 -                               zcs->inToCompress = zcs->inBuffPos;
117168 -                               if (cDst == op) {
117169 -                                       op += cSize;
117170 -                                       break;
117171 -                               } /* no need to flush */
117172 -                               zcs->outBuffContentSize = cSize;
117173 -                               zcs->outBuffFlushedSize = 0;
117174 -                               zcs->stage = zcss_flush; /* pass-through to flush stage */
117175 -                       }
117176 -                       fallthrough;
117178 -               case zcss_flush: {
117179 -                       size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
117180 -                       size_t const flushed = ZSTD_limitCopy(op, oend - op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
117181 -                       op += flushed;
117182 -                       zcs->outBuffFlushedSize += flushed;
117183 -                       if (toFlush != flushed) {
117184 -                               someMoreWork = 0;
117185 -                               break;
117186 -                       } /* dst too small to store flushed data : stop there */
117187 -                       zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
117188 -                       zcs->stage = zcss_load;
117189 -                       break;
117190 -               }
117192 -               case zcss_final:
117193 -                       someMoreWork = 0; /* do nothing */
117194 -                       break;
117196 -               default:
117197 -                       return ERROR(GENERIC); /* impossible */
117198 -               }
117199 -       }
117201 -       *srcSizePtr = ip - istart;
117202 -       *dstCapacityPtr = op - ostart;
117203 -       zcs->inputProcessed += *srcSizePtr;
117204 -       if (zcs->frameEnded)
117205 -               return 0;
117206 -       {
117207 -               size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos;
117208 -               if (hintInSize == 0)
117209 -                       hintInSize = zcs->blockSize;
117210 -               return hintInSize;
117211 -       }
117214 -size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output, ZSTD_inBuffer *input)
117216 -       size_t sizeRead = input->size - input->pos;
117217 -       size_t sizeWritten = output->size - output->pos;
117218 -       size_t const result =
117219 -           ZSTD_compressStream_generic(zcs, (char *)(output->dst) + output->pos, &sizeWritten, (const char *)(input->src) + input->pos, &sizeRead, zsf_gather);
117220 -       input->pos += sizeRead;
117221 -       output->pos += sizeWritten;
117222 -       return result;
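For context, the removed streaming entry points (ZSTD_compressStream above, ZSTD_flushStream and ZSTD_endStream below) were designed to be driven in a loop. A minimal sketch of such a driver, assuming the v1.x kernel API this file implements; the function name and the -1 error convention are illustrative only:

static int stream_compress_sketch(ZSTD_CStream *zcs,
                                  const void *src, size_t srcSize,
                                  void *dst, size_t dstCapacity,
                                  size_t *producedSize)
{
    ZSTD_inBuffer in = { src, srcSize, 0 };
    ZSTD_outBuffer out = { dst, dstCapacity, 0 };

    while (in.pos < in.size) {
        /* return value is an error code or a hint for the next input size */
        size_t const hint = ZSTD_compressStream(zcs, &out, &in);
        if (ZSTD_isError(hint))
            return -1;
    }
    for (;;) {
        /* ZSTD_endStream() reports how many bytes remain to be flushed */
        size_t const remaining = ZSTD_endStream(zcs, &out);
        if (ZSTD_isError(remaining))
            return -1;
        if (remaining == 0)
            break;              /* frame epilogue fully written */
    }
    *producedSize = out.pos;
    return 0;
}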
117225 -/*======   Finalize   ======*/
117227 -/*! ZSTD_flushStream() :
117228 -*   @return : amount of data remaining to flush */
117229 -size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output)
117231 -       size_t srcSize = 0;
117232 -       size_t sizeWritten = output->size - output->pos;
117233 -       size_t const result = ZSTD_compressStream_generic(zcs, (char *)(output->dst) + output->pos, &sizeWritten, &srcSize,
117234 -                                                         &srcSize, /* use a valid src address instead of NULL */
117235 -                                                         zsf_flush);
117236 -       output->pos += sizeWritten;
117237 -       if (ZSTD_isError(result))
117238 -               return result;
117239 -       return zcs->outBuffContentSize - zcs->outBuffFlushedSize; /* remaining to flush */
117242 -size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output)
117244 -       BYTE *const ostart = (BYTE *)(output->dst) + output->pos;
117245 -       BYTE *const oend = (BYTE *)(output->dst) + output->size;
117246 -       BYTE *op = ostart;
117248 -       if ((zcs->pledgedSrcSize) && (zcs->inputProcessed != zcs->pledgedSrcSize))
117249 -               return ERROR(srcSize_wrong); /* pledgedSrcSize not respected */
117251 -       if (zcs->stage != zcss_final) {
117252 -               /* flush whatever remains */
117253 -               size_t srcSize = 0;
117254 -               size_t sizeWritten = output->size - output->pos;
117255 -               size_t const notEnded =
117256 -                   ZSTD_compressStream_generic(zcs, ostart, &sizeWritten, &srcSize, &srcSize, zsf_end); /* use a valid src address instead of NULL */
117257 -               size_t const remainingToFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
117258 -               op += sizeWritten;
117259 -               if (remainingToFlush) {
117260 -                       output->pos += sizeWritten;
117261 -                       return remainingToFlush + ZSTD_BLOCKHEADERSIZE /* final empty block */ + (zcs->checksum * 4);
117262 -               }
117263 -               /* create epilogue */
117264 -               zcs->stage = zcss_final;
117265 -               zcs->outBuffContentSize = !notEnded ? 0 : ZSTD_compressEnd(zcs->cctx, zcs->outBuff, zcs->outBuffSize, NULL,
117266 -                                                                          0); /* write epilogue, including final empty block, into outBuff */
117267 -       }
117269 -       /* flush epilogue */
117270 -       {
117271 -               size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
117272 -               size_t const flushed = ZSTD_limitCopy(op, oend - op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
117273 -               op += flushed;
117274 -               zcs->outBuffFlushedSize += flushed;
117275 -               output->pos += op - ostart;
117276 -               if (toFlush == flushed)
117277 -                       zcs->stage = zcss_init; /* end reached */
117278 -               return toFlush - flushed;
117279 -       }
117282 -/*-=====  Pre-defined compression levels  =====-*/
117284 -#define ZSTD_DEFAULT_CLEVEL 1
117285 -#define ZSTD_MAX_CLEVEL 22
117286 -int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
117288 -static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL + 1] = {
117289 -    {
117290 -       /* "default" */
117291 -       /* W,  C,  H,  S,  L, TL, strat */
117292 -       {18, 12, 12, 1, 7, 16, ZSTD_fast},    /* level  0 - never used */
117293 -       {19, 13, 14, 1, 7, 16, ZSTD_fast},    /* level  1 */
117294 -       {19, 15, 16, 1, 6, 16, ZSTD_fast},    /* level  2 */
117295 -       {20, 16, 17, 1, 5, 16, ZSTD_dfast},   /* level  3.*/
117296 -       {20, 18, 18, 1, 5, 16, ZSTD_dfast},   /* level  4.*/
117297 -       {20, 15, 18, 3, 5, 16, ZSTD_greedy},  /* level  5 */
117298 -       {21, 16, 19, 2, 5, 16, ZSTD_lazy},    /* level  6 */
117299 -       {21, 17, 20, 3, 5, 16, ZSTD_lazy},    /* level  7 */
117300 -       {21, 18, 20, 3, 5, 16, ZSTD_lazy2},   /* level  8 */
117301 -       {21, 20, 20, 3, 5, 16, ZSTD_lazy2},   /* level  9 */
117302 -       {21, 19, 21, 4, 5, 16, ZSTD_lazy2},   /* level 10 */
117303 -       {22, 20, 22, 4, 5, 16, ZSTD_lazy2},   /* level 11 */
117304 -       {22, 20, 22, 5, 5, 16, ZSTD_lazy2},   /* level 12 */
117305 -       {22, 21, 22, 5, 5, 16, ZSTD_lazy2},   /* level 13 */
117306 -       {22, 21, 22, 6, 5, 16, ZSTD_lazy2},   /* level 14 */
117307 -       {22, 21, 21, 5, 5, 16, ZSTD_btlazy2}, /* level 15 */
117308 -       {23, 22, 22, 5, 5, 16, ZSTD_btlazy2}, /* level 16 */
117309 -       {23, 21, 22, 4, 5, 24, ZSTD_btopt},   /* level 17 */
117310 -       {23, 23, 22, 6, 5, 32, ZSTD_btopt},   /* level 18 */
117311 -       {23, 23, 22, 6, 3, 48, ZSTD_btopt},   /* level 19 */
117312 -       {25, 25, 23, 7, 3, 64, ZSTD_btopt2},  /* level 20 */
117313 -       {26, 26, 23, 7, 3, 256, ZSTD_btopt2}, /* level 21 */
117314 -       {27, 27, 25, 9, 3, 512, ZSTD_btopt2}, /* level 22 */
117315 -    },
117316 -    {
117317 -       /* for srcSize <= 256 KB */
117318 -       /* W,  C,  H,  S,  L,  T, strat */
117319 -       {0, 0, 0, 0, 0, 0, ZSTD_fast},   /* level  0 - not used */
117320 -       {18, 13, 14, 1, 6, 8, ZSTD_fast},      /* level  1 */
117321 -       {18, 14, 13, 1, 5, 8, ZSTD_dfast},     /* level  2 */
117322 -       {18, 16, 15, 1, 5, 8, ZSTD_dfast},     /* level  3 */
117323 -       {18, 15, 17, 1, 5, 8, ZSTD_greedy},    /* level  4.*/
117324 -       {18, 16, 17, 4, 5, 8, ZSTD_greedy},    /* level  5.*/
117325 -       {18, 16, 17, 3, 5, 8, ZSTD_lazy},      /* level  6.*/
117326 -       {18, 17, 17, 4, 4, 8, ZSTD_lazy},      /* level  7 */
117327 -       {18, 17, 17, 4, 4, 8, ZSTD_lazy2},     /* level  8 */
117328 -       {18, 17, 17, 5, 4, 8, ZSTD_lazy2},     /* level  9 */
117329 -       {18, 17, 17, 6, 4, 8, ZSTD_lazy2},     /* level 10 */
117330 -       {18, 18, 17, 6, 4, 8, ZSTD_lazy2},     /* level 11.*/
117331 -       {18, 18, 17, 7, 4, 8, ZSTD_lazy2},     /* level 12.*/
117332 -       {18, 19, 17, 6, 4, 8, ZSTD_btlazy2},   /* level 13 */
117333 -       {18, 18, 18, 4, 4, 16, ZSTD_btopt},    /* level 14.*/
117334 -       {18, 18, 18, 4, 3, 16, ZSTD_btopt},    /* level 15.*/
117335 -       {18, 19, 18, 6, 3, 32, ZSTD_btopt},    /* level 16.*/
117336 -       {18, 19, 18, 8, 3, 64, ZSTD_btopt},    /* level 17.*/
117337 -       {18, 19, 18, 9, 3, 128, ZSTD_btopt},   /* level 18.*/
117338 -       {18, 19, 18, 10, 3, 256, ZSTD_btopt},  /* level 19.*/
117339 -       {18, 19, 18, 11, 3, 512, ZSTD_btopt2}, /* level 20.*/
117340 -       {18, 19, 18, 12, 3, 512, ZSTD_btopt2}, /* level 21.*/
117341 -       {18, 19, 18, 13, 3, 512, ZSTD_btopt2}, /* level 22.*/
117342 -    },
117343 -    {
117344 -       /* for srcSize <= 128 KB */
117345 -       /* W,  C,  H,  S,  L,  T, strat */
117346 -       {17, 12, 12, 1, 7, 8, ZSTD_fast},      /* level  0 - not used */
117347 -       {17, 12, 13, 1, 6, 8, ZSTD_fast},      /* level  1 */
117348 -       {17, 13, 16, 1, 5, 8, ZSTD_fast},      /* level  2 */
117349 -       {17, 16, 16, 2, 5, 8, ZSTD_dfast},     /* level  3 */
117350 -       {17, 13, 15, 3, 4, 8, ZSTD_greedy},    /* level  4 */
117351 -       {17, 15, 17, 4, 4, 8, ZSTD_greedy},    /* level  5 */
117352 -       {17, 16, 17, 3, 4, 8, ZSTD_lazy},      /* level  6 */
117353 -       {17, 15, 17, 4, 4, 8, ZSTD_lazy2},     /* level  7 */
117354 -       {17, 17, 17, 4, 4, 8, ZSTD_lazy2},     /* level  8 */
117355 -       {17, 17, 17, 5, 4, 8, ZSTD_lazy2},     /* level  9 */
117356 -       {17, 17, 17, 6, 4, 8, ZSTD_lazy2},     /* level 10 */
117357 -       {17, 17, 17, 7, 4, 8, ZSTD_lazy2},     /* level 11 */
117358 -       {17, 17, 17, 8, 4, 8, ZSTD_lazy2},     /* level 12 */
117359 -       {17, 18, 17, 6, 4, 8, ZSTD_btlazy2},   /* level 13.*/
117360 -       {17, 17, 17, 7, 3, 8, ZSTD_btopt},     /* level 14.*/
117361 -       {17, 17, 17, 7, 3, 16, ZSTD_btopt},    /* level 15.*/
117362 -       {17, 18, 17, 7, 3, 32, ZSTD_btopt},    /* level 16.*/
117363 -       {17, 18, 17, 7, 3, 64, ZSTD_btopt},    /* level 17.*/
117364 -       {17, 18, 17, 7, 3, 256, ZSTD_btopt},   /* level 18.*/
117365 -       {17, 18, 17, 8, 3, 256, ZSTD_btopt},   /* level 19.*/
117366 -       {17, 18, 17, 9, 3, 256, ZSTD_btopt2},  /* level 20.*/
117367 -       {17, 18, 17, 10, 3, 256, ZSTD_btopt2}, /* level 21.*/
117368 -       {17, 18, 17, 11, 3, 512, ZSTD_btopt2}, /* level 22.*/
117369 -    },
117370 -    {
117371 -       /* for srcSize <= 16 KB */
117372 -       /* W,  C,  H,  S,  L,  T, strat */
117373 -       {14, 12, 12, 1, 7, 6, ZSTD_fast},      /* level  0 - not used */
117374 -       {14, 14, 14, 1, 6, 6, ZSTD_fast},      /* level  1 */
117375 -       {14, 14, 14, 1, 4, 6, ZSTD_fast},      /* level  2 */
117376 -       {14, 14, 14, 1, 4, 6, ZSTD_dfast},     /* level  3.*/
117377 -       {14, 14, 14, 4, 4, 6, ZSTD_greedy},    /* level  4.*/
117378 -       {14, 14, 14, 3, 4, 6, ZSTD_lazy},      /* level  5.*/
117379 -       {14, 14, 14, 4, 4, 6, ZSTD_lazy2},     /* level  6 */
117380 -       {14, 14, 14, 5, 4, 6, ZSTD_lazy2},     /* level  7 */
117381 -       {14, 14, 14, 6, 4, 6, ZSTD_lazy2},     /* level  8.*/
117382 -       {14, 15, 14, 6, 4, 6, ZSTD_btlazy2},   /* level  9.*/
117383 -       {14, 15, 14, 3, 3, 6, ZSTD_btopt},     /* level 10.*/
117384 -       {14, 15, 14, 6, 3, 8, ZSTD_btopt},     /* level 11.*/
117385 -       {14, 15, 14, 6, 3, 16, ZSTD_btopt},    /* level 12.*/
117386 -       {14, 15, 14, 6, 3, 24, ZSTD_btopt},    /* level 13.*/
117387 -       {14, 15, 15, 6, 3, 48, ZSTD_btopt},    /* level 14.*/
117388 -       {14, 15, 15, 6, 3, 64, ZSTD_btopt},    /* level 15.*/
117389 -       {14, 15, 15, 6, 3, 96, ZSTD_btopt},    /* level 16.*/
117390 -       {14, 15, 15, 6, 3, 128, ZSTD_btopt},   /* level 17.*/
117391 -       {14, 15, 15, 6, 3, 256, ZSTD_btopt},   /* level 18.*/
117392 -       {14, 15, 15, 7, 3, 256, ZSTD_btopt},   /* level 19.*/
117393 -       {14, 15, 15, 8, 3, 256, ZSTD_btopt2},  /* level 20.*/
117394 -       {14, 15, 15, 9, 3, 256, ZSTD_btopt2},  /* level 21.*/
117395 -       {14, 15, 15, 10, 3, 256, ZSTD_btopt2}, /* level 22.*/
117396 -    },
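For readability, the single-letter column headers used by all four tables above map onto ZSTD_compressionParameters fields as follows (field names assumed from the v1.x API this removed file belongs to): W = windowLog, C = chainLog, H = hashLog, S = searchLog, L = searchLength, TL/T = targetLength. So the default-table level 1 row, {19, 13, 14, 1, 7, 16, ZSTD_fast}, reads as:

ZSTD_compressionParameters const level1 = {
    .windowLog = 19, .chainLog = 13, .hashLog = 14,
    .searchLog = 1, .searchLength = 7, .targetLength = 16,
    .strategy = ZSTD_fast,
};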
117399 -/*! ZSTD_getCParams() :
117400 -*   @return ZSTD_compressionParameters structure for a selected compression level, `srcSize` and `dictSize`.
117401 -*   Size values are optional, provide 0 if not known or unused */
117402 -ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSize, size_t dictSize)
117404 -       ZSTD_compressionParameters cp;
117405 -       size_t const addedSize = srcSize ? 0 : 500;
117406 -       U64 const rSize = srcSize + dictSize ? srcSize + dictSize + addedSize : (U64)-1;
117407 -       U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); /* intentional underflow for srcSizeHint == 0 */
117408 -       if (compressionLevel <= 0)
117409 -               compressionLevel = ZSTD_DEFAULT_CLEVEL; /* 0 == default; no negative compressionLevel yet */
117410 -       if (compressionLevel > ZSTD_MAX_CLEVEL)
117411 -               compressionLevel = ZSTD_MAX_CLEVEL;
117412 -       cp = ZSTD_defaultCParameters[tableID][compressionLevel];
117413 -       if (ZSTD_32bits()) { /* auto-correction, for 32-bits mode */
117414 -               if (cp.windowLog > ZSTD_WINDOWLOG_MAX)
117415 -                       cp.windowLog = ZSTD_WINDOWLOG_MAX;
117416 -               if (cp.chainLog > ZSTD_CHAINLOG_MAX)
117417 -                       cp.chainLog = ZSTD_CHAINLOG_MAX;
117418 -               if (cp.hashLog > ZSTD_HASHLOG_MAX)
117419 -                       cp.hashLog = ZSTD_HASHLOG_MAX;
117420 -       }
117421 -       cp = ZSTD_adjustCParams(cp, srcSize, dictSize);
117422 -       return cp;
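The tableID computation relies on boolean arithmetic: each <= comparison contributes 1 when true, so the sum picks the row for the smallest matching size class (0 = default, 3 = at most 16 KB). A standalone illustration, assuming zstd's usual KB macro:

#include <stdio.h>

#define KB *(1 << 10)   /* zstd convention: 256 KB expands to 256 * 1024 */

int main(void)
{
    unsigned long long const sizes[] = { (unsigned long long)-1, /* unknown */
                                         300 KB, 200 KB, 100 KB, 10 KB };
    size_t i;

    for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
        unsigned long long const rSize = sizes[i];
        int const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
        printf("rSize=%llu -> table %d\n", rSize, tableID);   /* 0, 0, 1, 2, 3 */
    }
    return 0;
}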
117425 -/*! ZSTD_getParams() :
117426 -*   same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`).
117427 -*   All fields of `ZSTD_frameParameters` are set to default (0) */
117428 -ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSize, size_t dictSize)
117430 -       ZSTD_parameters params;
117431 -       ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSize, dictSize);
117432 -       memset(&params, 0, sizeof(params));
117433 -       params.cParams = cParams;
117434 -       return params;
117437 -EXPORT_SYMBOL(ZSTD_maxCLevel);
117438 -EXPORT_SYMBOL(ZSTD_compressBound);
117440 -EXPORT_SYMBOL(ZSTD_CCtxWorkspaceBound);
117441 -EXPORT_SYMBOL(ZSTD_initCCtx);
117442 -EXPORT_SYMBOL(ZSTD_compressCCtx);
117443 -EXPORT_SYMBOL(ZSTD_compress_usingDict);
117445 -EXPORT_SYMBOL(ZSTD_CDictWorkspaceBound);
117446 -EXPORT_SYMBOL(ZSTD_initCDict);
117447 -EXPORT_SYMBOL(ZSTD_compress_usingCDict);
117449 -EXPORT_SYMBOL(ZSTD_CStreamWorkspaceBound);
117450 -EXPORT_SYMBOL(ZSTD_initCStream);
117451 -EXPORT_SYMBOL(ZSTD_initCStream_usingCDict);
117452 -EXPORT_SYMBOL(ZSTD_resetCStream);
117453 -EXPORT_SYMBOL(ZSTD_compressStream);
117454 -EXPORT_SYMBOL(ZSTD_flushStream);
117455 -EXPORT_SYMBOL(ZSTD_endStream);
117456 -EXPORT_SYMBOL(ZSTD_CStreamInSize);
117457 -EXPORT_SYMBOL(ZSTD_CStreamOutSize);
117459 -EXPORT_SYMBOL(ZSTD_getCParams);
117460 -EXPORT_SYMBOL(ZSTD_getParams);
117461 -EXPORT_SYMBOL(ZSTD_checkCParams);
117462 -EXPORT_SYMBOL(ZSTD_adjustCParams);
117464 -EXPORT_SYMBOL(ZSTD_compressBegin);
117465 -EXPORT_SYMBOL(ZSTD_compressBegin_usingDict);
117466 -EXPORT_SYMBOL(ZSTD_compressBegin_advanced);
117467 -EXPORT_SYMBOL(ZSTD_copyCCtx);
117468 -EXPORT_SYMBOL(ZSTD_compressBegin_usingCDict);
117469 -EXPORT_SYMBOL(ZSTD_compressContinue);
117470 -EXPORT_SYMBOL(ZSTD_compressEnd);
117472 -EXPORT_SYMBOL(ZSTD_getBlockSizeMax);
117473 -EXPORT_SYMBOL(ZSTD_compressBlock);
117475 -MODULE_LICENSE("Dual BSD/GPL");
117476 -MODULE_DESCRIPTION("Zstd Compressor");
117477 diff --git a/lib/zstd/compress/fse_compress.c b/lib/zstd/compress/fse_compress.c
117478 new file mode 100644
117479 index 000000000000..436985b620e5
117480 --- /dev/null
117481 +++ b/lib/zstd/compress/fse_compress.c
117482 @@ -0,0 +1,625 @@
117483 +/* ******************************************************************
117484 + * FSE : Finite State Entropy encoder
117485 + * Copyright (c) Yann Collet, Facebook, Inc.
117487 + *  You can contact the author at :
117488 + *  - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
117489 + *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
117491 + * This source code is licensed under both the BSD-style license (found in the
117492 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
117493 + * in the COPYING file in the root directory of this source tree).
117494 + * You may select, at your option, one of the above-listed licenses.
117495 +****************************************************************** */
117497 +/* **************************************************************
117498 +*  Includes
117499 +****************************************************************/
117500 +#include "../common/compiler.h"
117501 +#include "../common/mem.h"        /* U32, U16, etc. */
117502 +#include "../common/debug.h"      /* assert, DEBUGLOG */
117503 +#include "hist.h"       /* HIST_count_wksp */
117504 +#include "../common/bitstream.h"
117505 +#define FSE_STATIC_LINKING_ONLY
117506 +#include "../common/fse.h"
117507 +#include "../common/error_private.h"
117508 +#define ZSTD_DEPS_NEED_MALLOC
117509 +#define ZSTD_DEPS_NEED_MATH64
117510 +#include "../common/zstd_deps.h"  /* ZSTD_malloc, ZSTD_free, ZSTD_memcpy, ZSTD_memset */
117513 +/* **************************************************************
117514 +*  Error Management
117515 +****************************************************************/
117516 +#define FSE_isError ERR_isError
117519 +/* **************************************************************
117520 +*  Templates
117521 +****************************************************************/
117522 +/*
117523 +  This template file is designed to be included
117524 +  for type-specific functions (template emulation in C).
117525 +  The objective is to write these functions only once, for improved maintenance.
117526 +*/
117528 +/* safety checks */
117529 +#ifndef FSE_FUNCTION_EXTENSION
117530 +#  error "FSE_FUNCTION_EXTENSION must be defined"
117531 +#endif
117532 +#ifndef FSE_FUNCTION_TYPE
117533 +#  error "FSE_FUNCTION_TYPE must be defined"
117534 +#endif
117536 +/* Function names */
117537 +#define FSE_CAT(X,Y) X##Y
117538 +#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
117539 +#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
117542 +/* Function templates */
117544 +/* FSE_buildCTable_wksp() :
117545 + * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
117546 + * wkspSize should be sized for the worst-case situation, which is `(1 << max_tableLog) * sizeof(FSE_FUNCTION_TYPE)`
117547 + * workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
117548 + */
117549 +size_t FSE_buildCTable_wksp(FSE_CTable* ct,
117550 +                      const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
117551 +                            void* workSpace, size_t wkspSize)
117553 +    U32 const tableSize = 1 << tableLog;
117554 +    U32 const tableMask = tableSize - 1;
117555 +    void* const ptr = ct;
117556 +    U16* const tableU16 = ( (U16*) ptr) + 2;
117557 +    void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? tableSize>>1 : 1) ;
117558 +    FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
117559 +    U32 const step = FSE_TABLESTEP(tableSize);
117561 +    U32* cumul = (U32*)workSpace;
117562 +    FSE_FUNCTION_TYPE* tableSymbol = (FSE_FUNCTION_TYPE*)(cumul + (maxSymbolValue + 2));
117564 +    U32 highThreshold = tableSize-1;
117566 +    if ((size_t)workSpace & 3) return ERROR(GENERIC); /* Must be 4 byte aligned */
117567 +    if (FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) > wkspSize) return ERROR(tableLog_tooLarge);
117568 +    /* CTable header */
117569 +    tableU16[-2] = (U16) tableLog;
117570 +    tableU16[-1] = (U16) maxSymbolValue;
117571 +    assert(tableLog < 16);   /* required for threshold strategy to work */
117573 +    /* For explanations on how to distribute symbol values over the table :
117574 +     * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
117576 +     #ifdef __clang_analyzer__
117577 +     ZSTD_memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize);   /* useless initialization, just to keep scan-build happy */
117578 +     #endif
117580 +    /* symbol start positions */
117581 +    {   U32 u;
117582 +        cumul[0] = 0;
117583 +        for (u=1; u <= maxSymbolValue+1; u++) {
117584 +            if (normalizedCounter[u-1]==-1) {  /* Low proba symbol */
117585 +                cumul[u] = cumul[u-1] + 1;
117586 +                tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
117587 +            } else {
117588 +                cumul[u] = cumul[u-1] + normalizedCounter[u-1];
117589 +        }   }
117590 +        cumul[maxSymbolValue+1] = tableSize+1;
117591 +    }
117593 +    /* Spread symbols */
117594 +    {   U32 position = 0;
117595 +        U32 symbol;
117596 +        for (symbol=0; symbol<=maxSymbolValue; symbol++) {
117597 +            int nbOccurrences;
117598 +            int const freq = normalizedCounter[symbol];
117599 +            for (nbOccurrences=0; nbOccurrences<freq; nbOccurrences++) {
117600 +                tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
117601 +                position = (position + step) & tableMask;
117602 +                while (position > highThreshold)
117603 +                    position = (position + step) & tableMask;   /* Low proba area */
117604 +        }   }
117606 +        assert(position==0);  /* Must have initialized all positions */
117607 +    }
117609 +    /* Build table */
117610 +    {   U32 u; for (u=0; u<tableSize; u++) {
117611 +        FSE_FUNCTION_TYPE s = tableSymbol[u];   /* note : static analyzer may not understand tableSymbol is properly initialized */
117612 +        tableU16[cumul[s]++] = (U16) (tableSize+u);   /* TableU16 : sorted by symbol order; gives next state value */
117613 +    }   }
117615 +    /* Build Symbol Transformation Table */
117616 +    {   unsigned total = 0;
117617 +        unsigned s;
117618 +        for (s=0; s<=maxSymbolValue; s++) {
117619 +            switch (normalizedCounter[s])
117620 +            {
117621 +            case  0:
117622 +                /* filling nonetheless, for compatibility with FSE_getMaxNbBits() */
117623 +                symbolTT[s].deltaNbBits = ((tableLog+1) << 16) - (1<<tableLog);
117624 +                break;
117626 +            case -1:
117627 +            case  1:
117628 +                symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog);
117629 +                symbolTT[s].deltaFindState = total - 1;
117630 +                total ++;
117631 +                break;
117632 +            default :
117633 +                {
117634 +                    U32 const maxBitsOut = tableLog - BIT_highbit32 (normalizedCounter[s]-1);
117635 +                    U32 const minStatePlus = normalizedCounter[s] << maxBitsOut;
117636 +                    symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
117637 +                    symbolTT[s].deltaFindState = total - normalizedCounter[s];
117638 +                    total +=  normalizedCounter[s];
117639 +    }   }   }   }
117641 +#if 0  /* debug : symbol costs */
117642 +    DEBUGLOG(5, "\n --- table statistics : ");
117643 +    {   U32 symbol;
117644 +        for (symbol=0; symbol<=maxSymbolValue; symbol++) {
117645 +            DEBUGLOG(5, "%3u: w=%3i,   maxBits=%u, fracBits=%.2f",
117646 +                symbol, normalizedCounter[symbol],
117647 +                FSE_getMaxNbBits(symbolTT, symbol),
117648 +                (double)FSE_bitCost(symbolTT, tableLog, symbol, 8) / 256);
117649 +        }
117650 +    }
117651 +#endif
117653 +    return 0;
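The "Spread symbols" loop above ends with position == 0 because the step produced by FSE_TABLESTEP() is odd and therefore coprime with the power-of-two table size: the walk visits every slot exactly once before returning to its origin. A self-contained sketch of the same walk, with a 16-slot table chosen purely for illustration:

#include <stdio.h>

/* Same step rule as FSE_TABLESTEP(): always odd, hence coprime with any
 * power-of-two table size. */
#define TABLESTEP(size) (((size) >> 1) + ((size) >> 3) + 3)

int main(void)
{
    unsigned const tableSize = 16, tableMask = tableSize - 1;
    unsigned const step = TABLESTEP(tableSize);   /* 13 for a 16-slot table */
    unsigned position = 0, n;

    for (n = 0; n < tableSize; n++) {
        printf("%u ", position);        /* each slot printed exactly once */
        position = (position + step) & tableMask;
    }
    printf("\n");                       /* position has wrapped back to 0 */
    return 0;
}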
117659 +#ifndef FSE_COMMONDEFS_ONLY
117662 +/*-**************************************************************
117663 +*  FSE NCount encoding
117664 +****************************************************************/
117665 +size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
117667 +    size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog) >> 3) + 3;
117668 +    return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND;  /* maxSymbolValue==0 ? use default */
117671 +static size_t
117672 +FSE_writeNCount_generic (void* header, size_t headerBufferSize,
117673 +                   const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
117674 +                         unsigned writeIsSafe)
117676 +    BYTE* const ostart = (BYTE*) header;
117677 +    BYTE* out = ostart;
117678 +    BYTE* const oend = ostart + headerBufferSize;
117679 +    int nbBits;
117680 +    const int tableSize = 1 << tableLog;
117681 +    int remaining;
117682 +    int threshold;
117683 +    U32 bitStream = 0;
117684 +    int bitCount = 0;
117685 +    unsigned symbol = 0;
117686 +    unsigned const alphabetSize = maxSymbolValue + 1;
117687 +    int previousIs0 = 0;
117689 +    /* Table Size */
117690 +    bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount;
117691 +    bitCount  += 4;
117693 +    /* Init */
117694 +    remaining = tableSize+1;   /* +1 for extra accuracy */
117695 +    threshold = tableSize;
117696 +    nbBits = tableLog+1;
117698 +    while ((symbol < alphabetSize) && (remaining>1)) {  /* stops at 1 */
117699 +        if (previousIs0) {
117700 +            unsigned start = symbol;
117701 +            while ((symbol < alphabetSize) && !normalizedCounter[symbol]) symbol++;
117702 +            if (symbol == alphabetSize) break;   /* incorrect distribution */
117703 +            while (symbol >= start+24) {
117704 +                start+=24;
117705 +                bitStream += 0xFFFFU << bitCount;
117706 +                if ((!writeIsSafe) && (out > oend-2))
117707 +                    return ERROR(dstSize_tooSmall);   /* Buffer overflow */
117708 +                out[0] = (BYTE) bitStream;
117709 +                out[1] = (BYTE)(bitStream>>8);
117710 +                out+=2;
117711 +                bitStream>>=16;
117712 +            }
117713 +            while (symbol >= start+3) {
117714 +                start+=3;
117715 +                bitStream += 3 << bitCount;
117716 +                bitCount += 2;
117717 +            }
117718 +            bitStream += (symbol-start) << bitCount;
117719 +            bitCount += 2;
117720 +            if (bitCount>16) {
117721 +                if ((!writeIsSafe) && (out > oend - 2))
117722 +                    return ERROR(dstSize_tooSmall);   /* Buffer overflow */
117723 +                out[0] = (BYTE)bitStream;
117724 +                out[1] = (BYTE)(bitStream>>8);
117725 +                out += 2;
117726 +                bitStream >>= 16;
117727 +                bitCount -= 16;
117728 +        }   }
117729 +        {   int count = normalizedCounter[symbol++];
117730 +            int const max = (2*threshold-1) - remaining;
117731 +            remaining -= count < 0 ? -count : count;
117732 +            count++;   /* +1 for extra accuracy */
117733 +            if (count>=threshold)
117734 +                count += max;   /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
117735 +            bitStream += count << bitCount;
117736 +            bitCount  += nbBits;
117737 +            bitCount  -= (count<max);
117738 +            previousIs0  = (count==1);
117739 +            if (remaining<1) return ERROR(GENERIC);
117740 +            while (remaining<threshold) { nbBits--; threshold>>=1; }
117741 +        }
117742 +        if (bitCount>16) {
117743 +            if ((!writeIsSafe) && (out > oend - 2))
117744 +                return ERROR(dstSize_tooSmall);   /* Buffer overflow */
117745 +            out[0] = (BYTE)bitStream;
117746 +            out[1] = (BYTE)(bitStream>>8);
117747 +            out += 2;
117748 +            bitStream >>= 16;
117749 +            bitCount -= 16;
117750 +    }   }
117752 +    if (remaining != 1)
117753 +        return ERROR(GENERIC);  /* incorrect normalized distribution */
117754 +    assert(symbol <= alphabetSize);
117756 +    /* flush remaining bitStream */
117757 +    if ((!writeIsSafe) && (out > oend - 2))
117758 +        return ERROR(dstSize_tooSmall);   /* Buffer overflow */
117759 +    out[0] = (BYTE)bitStream;
117760 +    out[1] = (BYTE)(bitStream>>8);
117761 +    out+= (bitCount+7) /8;
117763 +    return (out-ostart);
117767 +size_t FSE_writeNCount (void* buffer, size_t bufferSize,
117768 +                  const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
117770 +    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported */
117771 +    if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported */
117773 +    if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
117774 +        return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
117776 +    return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1 /* write in buffer is safe */);
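A hedged call-site sketch for the writer above: the header buffer is sized with FSE_NCOUNTBOUND (FSE_NCountWriteBound() gives a tighter bound), and the normalized counts, invented here for illustration, must sum to 1 << tableLog:

/* 16 + 8 + 7 + 1 == 32 == 1 << 5, so this is a valid tableLog-5 distribution */
short const norm[4] = { 16, 8, 7, 1 };
unsigned const maxSymbolValue = 3;
unsigned const tableLog = 5;
BYTE header[FSE_NCOUNTBOUND];
size_t const hSize = FSE_writeNCount(header, sizeof(header),
                                     norm, maxSymbolValue, tableLog);
if (FSE_isError(hSize))
    return hSize;                 /* e.g. tableLog_tooLarge */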
117780 +/*-**************************************************************
117781 +*  FSE Compression Code
117782 +****************************************************************/
117784 +FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)
117786 +    size_t size;
117787 +    if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
117788 +    size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
117789 +    return (FSE_CTable*)ZSTD_malloc(size);
117792 +void FSE_freeCTable (FSE_CTable* ct) { ZSTD_free(ct); }
117794 +/* provides the minimum logSize to safely represent a distribution */
117795 +static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
117797 +    U32 minBitsSrc = BIT_highbit32((U32)(srcSize)) + 1;
117798 +    U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
117799 +    U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
117800 +    assert(srcSize > 1); /* Not supported, RLE should be used instead */
117801 +    return minBits;
117804 +unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
117806 +    U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
117807 +    U32 tableLog = maxTableLog;
117808 +    U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
117809 +    assert(srcSize > 1); /* Not supported, RLE should be used instead */
117810 +    if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
117811 +    if (maxBitsSrc < tableLog) tableLog = maxBitsSrc;   /* Accuracy can be reduced */
117812 +    if (minBits > tableLog) tableLog = minBits;   /* Need a minimum to safely represent all symbol values */
117813 +    if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG;
117814 +    if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG;
117815 +    return tableLog;
117818 +unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
117820 +    return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
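To make the clamping concrete: for 1000 source bytes over a full byte alphabet, a requested tableLog of 11 is first cut down by the source-size bound (highbit32(999) - 2 == 7) and then raised back to the representability floor (min(10, 9) == 9), so the result is 9. A standalone re-statement, with the constants 5, 11 and 12 assumed to match FSE_MIN_TABLELOG, FSE_DEFAULT_TABLELOG and FSE_MAX_TABLELOG from fse.h:

#include <stdio.h>

static unsigned highbit32(unsigned v)   /* floor(log2(v)), v > 0 */
{
    unsigned n = 0;
    while (v >>= 1) n++;
    return n;
}

static unsigned optimal_tablelog(unsigned maxTableLog, size_t srcSize,
                                 unsigned maxSymbolValue, unsigned minus)
{
    unsigned const maxBitsSrc = highbit32((unsigned)(srcSize - 1)) - minus;
    unsigned const minBitsSrc = highbit32((unsigned)srcSize) + 1;
    unsigned const minBitsSymbols = highbit32(maxSymbolValue) + 2;
    unsigned const minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
    unsigned tableLog = maxTableLog ? maxTableLog : 11;
    if (maxBitsSrc < tableLog) tableLog = maxBitsSrc;   /* accuracy can be reduced */
    if (minBits > tableLog) tableLog = minBits;         /* representability floor */
    if (tableLog < 5)  tableLog = 5;
    if (tableLog > 12) tableLog = 12;
    return tableLog;
}

int main(void)
{
    printf("%u\n", optimal_tablelog(11, 1000, 255, 2));  /* prints 9 */
    return 0;
}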
117823 +/* Secondary normalization method.
117824 +   To be used when primary method fails. */
117826 +static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue, short lowProbCount)
117828 +    short const NOT_YET_ASSIGNED = -2;
117829 +    U32 s;
117830 +    U32 distributed = 0;
117831 +    U32 ToDistribute;
117833 +    /* Init */
117834 +    U32 const lowThreshold = (U32)(total >> tableLog);
117835 +    U32 lowOne = (U32)((total * 3) >> (tableLog + 1));
117837 +    for (s=0; s<=maxSymbolValue; s++) {
117838 +        if (count[s] == 0) {
117839 +            norm[s]=0;
117840 +            continue;
117841 +        }
117842 +        if (count[s] <= lowThreshold) {
117843 +            norm[s] = lowProbCount;
117844 +            distributed++;
117845 +            total -= count[s];
117846 +            continue;
117847 +        }
117848 +        if (count[s] <= lowOne) {
117849 +            norm[s] = 1;
117850 +            distributed++;
117851 +            total -= count[s];
117852 +            continue;
117853 +        }
117855 +        norm[s]=NOT_YET_ASSIGNED;
117856 +    }
117857 +    ToDistribute = (1 << tableLog) - distributed;
117859 +    if (ToDistribute == 0)
117860 +        return 0;
117862 +    if ((total / ToDistribute) > lowOne) {
117863 +        /* risk of rounding to zero */
117864 +        lowOne = (U32)((total * 3) / (ToDistribute * 2));
117865 +        for (s=0; s<=maxSymbolValue; s++) {
117866 +            if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
117867 +                norm[s] = 1;
117868 +                distributed++;
117869 +                total -= count[s];
117870 +                continue;
117871 +        }   }
117872 +        ToDistribute = (1 << tableLog) - distributed;
117873 +    }
117875 +    if (distributed == maxSymbolValue+1) {
117876 +        /* all values are pretty poor;
117877 +           probably incompressible data (should have already been detected);
117878 +           find max, then give all remaining points to max */
117879 +        U32 maxV = 0, maxC = 0;
117880 +        for (s=0; s<=maxSymbolValue; s++)
117881 +            if (count[s] > maxC) { maxV=s; maxC=count[s]; }
117882 +        norm[maxV] += (short)ToDistribute;
117883 +        return 0;
117884 +    }
117886 +    if (total == 0) {
117887 +        /* all of the symbols were low enough for the lowOne or lowThreshold */
117888 +        for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1))
117889 +            if (norm[s] > 0) { ToDistribute--; norm[s]++; }
117890 +        return 0;
117891 +    }
117893 +    {   U64 const vStepLog = 62 - tableLog;
117894 +        U64 const mid = (1ULL << (vStepLog-1)) - 1;
117895 +        U64 const rStep = ZSTD_div64((((U64)1<<vStepLog) * ToDistribute) + mid, (U32)total);   /* scale on remaining */
117896 +        U64 tmpTotal = mid;
117897 +        for (s=0; s<=maxSymbolValue; s++) {
117898 +            if (norm[s]==NOT_YET_ASSIGNED) {
117899 +                U64 const end = tmpTotal + (count[s] * rStep);
117900 +                U32 const sStart = (U32)(tmpTotal >> vStepLog);
117901 +                U32 const sEnd = (U32)(end >> vStepLog);
117902 +                U32 const weight = sEnd - sStart;
117903 +                if (weight < 1)
117904 +                    return ERROR(GENERIC);
117905 +                norm[s] = (short)weight;
117906 +                tmpTotal = end;
117907 +    }   }   }
117909 +    return 0;
117912 +size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
117913 +                           const unsigned* count, size_t total,
117914 +                           unsigned maxSymbolValue, unsigned useLowProbCount)
117916 +    /* Sanity checks */
117917 +    if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
117918 +    if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported size */
117919 +    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported size */
117920 +    if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC);   /* Too small tableLog, compression potentially impossible */
117922 +    {   static U32 const rtbTable[] = {     0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
117923 +        short const lowProbCount = useLowProbCount ? -1 : 1;
117924 +        U64 const scale = 62 - tableLog;
117925 +        U64 const step = ZSTD_div64((U64)1<<62, (U32)total);   /* <== here, one division ! */
117926 +        U64 const vStep = 1ULL<<(scale-20);
117927 +        int stillToDistribute = 1<<tableLog;
117928 +        unsigned s;
117929 +        unsigned largest=0;
117930 +        short largestP=0;
117931 +        U32 lowThreshold = (U32)(total >> tableLog);
117933 +        for (s=0; s<=maxSymbolValue; s++) {
117934 +            if (count[s] == total) return 0;   /* rle special case */
117935 +            if (count[s] == 0) { normalizedCounter[s]=0; continue; }
117936 +            if (count[s] <= lowThreshold) {
117937 +                normalizedCounter[s] = lowProbCount;
117938 +                stillToDistribute--;
117939 +            } else {
117940 +                short proba = (short)((count[s]*step) >> scale);
117941 +                if (proba<8) {
117942 +                    U64 restToBeat = vStep * rtbTable[proba];
117943 +                    proba += (count[s]*step) - ((U64)proba<<scale) > restToBeat;
117944 +                }
117945 +                if (proba > largestP) { largestP=proba; largest=s; }
117946 +                normalizedCounter[s] = proba;
117947 +                stillToDistribute -= proba;
117948 +        }   }
117949 +        if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
117950 +            /* corner case, need another normalization method */
117951 +            size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue, lowProbCount);
117952 +            if (FSE_isError(errorCode)) return errorCode;
117953 +        }
117954 +        else normalizedCounter[largest] += (short)stillToDistribute;
117955 +    }
117957 +#if 0
117958 +    {   /* Print Table (debug) */
117959 +        U32 s;
117960 +        U32 nTotal = 0;
117961 +        for (s=0; s<=maxSymbolValue; s++)
117962 +            RAWLOG(2, "%3i: %4i \n", s, normalizedCounter[s]);
117963 +        for (s=0; s<=maxSymbolValue; s++)
117964 +            nTotal += abs(normalizedCounter[s]);
117965 +        if (nTotal != (1U<<tableLog))
117966 +            RAWLOG(2, "Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
117967 +        getchar();
117968 +    }
117969 +#endif
117971 +    return tableLog;
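On success, the contract of FSE_normalizeCount() is that the absolute values in normalizedCounter sum to exactly 1 << tableLog. A sketch of that invariant check, with raw counts invented for the example:

unsigned const count[4] = { 600, 250, 100, 50 };   /* total == 1000 */
short norm[4];
size_t const tableLog = FSE_normalizeCount(norm, 6, count, 1000,
                                           3 /* maxSymbolValue */,
                                           0 /* useLowProbCount */);
if (!FSE_isError(tableLog)) {
    int sum = 0, s;
    for (s = 0; s <= 3; s++)
        sum += norm[s] < 0 ? -norm[s] : norm[s];
    /* sum == (1 << tableLog), i.e. 64 when tableLog == 6 */
}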
117975 +/* fake FSE_CTable, for raw (uncompressed) input */
117976 +size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits)
117978 +    const unsigned tableSize = 1 << nbBits;
117979 +    const unsigned tableMask = tableSize - 1;
117980 +    const unsigned maxSymbolValue = tableMask;
117981 +    void* const ptr = ct;
117982 +    U16* const tableU16 = ( (U16*) ptr) + 2;
117983 +    void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1);   /* assumption : tableLog >= 1 */
117984 +    FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
117985 +    unsigned s;
117987 +    /* Sanity checks */
117988 +    if (nbBits < 1) return ERROR(GENERIC);             /* min size */
117990 +    /* header */
117991 +    tableU16[-2] = (U16) nbBits;
117992 +    tableU16[-1] = (U16) maxSymbolValue;
117994 +    /* Build table */
117995 +    for (s=0; s<tableSize; s++)
117996 +        tableU16[s] = (U16)(tableSize + s);
117998 +    /* Build Symbol Transformation Table */
117999 +    {   const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
118000 +        for (s=0; s<=maxSymbolValue; s++) {
118001 +            symbolTT[s].deltaNbBits = deltaNbBits;
118002 +            symbolTT[s].deltaFindState = s-1;
118003 +    }   }
118005 +    return 0;
118008 +/* fake FSE_CTable, for rle input (always same symbol) */
118009 +size_t FSE_buildCTable_rle (FSE_CTable* ct, BYTE symbolValue)
118011 +    void* ptr = ct;
118012 +    U16* tableU16 = ( (U16*) ptr) + 2;
118013 +    void* FSCTptr = (U32*)ptr + 2;
118014 +    FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*) FSCTptr;
118016 +    /* header */
118017 +    tableU16[-2] = (U16) 0;
118018 +    tableU16[-1] = (U16) symbolValue;
118020 +    /* Build table */
118021 +    tableU16[0] = 0;
118022 +    tableU16[1] = 0;   /* just in case */
118024 +    /* Build Symbol Transformation Table */
118025 +    symbolTT[symbolValue].deltaNbBits = 0;
118026 +    symbolTT[symbolValue].deltaFindState = 0;
118028 +    return 0;
118032 +static size_t FSE_compress_usingCTable_generic (void* dst, size_t dstSize,
118033 +                           const void* src, size_t srcSize,
118034 +                           const FSE_CTable* ct, const unsigned fast)
118036 +    const BYTE* const istart = (const BYTE*) src;
118037 +    const BYTE* const iend = istart + srcSize;
118038 +    const BYTE* ip=iend;
118040 +    BIT_CStream_t bitC;
118041 +    FSE_CState_t CState1, CState2;
118043 +    /* init */
118044 +    if (srcSize <= 2) return 0;
118045 +    { size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
118046 +      if (FSE_isError(initError)) return 0; /* not enough space available to write a bitstream */ }
118048 +#define FSE_FLUSHBITS(s)  (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
118050 +    if (srcSize & 1) {
118051 +        FSE_initCState2(&CState1, ct, *--ip);
118052 +        FSE_initCState2(&CState2, ct, *--ip);
118053 +        FSE_encodeSymbol(&bitC, &CState1, *--ip);
118054 +        FSE_FLUSHBITS(&bitC);
118055 +    } else {
118056 +        FSE_initCState2(&CState2, ct, *--ip);
118057 +        FSE_initCState2(&CState1, ct, *--ip);
118058 +    }
118060 +    /* join to mod 4 */
118061 +    srcSize -= 2;
118062 +    if ((sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) {  /* test bit 2 */
118063 +        FSE_encodeSymbol(&bitC, &CState2, *--ip);
118064 +        FSE_encodeSymbol(&bitC, &CState1, *--ip);
118065 +        FSE_FLUSHBITS(&bitC);
118066 +    }
118068 +    /* 2 or 4 encoding per loop */
118069 +    while ( ip>istart ) {
118071 +        FSE_encodeSymbol(&bitC, &CState2, *--ip);
118073 +        if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 )   /* this test must be static */
118074 +            FSE_FLUSHBITS(&bitC);
118076 +        FSE_encodeSymbol(&bitC, &CState1, *--ip);
118078 +        if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) {  /* this test must be static */
118079 +            FSE_encodeSymbol(&bitC, &CState2, *--ip);
118080 +            FSE_encodeSymbol(&bitC, &CState1, *--ip);
118081 +        }
118083 +        FSE_FLUSHBITS(&bitC);
118084 +    }
118086 +    FSE_flushCState(&bitC, &CState2);
118087 +    FSE_flushCState(&bitC, &CState1);
118088 +    return BIT_closeCStream(&bitC);
118091 +size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
118092 +                           const void* src, size_t srcSize,
118093 +                           const FSE_CTable* ct)
118095 +    unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));
118097 +    if (fast)
118098 +        return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
118099 +    else
118100 +        return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
118104 +size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
118107 +#endif   /* FSE_COMMONDEFS_ONLY */
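Taken together, the new file wires into the usual FSE pipeline: histogram, normalize, build a compression table, encode. A minimal sketch, assuming the caller provisions ct and wksp according to FSE_CTABLE_SIZE_U32() and the workspace bounds in the headers; the function name and structure are illustrative, not part of the patch:

static size_t fse_compress_sketch(void *dst, size_t dstCapacity,
                                  const void *src, size_t srcSize,
                                  FSE_CTable *ct, void *wksp, size_t wkspSize)
{
    unsigned count[256];
    short norm[256];
    unsigned maxSymbolValue = 255;
    size_t largest, tableLog;

    /* 1. histogram (workspace variant keeps stack usage low) */
    largest = HIST_count_wksp(count, &maxSymbolValue, src, srcSize, wksp, wkspSize);
    if (HIST_isError(largest)) return largest;
    if (largest == srcSize) return 0;   /* single-symbol input: RLE instead */

    /* 2. normalize counts to a power-of-two total */
    tableLog = FSE_normalizeCount(norm,
                                  FSE_optimalTableLog(0, srcSize, maxSymbolValue),
                                  count, srcSize, maxSymbolValue, 0);
    if (FSE_isError(tableLog)) return tableLog;

    /* 3. build the compression table in the caller-provided workspace */
    {   size_t const err = FSE_buildCTable_wksp(ct, norm, maxSymbolValue,
                                                (unsigned)tableLog, wksp, wkspSize);
        if (FSE_isError(err)) return err;
    }

    /* 4. encode; returns 0 when the result would not fit or not compress */
    return FSE_compress_usingCTable(dst, dstCapacity, src, srcSize, ct);
}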
118108 diff --git a/lib/zstd/compress/hist.c b/lib/zstd/compress/hist.c
118109 new file mode 100644
118110 index 000000000000..5fc30f766591
118111 --- /dev/null
118112 +++ b/lib/zstd/compress/hist.c
118113 @@ -0,0 +1,164 @@
118114 +/* ******************************************************************
118115 + * hist : Histogram functions
118116 + * part of Finite State Entropy project
118117 + * Copyright (c) Yann Collet, Facebook, Inc.
118119 + *  You can contact the author at :
118120 + *  - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
118121 + *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
118123 + * This source code is licensed under both the BSD-style license (found in the
118124 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
118125 + * in the COPYING file in the root directory of this source tree).
118126 + * You may select, at your option, one of the above-listed licenses.
118127 +****************************************************************** */
118129 +/* --- dependencies --- */
118130 +#include "../common/mem.h"             /* U32, BYTE, etc. */
118131 +#include "../common/debug.h"           /* assert, DEBUGLOG */
118132 +#include "../common/error_private.h"   /* ERROR */
118133 +#include "hist.h"
118136 +/* --- Error management --- */
118137 +unsigned HIST_isError(size_t code) { return ERR_isError(code); }
118139 +/*-**************************************************************
118140 + *  Histogram functions
118141 + ****************************************************************/
118142 +unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
118143 +                           const void* src, size_t srcSize)
118145 +    const BYTE* ip = (const BYTE*)src;
118146 +    const BYTE* const end = ip + srcSize;
118147 +    unsigned maxSymbolValue = *maxSymbolValuePtr;
118148 +    unsigned largestCount=0;
118150 +    ZSTD_memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
118151 +    if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }
118153 +    while (ip<end) {
118154 +        assert(*ip <= maxSymbolValue);
118155 +        count[*ip++]++;
118156 +    }
118158 +    while (!count[maxSymbolValue]) maxSymbolValue--;
118159 +    *maxSymbolValuePtr = maxSymbolValue;
118161 +    {   U32 s;
118162 +        for (s=0; s<=maxSymbolValue; s++)
118163 +            if (count[s] > largestCount) largestCount = count[s];
118164 +    }
118166 +    return largestCount;
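A minimal use of the function above; it both fills count and lowers *maxSymbolValuePtr to the largest byte actually present (sample data invented for the example):

const BYTE sample[5] = { 'a', 'b', 'a', 'c', 'a' };
unsigned count[256];
unsigned maxSymbolValue = 255;
unsigned const largest = HIST_count_simple(count, &maxSymbolValue,
                                           sample, sizeof(sample));
/* largest == 3 (the count of 'a'); maxSymbolValue == 'c' afterwards */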
118169 +typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e;
118171 +/* HIST_count_parallel_wksp() :
118172 + * store histogram into 4 intermediate tables, recombined at the end.
118173 + * this design makes better use of OoO cpus,
118174 + * and is noticeably faster when some values are heavily repeated.
118175 + * But it needs some additional workspace for intermediate tables.
118176 + * `workSpace` must be a U32 table of size >= HIST_WKSP_SIZE_U32.
118177 + * @return : largest histogram frequency,
118178 + *           or an error code (notably when histogram's alphabet is larger than *maxSymbolValuePtr) */
118179 +static size_t HIST_count_parallel_wksp(
118180 +                                unsigned* count, unsigned* maxSymbolValuePtr,
118181 +                                const void* source, size_t sourceSize,
118182 +                                HIST_checkInput_e check,
118183 +                                U32* const workSpace)
118185 +    const BYTE* ip = (const BYTE*)source;
118186 +    const BYTE* const iend = ip+sourceSize;
118187 +    size_t const countSize = (*maxSymbolValuePtr + 1) * sizeof(*count);
118188 +    unsigned max=0;
118189 +    U32* const Counting1 = workSpace;
118190 +    U32* const Counting2 = Counting1 + 256;
118191 +    U32* const Counting3 = Counting2 + 256;
118192 +    U32* const Counting4 = Counting3 + 256;
118194 +    /* safety checks */
118195 +    assert(*maxSymbolValuePtr <= 255);
118196 +    if (!sourceSize) {
118197 +        ZSTD_memset(count, 0, countSize);
118198 +        *maxSymbolValuePtr = 0;
118199 +        return 0;
118200 +    }
118201 +    ZSTD_memset(workSpace, 0, 4*256*sizeof(unsigned));
118203 +    /* by stripes of 16 bytes */
118204 +    {   U32 cached = MEM_read32(ip); ip += 4;
118205 +        while (ip < iend-15) {
118206 +            U32 c = cached; cached = MEM_read32(ip); ip += 4;
118207 +            Counting1[(BYTE) c     ]++;
118208 +            Counting2[(BYTE)(c>>8) ]++;
118209 +            Counting3[(BYTE)(c>>16)]++;
118210 +            Counting4[       c>>24 ]++;
118211 +            c = cached; cached = MEM_read32(ip); ip += 4;
118212 +            Counting1[(BYTE) c     ]++;
118213 +            Counting2[(BYTE)(c>>8) ]++;
118214 +            Counting3[(BYTE)(c>>16)]++;
118215 +            Counting4[       c>>24 ]++;
118216 +            c = cached; cached = MEM_read32(ip); ip += 4;
118217 +            Counting1[(BYTE) c     ]++;
118218 +            Counting2[(BYTE)(c>>8) ]++;
118219 +            Counting3[(BYTE)(c>>16)]++;
118220 +            Counting4[       c>>24 ]++;
118221 +            c = cached; cached = MEM_read32(ip); ip += 4;
118222 +            Counting1[(BYTE) c     ]++;
118223 +            Counting2[(BYTE)(c>>8) ]++;
118224 +            Counting3[(BYTE)(c>>16)]++;
118225 +            Counting4[       c>>24 ]++;
118226 +        }
118227 +        ip-=4;
118228 +    }
118230 +    /* finish last symbols */
118231 +    while (ip<iend) Counting1[*ip++]++;
118233 +    {   U32 s;
118234 +        for (s=0; s<256; s++) {
118235 +            Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
118236 +            if (Counting1[s] > max) max = Counting1[s];
118237 +    }   }
118239 +    {   unsigned maxSymbolValue = 255;
118240 +        while (!Counting1[maxSymbolValue]) maxSymbolValue--;
118241 +        if (check && maxSymbolValue > *maxSymbolValuePtr) return ERROR(maxSymbolValue_tooSmall);
118242 +        *maxSymbolValuePtr = maxSymbolValue;
118243 +        ZSTD_memmove(count, Counting1, countSize);   /* in case count & Counting1 are overlapping */
118244 +    }
118245 +    return (size_t)max;
118248 +/* HIST_countFast_wksp() :
118249 + * Same as HIST_countFast(), but using an externally provided scratch buffer.
118250 + * `workSpace` is a writable buffer which must be 4-bytes aligned,
118251 + * `workSpaceSize` must be >= HIST_WKSP_SIZE
118252 + */
118253 +size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
118254 +                          const void* source, size_t sourceSize,
118255 +                          void* workSpace, size_t workSpaceSize)
118257 +    if (sourceSize < 1500) /* heuristic threshold */
118258 +        return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize);
118259 +    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
118260 +    if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
118261 +    return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace);
118264 +/* HIST_count_wksp() :
118265 + * Same as HIST_count(), but using an externally provided scratch buffer.
118266 + * `workSpace` must be a table of at least HIST_WKSP_SIZE_U32 unsigned values */
118267 +size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
118268 +                       const void* source, size_t sourceSize,
118269 +                       void* workSpace, size_t workSpaceSize)
118271 +    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
118272 +    if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
118273 +    if (*maxSymbolValuePtr < 255)
118274 +        return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace);
118275 +    *maxSymbolValuePtr = 255;
118276 +    return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize);
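The dispatch above is worth spelling out: a caller limit below 255 forces the checked parallel path, so out-of-range bytes yield maxSymbolValue_tooSmall instead of silent corruption, while the full-range case can take the trusting fast path. A hedged call-site sketch (src and srcSize assumed in scope):

U32 wksp[HIST_WKSP_SIZE_U32];      /* 1024 U32s, as required by hist.h */
unsigned count[64];
unsigned maxSymbolValue = 63;      /* below 255, so the input is validated */
size_t const largest = HIST_count_wksp(count, &maxSymbolValue,
                                       src, srcSize, wksp, sizeof(wksp));
if (HIST_isError(largest))
    return largest;                /* e.g. maxSymbolValue_tooSmall */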
118278 diff --git a/lib/zstd/compress/hist.h b/lib/zstd/compress/hist.h
118279 new file mode 100644
118280 index 000000000000..228ed48a71de
118281 --- /dev/null
118282 +++ b/lib/zstd/compress/hist.h
118283 @@ -0,0 +1,75 @@
118284 +/* ******************************************************************
118285 + * hist : Histogram functions
118286 + * part of Finite State Entropy project
118287 + * Copyright (c) Yann Collet, Facebook, Inc.
118289 + *  You can contact the author at :
118290 + *  - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
118291 + *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
118293 + * This source code is licensed under both the BSD-style license (found in the
118294 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
118295 + * in the COPYING file in the root directory of this source tree).
118296 + * You may select, at your option, one of the above-listed licenses.
118297 +****************************************************************** */
118299 +/* --- dependencies --- */
118300 +#include "../common/zstd_deps.h"   /* size_t */
118303 +/* --- simple histogram functions --- */
118305 +/*! HIST_count():
118306 + *  Provides the precise count of each byte within a table 'count'.
118307 + * 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1).
118308 + *  Updates *maxSymbolValuePtr with actual largest symbol value detected.
118309 + * @return : count of the most frequent symbol (which isn't identified),
118310 + *           or an error code, which can be tested using HIST_isError().
118311 + *           note : if return == srcSize, there is only one symbol.
118312 + */
118313 +size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
118314 +                  const void* src, size_t srcSize);
118316 +unsigned HIST_isError(size_t code);  /**< tells if a return value is an error code */
118319 +/* --- advanced histogram functions --- */
118321 +#define HIST_WKSP_SIZE_U32 1024
118322 +#define HIST_WKSP_SIZE    (HIST_WKSP_SIZE_U32 * sizeof(unsigned))
118323 +/** HIST_count_wksp() :
118324 + *  Same as HIST_count(), but using an externally provided scratch buffer.
118325 + *  The benefit is that this function uses very little stack space.
118326 + * `workSpace` is a writable buffer which must be 4-bytes aligned,
118327 + * `workSpaceSize` must be >= HIST_WKSP_SIZE
118328 + */
118329 +size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
118330 +                       const void* src, size_t srcSize,
118331 +                       void* workSpace, size_t workSpaceSize);
118333 +/** HIST_countFast() :
118334 + *  same as HIST_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr.
118335 + *  This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr`
118336 + */
118337 +size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
118338 +                      const void* src, size_t srcSize);
118340 +/** HIST_countFast_wksp() :
118341 + *  Same as HIST_countFast(), but using an externally provided scratch buffer.
118342 + * `workSpace` is a writable buffer which must be 4-bytes aligned,
118343 + * `workSpaceSize` must be >= HIST_WKSP_SIZE
118344 + */
118345 +size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
118346 +                           const void* src, size_t srcSize,
118347 +                           void* workSpace, size_t workSpaceSize);
118349 +/*! HIST_count_simple() :
118350 + *  Like HIST_countFast(), this function is unsafe,
118351 + *  and will segfault if any value within `src` is `> *maxSymbolValuePtr`.
118352 + *  It is also a bit slower for large inputs.
118353 + *  However, it does not need any additional memory (not even on stack).
118354 + * @return : count of the most frequent symbol.
118355 + *  Note this function doesn't produce any error (i.e. it must succeed).
118356 + */
118357 +unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
118358 +                           const void* src, size_t srcSize);
118359 diff --git a/lib/zstd/compress/huf_compress.c b/lib/zstd/compress/huf_compress.c
118360 new file mode 100644
118361 index 000000000000..ff0e76a2e0e3
118362 --- /dev/null
118363 +++ b/lib/zstd/compress/huf_compress.c
118364 @@ -0,0 +1,901 @@
118365 +/* ******************************************************************
118366 + * Huffman encoder, part of New Generation Entropy library
118367 + * Copyright (c) Yann Collet, Facebook, Inc.
118369 + *  You can contact the author at :
118370 + *  - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
118371 + *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
118373 + * This source code is licensed under both the BSD-style license (found in the
118374 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
118375 + * in the COPYING file in the root directory of this source tree).
118376 + * You may select, at your option, one of the above-listed licenses.
118377 +****************************************************************** */
118379 +/* **************************************************************
118380 +*  Compiler specifics
118381 +****************************************************************/
118384 +/* **************************************************************
118385 +*  Includes
118386 +****************************************************************/
118387 +#include "../common/zstd_deps.h"     /* ZSTD_memcpy, ZSTD_memset */
118388 +#include "../common/compiler.h"
118389 +#include "../common/bitstream.h"
118390 +#include "hist.h"
118391 +#define FSE_STATIC_LINKING_ONLY   /* FSE_optimalTableLog_internal */
118392 +#include "../common/fse.h"        /* header compression */
118393 +#define HUF_STATIC_LINKING_ONLY
118394 +#include "../common/huf.h"
118395 +#include "../common/error_private.h"
118398 +/* **************************************************************
118399 +*  Error Management
118400 +****************************************************************/
118401 +#define HUF_isError ERR_isError
118402 +#define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)   /* use only *after* variable declarations */
118405 +/* **************************************************************
118406 +*  Utils
118407 +****************************************************************/
118408 +unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
118410 +    return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
118414 +/* *******************************************************
118415 +*  HUF : Huffman block compression
118416 +*********************************************************/
118417 +/* HUF_compressWeights() :
118418 + * Same as FSE_compress(), but dedicated to huff0's weights compression.
118419 + * The use case needs much less stack memory.
118420 + * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
118421 + */
118422 +#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
118424 +typedef struct {
118425 +    FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
118426 +    U32 scratchBuffer[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(HUF_TABLELOG_MAX, MAX_FSE_TABLELOG_FOR_HUFF_HEADER)];
118427 +    unsigned count[HUF_TABLELOG_MAX+1];
118428 +    S16 norm[HUF_TABLELOG_MAX+1];
118429 +} HUF_CompressWeightsWksp;
118431 +static size_t HUF_compressWeights(void* dst, size_t dstSize, const void* weightTable, size_t wtSize, void* workspace, size_t workspaceSize)
118433 +    BYTE* const ostart = (BYTE*) dst;
118434 +    BYTE* op = ostart;
118435 +    BYTE* const oend = ostart + dstSize;
118437 +    unsigned maxSymbolValue = HUF_TABLELOG_MAX;
118438 +    U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
118439 +    HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)workspace;
118441 +    if (workspaceSize < sizeof(HUF_CompressWeightsWksp)) return ERROR(GENERIC);
118443 +    /* init conditions */
118444 +    if (wtSize <= 1) return 0;  /* Not compressible */
118446 +    /* Scan input and build symbol stats */
118447 +    {   unsigned const maxCount = HIST_count_simple(wksp->count, &maxSymbolValue, weightTable, wtSize);   /* never fails */
118448 +        if (maxCount == wtSize) return 1;   /* only a single symbol in src : rle */
118449 +        if (maxCount == 1) return 0;        /* each symbol present maximum once => not compressible */
118450 +    }
118452 +    tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
118453 +    CHECK_F( FSE_normalizeCount(wksp->norm, tableLog, wksp->count, wtSize, maxSymbolValue, /* useLowProbCount */ 0) );
118455 +    /* Write table description header */
118456 +    {   CHECK_V_F(hSize, FSE_writeNCount(op, (size_t)(oend-op), wksp->norm, maxSymbolValue, tableLog) );
118457 +        op += hSize;
118458 +    }
118460 +    /* Compress */
118461 +    CHECK_F( FSE_buildCTable_wksp(wksp->CTable, wksp->norm, maxSymbolValue, tableLog, wksp->scratchBuffer, sizeof(wksp->scratchBuffer)) );
118462 +    {   CHECK_V_F(cSize, FSE_compress_usingCTable(op, (size_t)(oend - op), weightTable, wtSize, wksp->CTable) );
118463 +        if (cSize == 0) return 0;   /* not enough space for compressed data */
118464 +        op += cSize;
118465 +    }
118467 +    return (size_t)(op-ostart);
118471 +typedef struct {
118472 +    HUF_CompressWeightsWksp wksp;
118473 +    BYTE bitsToWeight[HUF_TABLELOG_MAX + 1];   /* precomputed conversion table */
118474 +    BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
118475 +} HUF_WriteCTableWksp;
118477 +size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize,
118478 +                            const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog,
118479 +                            void* workspace, size_t workspaceSize)
118481 +    BYTE* op = (BYTE*)dst;
118482 +    U32 n;
118483 +    HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)workspace;
118485 +    /* check conditions */
118486 +    if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC);
118487 +    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
118489 +    /* convert to weight */
118490 +    wksp->bitsToWeight[0] = 0;
118491 +    for (n=1; n<huffLog+1; n++)
118492 +        wksp->bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
118493 +    for (n=0; n<maxSymbolValue; n++)
118494 +        wksp->huffWeight[n] = wksp->bitsToWeight[CTable[n].nbBits];
118496 +    /* attempt weights compression by FSE */
118497 +    {   CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, wksp->huffWeight, maxSymbolValue, &wksp->wksp, sizeof(wksp->wksp)) );
118498 +        if ((hSize>1) & (hSize < maxSymbolValue/2)) {   /* FSE compressed */
118499 +            op[0] = (BYTE)hSize;
118500 +            return hSize+1;
118501 +    }   }
118503 +    /* write raw values as 4-bit nibbles (max : 15) */
118504 +    if (maxSymbolValue > (256-128)) return ERROR(GENERIC);   /* should not happen : likely means source cannot be compressed */
118505 +    if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall);   /* not enough space within dst buffer */
118506 +    op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
118507 +    wksp->huffWeight[maxSymbolValue] = 0;   /* to be sure it doesn't cause msan issue in final combination */
118508 +    for (n=0; n<maxSymbolValue; n+=2)
118509 +        op[(n/2)+1] = (BYTE)((wksp->huffWeight[n] << 4) + wksp->huffWeight[n+1]);
118510 +    return ((maxSymbolValue+1)/2) + 1;
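+/* Header byte recap (as produced above): a value in 1..127 is the size of
+ * the FSE-compressed weight stream that follows; a value >= 128 encodes
+ * 128 + (number of raw weights - 1), followed by the weights packed two
+ * 4-bit values per byte. */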
118513 +/*! HUF_writeCTable() :
118514 +    `CTable` : Huffman tree to save, using huf representation.
118515 +    @return : size of saved CTable */
118516 +size_t HUF_writeCTable (void* dst, size_t maxDstSize,
118517 +                        const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog)
118519 +    HUF_WriteCTableWksp wksp;
118520 +    return HUF_writeCTable_wksp(dst, maxDstSize, CTable, maxSymbolValue, huffLog, &wksp, sizeof(wksp));
118524 +size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights)
118526 +    BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];   /* init not required, even though some static analyzer may complain */
118527 +    U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];   /* large enough for values from 0 to 16 */
118528 +    U32 tableLog = 0;
118529 +    U32 nbSymbols = 0;
118531 +    /* get symbol weights */
118532 +    CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));
118533 +    *hasZeroWeights = (rankVal[0] > 0);
118535 +    /* check result */
118536 +    if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
118537 +    if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);
118539 +    /* Prepare base value per rank */
118540 +    {   U32 n, nextRankStart = 0;
118541 +        for (n=1; n<=tableLog; n++) {
118542 +            U32 curr = nextRankStart;
118543 +            nextRankStart += (rankVal[n] << (n-1));
118544 +            rankVal[n] = curr;
118545 +    }   }
118547 +    /* fill nbBits */
118548 +    {   U32 n; for (n=0; n<nbSymbols; n++) {
118549 +            const U32 w = huffWeight[n];
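+            /* branchless: tableLog + 1 - w is the code length for w > 0,
+             * and the mask -(w != 0) (all ones iff w != 0) forces
+             * nbBits = 0 for zero-weight (absent) symbols */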
118550 +            CTable[n].nbBits = (BYTE)(tableLog + 1 - w) & -(w != 0);
118551 +    }   }
118553 +    /* fill val */
118554 +    {   U16 nbPerRank[HUF_TABLELOG_MAX+2]  = {0};  /* support w=0=>n=tableLog+1 */
118555 +        U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
118556 +        { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[CTable[n].nbBits]++; }
118557 +        /* determine starting value per rank */
118558 +        valPerRank[tableLog+1] = 0;   /* for w==0 */
118559 +        {   U16 min = 0;
118560 +            U32 n; for (n=tableLog; n>0; n--) {  /* start at n=tablelog <-> w=1 */
118561 +                valPerRank[n] = min;     /* get starting value within each rank */
118562 +                min += nbPerRank[n];
118563 +                min >>= 1;
118564 +        }   }
118565 +        /* assign value within rank, symbol order */
118566 +        { U32 n; for (n=0; n<nbSymbols; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; }
118567 +    }
118569 +    *maxSymbolValuePtr = nbSymbols - 1;
118570 +    return readSize;
118573 +U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue)
118575 +    const HUF_CElt* table = (const HUF_CElt*)symbolTable;
118576 +    assert(symbolValue <= HUF_SYMBOLVALUE_MAX);
118577 +    return table[symbolValue].nbBits;
118581 +typedef struct nodeElt_s {
118582 +    U32 count;
118583 +    U16 parent;
118584 +    BYTE byte;
118585 +    BYTE nbBits;
118586 +} nodeElt;
118589 + * HUF_setMaxHeight():
118590 + * Enforces maxNbBits on the Huffman tree described in huffNode.
118592 + * It sets all nodes with nbBits > maxNbBits to be maxNbBits. Then it adjusts
118593 + * the tree so that it is a valid canonical Huffman tree.
118595 + * @pre               The sum of the ranks of each symbol == 2^largestBits,
118596 + *                    where largestBits == huffNode[lastNonNull].nbBits.
118597 + * @post              The sum of the ranks of each symbol == 2^largestBits,
118598 + *                    where largestBits is the return value <= maxNbBits.
118600 + * @param huffNode    The Huffman tree modified in place to enforce maxNbBits.
118601 + * @param lastNonNull The symbol with the lowest count in the Huffman tree.
118602 + * @param maxNbBits   The maximum allowed number of bits, which the Huffman tree
118603 + *                    may not respect. After this function the Huffman tree will
118604 + *                    respect maxNbBits.
118605 + * @return            The maximum number of bits of the Huffman tree after adjustment,
118606 + *                    necessarily no more than maxNbBits.
118607 + */
118608 +static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
118610 +    const U32 largestBits = huffNode[lastNonNull].nbBits;
118611 +    /* early exit : no elt > maxNbBits, so the tree is already valid. */
118612 +    if (largestBits <= maxNbBits) return largestBits;
118614 +    /* there are several too-large elements (at least 2) */
118615 +    {   int totalCost = 0;
118616 +        const U32 baseCost = 1 << (largestBits - maxNbBits);
118617 +        int n = (int)lastNonNull;
118619 +        /* Adjust any ranks > maxNbBits to maxNbBits.
118620 +         * Compute totalCost, which is how far the sum of the ranks
118621 +         * is over 2^largestBits after adjusting the offending ranks.
118622 +         */
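+        /* Worked example (illustration): if largestBits = 13 and
+         * maxNbBits = 11, then baseCost = 1<<2 = 4; demoting a 13-bit
+         * node adds 4 - (1<<0) = 3 to totalCost, and a 12-bit node adds
+         * 4 - (1<<1) = 2. The later shift by (largestBits - maxNbBits)
+         * re-expresses this excess in units where a leaf at depth
+         * maxNbBits weighs exactly 1. */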
118623 +        while (huffNode[n].nbBits > maxNbBits) {
118624 +            totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
118625 +            huffNode[n].nbBits = (BYTE)maxNbBits;
118626 +            n--;
118627 +        }
118628 +        /* n stops at huffNode[n].nbBits <= maxNbBits */
118629 +        assert(huffNode[n].nbBits <= maxNbBits);
118630 +        /* n ends at the index of the smallest symbol using < maxNbBits */
118631 +        while (huffNode[n].nbBits == maxNbBits) --n;
118633 +        /* renorm totalCost from 2^largestBits to 2^maxNbBits
118634 +         * note : totalCost is necessarily a multiple of baseCost */
118635 +        assert((totalCost & (baseCost - 1)) == 0);
118636 +        totalCost >>= (largestBits - maxNbBits);
118637 +        assert(totalCost > 0);
118639 +        /* repay normalized cost */
118640 +        {   U32 const noSymbol = 0xF0F0F0F0;
118641 +            U32 rankLast[HUF_TABLELOG_MAX+2];
118643 +            /* Get pos of last (smallest = lowest cum. count) symbol per rank */
118644 +            ZSTD_memset(rankLast, 0xF0, sizeof(rankLast));
118645 +            {   U32 currentNbBits = maxNbBits;
118646 +                int pos;
118647 +                for (pos=n ; pos >= 0; pos--) {
118648 +                    if (huffNode[pos].nbBits >= currentNbBits) continue;
118649 +                    currentNbBits = huffNode[pos].nbBits;   /* < maxNbBits */
118650 +                    rankLast[maxNbBits-currentNbBits] = (U32)pos;
118651 +            }   }
118653 +            while (totalCost > 0) {
118654 +                /* Repay the largest power of 2 not above totalCost:
118655 +                 * adding one bit to a symbol at rank nBitsToDecrease
118656 +                 * repays 2^(nBitsToDecrease-1). */
118657 +                U32 nBitsToDecrease = BIT_highbit32((U32)totalCost) + 1;
118658 +                for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
118659 +                    U32 const highPos = rankLast[nBitsToDecrease];
118660 +                    U32 const lowPos = rankLast[nBitsToDecrease-1];
118661 +                    if (highPos == noSymbol) continue;
118662 +                    /* Choose highPos if lowPos has no symbols, or if
118663 +                     * demoting one highPos symbol costs no more than
118664 +                     * demoting two lowPos symbols. */
118665 +                    if (lowPos == noSymbol) break;
118666 +                    {   U32 const highTotal = huffNode[highPos].count;
118667 +                        U32 const lowTotal = 2 * huffNode[lowPos].count;
118668 +                        if (highTotal <= lowTotal) break;
118669 +                }   }
118670 +                /* only triggered when no more rank 1 symbols are left => find the closest one (note : there is necessarily at least one !) */
118671 +                assert(rankLast[nBitsToDecrease] != noSymbol || nBitsToDecrease == 1);
118672 +                /* HUF_TABLELOG_MAX test just to please gcc 5+; but it should not be necessary */
118673 +                while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
118674 +                    nBitsToDecrease++;
118675 +                assert(rankLast[nBitsToDecrease] != noSymbol);
118676 +                /* Increase the number of bits to gain back half the rank cost. */
118677 +                totalCost -= 1 << (nBitsToDecrease-1);
118678 +                huffNode[rankLast[nBitsToDecrease]].nbBits++;
118680 +                /* Fix up the new rank.
118681 +                 * If the new rank was empty, this symbol is now its smallest.
118682 +                 * Otherwise, this symbol will be the largest in the new rank so no adjustment.
118683 +                 */
118684 +                if (rankLast[nBitsToDecrease-1] == noSymbol)
118685 +                    rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease];
118686 +                /* Fix up the old rank.
118687 +                 * If the symbol was at position 0, meaning it was the highest weight symbol in the tree,
118688 +                 * it must be the only symbol in its rank, so the old rank now has no symbols.
118689 +                 * Otherwise, since the Huffman nodes are sorted by count, the previous position is now
118690 +                 * the smallest node in the rank. If the previous position belongs to a different rank,
118691 +                 * then the rank is now empty.
118692 +                 */
118693 +                if (rankLast[nBitsToDecrease] == 0)    /* special case, reached largest symbol */
118694 +                    rankLast[nBitsToDecrease] = noSymbol;
118695 +                else {
118696 +                    rankLast[nBitsToDecrease]--;
118697 +                    if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease)
118698 +                        rankLast[nBitsToDecrease] = noSymbol;   /* this rank is now empty */
118699 +                }
118700 +            }   /* while (totalCost > 0) */
118702 +            /* If we've removed too much weight, then we have to add it back.
118703 +             * To avoid overshooting again, we only adjust the smallest rank.
118704 +             * We take the largest nodes from the lowest rank 0 and move them
118705 +             * to rank 1. There's guaranteed to be enough rank 0 symbols because
118706 +             * TODO.
118707 +             */
118708 +            while (totalCost < 0) {  /* Sometimes, cost correction overshoots */
118709 +                /* special case : no rank 1 symbol (using maxNbBits-1);
118710 +                 * let's create one from largest rank 0 (using maxNbBits).
118711 +                 */
118712 +                if (rankLast[1] == noSymbol) {
118713 +                    while (huffNode[n].nbBits == maxNbBits) n--;
118714 +                    huffNode[n+1].nbBits--;
118715 +                    assert(n >= 0);
118716 +                    rankLast[1] = (U32)(n+1);
118717 +                    totalCost++;
118718 +                    continue;
118719 +                }
118720 +                huffNode[ rankLast[1] + 1 ].nbBits--;
118721 +                rankLast[1]++;
118722 +                totalCost ++;
118723 +            }
118724 +        }   /* repay normalized cost */
118724 +    }   /* there are several too-large elements (at least 2) */
118727 +    return maxNbBits;
118730 +typedef struct {
118731 +    U32 base;
118732 +    U32 curr;
118733 +} rankPos;
118735 +typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
118737 +#define RANK_POSITION_TABLE_SIZE 32
118739 +typedef struct {
118740 +  huffNodeTable huffNodeTbl;
118741 +  rankPos rankPosition[RANK_POSITION_TABLE_SIZE];
118742 +} HUF_buildCTable_wksp_tables;
118745 + * HUF_sort():
118746 + * Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order.
118748 + * @param[out] huffNode       Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled.
118749 + *                            Must have (maxSymbolValue + 1) entries.
118750 + * @param[in]  count          Histogram of the symbols.
118751 + * @param[in]  maxSymbolValue Maximum symbol value.
118752 + * @param      rankPosition   This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries.
118753 + */
118754 +static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue, rankPos* rankPosition)
118756 +    int n;
118757 +    int const maxSymbolValue1 = (int)maxSymbolValue + 1;
118759 +    /* Compute base and set curr to base.
118760 +     * For symbol s let lowerRank = BIT_highbit32(count[s]+1) and rank = lowerRank + 1.
118761 +     * Then 2^lowerRank <= count[s]+1 < 2^rank.
118762 +     * We attribute each symbol to lowerRank's base value, because we want to know where
118763 +     * each rank begins in the output, so for rank R we want to count ranks R+1 and above.
118764 +     */
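+    /* Example (illustration): counts {0, 1, 3} give count+1 = {1, 2, 4},
+     * hence lowerRank = {0, 1, 2}; the suffix sums below turn each rank's
+     * population into its starting offset, so more frequent symbols
+     * (higher ranks) are placed first in huffNode. */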
118765 +    ZSTD_memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE);
118766 +    for (n = 0; n < maxSymbolValue1; ++n) {
118767 +        U32 lowerRank = BIT_highbit32(count[n] + 1);
118768 +        rankPosition[lowerRank].base++;
118769 +    }
118770 +    assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0);
118771 +    for (n = RANK_POSITION_TABLE_SIZE - 1; n > 0; --n) {
118772 +        rankPosition[n-1].base += rankPosition[n].base;
118773 +        rankPosition[n-1].curr = rankPosition[n-1].base;
118774 +    }
118775 +    /* Sort */
118776 +    for (n = 0; n < maxSymbolValue1; ++n) {
118777 +        U32 const c = count[n];
118778 +        U32 const r = BIT_highbit32(c+1) + 1;
118779 +        U32 pos = rankPosition[r].curr++;
118780 +        /* Insert into the correct position in the rank.
118781 +         * We have at most 256 symbols, so this insertion should be fine.
118782 +         */
118783 +        while ((pos > rankPosition[r].base) && (c > huffNode[pos-1].count)) {
118784 +            huffNode[pos] = huffNode[pos-1];
118785 +            pos--;
118786 +        }
118787 +        huffNode[pos].count = c;
118788 +        huffNode[pos].byte  = (BYTE)n;
118789 +    }
118793 +/** HUF_buildCTable_wksp() :
118794 + *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
118795 + *  `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables).
118796 + */
118797 +#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
118799 +/* HUF_buildTree():
118800 + * Takes the huffNode array sorted by HUF_sort() and builds an unlimited-depth Huffman tree.
118802 + * @param huffNode        The array sorted by HUF_sort(). Builds the Huffman tree in this array.
118803 + * @param maxSymbolValue  The maximum symbol value.
118804 + * @return                The smallest node in the Huffman tree (by count).
118805 + */
118806 +static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue)
118808 +    nodeElt* const huffNode0 = huffNode - 1;
118809 +    int nonNullRank;
118810 +    int lowS, lowN;
118811 +    int nodeNb = STARTNODE;
118812 +    int n, nodeRoot;
118813 +    /* init for parents */
118814 +    nonNullRank = (int)maxSymbolValue;
118815 +    while(huffNode[nonNullRank].count == 0) nonNullRank--;
118816 +    lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb;
118817 +    huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count;
118818 +    huffNode[lowS].parent = huffNode[lowS-1].parent = (U16)nodeNb;
118819 +    nodeNb++; lowS-=2;
118820 +    for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);
118821 +    huffNode0[0].count = (U32)(1U<<31);  /* fake entry, strong barrier */
118823 +    /* create parents */
118824 +    while (nodeNb <= nodeRoot) {
118825 +        int const n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
118826 +        int const n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
118827 +        huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
118828 +        huffNode[n1].parent = huffNode[n2].parent = (U16)nodeNb;
118829 +        nodeNb++;
118830 +    }
118832 +    /* distribute weights (unlimited tree height) */
118833 +    huffNode[nodeRoot].nbBits = 0;
118834 +    for (n=nodeRoot-1; n>=STARTNODE; n--)
118835 +        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
118836 +    for (n=0; n<=nonNullRank; n++)
118837 +        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
118839 +    return nonNullRank;
118843 + * HUF_buildCTableFromTree():
118844 + * Build the CTable given the Huffman tree in huffNode.
118846 + * @param[out] CTable         The output Huffman CTable.
118847 + * @param      huffNode       The Huffman tree.
118848 + * @param      nonNullRank    The last and smallest node in the Huffman tree.
118849 + * @param      maxSymbolValue The maximum symbol value.
118850 + * @param      maxNbBits      The exact maximum number of bits used in the Huffman tree.
118851 + */
118852 +static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, int nonNullRank, U32 maxSymbolValue, U32 maxNbBits)
118854 +    /* fill result into ctable (val, nbBits) */
118855 +    int n;
118856 +    U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
118857 +    U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
118858 +    int const alphabetSize = (int)(maxSymbolValue + 1);
118859 +    for (n=0; n<=nonNullRank; n++)
118860 +        nbPerRank[huffNode[n].nbBits]++;
118861 +    /* determine starting value per rank */
118862 +    {   U16 min = 0;
118863 +        for (n=(int)maxNbBits; n>0; n--) {
118864 +            valPerRank[n] = min;      /* get starting value within each rank */
118865 +            min += nbPerRank[n];
118866 +            min >>= 1;
118867 +    }   }
118868 +    for (n=0; n<alphabetSize; n++)
118869 +        CTable[huffNode[n].byte].nbBits = huffNode[n].nbBits;   /* push nbBits per symbol, symbol order */
118870 +    for (n=0; n<alphabetSize; n++)
118871 +        CTable[n].val = valPerRank[CTable[n].nbBits]++;   /* assign value within rank, symbol order */
118874 +size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
118876 +    HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)workSpace;
118877 +    nodeElt* const huffNode0 = wksp_tables->huffNodeTbl;
118878 +    nodeElt* const huffNode = huffNode0+1;
118879 +    int nonNullRank;
118881 +    /* safety checks */
118882 +    if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
118883 +    if (wkspSize < sizeof(HUF_buildCTable_wksp_tables))
118884 +      return ERROR(workSpace_tooSmall);
118885 +    if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
118886 +    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
118887 +      return ERROR(maxSymbolValue_tooLarge);
118888 +    ZSTD_memset(huffNode0, 0, sizeof(huffNodeTable));
118890 +    /* sort, decreasing order */
118891 +    HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition);
118893 +    /* build tree */
118894 +    nonNullRank = HUF_buildTree(huffNode, maxSymbolValue);
118896 +    /* enforce maxTableLog */
118897 +    maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits);
118898 +    if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC);   /* check fit into table */
118900 +    HUF_buildCTableFromTree(tree, huffNode, nonNullRank, maxSymbolValue, maxNbBits);
118902 +    return maxNbBits;
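+/* End-to-end sketch (illustration only, error checks elided; `buf`, `dst`,
+ * their sizes, and the 4-byte aligned `wksp`/`wkspSize` buffer are
+ * hypothetical caller-provided objects):
+ *
+ *   unsigned count[HUF_SYMBOLVALUE_MAX+1];
+ *   unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
+ *   HUF_CElt cTable[HUF_SYMBOLVALUE_MAX+1];
+ *   HIST_count(count, &maxSymbolValue, buf, bufSize);
+ *   HUF_buildCTable_wksp(cTable, count, maxSymbolValue,
+ *                        HUF_TABLELOG_DEFAULT, wksp, wkspSize);
+ *   size_t const cSize = HUF_compress4X_usingCTable(dst, dstCapacity,
+ *                                                   buf, bufSize, cTable);
+ */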
118905 +size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
118907 +    size_t nbBits = 0;
118908 +    int s;
118909 +    for (s = 0; s <= (int)maxSymbolValue; ++s) {
118910 +        nbBits += CTable[s].nbBits * count[s];
118911 +    }
118912 +    return nbBits >> 3;
118915 +int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
118916 +  int bad = 0;
118917 +  int s;
118918 +  for (s = 0; s <= (int)maxSymbolValue; ++s) {
118919 +    bad |= (count[s] != 0) & (CTable[s].nbBits == 0);
118920 +  }
118921 +  return !bad;
118924 +size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
118926 +FORCE_INLINE_TEMPLATE void
118927 +HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable)
118929 +    BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
118932 +#define HUF_FLUSHBITS(s)  BIT_flushBits(s)
118934 +#define HUF_FLUSHBITS_1(stream) \
118935 +    if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream)
118937 +#define HUF_FLUSHBITS_2(stream) \
118938 +    if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream)
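+/* Flush cadence (assuming HUF_TABLELOG_MAX == 12): the guards above are
+ * 12*2+7 = 31 and 12*4+7 = 55 bits. A 64-bit bitContainer can hold more
+ * than either bound, so HUF_FLUSHBITS_1/_2 compile to nothing and only
+ * every 4th symbol flushes; a 32-bit container cannot hold 55 bits, so
+ * HUF_FLUSHBITS_2 flushes after every 2nd symbol instead. */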
118940 +FORCE_INLINE_TEMPLATE size_t
118941 +HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
118942 +                                   const void* src, size_t srcSize,
118943 +                                   const HUF_CElt* CTable)
118945 +    const BYTE* ip = (const BYTE*) src;
118946 +    BYTE* const ostart = (BYTE*)dst;
118947 +    BYTE* const oend = ostart + dstSize;
118948 +    BYTE* op = ostart;
118949 +    size_t n;
118950 +    BIT_CStream_t bitC;
118952 +    /* init */
118953 +    if (dstSize < 8) return 0;   /* not enough space to compress */
118954 +    { size_t const initErr = BIT_initCStream(&bitC, op, (size_t)(oend-op));
118955 +      if (HUF_isError(initErr)) return 0; }
118957 +    n = srcSize & ~3;  /* round down to a multiple of 4 */
118958 +    switch (srcSize & 3)
118959 +    {
118960 +        case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable);
118961 +                 HUF_FLUSHBITS_2(&bitC);
118962 +                /* fall-through */
118963 +        case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable);
118964 +                 HUF_FLUSHBITS_1(&bitC);
118965 +                /* fall-through */
118966 +        case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable);
118967 +                 HUF_FLUSHBITS(&bitC);
118968 +                /* fall-through */
118969 +        case 0 : /* fall-through */
118970 +        default: break;
118971 +    }
118973 +    for (; n>0; n-=4) {  /* note : n&3==0 at this stage */
118974 +        HUF_encodeSymbol(&bitC, ip[n- 1], CTable);
118975 +        HUF_FLUSHBITS_1(&bitC);
118976 +        HUF_encodeSymbol(&bitC, ip[n- 2], CTable);
118977 +        HUF_FLUSHBITS_2(&bitC);
118978 +        HUF_encodeSymbol(&bitC, ip[n- 3], CTable);
118979 +        HUF_FLUSHBITS_1(&bitC);
118980 +        HUF_encodeSymbol(&bitC, ip[n- 4], CTable);
118981 +        HUF_FLUSHBITS(&bitC);
118982 +    }
118984 +    return BIT_closeCStream(&bitC);
118987 +#if DYNAMIC_BMI2
118989 +static TARGET_ATTRIBUTE("bmi2") size_t
118990 +HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize,
118991 +                                   const void* src, size_t srcSize,
118992 +                                   const HUF_CElt* CTable)
118994 +    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
118997 +static size_t
118998 +HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize,
118999 +                                      const void* src, size_t srcSize,
119000 +                                      const HUF_CElt* CTable)
119002 +    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
119005 +static size_t
119006 +HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
119007 +                              const void* src, size_t srcSize,
119008 +                              const HUF_CElt* CTable, const int bmi2)
119010 +    if (bmi2) {
119011 +        return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable);
119012 +    }
119013 +    return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable);
119016 +#else
119018 +static size_t
119019 +HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
119020 +                              const void* src, size_t srcSize,
119021 +                              const HUF_CElt* CTable, const int bmi2)
119023 +    (void)bmi2;
119024 +    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
119027 +#endif
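+/* Dispatch note: when DYNAMIC_BMI2 is set, the same inlined body is
+ * instantiated twice, once compiled with TARGET_ATTRIBUTE("bmi2") and once
+ * without; the runtime `bmi2` flag (from cpuid detection at context
+ * creation) picks the variant, so one binary can use BMI2 instructions
+ * only where they are available. */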
119029 +size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
119031 +    return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
119035 +static size_t
119036 +HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
119037 +                              const void* src, size_t srcSize,
119038 +                              const HUF_CElt* CTable, int bmi2)
119040 +    size_t const segmentSize = (srcSize+3)/4;   /* first 3 segments */
119041 +    const BYTE* ip = (const BYTE*) src;
119042 +    const BYTE* const iend = ip + srcSize;
119043 +    BYTE* const ostart = (BYTE*) dst;
119044 +    BYTE* const oend = ostart + dstSize;
119045 +    BYTE* op = ostart;
119047 +    if (dstSize < 6 + 1 + 1 + 1 + 8) return 0;   /* minimum space to compress successfully */
119048 +    if (srcSize < 12) return 0;   /* no saving possible : too small input */
119049 +    op += 6;   /* jumpTable */
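+    /* jump table format: three little-endian U16 values giving the
+     * compressed sizes of segments 1-3; segment 4's size is implied
+     * by the block's total compressed size */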
119051 +    assert(op <= oend);
119052 +    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
119053 +        if (cSize==0) return 0;
119054 +        assert(cSize <= 65535);
119055 +        MEM_writeLE16(ostart, (U16)cSize);
119056 +        op += cSize;
119057 +    }
119059 +    ip += segmentSize;
119060 +    assert(op <= oend);
119061 +    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
119062 +        if (cSize==0) return 0;
119063 +        assert(cSize <= 65535);
119064 +        MEM_writeLE16(ostart+2, (U16)cSize);
119065 +        op += cSize;
119066 +    }
119068 +    ip += segmentSize;
119069 +    assert(op <= oend);
119070 +    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
119071 +        if (cSize==0) return 0;
119072 +        assert(cSize <= 65535);
119073 +        MEM_writeLE16(ostart+4, (U16)cSize);
119074 +        op += cSize;
119075 +    }
119077 +    ip += segmentSize;
119078 +    assert(op <= oend);
119079 +    assert(ip <= iend);
119080 +    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) );
119081 +        if (cSize==0) return 0;
119082 +        op += cSize;
119083 +    }
119085 +    return (size_t)(op-ostart);
119088 +size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
119090 +    return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
119093 +typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;
119095 +static size_t HUF_compressCTable_internal(
119096 +                BYTE* const ostart, BYTE* op, BYTE* const oend,
119097 +                const void* src, size_t srcSize,
119098 +                HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2)
119100 +    size_t const cSize = (nbStreams==HUF_singleStream) ?
119101 +                         HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2) :
119102 +                         HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2);
119103 +    if (HUF_isError(cSize)) { return cSize; }
119104 +    if (cSize==0) { return 0; }   /* uncompressible */
119105 +    op += cSize;
119106 +    /* check compressibility */
119107 +    assert(op >= ostart);
119108 +    if ((size_t)(op-ostart) >= srcSize-1) { return 0; }
119109 +    return (size_t)(op-ostart);
119112 +typedef struct {
119113 +    unsigned count[HUF_SYMBOLVALUE_MAX + 1];
119114 +    HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1];
119115 +    union {
119116 +        HUF_buildCTable_wksp_tables buildCTable_wksp;
119117 +        HUF_WriteCTableWksp writeCTable_wksp;
119118 +    } wksps;
119119 +} HUF_compress_tables_t;
119121 +/* HUF_compress_internal() :
119122 + * `workSpace_align4` must be aligned on 4-bytes boundaries,
119123 + * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U32 unsigned */
119124 +static size_t
119125 +HUF_compress_internal (void* dst, size_t dstSize,
119126 +                 const void* src, size_t srcSize,
119127 +                       unsigned maxSymbolValue, unsigned huffLog,
119128 +                       HUF_nbStreams_e nbStreams,
119129 +                       void* workSpace_align4, size_t wkspSize,
119130 +                       HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
119131 +                 const int bmi2)
119133 +    HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace_align4;
119134 +    BYTE* const ostart = (BYTE*)dst;
119135 +    BYTE* const oend = ostart + dstSize;
119136 +    BYTE* op = ostart;
119138 +    HUF_STATIC_ASSERT(sizeof(*table) <= HUF_WORKSPACE_SIZE);
119139 +    assert(((size_t)workSpace_align4 & 3) == 0);   /* must be aligned on 4-bytes boundaries */
119141 +    /* checks & inits */
119142 +    if (wkspSize < HUF_WORKSPACE_SIZE) return ERROR(workSpace_tooSmall);
119143 +    if (!srcSize) return 0;  /* Uncompressed */
119144 +    if (!dstSize) return 0;  /* cannot fit anything within dst budget */
119145 +    if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);   /* current block size limit */
119146 +    if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
119147 +    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
119148 +    if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
119149 +    if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
119151 +    /* Heuristic : If old table is valid, use it for small inputs */
119152 +    if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
119153 +        return HUF_compressCTable_internal(ostart, op, oend,
119154 +                                           src, srcSize,
119155 +                                           nbStreams, oldHufTable, bmi2);
119156 +    }
119158 +    /* Scan input and build symbol stats */
119159 +    {   CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace_align4, wkspSize) );
119160 +        if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; }   /* single symbol, rle */
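+        /* the check below: if the most frequent symbol covers no more than
+         * ~1/128 of the input, the distribution is near-uniform and Huffman
+         * cannot gain enough to repay its table header */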
119161 +        if (largest <= (srcSize >> 7)+4) return 0;   /* heuristic : probably not compressible enough */
119162 +    }
119164 +    /* Check validity of previous table */
119165 +    if ( repeat
119166 +      && *repeat == HUF_repeat_check
119167 +      && !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) {
119168 +        *repeat = HUF_repeat_none;
119169 +    }
119170 +    /* Heuristic : use existing table for small inputs */
119171 +    if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
119172 +        return HUF_compressCTable_internal(ostart, op, oend,
119173 +                                           src, srcSize,
119174 +                                           nbStreams, oldHufTable, bmi2);
119175 +    }
119177 +    /* Build Huffman Tree */
119178 +    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
119179 +    {   size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
119180 +                                            maxSymbolValue, huffLog,
119181 +                                            &table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp));
119182 +        CHECK_F(maxBits);
119183 +        huffLog = (U32)maxBits;
119184 +        /* Zero unused symbols in CTable, so we can check it for validity */
119185 +        ZSTD_memset(table->CTable + (maxSymbolValue + 1), 0,
119186 +               sizeof(table->CTable) - ((maxSymbolValue + 1) * sizeof(HUF_CElt)));
119187 +    }
119189 +    /* Write table description header */
119190 +    {   CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, table->CTable, maxSymbolValue, huffLog,
119191 +                                              &table->wksps.writeCTable_wksp, sizeof(table->wksps.writeCTable_wksp)) );
119192 +        /* Check if using previous huffman table is beneficial */
119193 +        if (repeat && *repeat != HUF_repeat_none) {
119194 +            size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue);
119195 +            size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue);
119196 +            if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
119197 +                return HUF_compressCTable_internal(ostart, op, oend,
119198 +                                                   src, srcSize,
119199 +                                                   nbStreams, oldHufTable, bmi2);
119200 +        }   }
119202 +        /* Use the new huffman table */
119203 +        if (hSize + 12ul >= srcSize) { return 0; }
119204 +        op += hSize;
119205 +        if (repeat) { *repeat = HUF_repeat_none; }
119206 +        if (oldHufTable)
119207 +            ZSTD_memcpy(oldHufTable, table->CTable, sizeof(table->CTable));  /* Save new table */
119208 +    }
119209 +    return HUF_compressCTable_internal(ostart, op, oend,
119210 +                                       src, srcSize,
119211 +                                       nbStreams, table->CTable, bmi2);
119215 +size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
119216 +                      const void* src, size_t srcSize,
119217 +                      unsigned maxSymbolValue, unsigned huffLog,
119218 +                      void* workSpace, size_t wkspSize)
119220 +    return HUF_compress_internal(dst, dstSize, src, srcSize,
119221 +                                 maxSymbolValue, huffLog, HUF_singleStream,
119222 +                                 workSpace, wkspSize,
119223 +                                 NULL, NULL, 0, 0 /*bmi2*/);
119226 +size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
119227 +                      const void* src, size_t srcSize,
119228 +                      unsigned maxSymbolValue, unsigned huffLog,
119229 +                      void* workSpace, size_t wkspSize,
119230 +                      HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
119232 +    return HUF_compress_internal(dst, dstSize, src, srcSize,
119233 +                                 maxSymbolValue, huffLog, HUF_singleStream,
119234 +                                 workSpace, wkspSize, hufTable,
119235 +                                 repeat, preferRepeat, bmi2);
119238 +/* HUF_compress4X_wksp():
119239 + * compress input using 4 streams.
119240 + * provide workspace to generate compression tables */
119241 +size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
119242 +                      const void* src, size_t srcSize,
119243 +                      unsigned maxSymbolValue, unsigned huffLog,
119244 +                      void* workSpace, size_t wkspSize)
119246 +    return HUF_compress_internal(dst, dstSize, src, srcSize,
119247 +                                 maxSymbolValue, huffLog, HUF_fourStreams,
119248 +                                 workSpace, wkspSize,
119249 +                                 NULL, NULL, 0, 0 /*bmi2*/);
119252 +/* HUF_compress4X_repeat():
119253 + * compress input using 4 streams.
119254 + * re-use an existing huffman compression table */
119255 +size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
119256 +                      const void* src, size_t srcSize,
119257 +                      unsigned maxSymbolValue, unsigned huffLog,
119258 +                      void* workSpace, size_t wkspSize,
119259 +                      HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
119261 +    return HUF_compress_internal(dst, dstSize, src, srcSize,
119262 +                                 maxSymbolValue, huffLog, HUF_fourStreams,
119263 +                                 workSpace, wkspSize,
119264 +                                 hufTable, repeat, preferRepeat, bmi2);
119266 diff --git a/lib/zstd/compress/zstd_compress.c b/lib/zstd/compress/zstd_compress.c
119267 new file mode 100644
119268 index 000000000000..78aa14c50dd2
119269 --- /dev/null
119270 +++ b/lib/zstd/compress/zstd_compress.c
119271 @@ -0,0 +1,5105 @@
119273 + * Copyright (c) Yann Collet, Facebook, Inc.
119274 + * All rights reserved.
119276 + * This source code is licensed under both the BSD-style license (found in the
119277 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
119278 + * in the COPYING file in the root directory of this source tree).
119279 + * You may select, at your option, one of the above-listed licenses.
119280 + */
119282 +/*-*************************************
119283 +*  Dependencies
119284 +***************************************/
119285 +#include "../common/zstd_deps.h"  /* INT_MAX, ZSTD_memset, ZSTD_memcpy */
119286 +#include "../common/cpu.h"
119287 +#include "../common/mem.h"
119288 +#include "hist.h"           /* HIST_countFast_wksp */
119289 +#define FSE_STATIC_LINKING_ONLY   /* FSE_encodeSymbol */
119290 +#include "../common/fse.h"
119291 +#define HUF_STATIC_LINKING_ONLY
119292 +#include "../common/huf.h"
119293 +#include "zstd_compress_internal.h"
119294 +#include "zstd_compress_sequences.h"
119295 +#include "zstd_compress_literals.h"
119296 +#include "zstd_fast.h"
119297 +#include "zstd_double_fast.h"
119298 +#include "zstd_lazy.h"
119299 +#include "zstd_opt.h"
119300 +#include "zstd_ldm.h"
119301 +#include "zstd_compress_superblock.h"
119303 +/* ***************************************************************
119304 +*  Tuning parameters
119305 +*****************************************************************/
119307 + * COMPRESS_HEAPMODE :
119308 + * Select how the default compression function ZSTD_compress() allocates its context,
119309 + * on stack (0, default), or into heap (1).
119310 + * Note that functions with explicit context such as ZSTD_compressCCtx() are unaffected.
119311 + */
119314 +/*-*************************************
119315 +*  Helper functions
119316 +***************************************/
119317 +/* ZSTD_compressBound()
119318 + * Note that the result from this function is only compatible with the "normal"
119319 + * full-block strategy.
119320 + * When there are a lot of small blocks due to frequent flushes in streaming mode,
119321 + * the overhead of headers can make the compressed data larger than the
119322 + * return value of ZSTD_compressBound().
119323 + */
119324 +size_t ZSTD_compressBound(size_t srcSize) {
119325 +    return ZSTD_COMPRESSBOUND(srcSize);
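+/* Sizing sketch (illustration only; `srcSize` and the allocator are
+ * hypothetical): one-shot, full-block compression should reserve the
+ * full bound up front.
+ *
+ *   size_t const dstCapacity = ZSTD_compressBound(srcSize);
+ *   void* const dst = my_alloc(dstCapacity);   // hypothetical allocator
+ */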
119329 +/*-*************************************
119330 +*  Context memory management
119331 +***************************************/
119332 +struct ZSTD_CDict_s {
119333 +    const void* dictContent;
119334 +    size_t dictContentSize;
119335 +    ZSTD_dictContentType_e dictContentType; /* The dictContentType the CDict was created with */
119336 +    U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
119337 +    ZSTD_cwksp workspace;
119338 +    ZSTD_matchState_t matchState;
119339 +    ZSTD_compressedBlockState_t cBlockState;
119340 +    ZSTD_customMem customMem;
119341 +    U32 dictID;
119342 +    int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */
119343 +};  /* typedef'd to ZSTD_CDict within "zstd.h" */
119345 +ZSTD_CCtx* ZSTD_createCCtx(void)
119347 +    return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
119350 +static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
119352 +    assert(cctx != NULL);
119353 +    ZSTD_memset(cctx, 0, sizeof(*cctx));
119354 +    cctx->customMem = memManager;
119355 +    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
119356 +    {   size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);
119357 +        assert(!ZSTD_isError(err));
119358 +        (void)err;
119359 +    }
119362 +ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
119364 +    ZSTD_STATIC_ASSERT(zcss_init==0);
119365 +    ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
119366 +    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
119367 +    {   ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_customMalloc(sizeof(ZSTD_CCtx), customMem);
119368 +        if (!cctx) return NULL;
119369 +        ZSTD_initCCtx(cctx, customMem);
119370 +        return cctx;
119371 +    }
119374 +ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)
119376 +    ZSTD_cwksp ws;
119377 +    ZSTD_CCtx* cctx;
119378 +    if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL;  /* minimum size */
119379 +    if ((size_t)workspace & 7) return NULL;  /* must be 8-aligned */
119380 +    ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
119382 +    cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
119383 +    if (cctx == NULL) return NULL;
119385 +    ZSTD_memset(cctx, 0, sizeof(ZSTD_CCtx));
119386 +    ZSTD_cwksp_move(&cctx->workspace, &ws);
119387 +    cctx->staticSize = workspaceSize;
119389 +    /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
119390 +    if (!ZSTD_cwksp_check_available(&cctx->workspace, ENTROPY_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
119391 +    cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
119392 +    cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
119393 +    cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, ENTROPY_WORKSPACE_SIZE);
119394 +    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
119395 +    return cctx;
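+/* Static allocation sketch (illustration only; `wksp`/`wkspSize` are a
+ * hypothetical caller-owned buffer, 8-byte aligned, which must outlive
+ * the CCtx):
+ *
+ *   ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(wksp, wkspSize);
+ *   if (cctx == NULL) { }   // buffer too small or misaligned
+ */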
119399 + * Clears and frees all of the dictionaries in the CCtx.
119400 + */
119401 +static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx)
119403 +    ZSTD_customFree(cctx->localDict.dictBuffer, cctx->customMem);
119404 +    ZSTD_freeCDict(cctx->localDict.cdict);
119405 +    ZSTD_memset(&cctx->localDict, 0, sizeof(cctx->localDict));
119406 +    ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));
119407 +    cctx->cdict = NULL;
119410 +static size_t ZSTD_sizeof_localDict(ZSTD_localDict dict)
119412 +    size_t const bufferSize = dict.dictBuffer != NULL ? dict.dictSize : 0;
119413 +    size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict);
119414 +    return bufferSize + cdictSize;
119417 +static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
119419 +    assert(cctx != NULL);
119420 +    assert(cctx->staticSize == 0);
119421 +    ZSTD_clearAllDicts(cctx);
119422 +    ZSTD_cwksp_free(&cctx->workspace, cctx->customMem);
119425 +size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
119427 +    if (cctx==NULL) return 0;   /* support free on NULL */
119428 +    RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
119429 +                    "not compatible with static CCtx");
119430 +    {
119431 +        int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);
119432 +        ZSTD_freeCCtxContent(cctx);
119433 +        if (!cctxInWorkspace) {
119434 +            ZSTD_customFree(cctx, cctx->customMem);
119435 +        }
119436 +    }
119437 +    return 0;
119441 +static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
119443 +    (void)cctx;
119444 +    return 0;
119448 +size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
119450 +    if (cctx==NULL) return 0;   /* support sizeof on NULL */
119451 +    /* cctx may be in the workspace */
119452 +    return (cctx->workspace.workspace == cctx ? 0 : sizeof(*cctx))
119453 +           + ZSTD_cwksp_sizeof(&cctx->workspace)
119454 +           + ZSTD_sizeof_localDict(cctx->localDict)
119455 +           + ZSTD_sizeof_mtctx(cctx);
119458 +size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
119460 +    return ZSTD_sizeof_CCtx(zcs);  /* same object */
119463 +/* private API call, for dictBuilder only */
119464 +const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
119466 +/* Returns 1 if compression parameters are such that we should
119467 + * enable long distance matching (wlog >= 27, strategy >= btopt).
119468 + * Returns 0 otherwise.
119469 + */
119470 +static U32 ZSTD_CParams_shouldEnableLdm(const ZSTD_compressionParameters* const cParams) {
119471 +    return cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27;
119474 +static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
119475 +        ZSTD_compressionParameters cParams)
119477 +    ZSTD_CCtx_params cctxParams;
119478 +    /* should not matter, as all cParams are presumed properly defined */
119479 +    ZSTD_CCtxParams_init(&cctxParams, ZSTD_CLEVEL_DEFAULT);
119480 +    cctxParams.cParams = cParams;
119482 +    if (ZSTD_CParams_shouldEnableLdm(&cParams)) {
119483 +        DEBUGLOG(4, "ZSTD_makeCCtxParamsFromCParams(): Including LDM into cctx params");
119484 +        cctxParams.ldmParams.enableLdm = 1;
119485 +        /* LDM is enabled by default for optimal parser and window size >= 128MB */
119486 +        ZSTD_ldm_adjustParameters(&cctxParams.ldmParams, &cParams);
119487 +        assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog);
119488 +        assert(cctxParams.ldmParams.hashRateLog < 32);
119489 +    }
119491 +    assert(!ZSTD_checkCParams(cParams));
119492 +    return cctxParams;
119495 +static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
119496 +        ZSTD_customMem customMem)
119498 +    ZSTD_CCtx_params* params;
119499 +    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
119500 +    params = (ZSTD_CCtx_params*)ZSTD_customCalloc(
119501 +            sizeof(ZSTD_CCtx_params), customMem);
119502 +    if (!params) { return NULL; }
119503 +    ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
119504 +    params->customMem = customMem;
119505 +    return params;
119508 +ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
119510 +    return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
119513 +size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
119515 +    if (params == NULL) { return 0; }
119516 +    ZSTD_customFree(params, params->customMem);
119517 +    return 0;
119520 +size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
119522 +    return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
119525 +size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
119526 +    RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
119527 +    ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
119528 +    cctxParams->compressionLevel = compressionLevel;
119529 +    cctxParams->fParams.contentSizeFlag = 1;
119530 +    return 0;
119533 +#define ZSTD_NO_CLEVEL 0
119536 + * Initializes the cctxParams from params and compressionLevel.
119537 + * @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL.
119538 + */
119539 +static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_parameters const* params, int compressionLevel)
119541 +    assert(!ZSTD_checkCParams(params->cParams));
119542 +    ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
119543 +    cctxParams->cParams = params->cParams;
119544 +    cctxParams->fParams = params->fParams;
119545 +    /* Should not matter, as all cParams are presumed properly defined.
119546 +     * But, set it for tracing anyway.
119547 +     */
119548 +    cctxParams->compressionLevel = compressionLevel;
119551 +size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
119553 +    RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
119554 +    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
119555 +    ZSTD_CCtxParams_init_internal(cctxParams, &params, ZSTD_NO_CLEVEL);
119556 +    return 0;
119560 + * Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone.
119561 + * @param params Validated zstd parameters.
119562 + */
119563 +static void ZSTD_CCtxParams_setZstdParams(
119564 +        ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
119566 +    assert(!ZSTD_checkCParams(params->cParams));
119567 +    cctxParams->cParams = params->cParams;
119568 +    cctxParams->fParams = params->fParams;
119569 +    /* Should not matter, as all cParams are presumed properly defined.
119570 +     * But, set it for tracing anyway.
119571 +     */
119572 +    cctxParams->compressionLevel = ZSTD_NO_CLEVEL;
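+/* Lifecycle sketch (illustration only) for the params object API above:
+ *
+ *   ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
+ *   ZSTD_CCtxParams_init(params, 3);   // defaults for level 3
+ *   // ... apply further settings, use params ...
+ *   ZSTD_freeCCtxParams(params);
+ */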
119575 +ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
119577 +    ZSTD_bounds bounds = { 0, 0, 0 };
119579 +    switch(param)
119580 +    {
119581 +    case ZSTD_c_compressionLevel:
119582 +        bounds.lowerBound = ZSTD_minCLevel();
119583 +        bounds.upperBound = ZSTD_maxCLevel();
119584 +        return bounds;
119586 +    case ZSTD_c_windowLog:
119587 +        bounds.lowerBound = ZSTD_WINDOWLOG_MIN;
119588 +        bounds.upperBound = ZSTD_WINDOWLOG_MAX;
119589 +        return bounds;
119591 +    case ZSTD_c_hashLog:
119592 +        bounds.lowerBound = ZSTD_HASHLOG_MIN;
119593 +        bounds.upperBound = ZSTD_HASHLOG_MAX;
119594 +        return bounds;
119596 +    case ZSTD_c_chainLog:
119597 +        bounds.lowerBound = ZSTD_CHAINLOG_MIN;
119598 +        bounds.upperBound = ZSTD_CHAINLOG_MAX;
119599 +        return bounds;
119601 +    case ZSTD_c_searchLog:
119602 +        bounds.lowerBound = ZSTD_SEARCHLOG_MIN;
119603 +        bounds.upperBound = ZSTD_SEARCHLOG_MAX;
119604 +        return bounds;
119606 +    case ZSTD_c_minMatch:
119607 +        bounds.lowerBound = ZSTD_MINMATCH_MIN;
119608 +        bounds.upperBound = ZSTD_MINMATCH_MAX;
119609 +        return bounds;
119611 +    case ZSTD_c_targetLength:
119612 +        bounds.lowerBound = ZSTD_TARGETLENGTH_MIN;
119613 +        bounds.upperBound = ZSTD_TARGETLENGTH_MAX;
119614 +        return bounds;
119616 +    case ZSTD_c_strategy:
119617 +        bounds.lowerBound = ZSTD_STRATEGY_MIN;
119618 +        bounds.upperBound = ZSTD_STRATEGY_MAX;
119619 +        return bounds;
119621 +    case ZSTD_c_contentSizeFlag:
119622 +        bounds.lowerBound = 0;
119623 +        bounds.upperBound = 1;
119624 +        return bounds;
119626 +    case ZSTD_c_checksumFlag:
119627 +        bounds.lowerBound = 0;
119628 +        bounds.upperBound = 1;
119629 +        return bounds;
119631 +    case ZSTD_c_dictIDFlag:
119632 +        bounds.lowerBound = 0;
119633 +        bounds.upperBound = 1;
119634 +        return bounds;
119636 +    case ZSTD_c_nbWorkers:
119637 +        bounds.lowerBound = 0;
119638 +        bounds.upperBound = 0;
119639 +        return bounds;
119641 +    case ZSTD_c_jobSize:
119642 +        bounds.lowerBound = 0;
119643 +        bounds.upperBound = 0;
119644 +        return bounds;
119646 +    case ZSTD_c_overlapLog:
119647 +        bounds.lowerBound = 0;
119648 +        bounds.upperBound = 0;
119649 +        return bounds;
119651 +    case ZSTD_c_enableDedicatedDictSearch:
119652 +        bounds.lowerBound = 0;
119653 +        bounds.upperBound = 1;
119654 +        return bounds;
119656 +    case ZSTD_c_enableLongDistanceMatching:
119657 +        bounds.lowerBound = 0;
119658 +        bounds.upperBound = 1;
119659 +        return bounds;
119661 +    case ZSTD_c_ldmHashLog:
119662 +        bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN;
119663 +        bounds.upperBound = ZSTD_LDM_HASHLOG_MAX;
119664 +        return bounds;
119666 +    case ZSTD_c_ldmMinMatch:
119667 +        bounds.lowerBound = ZSTD_LDM_MINMATCH_MIN;
119668 +        bounds.upperBound = ZSTD_LDM_MINMATCH_MAX;
119669 +        return bounds;
119671 +    case ZSTD_c_ldmBucketSizeLog:
119672 +        bounds.lowerBound = ZSTD_LDM_BUCKETSIZELOG_MIN;
119673 +        bounds.upperBound = ZSTD_LDM_BUCKETSIZELOG_MAX;
119674 +        return bounds;
119676 +    case ZSTD_c_ldmHashRateLog:
119677 +        bounds.lowerBound = ZSTD_LDM_HASHRATELOG_MIN;
119678 +        bounds.upperBound = ZSTD_LDM_HASHRATELOG_MAX;
119679 +        return bounds;
119681 +    /* experimental parameters */
119682 +    case ZSTD_c_rsyncable:
119683 +        bounds.lowerBound = 0;
119684 +        bounds.upperBound = 1;
119685 +        return bounds;
119687 +    case ZSTD_c_forceMaxWindow :
119688 +        bounds.lowerBound = 0;
119689 +        bounds.upperBound = 1;
119690 +        return bounds;
119692 +    case ZSTD_c_format:
119693 +        ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
119694 +        bounds.lowerBound = ZSTD_f_zstd1;
119695 +        bounds.upperBound = ZSTD_f_zstd1_magicless;   /* note : how to ensure at compile time that this is the highest value enum ? */
119696 +        return bounds;
119698 +    case ZSTD_c_forceAttachDict:
119699 +        ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceLoad);
119700 +        bounds.lowerBound = ZSTD_dictDefaultAttach;
119701 +        bounds.upperBound = ZSTD_dictForceLoad;       /* note : how to ensure at compile time that this is the highest value enum ? */
119702 +        return bounds;
119704 +    case ZSTD_c_literalCompressionMode:
119705 +        ZSTD_STATIC_ASSERT(ZSTD_lcm_auto < ZSTD_lcm_huffman && ZSTD_lcm_huffman < ZSTD_lcm_uncompressed);
119706 +        bounds.lowerBound = ZSTD_lcm_auto;
119707 +        bounds.upperBound = ZSTD_lcm_uncompressed;
119708 +        return bounds;
119710 +    case ZSTD_c_targetCBlockSize:
119711 +        bounds.lowerBound = ZSTD_TARGETCBLOCKSIZE_MIN;
119712 +        bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX;
119713 +        return bounds;
119715 +    case ZSTD_c_srcSizeHint:
119716 +        bounds.lowerBound = ZSTD_SRCSIZEHINT_MIN;
119717 +        bounds.upperBound = ZSTD_SRCSIZEHINT_MAX;
119718 +        return bounds;
119720 +    case ZSTD_c_stableInBuffer:
119721 +    case ZSTD_c_stableOutBuffer:
119722 +        bounds.lowerBound = (int)ZSTD_bm_buffered;
119723 +        bounds.upperBound = (int)ZSTD_bm_stable;
119724 +        return bounds;
119726 +    case ZSTD_c_blockDelimiters:
119727 +        bounds.lowerBound = (int)ZSTD_sf_noBlockDelimiters;
119728 +        bounds.upperBound = (int)ZSTD_sf_explicitBlockDelimiters;
119729 +        return bounds;
119731 +    case ZSTD_c_validateSequences:
119732 +        bounds.lowerBound = 0;
119733 +        bounds.upperBound = 1;
119734 +        return bounds;
119736 +    default:
119737 +        bounds.error = ERROR(parameter_unsupported);
119738 +        return bounds;
119739 +    }
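/* Illustration (not part of the patch): querying bounds before setting a
 * parameter, via the stable ZSTD_cParam_getBounds() from userspace <zstd.h>.
 * A sketch mirroring ZSTD_cParam_clampBounds() below, not kernel code. */
#include <zstd.h>

static int clamp_to_bounds_sketch(ZSTD_cParameter param, int value)
{
    ZSTD_bounds const b = ZSTD_cParam_getBounds(param);
    if (ZSTD_isError(b.error)) return value;   /* unknown parameter: leave value as-is */
    if (value < b.lowerBound) value = b.lowerBound;
    if (value > b.upperBound) value = b.upperBound;
    return value;
}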
119742 +/* ZSTD_cParam_clampBounds:
119743 + * Clamps the value into the bounded range.
119744 + */
119745 +static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)
119747 +    ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
119748 +    if (ZSTD_isError(bounds.error)) return bounds.error;
119749 +    if (*value < bounds.lowerBound) *value = bounds.lowerBound;
119750 +    if (*value > bounds.upperBound) *value = bounds.upperBound;
119751 +    return 0;
119754 +#define BOUNDCHECK(cParam, val) { \
119755 +    RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
119756 +                    parameter_outOfBound, "Param out of bounds"); \
119760 +static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
119762 +    switch(param)
119763 +    {
119764 +    case ZSTD_c_compressionLevel:
119765 +    case ZSTD_c_hashLog:
119766 +    case ZSTD_c_chainLog:
119767 +    case ZSTD_c_searchLog:
119768 +    case ZSTD_c_minMatch:
119769 +    case ZSTD_c_targetLength:
119770 +    case ZSTD_c_strategy:
119771 +        return 1;
119773 +    case ZSTD_c_format:
119774 +    case ZSTD_c_windowLog:
119775 +    case ZSTD_c_contentSizeFlag:
119776 +    case ZSTD_c_checksumFlag:
119777 +    case ZSTD_c_dictIDFlag:
119778 +    case ZSTD_c_forceMaxWindow :
119779 +    case ZSTD_c_nbWorkers:
119780 +    case ZSTD_c_jobSize:
119781 +    case ZSTD_c_overlapLog:
119782 +    case ZSTD_c_rsyncable:
119783 +    case ZSTD_c_enableDedicatedDictSearch:
119784 +    case ZSTD_c_enableLongDistanceMatching:
119785 +    case ZSTD_c_ldmHashLog:
119786 +    case ZSTD_c_ldmMinMatch:
119787 +    case ZSTD_c_ldmBucketSizeLog:
119788 +    case ZSTD_c_ldmHashRateLog:
119789 +    case ZSTD_c_forceAttachDict:
119790 +    case ZSTD_c_literalCompressionMode:
119791 +    case ZSTD_c_targetCBlockSize:
119792 +    case ZSTD_c_srcSizeHint:
119793 +    case ZSTD_c_stableInBuffer:
119794 +    case ZSTD_c_stableOutBuffer:
119795 +    case ZSTD_c_blockDelimiters:
119796 +    case ZSTD_c_validateSequences:
119797 +    default:
119798 +        return 0;
119799 +    }
119802 +size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
119804 +    DEBUGLOG(4, "ZSTD_CCtx_setParameter (%i, %i)", (int)param, value);
119805 +    if (cctx->streamStage != zcss_init) {
119806 +        if (ZSTD_isUpdateAuthorized(param)) {
119807 +            cctx->cParamsChanged = 1;
119808 +        } else {
119809 +            RETURN_ERROR(stage_wrong, "can only set params in ctx init stage");
119810 +    }   }
119812 +    switch(param)
119813 +    {
119814 +    case ZSTD_c_nbWorkers:
119815 +        RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported,
119816 +                        "MT not compatible with static alloc");
119817 +        break;
119819 +    case ZSTD_c_compressionLevel:
119820 +    case ZSTD_c_windowLog:
119821 +    case ZSTD_c_hashLog:
119822 +    case ZSTD_c_chainLog:
119823 +    case ZSTD_c_searchLog:
119824 +    case ZSTD_c_minMatch:
119825 +    case ZSTD_c_targetLength:
119826 +    case ZSTD_c_strategy:
119827 +    case ZSTD_c_ldmHashRateLog:
119828 +    case ZSTD_c_format:
119829 +    case ZSTD_c_contentSizeFlag:
119830 +    case ZSTD_c_checksumFlag:
119831 +    case ZSTD_c_dictIDFlag:
119832 +    case ZSTD_c_forceMaxWindow:
119833 +    case ZSTD_c_forceAttachDict:
119834 +    case ZSTD_c_literalCompressionMode:
119835 +    case ZSTD_c_jobSize:
119836 +    case ZSTD_c_overlapLog:
119837 +    case ZSTD_c_rsyncable:
119838 +    case ZSTD_c_enableDedicatedDictSearch:
119839 +    case ZSTD_c_enableLongDistanceMatching:
119840 +    case ZSTD_c_ldmHashLog:
119841 +    case ZSTD_c_ldmMinMatch:
119842 +    case ZSTD_c_ldmBucketSizeLog:
119843 +    case ZSTD_c_targetCBlockSize:
119844 +    case ZSTD_c_srcSizeHint:
119845 +    case ZSTD_c_stableInBuffer:
119846 +    case ZSTD_c_stableOutBuffer:
119847 +    case ZSTD_c_blockDelimiters:
119848 +    case ZSTD_c_validateSequences:
119849 +        break;
119851 +    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
119852 +    }
119853 +    return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value);
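/* Illustration (not part of the patch): typical caller-side use of
 * ZSTD_CCtx_setParameter(), sketched against the stable userspace <zstd.h>
 * API. Parameters set before the first byte apply to every following frame
 * until reset; once streaming has started, only the subset accepted by
 * ZSTD_isUpdateAuthorized() above may still change. */
#include <zstd.h>

static size_t configure_cctx_sketch(ZSTD_CCtx* cctx)
{
    size_t err;
    err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
    if (ZSTD_isError(err)) return err;
    err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);  /* append xxh64 checksum */
    if (ZSTD_isError(err)) return err;
    return ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 24);   /* 0 would mean "default" */
}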
119856 +size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
119857 +                                    ZSTD_cParameter param, int value)
119859 +    DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value);
119860 +    switch(param)
119861 +    {
119862 +    case ZSTD_c_format :
119863 +        BOUNDCHECK(ZSTD_c_format, value);
119864 +        CCtxParams->format = (ZSTD_format_e)value;
119865 +        return (size_t)CCtxParams->format;
119867 +    case ZSTD_c_compressionLevel : {
119868 +        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
119869 +        if (value == 0)
119870 +            CCtxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
119871 +        else
119872 +            CCtxParams->compressionLevel = value;
119873 +        if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel;
119874 +        return 0;  /* return type (size_t) cannot represent negative values */
119875 +    }
119877 +    case ZSTD_c_windowLog :
119878 +        if (value!=0)   /* 0 => use default */
119879 +            BOUNDCHECK(ZSTD_c_windowLog, value);
119880 +        CCtxParams->cParams.windowLog = (U32)value;
119881 +        return CCtxParams->cParams.windowLog;
119883 +    case ZSTD_c_hashLog :
119884 +        if (value!=0)   /* 0 => use default */
119885 +            BOUNDCHECK(ZSTD_c_hashLog, value);
119886 +        CCtxParams->cParams.hashLog = (U32)value;
119887 +        return CCtxParams->cParams.hashLog;
119889 +    case ZSTD_c_chainLog :
119890 +        if (value!=0)   /* 0 => use default */
119891 +            BOUNDCHECK(ZSTD_c_chainLog, value);
119892 +        CCtxParams->cParams.chainLog = (U32)value;
119893 +        return CCtxParams->cParams.chainLog;
119895 +    case ZSTD_c_searchLog :
119896 +        if (value!=0)   /* 0 => use default */
119897 +            BOUNDCHECK(ZSTD_c_searchLog, value);
119898 +        CCtxParams->cParams.searchLog = (U32)value;
119899 +        return (size_t)value;
119901 +    case ZSTD_c_minMatch :
119902 +        if (value!=0)   /* 0 => use default */
119903 +            BOUNDCHECK(ZSTD_c_minMatch, value);
119904 +        CCtxParams->cParams.minMatch = value;
119905 +        return CCtxParams->cParams.minMatch;
119907 +    case ZSTD_c_targetLength :
119908 +        BOUNDCHECK(ZSTD_c_targetLength, value);
119909 +        CCtxParams->cParams.targetLength = value;
119910 +        return CCtxParams->cParams.targetLength;
119912 +    case ZSTD_c_strategy :
119913 +        if (value!=0)   /* 0 => use default */
119914 +            BOUNDCHECK(ZSTD_c_strategy, value);
119915 +        CCtxParams->cParams.strategy = (ZSTD_strategy)value;
119916 +        return (size_t)CCtxParams->cParams.strategy;
119918 +    case ZSTD_c_contentSizeFlag :
119919 +        /* Content size written in frame header _when known_ (default:1) */
119920 +        DEBUGLOG(4, "set content size flag = %u", (value!=0));
119921 +        CCtxParams->fParams.contentSizeFlag = value != 0;
119922 +        return CCtxParams->fParams.contentSizeFlag;
119924 +    case ZSTD_c_checksumFlag :
119925 +        /* A 32-bit content checksum will be calculated and written at the end of the frame (default:0) */
119926 +        CCtxParams->fParams.checksumFlag = value != 0;
119927 +        return CCtxParams->fParams.checksumFlag;
119929 +    case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
119930 +        DEBUGLOG(4, "set dictIDFlag = %u", (value!=0));
119931 +        CCtxParams->fParams.noDictIDFlag = !value;
119932 +        return !CCtxParams->fParams.noDictIDFlag;
119934 +    case ZSTD_c_forceMaxWindow :
119935 +        CCtxParams->forceWindow = (value != 0);
119936 +        return CCtxParams->forceWindow;
119938 +    case ZSTD_c_forceAttachDict : {
119939 +        const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value;
119940 +        BOUNDCHECK(ZSTD_c_forceAttachDict, pref);
119941 +        CCtxParams->attachDictPref = pref;
119942 +        return CCtxParams->attachDictPref;
119943 +    }
119945 +    case ZSTD_c_literalCompressionMode : {
119946 +        const ZSTD_literalCompressionMode_e lcm = (ZSTD_literalCompressionMode_e)value;
119947 +        BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm);
119948 +        CCtxParams->literalCompressionMode = lcm;
119949 +        return CCtxParams->literalCompressionMode;
119950 +    }
119952 +    case ZSTD_c_nbWorkers :
119953 +        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
119954 +        return 0;
119956 +    case ZSTD_c_jobSize :
119957 +        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
119958 +        return 0;
119960 +    case ZSTD_c_overlapLog :
119961 +        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
119962 +        return 0;
119964 +    case ZSTD_c_rsyncable :
119965 +        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
119966 +        return 0;
119968 +    case ZSTD_c_enableDedicatedDictSearch :
119969 +        CCtxParams->enableDedicatedDictSearch = (value!=0);
119970 +        return CCtxParams->enableDedicatedDictSearch;
119972 +    case ZSTD_c_enableLongDistanceMatching :
119973 +        CCtxParams->ldmParams.enableLdm = (value!=0);
119974 +        return CCtxParams->ldmParams.enableLdm;
119976 +    case ZSTD_c_ldmHashLog :
119977 +        if (value!=0)   /* 0 ==> auto */
119978 +            BOUNDCHECK(ZSTD_c_ldmHashLog, value);
119979 +        CCtxParams->ldmParams.hashLog = value;
119980 +        return CCtxParams->ldmParams.hashLog;
119982 +    case ZSTD_c_ldmMinMatch :
119983 +        if (value!=0)   /* 0 ==> default */
119984 +            BOUNDCHECK(ZSTD_c_ldmMinMatch, value);
119985 +        CCtxParams->ldmParams.minMatchLength = value;
119986 +        return CCtxParams->ldmParams.minMatchLength;
119988 +    case ZSTD_c_ldmBucketSizeLog :
119989 +        if (value!=0)   /* 0 ==> default */
119990 +            BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value);
119991 +        CCtxParams->ldmParams.bucketSizeLog = value;
119992 +        return CCtxParams->ldmParams.bucketSizeLog;
119994 +    case ZSTD_c_ldmHashRateLog :
119995 +        RETURN_ERROR_IF(value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN,
119996 +                        parameter_outOfBound, "Param out of bounds!");
119997 +        CCtxParams->ldmParams.hashRateLog = value;
119998 +        return CCtxParams->ldmParams.hashRateLog;
120000 +    case ZSTD_c_targetCBlockSize :
120001 +        if (value!=0)   /* 0 ==> default */
120002 +            BOUNDCHECK(ZSTD_c_targetCBlockSize, value);
120003 +        CCtxParams->targetCBlockSize = value;
120004 +        return CCtxParams->targetCBlockSize;
120006 +    case ZSTD_c_srcSizeHint :
120007 +        if (value!=0)    /* 0 ==> default */
120008 +            BOUNDCHECK(ZSTD_c_srcSizeHint, value);
120009 +        CCtxParams->srcSizeHint = value;
120010 +        return CCtxParams->srcSizeHint;
120012 +    case ZSTD_c_stableInBuffer:
120013 +        BOUNDCHECK(ZSTD_c_stableInBuffer, value);
120014 +        CCtxParams->inBufferMode = (ZSTD_bufferMode_e)value;
120015 +        return CCtxParams->inBufferMode;
120017 +    case ZSTD_c_stableOutBuffer:
120018 +        BOUNDCHECK(ZSTD_c_stableOutBuffer, value);
120019 +        CCtxParams->outBufferMode = (ZSTD_bufferMode_e)value;
120020 +        return CCtxParams->outBufferMode;
120022 +    case ZSTD_c_blockDelimiters:
120023 +        BOUNDCHECK(ZSTD_c_blockDelimiters, value);
120024 +        CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value;
120025 +        return CCtxParams->blockDelimiters;
120027 +    case ZSTD_c_validateSequences:
120028 +        BOUNDCHECK(ZSTD_c_validateSequences, value);
120029 +        CCtxParams->validateSequences = value;
120030 +        return CCtxParams->validateSequences;
120032 +    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
120033 +    }
120036 +size_t ZSTD_CCtx_getParameter(ZSTD_CCtx const* cctx, ZSTD_cParameter param, int* value)
120038 +    return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value);
120041 +size_t ZSTD_CCtxParams_getParameter(
120042 +        ZSTD_CCtx_params const* CCtxParams, ZSTD_cParameter param, int* value)
120044 +    switch(param)
120045 +    {
120046 +    case ZSTD_c_format :
120047 +        *value = CCtxParams->format;
120048 +        break;
120049 +    case ZSTD_c_compressionLevel :
120050 +        *value = CCtxParams->compressionLevel;
120051 +        break;
120052 +    case ZSTD_c_windowLog :
120053 +        *value = (int)CCtxParams->cParams.windowLog;
120054 +        break;
120055 +    case ZSTD_c_hashLog :
120056 +        *value = (int)CCtxParams->cParams.hashLog;
120057 +        break;
120058 +    case ZSTD_c_chainLog :
120059 +        *value = (int)CCtxParams->cParams.chainLog;
120060 +        break;
120061 +    case ZSTD_c_searchLog :
120062 +        *value = CCtxParams->cParams.searchLog;
120063 +        break;
120064 +    case ZSTD_c_minMatch :
120065 +        *value = CCtxParams->cParams.minMatch;
120066 +        break;
120067 +    case ZSTD_c_targetLength :
120068 +        *value = CCtxParams->cParams.targetLength;
120069 +        break;
120070 +    case ZSTD_c_strategy :
120071 +        *value = (unsigned)CCtxParams->cParams.strategy;
120072 +        break;
120073 +    case ZSTD_c_contentSizeFlag :
120074 +        *value = CCtxParams->fParams.contentSizeFlag;
120075 +        break;
120076 +    case ZSTD_c_checksumFlag :
120077 +        *value = CCtxParams->fParams.checksumFlag;
120078 +        break;
120079 +    case ZSTD_c_dictIDFlag :
120080 +        *value = !CCtxParams->fParams.noDictIDFlag;
120081 +        break;
120082 +    case ZSTD_c_forceMaxWindow :
120083 +        *value = CCtxParams->forceWindow;
120084 +        break;
120085 +    case ZSTD_c_forceAttachDict :
120086 +        *value = CCtxParams->attachDictPref;
120087 +        break;
120088 +    case ZSTD_c_literalCompressionMode :
120089 +        *value = CCtxParams->literalCompressionMode;
120090 +        break;
120091 +    case ZSTD_c_nbWorkers :
120092 +        assert(CCtxParams->nbWorkers == 0);
120093 +        *value = CCtxParams->nbWorkers;
120094 +        break;
120095 +    case ZSTD_c_jobSize :
120096 +        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
120097 +    case ZSTD_c_overlapLog :
120098 +        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
120099 +    case ZSTD_c_rsyncable :
120100 +        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
120101 +    case ZSTD_c_enableDedicatedDictSearch :
120102 +        *value = CCtxParams->enableDedicatedDictSearch;
120103 +        break;
120104 +    case ZSTD_c_enableLongDistanceMatching :
120105 +        *value = CCtxParams->ldmParams.enableLdm;
120106 +        break;
120107 +    case ZSTD_c_ldmHashLog :
120108 +        *value = CCtxParams->ldmParams.hashLog;
120109 +        break;
120110 +    case ZSTD_c_ldmMinMatch :
120111 +        *value = CCtxParams->ldmParams.minMatchLength;
120112 +        break;
120113 +    case ZSTD_c_ldmBucketSizeLog :
120114 +        *value = CCtxParams->ldmParams.bucketSizeLog;
120115 +        break;
120116 +    case ZSTD_c_ldmHashRateLog :
120117 +        *value = CCtxParams->ldmParams.hashRateLog;
120118 +        break;
120119 +    case ZSTD_c_targetCBlockSize :
120120 +        *value = (int)CCtxParams->targetCBlockSize;
120121 +        break;
120122 +    case ZSTD_c_srcSizeHint :
120123 +        *value = (int)CCtxParams->srcSizeHint;
120124 +        break;
120125 +    case ZSTD_c_stableInBuffer :
120126 +        *value = (int)CCtxParams->inBufferMode;
120127 +        break;
120128 +    case ZSTD_c_stableOutBuffer :
120129 +        *value = (int)CCtxParams->outBufferMode;
120130 +        break;
120131 +    case ZSTD_c_blockDelimiters :
120132 +        *value = (int)CCtxParams->blockDelimiters;
120133 +        break;
120134 +    case ZSTD_c_validateSequences :
120135 +        *value = (int)CCtxParams->validateSequences;
120136 +        break;
120137 +    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
120138 +    }
120139 +    return 0;
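/* Illustration (not part of the patch): a set/get round trip. Note that
 * ZSTD_CCtx_getParameter() reads back the *requested* value (e.g. 0 for
 * "default"), not the value finally applied after adjustment. Sketch uses
 * the experimental userspace API (ZSTD_STATIC_LINKING_ONLY). */
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <assert.h>

static void roundtrip_sketch(ZSTD_CCtx* cctx)
{
    int v = 0;
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_hashLog, 17);
    ZSTD_CCtx_getParameter(cctx, ZSTD_c_hashLog, &v);
    assert(v == 17);
}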
120142 +/** ZSTD_CCtx_setParametersUsingCCtxParams() :
120143 + *  just copies `params` into `cctx`;
120144 + *  no compression action is performed, parameters are merely stored.
120145 + *  If ZSTDMT is enabled, parameters are pushed to cctx->mtctx.
120146 + *    This is possible even if a compression is ongoing.
120147 + *    In that case, new parameters will be applied on the fly, starting with the next compression job.
120148 + */
120149 +size_t ZSTD_CCtx_setParametersUsingCCtxParams(
120150 +        ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
120152 +    DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
120153 +    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
120154 +                    "The context is in the wrong stage!");
120155 +    RETURN_ERROR_IF(cctx->cdict, stage_wrong,
120156 +                    "Can't override parameters with cdict attached (some must "
120157 +                    "be inherited from the cdict).");
120159 +    cctx->requestedParams = *params;
120160 +    return 0;
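/* Illustration (not part of the patch): applying a whole parameter set at
 * once, as the comment above describes. A sketch over the experimental
 * userspace API; in this single-threaded kernel build there is no mtctx,
 * so the call only stores the parameters. */
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

static int apply_params_sketch(ZSTD_CCtx* cctx)
{
    ZSTD_CCtx_params* const p = ZSTD_createCCtxParams();
    int ok = 0;
    if (p == NULL) return -1;
    if (!ZSTD_isError(ZSTD_CCtxParams_setParameter(p, ZSTD_c_compressionLevel, 12))
     && !ZSTD_isError(ZSTD_CCtxParams_setParameter(p, ZSTD_c_enableLongDistanceMatching, 1))
     && !ZSTD_isError(ZSTD_CCtx_setParametersUsingCCtxParams(cctx, p)))
        ok = 1;
    ZSTD_freeCCtxParams(p);
    return ok ? 0 : -1;
}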
120163 +ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
120165 +    DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
120166 +    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
120167 +                    "Can't set pledgedSrcSize when not in init stage.");
120168 +    cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
120169 +    return 0;
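/* Illustration (not part of the patch): pledging the source size lets the
 * streaming path write the content size into the frame header and size its
 * window for the input. A sketch, stable userspace API assumed. */
#include <zstd.h>

static size_t stream_with_pledge_sketch(ZSTD_CCtx* cctx,
                                        void* dst, size_t dstCap,
                                        const void* src, size_t srcSize)
{
    ZSTD_inBuffer  in  = { src, srcSize, 0 };
    ZSTD_outBuffer out = { dst, dstCap, 0 };
    size_t err = ZSTD_CCtx_setPledgedSrcSize(cctx, srcSize);  /* must precede the first byte */
    if (ZSTD_isError(err)) return err;
    {   size_t const remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
        if (ZSTD_isError(remaining)) return remaining;
        /* with dstCap >= ZSTD_compressBound(srcSize), one call suffices (remaining == 0) */
        return out.pos;
    }
}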
120172 +static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(
120173 +        int const compressionLevel,
120174 +        size_t const dictSize);
120175 +static int ZSTD_dedicatedDictSearch_isSupported(
120176 +        const ZSTD_compressionParameters* cParams);
120177 +static void ZSTD_dedicatedDictSearch_revertCParams(
120178 +        ZSTD_compressionParameters* cParams);
120181 + * Initializes the local dict using the requested parameters.
120182 + * NOTE: This does not use the pledged src size, because it may be used for more
120183 + * than one compression.
120184 + */
120185 +static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
120187 +    ZSTD_localDict* const dl = &cctx->localDict;
120188 +    if (dl->dict == NULL) {
120189 +        /* No local dictionary. */
120190 +        assert(dl->dictBuffer == NULL);
120191 +        assert(dl->cdict == NULL);
120192 +        assert(dl->dictSize == 0);
120193 +        return 0;
120194 +    }
120195 +    if (dl->cdict != NULL) {
120196 +        assert(cctx->cdict == dl->cdict);
120197 +        /* Local dictionary already initialized. */
120198 +        return 0;
120199 +    }
120200 +    assert(dl->dictSize > 0);
120201 +    assert(cctx->cdict == NULL);
120202 +    assert(cctx->prefixDict.dict == NULL);
120204 +    dl->cdict = ZSTD_createCDict_advanced2(
120205 +            dl->dict,
120206 +            dl->dictSize,
120207 +            ZSTD_dlm_byRef,
120208 +            dl->dictContentType,
120209 +            &cctx->requestedParams,
120210 +            cctx->customMem);
120211 +    RETURN_ERROR_IF(!dl->cdict, memory_allocation, "ZSTD_createCDict_advanced2 failed");
120212 +    cctx->cdict = dl->cdict;
120213 +    return 0;
120216 +size_t ZSTD_CCtx_loadDictionary_advanced(
120217 +        ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
120218 +        ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
120220 +    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
120221 +                    "Can't load a dictionary when ctx is not in init stage.");
120222 +    DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
120223 +    ZSTD_clearAllDicts(cctx);  /* in case one already exists */
120224 +    if (dict == NULL || dictSize == 0)  /* no dictionary mode */
120225 +        return 0;
120226 +    if (dictLoadMethod == ZSTD_dlm_byRef) {
120227 +        cctx->localDict.dict = dict;
120228 +    } else {
120229 +        void* dictBuffer;
120230 +        RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
120231 +                        "no malloc for static CCtx");
120232 +        dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem);
120233 +        RETURN_ERROR_IF(!dictBuffer, memory_allocation, "NULL pointer!");
120234 +        ZSTD_memcpy(dictBuffer, dict, dictSize);
120235 +        cctx->localDict.dictBuffer = dictBuffer;
120236 +        cctx->localDict.dict = dictBuffer;
120237 +    }
120238 +    cctx->localDict.dictSize = dictSize;
120239 +    cctx->localDict.dictContentType = dictContentType;
120240 +    return 0;
120243 +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(
120244 +      ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
120246 +    return ZSTD_CCtx_loadDictionary_advanced(
120247 +            cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
120250 +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
120252 +    return ZSTD_CCtx_loadDictionary_advanced(
120253 +            cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
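/* Illustration (not part of the patch): loading a dictionary by copy vs by
 * reference, sketched against the userspace API (byReference is in the
 * experimental section). byRef skips the internal ZSTD_customMalloc/
 * ZSTD_memcpy seen above but requires the buffer to outlive every
 * compression that uses it. */
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

static size_t dict_sketch(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
{
    /* copied: cctx keeps its own buffer, caller may free `dict` immediately */
    size_t const err = ZSTD_CCtx_loadDictionary(cctx, dict, dictSize);
    if (ZSTD_isError(err)) return err;
    /* referenced: zero-copy, caller must keep `dict` alive */
    return ZSTD_CCtx_loadDictionary_byReference(cctx, dict, dictSize);
}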
120257 +size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
120259 +    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
120260 +                    "Can't ref a dict when ctx not in init stage.");
120261 +    /* Free the existing local cdict (if any) to save memory. */
120262 +    ZSTD_clearAllDicts(cctx);
120263 +    cctx->cdict = cdict;
120264 +    return 0;
120267 +size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool)
120269 +    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
120270 +                    "Can't ref a pool when ctx not in init stage.");
120271 +    cctx->pool = pool;
120272 +    return 0;
120275 +size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
120277 +    return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent);
120280 +size_t ZSTD_CCtx_refPrefix_advanced(
120281 +        ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
120283 +    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
120284 +                    "Can't ref a prefix when ctx not in init stage.");
120285 +    ZSTD_clearAllDicts(cctx);
120286 +    if (prefix != NULL && prefixSize > 0) {
120287 +        cctx->prefixDict.dict = prefix;
120288 +        cctx->prefixDict.dictSize = prefixSize;
120289 +        cctx->prefixDict.dictContentType = dictContentType;
120290 +    }
120291 +    return 0;
120294 +/*! ZSTD_CCtx_reset() :
120295 + *  Also dumps dictionary */
120296 +size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
120298 +    if ( (reset == ZSTD_reset_session_only)
120299 +      || (reset == ZSTD_reset_session_and_parameters) ) {
120300 +        cctx->streamStage = zcss_init;
120301 +        cctx->pledgedSrcSizePlusOne = 0;
120302 +    }
120303 +    if ( (reset == ZSTD_reset_parameters)
120304 +      || (reset == ZSTD_reset_session_and_parameters) ) {
120305 +        RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
120306 +                        "Can't reset parameters only when not in init stage.");
120307 +        ZSTD_clearAllDicts(cctx);
120308 +        return ZSTD_CCtxParams_reset(&cctx->requestedParams);
120309 +    }
120310 +    return 0;
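/* Illustration (not part of the patch): the three reset directives, per the
 * stable userspace API. Session-only aborts the current frame but keeps
 * parameters and dictionary; parameters-only is refused mid-frame (the
 * stage check above) and also drops any dictionary, as the comment notes. */
#include <zstd.h>

static void reset_sketch(ZSTD_CCtx* cctx)
{
    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);           /* abort current frame */
    ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);             /* back to defaults, drop dicts */
    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters); /* both of the above */
}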
120314 +/** ZSTD_checkCParams() :
120315 +    control that CParam values remain within the authorized range.
120316 +    @return : 0, or an error code if one value is beyond authorized range */
120317 +size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
120319 +    BOUNDCHECK(ZSTD_c_windowLog, (int)cParams.windowLog);
120320 +    BOUNDCHECK(ZSTD_c_chainLog,  (int)cParams.chainLog);
120321 +    BOUNDCHECK(ZSTD_c_hashLog,   (int)cParams.hashLog);
120322 +    BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog);
120323 +    BOUNDCHECK(ZSTD_c_minMatch,  (int)cParams.minMatch);
120324 +    BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength);
120325 +    BOUNDCHECK(ZSTD_c_strategy,  cParams.strategy);
120326 +    return 0;
120329 +/** ZSTD_clampCParams() :
120330 + *  clamp CParam values into the valid range.
120331 + *  @return : valid CParams */
120332 +static ZSTD_compressionParameters
120333 +ZSTD_clampCParams(ZSTD_compressionParameters cParams)
120335 +#   define CLAMP_TYPE(cParam, val, type) {                                \
120336 +        ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);         \
120337 +        if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound;      \
120338 +        else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
120339 +    }
120340 +#   define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)
120341 +    CLAMP(ZSTD_c_windowLog, cParams.windowLog);
120342 +    CLAMP(ZSTD_c_chainLog,  cParams.chainLog);
120343 +    CLAMP(ZSTD_c_hashLog,   cParams.hashLog);
120344 +    CLAMP(ZSTD_c_searchLog, cParams.searchLog);
120345 +    CLAMP(ZSTD_c_minMatch,  cParams.minMatch);
120346 +    CLAMP(ZSTD_c_targetLength,cParams.targetLength);
120347 +    CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy);
120348 +    return cParams;
120351 +/** ZSTD_cycleLog() :
120352 + *  condition for correct operation : hashLog > 1 */
120353 +U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
120355 +    U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
120356 +    return hashLog - btScale;
120359 +/** ZSTD_dictAndWindowLog() :
120360 + * Returns an adjusted window log that is large enough to fit the source and the dictionary.
120361 + * The zstd format says that the entire dictionary is valid if one byte of the dictionary
120362 + * is within the window. So the hashLog and chainLog should be large enough to reference both
120363 + * the dictionary and the window. So we must use this adjusted dictAndWindowLog when downsizing
120364 + * the hashLog and chainLog.
120365 + * NOTE: srcSize must not be ZSTD_CONTENTSIZE_UNKNOWN.
120366 + */
120367 +static U32 ZSTD_dictAndWindowLog(U32 windowLog, U64 srcSize, U64 dictSize)
120369 +    const U64 maxWindowSize = 1ULL << ZSTD_WINDOWLOG_MAX;
120370 +    /* No dictionary ==> No change */
120371 +    if (dictSize == 0) {
120372 +        return windowLog;
120373 +    }
120374 +    assert(windowLog <= ZSTD_WINDOWLOG_MAX);
120375 +    assert(srcSize != ZSTD_CONTENTSIZE_UNKNOWN); /* Handled in ZSTD_adjustCParams_internal() */
120376 +    {
120377 +        U64 const windowSize = 1ULL << windowLog;
120378 +        U64 const dictAndWindowSize = dictSize + windowSize;
120379 +        /* If the window size is already large enough to fit both the source and the dictionary
120380 +         * then just use the window size. Otherwise adjust so that it fits the dictionary and
120381 +         * the window.
120382 +         */
120383 +        if (windowSize >= dictSize + srcSize) {
120384 +            return windowLog; /* Window size large enough already */
120385 +        } else if (dictAndWindowSize >= maxWindowSize) {
120386 +            return ZSTD_WINDOWLOG_MAX; /* Larger than max window log */
120387 +        } else  {
120388 +            return ZSTD_highbit32((U32)dictAndWindowSize - 1) + 1;
120389 +        }
120390 +    }
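/* Illustration (not part of the patch): a worked example of the adjustment
 * above, with assumed sizes. Take windowLog=20 (1 MiB window),
 * dictSize=512 KiB, srcSize=2 MiB:
 *   windowSize (1 MiB) < dictSize + srcSize (2.5 MiB)  -> window not large enough
 *   dictAndWindowSize  = 1.5 MiB, well below 1ULL << ZSTD_WINDOWLOG_MAX
 *   result             = ZSTD_highbit32(1572863) + 1 = 20 + 1 = 21
 * so the window log grows just enough to cover dictionary plus window. */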
120393 +/** ZSTD_adjustCParams_internal() :
120394 + *  optimize `cPar` for a specified input (`srcSize` and `dictSize`).
120395 + *  mostly downsize to reduce memory consumption and initialization latency.
120396 + * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
120397 + * `mode` is the mode for parameter adjustment. See docs for `ZSTD_cParamMode_e`.
120398 + *  note : `srcSize==0` means 0!
120399 + *  condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */
120400 +static ZSTD_compressionParameters
120401 +ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
120402 +                            unsigned long long srcSize,
120403 +                            size_t dictSize,
120404 +                            ZSTD_cParamMode_e mode)
120406 +    const U64 minSrcSize = 513; /* (1<<9) + 1 */
120407 +    const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
120408 +    assert(ZSTD_checkCParams(cPar)==0);
120410 +    switch (mode) {
120411 +    case ZSTD_cpm_unknown:
120412 +    case ZSTD_cpm_noAttachDict:
120413 +        /* If we don't know the source size, don't make any
120414 +         * assumptions about it. We will already have selected
120415 +         * smaller parameters if a dictionary is in use.
120416 +         */
120417 +        break;
120418 +    case ZSTD_cpm_createCDict:
120419 +        /* Assume a small source size when creating a dictionary
120420 +         * with an unknown source size.
120421 +         */
120422 +        if (dictSize && srcSize == ZSTD_CONTENTSIZE_UNKNOWN)
120423 +            srcSize = minSrcSize;
120424 +        break;
120425 +    case ZSTD_cpm_attachDict:
120426 +        /* Dictionary has its own dedicated parameters which have
120427 +         * already been selected. We are selecting parameters
120428 +         * for only the source.
120429 +         */
120430 +        dictSize = 0;
120431 +        break;
120432 +    default:
120433 +        assert(0);
120434 +        break;
120435 +    }
120437 +    /* resize windowLog if input is small enough, to use less memory */
120438 +    if ( (srcSize < maxWindowResize)
120439 +      && (dictSize < maxWindowResize) )  {
120440 +        U32 const tSize = (U32)(srcSize + dictSize);
120441 +        static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
120442 +        U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
120443 +                            ZSTD_highbit32(tSize-1) + 1;
120444 +        if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
120445 +    }
120446 +    if (srcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
120447 +        U32 const dictAndWindowLog = ZSTD_dictAndWindowLog(cPar.windowLog, (U64)srcSize, (U64)dictSize);
120448 +        U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
120449 +        if (cPar.hashLog > dictAndWindowLog+1) cPar.hashLog = dictAndWindowLog+1;
120450 +        if (cycleLog > dictAndWindowLog)
120451 +            cPar.chainLog -= (cycleLog - dictAndWindowLog);
120452 +    }
120454 +    if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
120455 +        cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN;  /* minimum wlog required for valid frame header */
120457 +    return cPar;
120460 +ZSTD_compressionParameters
120461 +ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
120462 +                   unsigned long long srcSize,
120463 +                   size_t dictSize)
120465 +    cPar = ZSTD_clampCParams(cPar);   /* resulting cPar is necessarily valid (all parameters within range) */
120466 +    if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN;
120467 +    return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown);
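/* Illustration (not part of the patch): letting zstd shrink parameters for a
 * small input, via the experimental userspace API. ZSTD_getCParams() already
 * accounts for the size hint; the explicit ZSTD_adjustCParams() call here is
 * redundant and shown only to exercise the entry point above. */
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

static ZSTD_compressionParameters small_input_cparams_sketch(size_t srcSize)
{
    /* level-19 defaults assume large inputs; downsize for `srcSize` */
    ZSTD_compressionParameters const cp = ZSTD_getCParams(19, srcSize, 0 /* no dict */);
    return ZSTD_adjustCParams(cp, srcSize, 0);
}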
120470 +static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
120471 +static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
120473 +static void ZSTD_overrideCParams(
120474 +              ZSTD_compressionParameters* cParams,
120475 +        const ZSTD_compressionParameters* overrides)
120477 +    if (overrides->windowLog)    cParams->windowLog    = overrides->windowLog;
120478 +    if (overrides->hashLog)      cParams->hashLog      = overrides->hashLog;
120479 +    if (overrides->chainLog)     cParams->chainLog     = overrides->chainLog;
120480 +    if (overrides->searchLog)    cParams->searchLog    = overrides->searchLog;
120481 +    if (overrides->minMatch)     cParams->minMatch     = overrides->minMatch;
120482 +    if (overrides->targetLength) cParams->targetLength = overrides->targetLength;
120483 +    if (overrides->strategy)     cParams->strategy     = overrides->strategy;
120486 +ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
120487 +        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
120489 +    ZSTD_compressionParameters cParams;
120490 +    if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) {
120491 +      srcSizeHint = CCtxParams->srcSizeHint;
120492 +    }
120493 +    cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode);
120494 +    if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
120495 +    ZSTD_overrideCParams(&cParams, &CCtxParams->cParams);
120496 +    assert(!ZSTD_checkCParams(cParams));
120497 +    /* srcSizeHint == 0 means 0 */
120498 +    return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode);
120501 +static size_t
120502 +ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
120503 +                       const U32 forCCtx)
120505 +    size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
120506 +    size_t const hSize = ((size_t)1) << cParams->hashLog;
120507 +    U32    const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
120508 +    size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
120509 +    /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't
120510 +     * surrounded by redzones in ASAN. */
120511 +    size_t const tableSpace = chainSize * sizeof(U32)
120512 +                            + hSize * sizeof(U32)
120513 +                            + h3Size * sizeof(U32);
120514 +    size_t const optPotentialSpace =
120515 +        ZSTD_cwksp_alloc_size((MaxML+1) * sizeof(U32))
120516 +      + ZSTD_cwksp_alloc_size((MaxLL+1) * sizeof(U32))
120517 +      + ZSTD_cwksp_alloc_size((MaxOff+1) * sizeof(U32))
120518 +      + ZSTD_cwksp_alloc_size((1<<Litbits) * sizeof(U32))
120519 +      + ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t))
120520 +      + ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
120521 +    size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
120522 +                                ? optPotentialSpace
120523 +                                : 0;
120524 +    DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
120525 +                (U32)chainSize, (U32)hSize, (U32)h3Size);
120526 +    return tableSpace + optSpace;
120529 +static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
120530 +        const ZSTD_compressionParameters* cParams,
120531 +        const ldmParams_t* ldmParams,
120532 +        const int isStatic,
120533 +        const size_t buffInSize,
120534 +        const size_t buffOutSize,
120535 +        const U64 pledgedSrcSize)
120537 +    size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << cParams->windowLog), pledgedSrcSize));
120538 +    size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
120539 +    U32    const divider = (cParams->minMatch==3) ? 3 : 4;
120540 +    size_t const maxNbSeq = blockSize / divider;
120541 +    size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
120542 +                            + ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(seqDef))
120543 +                            + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
120544 +    size_t const entropySpace = ZSTD_cwksp_alloc_size(ENTROPY_WORKSPACE_SIZE);
120545 +    size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
120546 +    size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, /* forCCtx */ 1);
120548 +    size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams);
120549 +    size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize);
120550 +    size_t const ldmSeqSpace = ldmParams->enableLdm ?
120551 +        ZSTD_cwksp_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0;
120554 +    size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize)
120555 +                             + ZSTD_cwksp_alloc_size(buffOutSize);
120557 +    size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0;
120559 +    size_t const neededSpace =
120560 +        cctxSpace +
120561 +        entropySpace +
120562 +        blockStateSpace +
120563 +        ldmSpace +
120564 +        ldmSeqSpace +
120565 +        matchStateSize +
120566 +        tokenSpace +
120567 +        bufferSpace;
120569 +    DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace);
120570 +    return neededSpace;
120573 +size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
120575 +    ZSTD_compressionParameters const cParams =
120576 +                ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
120578 +    RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
120579 +    /* estimateCCtxSize is for one-shot compression. So no buffers should
120580 +     * be needed. However, we still allocate two 0-sized buffers, which can
120581 +     * take space under ASAN. */
120582 +    return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
120583 +        &cParams, &params->ldmParams, 1, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN);
120586 +size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
120588 +    ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
120589 +    return ZSTD_estimateCCtxSize_usingCCtxParams(&params);
120592 +static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
120594 +    int tier = 0;
120595 +    size_t largestSize = 0;
120596 +    static const unsigned long long srcSizeTiers[4] = {16 KB, 128 KB, 256 KB, ZSTD_CONTENTSIZE_UNKNOWN};
120597 +    for (; tier < 4; ++tier) {
120598 +        /* Choose the set of cParams for a given level across all srcSizes that give the largest cctxSize */
120599 +        ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeTiers[tier], 0, ZSTD_cpm_noAttachDict);
120600 +        largestSize = MAX(ZSTD_estimateCCtxSize_usingCParams(cParams), largestSize);
120601 +    }
120602 +    return largestSize;
120605 +size_t ZSTD_estimateCCtxSize(int compressionLevel)
120607 +    int level;
120608 +    size_t memBudget = 0;
120609 +    for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
120610 +        /* Ensure monotonically increasing memory usage as compression level increases */
120611 +        size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
120612 +        if (newMB > memBudget) memBudget = newMB;
120613 +    }
120614 +    return memBudget;
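/* Illustration (not part of the patch): using the estimate to carve a static
 * CCtx out of a caller-provided buffer, avoiding runtime allocation much as
 * the kernel wrapper does. Experimental userspace API assumed; malloc stands
 * in for whatever allocator the caller actually has. */
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <stdlib.h>

static ZSTD_CCtx* static_cctx_sketch(int level, void** wkspOut)
{
    size_t const need = ZSTD_estimateCCtxSize(level);  /* worst case across src sizes */
    void* const wksp = malloc(need);
    if (wksp == NULL) return NULL;
    *wkspOut = wksp;                 /* caller frees once the ZSTD_CCtx is done */
    return ZSTD_initStaticCCtx(wksp, need);
}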
120617 +size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
120619 +    RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
120620 +    {   ZSTD_compressionParameters const cParams =
120621 +                ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
120622 +        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
120623 +        size_t const inBuffSize = (params->inBufferMode == ZSTD_bm_buffered)
120624 +                ? ((size_t)1 << cParams.windowLog) + blockSize
120625 +                : 0;
120626 +        size_t const outBuffSize = (params->outBufferMode == ZSTD_bm_buffered)
120627 +                ? ZSTD_compressBound(blockSize) + 1
120628 +                : 0;
120630 +        return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
120631 +            &cParams, &params->ldmParams, 1, inBuffSize, outBuffSize,
120632 +            ZSTD_CONTENTSIZE_UNKNOWN);
120633 +    }
120636 +size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
120638 +    ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
120639 +    return ZSTD_estimateCStreamSize_usingCCtxParams(&params);
120642 +static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
120644 +    ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
120645 +    return ZSTD_estimateCStreamSize_usingCParams(cParams);
120648 +size_t ZSTD_estimateCStreamSize(int compressionLevel)
120650 +    int level;
120651 +    size_t memBudget = 0;
120652 +    for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
120653 +        size_t const newMB = ZSTD_estimateCStreamSize_internal(level);
120654 +        if (newMB > memBudget) memBudget = newMB;
120655 +    }
120656 +    return memBudget;
120659 +/* ZSTD_getFrameProgression():
120660 + * tells how much data has been consumed (input) and produced (output) for the current frame.
120661 + * Able to count progression inside worker threads (non-blocking mode).
120662 + */
120663 +ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx)
120665 +    {   ZSTD_frameProgression fp;
120666 +        size_t const buffered = (cctx->inBuff == NULL) ? 0 :
120667 +                                cctx->inBuffPos - cctx->inToCompress;
120668 +        if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress);
120669 +        assert(buffered <= ZSTD_BLOCKSIZE_MAX);
120670 +        fp.ingested = cctx->consumedSrcSize + buffered;
120671 +        fp.consumed = cctx->consumedSrcSize;
120672 +        fp.produced = cctx->producedCSize;
120673 +        fp.flushed  = cctx->producedCSize;   /* simplified; some data might still be left within streaming output buffer */
120674 +        fp.currentJobID = 0;
120675 +        fp.nbActiveWorkers = 0;
120676 +        return fp;
120677 +}   }
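/* Illustration (not part of the patch): polling progression from another
 * thread during a long compression; experimental userspace API assumed. In
 * this single-threaded build, currentJobID and nbActiveWorkers stay 0. */
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include <stdio.h>

static void report_progress_sketch(const ZSTD_CCtx* cctx)
{
    ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
    fprintf(stderr, "ingested=%llu consumed=%llu produced=%llu flushed=%llu\n",
            fp.ingested, fp.consumed, fp.produced, fp.flushed);
}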
120679 +/*! ZSTD_toFlushNow()
120680 + *  Only useful for multithreading scenarios currently (nbWorkers >= 1).
120681 + */
120682 +size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx)
120684 +    (void)cctx;
120685 +    return 0;   /* over-simplification; could also check if the context is currently running in streaming mode, in which case it could report how many bytes are left to be flushed within the output buffer */
120688 +static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,
120689 +                                    ZSTD_compressionParameters cParams2)
120691 +    (void)cParams1;
120692 +    (void)cParams2;
120693 +    assert(cParams1.windowLog    == cParams2.windowLog);
120694 +    assert(cParams1.chainLog     == cParams2.chainLog);
120695 +    assert(cParams1.hashLog      == cParams2.hashLog);
120696 +    assert(cParams1.searchLog    == cParams2.searchLog);
120697 +    assert(cParams1.minMatch     == cParams2.minMatch);
120698 +    assert(cParams1.targetLength == cParams2.targetLength);
120699 +    assert(cParams1.strategy     == cParams2.strategy);
120702 +void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
120704 +    int i;
120705 +    for (i = 0; i < ZSTD_REP_NUM; ++i)
120706 +        bs->rep[i] = repStartValue[i];
120707 +    bs->entropy.huf.repeatMode = HUF_repeat_none;
120708 +    bs->entropy.fse.offcode_repeatMode = FSE_repeat_none;
120709 +    bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none;
120710 +    bs->entropy.fse.litlength_repeatMode = FSE_repeat_none;
120713 +/*! ZSTD_invalidateMatchState()
120714 + *  Invalidate all the matches in the match finder tables.
120715 + *  Requires nextSrc and base to be set (can be NULL).
120716 + */
120717 +static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
120719 +    ZSTD_window_clear(&ms->window);
120721 +    ms->nextToUpdate = ms->window.dictLimit;
120722 +    ms->loadedDictEnd = 0;
120723 +    ms->opt.litLengthSum = 0;  /* force reset of btopt stats */
120724 +    ms->dictMatchState = NULL;
120728 + * Controls, for this matchState reset, whether the tables need to be cleared /
120729 + * prepared for the coming compression (ZSTDcrp_makeClean), or whether the
120730 + * tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a
120731 + * subsequent operation will overwrite the table space anyways (e.g., copying
120732 + * the matchState contents in from a CDict).
120733 + */
120734 +typedef enum {
120735 +    ZSTDcrp_makeClean,
120736 +    ZSTDcrp_leaveDirty
120737 +} ZSTD_compResetPolicy_e;
120740 + * Controls, for this matchState reset, whether indexing can continue where it
120741 + * left off (ZSTDirp_continue), or whether it needs to be restarted from zero
120742 + * (ZSTDirp_reset).
120743 + */
120744 +typedef enum {
120745 +    ZSTDirp_continue,
120746 +    ZSTDirp_reset
120747 +} ZSTD_indexResetPolicy_e;
120749 +typedef enum {
120750 +    ZSTD_resetTarget_CDict,
120751 +    ZSTD_resetTarget_CCtx
120752 +} ZSTD_resetTarget_e;
120754 +static size_t
120755 +ZSTD_reset_matchState(ZSTD_matchState_t* ms,
120756 +                      ZSTD_cwksp* ws,
120757 +                const ZSTD_compressionParameters* cParams,
120758 +                const ZSTD_compResetPolicy_e crp,
120759 +                const ZSTD_indexResetPolicy_e forceResetIndex,
120760 +                const ZSTD_resetTarget_e forWho)
120762 +    size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
120763 +    size_t const hSize = ((size_t)1) << cParams->hashLog;
120764 +    U32    const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
120765 +    size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
120767 +    DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset);
120768 +    if (forceResetIndex == ZSTDirp_reset) {
120769 +        ZSTD_window_init(&ms->window);
120770 +        ZSTD_cwksp_mark_tables_dirty(ws);
120771 +    }
120773 +    ms->hashLog3 = hashLog3;
120775 +    ZSTD_invalidateMatchState(ms);
120777 +    assert(!ZSTD_cwksp_reserve_failed(ws)); /* check that allocation hasn't already failed */
120779 +    ZSTD_cwksp_clear_tables(ws);
120781 +    DEBUGLOG(5, "reserving table space");
120782 +    /* table Space */
120783 +    ms->hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));
120784 +    ms->chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32));
120785 +    ms->hashTable3 = (U32*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(U32));
120786 +    RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
120787 +                    "failed a workspace allocation in ZSTD_reset_matchState");
120789 +    DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_leaveDirty);
120790 +    if (crp!=ZSTDcrp_leaveDirty) {
120791 +        /* reset tables only */
120792 +        ZSTD_cwksp_clean_tables(ws);
120793 +    }
120795 +    /* opt parser space */
120796 +    if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
120797 +        DEBUGLOG(4, "reserving optimal parser space");
120798 +        ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned));
120799 +        ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned));
120800 +        ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned));
120801 +        ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned));
120802 +        ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t));
120803 +        ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
120804 +    }
120806 +    ms->cParams = *cParams;
120808 +    RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
120809 +                    "failed a workspace allocation in ZSTD_reset_matchState");
120811 +    return 0;
120814 +/* ZSTD_indexTooCloseToMax() :
120815 + * minor optimization : prefer memset() rather than reduceIndex()
120816 + * which is measurably slow in some circumstances (reported for Visual Studio).
120817 + * Works when re-using a context for a lot of smallish inputs :
120818 + * if all inputs are smaller than ZSTD_INDEXOVERFLOW_MARGIN,
120819 + * memset() will be triggered before reduceIndex().
120820 + */
120821 +#define ZSTD_INDEXOVERFLOW_MARGIN (16 MB)
120822 +static int ZSTD_indexTooCloseToMax(ZSTD_window_t w)
120824 +    return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN);
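/* Illustration (not part of the patch): the margin arithmetic above, with
 * assumed numbers. The check trips once the index comes within
 * ZSTD_INDEXOVERFLOW_MARGIN (16 MB) of ZSTD_CURRENT_MAX; while it still
 * passes there is at least 16 MB of index headroom, i.e. room for
 * 16 MB / 128 KB = 128 more 128 KB inputs before indices could overflow.
 * Once the margin is consumed, the cheap memset()/window-reset path is
 * taken instead of the slower reduceIndex() walk. */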
120827 +/*! ZSTD_resetCCtx_internal() :
120828 +    note : `params` are assumed fully validated at this stage */
120829 +static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
120830 +                                      ZSTD_CCtx_params params,
120831 +                                      U64 const pledgedSrcSize,
120832 +                                      ZSTD_compResetPolicy_e const crp,
120833 +                                      ZSTD_buffered_policy_e const zbuff)
120835 +    ZSTD_cwksp* const ws = &zc->workspace;
120836 +    DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u",
120837 +                (U32)pledgedSrcSize, params.cParams.windowLog);
120838 +    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
120840 +    zc->isFirstBlock = 1;
120842 +    if (params.ldmParams.enableLdm) {
120843 +        /* Adjust long distance matching parameters */
120844 +        ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
120845 +        assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
120846 +        assert(params.ldmParams.hashRateLog < 32);
120847 +    }
120849 +    {   size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
120850 +        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
120851 +        U32    const divider = (params.cParams.minMatch==3) ? 3 : 4;
120852 +        size_t const maxNbSeq = blockSize / divider;
120853 +        size_t const buffOutSize = (zbuff == ZSTDb_buffered && params.outBufferMode == ZSTD_bm_buffered)
120854 +                ? ZSTD_compressBound(blockSize) + 1
120855 +                : 0;
120856 +        size_t const buffInSize = (zbuff == ZSTDb_buffered && params.inBufferMode == ZSTD_bm_buffered)
120857 +                ? windowSize + blockSize
120858 +                : 0;
120859 +        size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize);
120861 +        int const indexTooClose = ZSTD_indexTooCloseToMax(zc->blockState.matchState.window);
120862 +        ZSTD_indexResetPolicy_e needsIndexReset =
120863 +            (!indexTooClose && zc->initialized) ? ZSTDirp_continue : ZSTDirp_reset;
120865 +        size_t const neededSpace =
120866 +            ZSTD_estimateCCtxSize_usingCCtxParams_internal(
120867 +                &params.cParams, &params.ldmParams, zc->staticSize != 0,
120868 +                buffInSize, buffOutSize, pledgedSrcSize);
120869 +        FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!");
120871 +        if (!zc->staticSize) ZSTD_cwksp_bump_oversized_duration(ws, 0);
120873 +        /* Check if workspace is large enough, alloc a new one if needed */
120874 +        {
120875 +            int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;
120876 +            int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
120878 +            DEBUGLOG(4, "Need %zu B workspace", neededSpace);
120879 +            DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
120881 +            if (workspaceTooSmall || workspaceWasteful) {
120882 +                DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB",
120883 +                            ZSTD_cwksp_sizeof(ws) >> 10,
120884 +                            neededSpace >> 10);
120886 +                RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize");
120888 +                needsIndexReset = ZSTDirp_reset;
120890 +                ZSTD_cwksp_free(ws, zc->customMem);
120891 +                FORWARD_IF_ERROR(ZSTD_cwksp_create(ws, neededSpace, zc->customMem), "");
120893 +                DEBUGLOG(5, "reserving object space");
120894 +                /* Statically sized space.
120895 +                 * entropyWorkspace never moves,
120896 +                 * though prev/next block swap places */
120897 +                assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t)));
120898 +                zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
120899 +                RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock");
120900 +                zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
120901 +                RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock");
120902 +                zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, ENTROPY_WORKSPACE_SIZE);
121903 +                RETURN_ERROR_IF(zc->entropyWorkspace == NULL, memory_allocation, "couldn't allocate entropyWorkspace");
120904 +        }   }
120906 +        ZSTD_cwksp_clear(ws);
120908 +        /* init params */
120909 +        zc->appliedParams = params;
120910 +        zc->blockState.matchState.cParams = params.cParams;
120911 +        zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
120912 +        zc->consumedSrcSize = 0;
120913 +        zc->producedCSize = 0;
120914 +        if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
120915 +            zc->appliedParams.fParams.contentSizeFlag = 0;
120916 +        DEBUGLOG(4, "pledged content size : %u ; flag : %u",
120917 +            (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
120918 +        zc->blockSize = blockSize;
120920 +        xxh64_reset(&zc->xxhState, 0);
120921 +        zc->stage = ZSTDcs_init;
120922 +        zc->dictID = 0;
120923 +        zc->dictContentSize = 0;
120925 +        ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
120927 +        /* ZSTD_wildcopy() is used to copy into the literals buffer,
120928 +         * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
120929 +         */
120930 +        zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH);
120931 +        zc->seqStore.maxNbLit = blockSize;
120933 +        /* buffers */
120934 +        zc->bufferedPolicy = zbuff;
120935 +        zc->inBuffSize = buffInSize;
120936 +        zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize);
120937 +        zc->outBuffSize = buffOutSize;
120938 +        zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize);
120940 +        /* ldm bucketOffsets table */
120941 +        if (params.ldmParams.enableLdm) {
120942 +            /* TODO: avoid memset? */
120943 +            size_t const numBuckets =
120944 +                  ((size_t)1) << (params.ldmParams.hashLog -
120945 +                                  params.ldmParams.bucketSizeLog);
120946 +            zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, numBuckets);
120947 +            ZSTD_memset(zc->ldmState.bucketOffsets, 0, numBuckets);
120948 +        }
120950 +        /* sequences storage */
120951 +        ZSTD_referenceExternalSequences(zc, NULL, 0);
120952 +        zc->seqStore.maxNbSeq = maxNbSeq;
120953 +        zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
120954 +        zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
120955 +        zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
120956 +        zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef));
120958 +        FORWARD_IF_ERROR(ZSTD_reset_matchState(
120959 +            &zc->blockState.matchState,
120960 +            ws,
120961 +            &params.cParams,
120962 +            crp,
120963 +            needsIndexReset,
120964 +            ZSTD_resetTarget_CCtx), "");
120966 +        /* ldm hash table */
120967 +        if (params.ldmParams.enableLdm) {
120968 +            /* TODO: avoid memset? */
120969 +            size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog;
120970 +            zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t));
120971 +            ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
120972 +            zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq));
120973 +            zc->maxNbLdmSequences = maxNbLdmSeq;
120975 +            ZSTD_window_init(&zc->ldmState.window);
120976 +            ZSTD_window_clear(&zc->ldmState.window);
120977 +            zc->ldmState.loadedDictEnd = 0;
120978 +        }
120980 +        /* Due to alignment, when reusing a workspace, we can actually consume
120981 +         * up to 3 extra bytes for alignment. See the comments in zstd_cwksp.h
120982 +         */
120983 +        assert(ZSTD_cwksp_used(ws) >= neededSpace &&
120984 +               ZSTD_cwksp_used(ws) <= neededSpace + 3);
120986 +        DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws));
120987 +        zc->initialized = 1;
120989 +        return 0;
120990 +    }
120991 +}
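The resize policy above reallocates only when the workspace is too small for the new parameters, or when it has stayed oversized for too long. A minimal standalone sketch of that rule; the function name and the 3x waste factor are illustrative assumptions, not the patch's actual cwksp API:

    #include <stddef.h>

    /* Illustration only: reallocate when too small, or when the workspace
     * has stayed much larger than needed (assumed 3x here) for too long. */
    static int workspaceNeedsResize(size_t wkspSize, size_t neededSpace,
                                    int oversizedForTooLong)
    {
        int const tooSmall = wkspSize < neededSpace;
        int const wasteful = (wkspSize >= 3 * neededSpace) && oversizedForTooLong;
        return tooSmall || wasteful;
    }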
120993 +/* ZSTD_invalidateRepCodes() :
120994 + * ensures next compression will not use repcodes from previous block.
120995 + * Note : only works with regular variant;
120996 + *        do not use with extDict variant ! */
120997 +void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
120998 +    int i;
120999 +    for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0;
121000 +    assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
121001 +}
121003 +/* These are the approximate sizes for each strategy past which copying the
121004 + * dictionary tables into the working context is faster than using them
121005 + * in-place.
121006 + */
121007 +static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = {
121008 +    8 KB,  /* unused */
121009 +    8 KB,  /* ZSTD_fast */
121010 +    16 KB, /* ZSTD_dfast */
121011 +    32 KB, /* ZSTD_greedy */
121012 +    32 KB, /* ZSTD_lazy */
121013 +    32 KB, /* ZSTD_lazy2 */
121014 +    32 KB, /* ZSTD_btlazy2 */
121015 +    32 KB, /* ZSTD_btopt */
121016 +    8 KB,  /* ZSTD_btultra */
121017 +    8 KB   /* ZSTD_btultra2 */
121018 +};
121020 +static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
121021 +                                 const ZSTD_CCtx_params* params,
121022 +                                 U64 pledgedSrcSize)
121023 +{
121024 +    size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
121025 +    int const dedicatedDictSearch = cdict->matchState.dedicatedDictSearch;
121026 +    return dedicatedDictSearch
121027 +        || ( ( pledgedSrcSize <= cutoff
121028 +            || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
121029 +            || params->attachDictPref == ZSTD_dictForceAttach )
121030 +          && params->attachDictPref != ZSTD_dictForceCopy
121031 +          && !params->forceWindow ); /* dictMatchState isn't correctly
121032 +                                      * handled in _enforceMaxDist */
121033 +}
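A worked reading of the decision above, using the cutoff table: with ZSTD_lazy (32 KB cutoff), a 10 KB pledged source attaches the dictionary in place, while a known 1 MB pledged source copies its tables. A simplified restatement under default preferences; the name is hypothetical and it ignores dedicated-dict-search, attachDictPref, and forceWindow:

    /* Assumes default attachDictPref and no forceWindow (illustration only). */
    static int shouldAttachDict_sketch(size_t cutoff,
                                       unsigned long long pledgedSrcSize,
                                       int srcSizeUnknown)
    {
        return srcSizeUnknown || (pledgedSrcSize <= cutoff);
    }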
121035 +static size_t
121036 +ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
121037 +                        const ZSTD_CDict* cdict,
121038 +                        ZSTD_CCtx_params params,
121039 +                        U64 pledgedSrcSize,
121040 +                        ZSTD_buffered_policy_e zbuff)
121041 +{
121042 +    {
121043 +        ZSTD_compressionParameters adjusted_cdict_cParams = cdict->matchState.cParams;
121044 +        unsigned const windowLog = params.cParams.windowLog;
121045 +        assert(windowLog != 0);
121046 +        /* Resize working context table params for input only, since the dict
121047 +         * has its own tables. */
121048 +        /* here pledgedSrcSize == 0 really means zero bytes, not "unknown" */
121050 +        if (cdict->matchState.dedicatedDictSearch) {
121051 +            ZSTD_dedicatedDictSearch_revertCParams(&adjusted_cdict_cParams);
121052 +        }
121054 +        params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize,
121055 +                                                     cdict->dictContentSize, ZSTD_cpm_attachDict);
121056 +        params.cParams.windowLog = windowLog;
121057 +        FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
121058 +                                                 ZSTDcrp_makeClean, zbuff), "");
121059 +        assert(cctx->appliedParams.cParams.strategy == adjusted_cdict_cParams.strategy);
121060 +    }
121062 +    {   const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
121063 +                                  - cdict->matchState.window.base);
121064 +        const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit;
121065 +        if (cdictLen == 0) {
121066 +            /* don't even attach dictionaries with no contents */
121067 +            DEBUGLOG(4, "skipping attaching empty dictionary");
121068 +        } else {
121069 +            DEBUGLOG(4, "attaching dictionary into context");
121070 +            cctx->blockState.matchState.dictMatchState = &cdict->matchState;
121072 +            /* prep working match state so dict matches never have negative indices
121073 +             * when they are translated to the working context's index space. */
121074 +            if (cctx->blockState.matchState.window.dictLimit < cdictEnd) {
121075 +                cctx->blockState.matchState.window.nextSrc =
121076 +                    cctx->blockState.matchState.window.base + cdictEnd;
121077 +                ZSTD_window_clear(&cctx->blockState.matchState.window);
121078 +            }
121079 +            /* loadedDictEnd is expressed within the referential of the active context */
121080 +            cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
121081 +    }   }
121083 +    cctx->dictID = cdict->dictID;
121084 +    cctx->dictContentSize = cdict->dictContentSize;
121086 +    /* copy block state */
121087 +    ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
121089 +    return 0;
121090 +}
121092 +static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
121093 +                            const ZSTD_CDict* cdict,
121094 +                            ZSTD_CCtx_params params,
121095 +                            U64 pledgedSrcSize,
121096 +                            ZSTD_buffered_policy_e zbuff)
121097 +{
121098 +    const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
121100 +    assert(!cdict->matchState.dedicatedDictSearch);
121102 +    DEBUGLOG(4, "copying dictionary into context");
121104 +    {   unsigned const windowLog = params.cParams.windowLog;
121105 +        assert(windowLog != 0);
121106 +        /* Copy only compression parameters related to tables. */
121107 +        params.cParams = *cdict_cParams;
121108 +        params.cParams.windowLog = windowLog;
121109 +        FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
121110 +                                                 ZSTDcrp_leaveDirty, zbuff), "");
121111 +        assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
121112 +        assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
121113 +        assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
121114 +    }
121116 +    ZSTD_cwksp_mark_tables_dirty(&cctx->workspace);
121118 +    /* copy tables */
121119 +    {   size_t const chainSize = (cdict_cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict_cParams->chainLog);
121120 +        size_t const hSize =  (size_t)1 << cdict_cParams->hashLog;
121122 +        ZSTD_memcpy(cctx->blockState.matchState.hashTable,
121123 +               cdict->matchState.hashTable,
121124 +               hSize * sizeof(U32));
121125 +        ZSTD_memcpy(cctx->blockState.matchState.chainTable,
121126 +               cdict->matchState.chainTable,
121127 +               chainSize * sizeof(U32));
121128 +    }
121130 +    /* Zero the hashTable3, since the cdict never fills it */
121131 +    {   int const h3log = cctx->blockState.matchState.hashLog3;
121132 +        size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
121133 +        assert(cdict->matchState.hashLog3 == 0);
121134 +        ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
121135 +    }
121137 +    ZSTD_cwksp_mark_tables_clean(&cctx->workspace);
121139 +    /* copy dictionary offsets */
121140 +    {   ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
121141 +        ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
121142 +        dstMatchState->window       = srcMatchState->window;
121143 +        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
121144 +        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
121145 +    }
121147 +    cctx->dictID = cdict->dictID;
121148 +    cctx->dictContentSize = cdict->dictContentSize;
121150 +    /* copy block state */
121151 +    ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
121153 +    return 0;
121154 +}
121156 +/* We have a choice between copying the dictionary context into the working
121157 + * context, or referencing the dictionary context from the working context
121158 + * in-place. We decide here which strategy to use. */
121159 +static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
121160 +                            const ZSTD_CDict* cdict,
121161 +                            const ZSTD_CCtx_params* params,
121162 +                            U64 pledgedSrcSize,
121163 +                            ZSTD_buffered_policy_e zbuff)
121164 +{
121166 +    DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)",
121167 +                (unsigned)pledgedSrcSize);
121169 +    if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
121170 +        return ZSTD_resetCCtx_byAttachingCDict(
121171 +            cctx, cdict, *params, pledgedSrcSize, zbuff);
121172 +    } else {
121173 +        return ZSTD_resetCCtx_byCopyingCDict(
121174 +            cctx, cdict, *params, pledgedSrcSize, zbuff);
121175 +    }
121176 +}
121178 +/*! ZSTD_copyCCtx_internal() :
121179 + *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
121180 + *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
121181 + *  The "context", in this case, refers to the hash and chain tables,
121182 + *  entropy tables, and dictionary references.
121183 + * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.
121184 + * @return : 0, or an error code */
121185 +static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
121186 +                            const ZSTD_CCtx* srcCCtx,
121187 +                            ZSTD_frameParameters fParams,
121188 +                            U64 pledgedSrcSize,
121189 +                            ZSTD_buffered_policy_e zbuff)
121190 +{
121191 +    DEBUGLOG(5, "ZSTD_copyCCtx_internal");
121192 +    RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong,
121193 +                    "Can't copy a ctx that's not in init stage.");
121195 +    ZSTD_memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
121196 +    {   ZSTD_CCtx_params params = dstCCtx->requestedParams;
121197 +        /* Copy only compression parameters related to tables. */
121198 +        params.cParams = srcCCtx->appliedParams.cParams;
121199 +        params.fParams = fParams;
121200 +        ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize,
121201 +                                ZSTDcrp_leaveDirty, zbuff);
121202 +        assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
121203 +        assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
121204 +        assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
121205 +        assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
121206 +        assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
121207 +    }
121209 +    ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace);
121211 +    /* copy tables */
121212 +    {   size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog);
121213 +        size_t const hSize =  (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
121214 +        int const h3log = srcCCtx->blockState.matchState.hashLog3;
121215 +        size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
121217 +        ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable,
121218 +               srcCCtx->blockState.matchState.hashTable,
121219 +               hSize * sizeof(U32));
121220 +        ZSTD_memcpy(dstCCtx->blockState.matchState.chainTable,
121221 +               srcCCtx->blockState.matchState.chainTable,
121222 +               chainSize * sizeof(U32));
121223 +        ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable3,
121224 +               srcCCtx->blockState.matchState.hashTable3,
121225 +               h3Size * sizeof(U32));
121226 +    }
121228 +    ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace);
121230 +    /* copy dictionary offsets */
121231 +    {
121232 +        const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
121233 +        ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
121234 +        dstMatchState->window       = srcMatchState->window;
121235 +        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
121236 +        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
121237 +    }
121238 +    dstCCtx->dictID = srcCCtx->dictID;
121239 +    dstCCtx->dictContentSize = srcCCtx->dictContentSize;
121241 +    /* copy block state */
121242 +    ZSTD_memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));
121244 +    return 0;
121245 +}
121247 +/*! ZSTD_copyCCtx() :
121248 + *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
121249 + *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
121250 + *  pledgedSrcSize==0 means "unknown".
121251 +*   @return : 0, or an error code */
121252 +size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
121253 +{
121254 +    ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
121255 +    ZSTD_buffered_policy_e const zbuff = srcCCtx->bufferedPolicy;
121256 +    ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
121257 +    if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
121258 +    fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);
121260 +    return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
121261 +                                fParams, pledgedSrcSize,
121262 +                                zbuff);
121263 +}
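For orientation, a minimal usage sketch of ZSTD_copyCCtx(): prepare one context with a dictionary, then clone it so another frame can reuse the loaded tables. This assumes the userspace libzstd static-only API (ZSTD_compressBegin_usingDict), not the kernel wrappers; dict and dictSize stand for a caller-supplied dictionary buffer, and error handling is elided:

    ZSTD_CCtx* const prepared = ZSTD_createCCtx();
    ZSTD_CCtx* const clone    = ZSTD_createCCtx();
    ZSTD_compressBegin_usingDict(prepared, dict, dictSize, 3 /* level */);
    ZSTD_copyCCtx(clone, prepared, 0);  /* pledgedSrcSize == 0 means "unknown" */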
121266 +#define ZSTD_ROWSIZE 16
121267 +/*! ZSTD_reduceTable() :
121268 + *  reduce table indexes by `reducerValue`, or squash to zero.
121269 + *  PreserveMark preserves "unsorted mark" for btlazy2 strategy.
121270 + *  It must be a compile-time constant 0 or 1, so the branch is removed during inlining.
121271 + *  Presume table size is a multiple of ZSTD_ROWSIZE
121272 + *  to help auto-vectorization */
121273 +FORCE_INLINE_TEMPLATE void
121274 +ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
121275 +{
121276 +    int const nbRows = (int)size / ZSTD_ROWSIZE;
121277 +    int cellNb = 0;
121278 +    int rowNb;
121279 +    assert((size & (ZSTD_ROWSIZE-1)) == 0);  /* multiple of ZSTD_ROWSIZE */
121280 +    assert(size < (1U<<31));   /* can be cast to int */
121283 +    for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
121284 +        int column;
121285 +        for (column=0; column<ZSTD_ROWSIZE; column++) {
121286 +            if (preserveMark) {
121287 +                U32 const adder = (table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) ? reducerValue : 0;
121288 +                table[cellNb] += adder;
121289 +            }
121290 +            if (table[cellNb] < reducerValue) table[cellNb] = 0;
121291 +            else table[cellNb] -= reducerValue;
121292 +            cellNb++;
121293 +    }   }
121294 +}
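A worked example of the per-cell reduction above: with reducerValue = 1000, an index of 1500 becomes 500, while 800 is squashed to 0; with preserveMark set, a cell equal to ZSTD_DUBT_UNSORTED_MARK first has the reducer added back, so it survives unchanged. The scalar core, restated standalone (hypothetical name):

    typedef unsigned int U32;   /* as in the zstd sources */

    static U32 reduceCell_sketch(U32 cell, U32 reducer)
    {
        return (cell < reducer) ? 0 : cell - reducer;
    }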
121296 +static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
121297 +{
121298 +    ZSTD_reduceTable_internal(table, size, reducerValue, 0);
121299 +}
121301 +static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
121302 +{
121303 +    ZSTD_reduceTable_internal(table, size, reducerValue, 1);
121304 +}
121306 +/*! ZSTD_reduceIndex() :
121307 +*   rescale all indexes to avoid future overflow (indexes are U32) */
121308 +static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
121309 +{
121310 +    {   U32 const hSize = (U32)1 << params->cParams.hashLog;
121311 +        ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
121312 +    }
121314 +    if (params->cParams.strategy != ZSTD_fast) {
121315 +        U32 const chainSize = (U32)1 << params->cParams.chainLog;
121316 +        if (params->cParams.strategy == ZSTD_btlazy2)
121317 +            ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
121318 +        else
121319 +            ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
121320 +    }
121322 +    if (ms->hashLog3) {
121323 +        U32 const h3Size = (U32)1 << ms->hashLog3;
121324 +        ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
121325 +    }
121326 +}
121329 +/*-*******************************************************
121330 +*  Block entropic compression
121331 +*********************************************************/
121333 +/* See doc/zstd_compression_format.md for detailed format description */
121335 +void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
121336 +{
121337 +    const seqDef* const sequences = seqStorePtr->sequencesStart;
121338 +    BYTE* const llCodeTable = seqStorePtr->llCode;
121339 +    BYTE* const ofCodeTable = seqStorePtr->ofCode;
121340 +    BYTE* const mlCodeTable = seqStorePtr->mlCode;
121341 +    U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
121342 +    U32 u;
121343 +    assert(nbSeq <= seqStorePtr->maxNbSeq);
121344 +    for (u=0; u<nbSeq; u++) {
121345 +        U32 const llv = sequences[u].litLength;
121346 +        U32 const mlv = sequences[u].matchLength;
121347 +        llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
121348 +        ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
121349 +        mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
121350 +    }
121351 +    if (seqStorePtr->longLengthID==1)
121352 +        llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
121353 +    if (seqStorePtr->longLengthID==2)
121354 +        mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
121355 +}
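The offset-to-code mapping above is simply the position of the highest set bit, while literal and match lengths go through the small ZSTD_LLcode()/ZSTD_MLcode() lookup steps. A worked value:

    /* offset field 1024 == 0x400 -> ofCode = ZSTD_highbit32(1024) = 10;
     * the sequence bitstream then carries 10 extra bits to reconstruct
     * the exact offset on the decoding side. */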
121357 +/* ZSTD_useTargetCBlockSize():
121358 + * Returns whether the target compressed block size parameter is in use:
121359 + * 1 if cctxParams->targetCBlockSize is non-zero, 0 otherwise. When set,
121360 + * compression makes a best effort to produce compressed blocks of around targetCBlockSize. */
121361 +static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams)
121362 +{
121363 +    DEBUGLOG(5, "ZSTD_useTargetCBlockSize (targetCBlockSize=%zu)", cctxParams->targetCBlockSize);
121364 +    return (cctxParams->targetCBlockSize != 0);
121365 +}
121367 +/* ZSTD_entropyCompressSequences_internal():
121368 + * actually compresses both literals and sequences */
121369 +MEM_STATIC size_t
121370 +ZSTD_entropyCompressSequences_internal(seqStore_t* seqStorePtr,
121371 +                          const ZSTD_entropyCTables_t* prevEntropy,
121372 +                                ZSTD_entropyCTables_t* nextEntropy,
121373 +                          const ZSTD_CCtx_params* cctxParams,
121374 +                                void* dst, size_t dstCapacity,
121375 +                                void* entropyWorkspace, size_t entropyWkspSize,
121376 +                          const int bmi2)
121377 +{
121378 +    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
121379 +    ZSTD_strategy const strategy = cctxParams->cParams.strategy;
121380 +    unsigned* count = (unsigned*)entropyWorkspace;
121381 +    FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
121382 +    FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
121383 +    FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
121384 +    U32 LLtype, Offtype, MLtype;   /* compressed, raw or rle */
121385 +    const seqDef* const sequences = seqStorePtr->sequencesStart;
121386 +    const BYTE* const ofCodeTable = seqStorePtr->ofCode;
121387 +    const BYTE* const llCodeTable = seqStorePtr->llCode;
121388 +    const BYTE* const mlCodeTable = seqStorePtr->mlCode;
121389 +    BYTE* const ostart = (BYTE*)dst;
121390 +    BYTE* const oend = ostart + dstCapacity;
121391 +    BYTE* op = ostart;
121392 +    size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
121393 +    BYTE* seqHead;
121394 +    BYTE* lastNCount = NULL;
121396 +    entropyWorkspace = count + (MaxSeq + 1);
121397 +    entropyWkspSize -= (MaxSeq + 1) * sizeof(*count);
121399 +    DEBUGLOG(4, "ZSTD_entropyCompressSequences_internal (nbSeq=%zu)", nbSeq);
121400 +    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
121401 +    assert(entropyWkspSize >= HUF_WORKSPACE_SIZE);
121403 +    /* Compress literals */
121404 +    {   const BYTE* const literals = seqStorePtr->litStart;
121405 +        size_t const litSize = (size_t)(seqStorePtr->lit - literals);
121406 +        size_t const cSize = ZSTD_compressLiterals(
121407 +                                    &prevEntropy->huf, &nextEntropy->huf,
121408 +                                    cctxParams->cParams.strategy,
121409 +                                    ZSTD_disableLiteralsCompression(cctxParams),
121410 +                                    op, dstCapacity,
121411 +                                    literals, litSize,
121412 +                                    entropyWorkspace, entropyWkspSize,
121413 +                                    bmi2);
121414 +        FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed");
121415 +        assert(cSize <= dstCapacity);
121416 +        op += cSize;
121417 +    }
121419 +    /* Sequences Header */
121420 +    RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
121421 +                    dstSize_tooSmall, "Can't fit seq hdr in output buf!");
121422 +    if (nbSeq < 128) {
121423 +        *op++ = (BYTE)nbSeq;
121424 +    } else if (nbSeq < LONGNBSEQ) {
121425 +        op[0] = (BYTE)((nbSeq>>8) + 0x80);
121426 +        op[1] = (BYTE)nbSeq;
121427 +        op+=2;
121428 +    } else {
121429 +        op[0]=0xFF;
121430 +        MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ));
121431 +        op+=3;
121432 +    }
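    /* Editor's illustration (not part of the patch): worked values for the
     * three nbSeq header encodings above:
     *   nbSeq = 100   -> 1 byte : 0x64
     *   nbSeq = 300   -> 2 bytes: 0x81 0x2C       ((300>>8)+0x80, 300&0xFF)
     *   nbSeq = 40000 -> 3 bytes: 0xFF 0x40 0x1D  (0xFF, then LE16(40000 - LONGNBSEQ),
     *                                              with LONGNBSEQ == 0x7F00) */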
121433 +    assert(op <= oend);
121434 +    if (nbSeq==0) {
121435 +        /* Copy the old tables over as if we repeated them */
121436 +        ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
121437 +        return (size_t)(op - ostart);
121438 +    }
121440 +    /* seqHead : flags for FSE encoding type */
121441 +    seqHead = op++;
121442 +    assert(op <= oend);
121444 +    /* convert length/distances into codes */
121445 +    ZSTD_seqToCodes(seqStorePtr);
121446 +    /* build CTable for Literal Lengths */
121447 +    {   unsigned max = MaxLL;
121448 +        size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);   /* can't fail */
121449 +        DEBUGLOG(5, "Building LL table");
121450 +        nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode;
121451 +        LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode,
121452 +                                        count, max, mostFrequent, nbSeq,
121453 +                                        LLFSELog, prevEntropy->fse.litlengthCTable,
121454 +                                        LL_defaultNorm, LL_defaultNormLog,
121455 +                                        ZSTD_defaultAllowed, strategy);
121456 +        assert(set_basic < set_compressed && set_rle < set_compressed);
121457 +        assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
121458 +        {   size_t const countSize = ZSTD_buildCTable(
121459 +                op, (size_t)(oend - op),
121460 +                CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
121461 +                count, max, llCodeTable, nbSeq,
121462 +                LL_defaultNorm, LL_defaultNormLog, MaxLL,
121463 +                prevEntropy->fse.litlengthCTable,
121464 +                sizeof(prevEntropy->fse.litlengthCTable),
121465 +                entropyWorkspace, entropyWkspSize);
121466 +            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed");
121467 +            if (LLtype == set_compressed)
121468 +                lastNCount = op;
121469 +            op += countSize;
121470 +            assert(op <= oend);
121471 +    }   }
121472 +    /* build CTable for Offsets */
121473 +    {   unsigned max = MaxOff;
121474 +        size_t const mostFrequent = HIST_countFast_wksp(
121475 +            count, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);  /* can't fail */
121476 +        /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
121477 +        ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
121478 +        DEBUGLOG(5, "Building OF table");
121479 +        nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode;
121480 +        Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode,
121481 +                                        count, max, mostFrequent, nbSeq,
121482 +                                        OffFSELog, prevEntropy->fse.offcodeCTable,
121483 +                                        OF_defaultNorm, OF_defaultNormLog,
121484 +                                        defaultPolicy, strategy);
121485 +        assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
121486 +        {   size_t const countSize = ZSTD_buildCTable(
121487 +                op, (size_t)(oend - op),
121488 +                CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
121489 +                count, max, ofCodeTable, nbSeq,
121490 +                OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
121491 +                prevEntropy->fse.offcodeCTable,
121492 +                sizeof(prevEntropy->fse.offcodeCTable),
121493 +                entropyWorkspace, entropyWkspSize);
121494 +            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed");
121495 +            if (Offtype == set_compressed)
121496 +                lastNCount = op;
121497 +            op += countSize;
121498 +            assert(op <= oend);
121499 +    }   }
121500 +    /* build CTable for MatchLengths */
121501 +    {   unsigned max = MaxML;
121502 +        size_t const mostFrequent = HIST_countFast_wksp(
121503 +            count, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);   /* can't fail */
121504 +        DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
121505 +        nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode;
121506 +        MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode,
121507 +                                        count, max, mostFrequent, nbSeq,
121508 +                                        MLFSELog, prevEntropy->fse.matchlengthCTable,
121509 +                                        ML_defaultNorm, ML_defaultNormLog,
121510 +                                        ZSTD_defaultAllowed, strategy);
121511 +        assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
121512 +        {   size_t const countSize = ZSTD_buildCTable(
121513 +                op, (size_t)(oend - op),
121514 +                CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
121515 +                count, max, mlCodeTable, nbSeq,
121516 +                ML_defaultNorm, ML_defaultNormLog, MaxML,
121517 +                prevEntropy->fse.matchlengthCTable,
121518 +                sizeof(prevEntropy->fse.matchlengthCTable),
121519 +                entropyWorkspace, entropyWkspSize);
121520 +            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed");
121521 +            if (MLtype == set_compressed)
121522 +                lastNCount = op;
121523 +            op += countSize;
121524 +            assert(op <= oend);
121525 +    }   }
121527 +    *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
121529 +    {   size_t const bitstreamSize = ZSTD_encodeSequences(
121530 +                                        op, (size_t)(oend - op),
121531 +                                        CTable_MatchLength, mlCodeTable,
121532 +                                        CTable_OffsetBits, ofCodeTable,
121533 +                                        CTable_LitLength, llCodeTable,
121534 +                                        sequences, nbSeq,
121535 +                                        longOffsets, bmi2);
121536 +        FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
121537 +        op += bitstreamSize;
121538 +        assert(op <= oend);
121539 +        /* zstd versions <= 1.3.4 mistakenly report corruption when
121540 +         * FSE_readNCount() receives a buffer < 4 bytes.
121541 +         * Fixed by https://github.com/facebook/zstd/pull/1146.
121542 +         * This can happen when the last set_compressed table present is 2
121543 +         * bytes and the bitstream is only one byte.
121544 +         * In this exceedingly rare case, we will simply emit an uncompressed
121545 +         * block, since it isn't worth optimizing.
121546 +         */
121547 +        if (lastNCount && (op - lastNCount) < 4) {
121548 +            /* NCountSize >= 2 && bitstreamSize > 0 ==> op - lastNCount == 3 */
121549 +            assert(op - lastNCount == 3);
121550 +            DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
121551 +                        "emitting an uncompressed block.");
121552 +            return 0;
121553 +        }
121554 +    }
121556 +    DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart));
121557 +    return (size_t)(op - ostart);
121558 +}
121560 +MEM_STATIC size_t
121561 +ZSTD_entropyCompressSequences(seqStore_t* seqStorePtr,
121562 +                       const ZSTD_entropyCTables_t* prevEntropy,
121563 +                             ZSTD_entropyCTables_t* nextEntropy,
121564 +                       const ZSTD_CCtx_params* cctxParams,
121565 +                             void* dst, size_t dstCapacity,
121566 +                             size_t srcSize,
121567 +                             void* entropyWorkspace, size_t entropyWkspSize,
121568 +                             int bmi2)
121569 +{
121570 +    size_t const cSize = ZSTD_entropyCompressSequences_internal(
121571 +                            seqStorePtr, prevEntropy, nextEntropy, cctxParams,
121572 +                            dst, dstCapacity,
121573 +                            entropyWorkspace, entropyWkspSize, bmi2);
121574 +    if (cSize == 0) return 0;
121575 +    /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
121576 +     * Since we ran out of space, the block must not be compressible, so fall back to a raw uncompressed block.
121577 +     */
121578 +    if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
121579 +        return 0;  /* block not compressed */
121580 +    FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSequences_internal failed");
121582 +    /* Check compressibility */
121583 +    {   size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
121584 +        if (cSize >= maxCSize) return 0;  /* block not compressed */
121585 +    }
121586 +    DEBUGLOG(4, "ZSTD_entropyCompressSequences() cSize: %zu\n", cSize);
121587 +    return cSize;
121588 +}
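The final gate above is the block-level compressibility test: keep the entropy-coded output only if it undercuts srcSize by at least the strategy's minimum gain. Restated standalone for illustration (minGain is left as a parameter, since its exact formula is strategy-dependent; the name is hypothetical):

    #include <stddef.h>

    /* returns cSize if worth keeping, 0 to signal "store the block raw" */
    static size_t acceptCompressedBlock(size_t cSize, size_t srcSize,
                                        size_t minGain)
    {
        size_t const maxCSize = srcSize - minGain;
        return (cSize >= maxCSize) ? 0 : cSize;
    }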
121590 +/* ZSTD_selectBlockCompressor() :
121591 + * Not static, but internal use only (used by long distance matcher)
121592 + * assumption : strat is a valid strategy */
121593 +ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode)
121594 +{
121595 +    static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = {
121596 +        { ZSTD_compressBlock_fast  /* default for 0 */,
121597 +          ZSTD_compressBlock_fast,
121598 +          ZSTD_compressBlock_doubleFast,
121599 +          ZSTD_compressBlock_greedy,
121600 +          ZSTD_compressBlock_lazy,
121601 +          ZSTD_compressBlock_lazy2,
121602 +          ZSTD_compressBlock_btlazy2,
121603 +          ZSTD_compressBlock_btopt,
121604 +          ZSTD_compressBlock_btultra,
121605 +          ZSTD_compressBlock_btultra2 },
121606 +        { ZSTD_compressBlock_fast_extDict  /* default for 0 */,
121607 +          ZSTD_compressBlock_fast_extDict,
121608 +          ZSTD_compressBlock_doubleFast_extDict,
121609 +          ZSTD_compressBlock_greedy_extDict,
121610 +          ZSTD_compressBlock_lazy_extDict,
121611 +          ZSTD_compressBlock_lazy2_extDict,
121612 +          ZSTD_compressBlock_btlazy2_extDict,
121613 +          ZSTD_compressBlock_btopt_extDict,
121614 +          ZSTD_compressBlock_btultra_extDict,
121615 +          ZSTD_compressBlock_btultra_extDict },
121616 +        { ZSTD_compressBlock_fast_dictMatchState  /* default for 0 */,
121617 +          ZSTD_compressBlock_fast_dictMatchState,
121618 +          ZSTD_compressBlock_doubleFast_dictMatchState,
121619 +          ZSTD_compressBlock_greedy_dictMatchState,
121620 +          ZSTD_compressBlock_lazy_dictMatchState,
121621 +          ZSTD_compressBlock_lazy2_dictMatchState,
121622 +          ZSTD_compressBlock_btlazy2_dictMatchState,
121623 +          ZSTD_compressBlock_btopt_dictMatchState,
121624 +          ZSTD_compressBlock_btultra_dictMatchState,
121625 +          ZSTD_compressBlock_btultra_dictMatchState },
121626 +        { NULL  /* default for 0 */,
121627 +          NULL,
121628 +          NULL,
121629 +          ZSTD_compressBlock_greedy_dedicatedDictSearch,
121630 +          ZSTD_compressBlock_lazy_dedicatedDictSearch,
121631 +          ZSTD_compressBlock_lazy2_dedicatedDictSearch,
121632 +          NULL,
121633 +          NULL,
121634 +          NULL,
121635 +          NULL }
121636 +    };
121637 +    ZSTD_blockCompressor selectedCompressor;
121638 +    ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
121640 +    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
121641 +    selectedCompressor = blockCompressor[(int)dictMode][(int)strat];
121642 +    assert(selectedCompressor != NULL);
121643 +    return selectedCompressor;
121644 +}
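As an illustration of the dispatch above: the row is the dictionary mode, the column the strategy, so (extDict, greedy) resolves to the extDict variant, and the dedicated-dict-search row intentionally holds NULL for strategies it does not support:

    ZSTD_blockCompressor const bc =
        ZSTD_selectBlockCompressor(ZSTD_greedy, ZSTD_extDict);
    /* bc == ZSTD_compressBlock_greedy_extDict */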
121646 +static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
121647 +                                   const BYTE* anchor, size_t lastLLSize)
121648 +{
121649 +    ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize);
121650 +    seqStorePtr->lit += lastLLSize;
121651 +}
121653 +void ZSTD_resetSeqStore(seqStore_t* ssPtr)
121654 +{
121655 +    ssPtr->lit = ssPtr->litStart;
121656 +    ssPtr->sequences = ssPtr->sequencesStart;
121657 +    ssPtr->longLengthID = 0;
121658 +}
121660 +typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;
121662 +static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
121663 +{
121664 +    ZSTD_matchState_t* const ms = &zc->blockState.matchState;
121665 +    DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);
121666 +    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
121667 +    /* Assert that we have correctly flushed the ctx params into the ms's copy */
121668 +    ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
121669 +    if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
121670 +        if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) {
121671 +            ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize);
121672 +        } else {
121673 +            ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);
121674 +        }
121675 +        return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */
121676 +    }
121677 +    ZSTD_resetSeqStore(&(zc->seqStore));
121678 +    /* required for optimal parser to read stats from dictionary */
121679 +    ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;
121680 +    /* tell the optimal parser how we expect to compress literals */
121681 +    ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode;
121682 +    /* a gap between an attached dict and the current window is not safe,
121683 +     * they must remain adjacent,
121684 +     * and when that stops being the case, the dict must be unset */
121685 +    assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);
121687 +    /* limited update after a very long match */
121688 +    {   const BYTE* const base = ms->window.base;
121689 +        const BYTE* const istart = (const BYTE*)src;
121690 +        const U32 curr = (U32)(istart-base);
121691 +        if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1));   /* ensure no overflow */
121692 +        if (curr > ms->nextToUpdate + 384)
121693 +            ms->nextToUpdate = curr - MIN(192, (U32)(curr - ms->nextToUpdate - 384));
121694 +    }
121696 +    /* select and store sequences */
121697 +    {   ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
121698 +        size_t lastLLSize;
121699 +        {   int i;
121700 +            for (i = 0; i < ZSTD_REP_NUM; ++i)
121701 +                zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];
121702 +        }
121703 +        if (zc->externSeqStore.pos < zc->externSeqStore.size) {
121704 +            assert(!zc->appliedParams.ldmParams.enableLdm);
121705 +            /* Updates ldmSeqStore.pos */
121706 +            lastLLSize =
121707 +                ZSTD_ldm_blockCompress(&zc->externSeqStore,
121708 +                                       ms, &zc->seqStore,
121709 +                                       zc->blockState.nextCBlock->rep,
121710 +                                       src, srcSize);
121711 +            assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
121712 +        } else if (zc->appliedParams.ldmParams.enableLdm) {
121713 +            rawSeqStore_t ldmSeqStore = kNullRawSeqStore;
121715 +            ldmSeqStore.seq = zc->ldmSequences;
121716 +            ldmSeqStore.capacity = zc->maxNbLdmSequences;
121717 +            /* Updates ldmSeqStore.size */
121718 +            FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
121719 +                                               &zc->appliedParams.ldmParams,
121720 +                                               src, srcSize), "");
121721 +            /* Updates ldmSeqStore.pos */
121722 +            lastLLSize =
121723 +                ZSTD_ldm_blockCompress(&ldmSeqStore,
121724 +                                       ms, &zc->seqStore,
121725 +                                       zc->blockState.nextCBlock->rep,
121726 +                                       src, srcSize);
121727 +            assert(ldmSeqStore.pos == ldmSeqStore.size);
121728 +        } else {   /* not long range mode */
121729 +            ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, dictMode);
121730 +            ms->ldmSeqStore = NULL;
121731 +            lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
121732 +        }
121733 +        {   const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
121734 +            ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
121735 +    }   }
121736 +    return ZSTDbss_compress;
121737 +}
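The "limited update after a very long match" clamp inside ZSTD_buildSeqStore() above caps catch-up indexing; a worked pass with assumed values:

    /* curr = 10000, ms->nextToUpdate = 9000:
     *   curr - nextToUpdate - 384 = 616 ; MIN(192, 616) = 192
     *   nextToUpdate becomes 10000 - 192 = 9808
     * so at most 192 positions are re-indexed, no matter how far behind
     * the match state fell. */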
121739 +static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
121740 +{
121741 +    const seqStore_t* seqStore = ZSTD_getSeqStore(zc);
121742 +    const seqDef* seqStoreSeqs = seqStore->sequencesStart;
121743 +    size_t seqStoreSeqSize = seqStore->sequences - seqStoreSeqs;
121744 +    size_t seqStoreLiteralsSize = (size_t)(seqStore->lit - seqStore->litStart);
121745 +    size_t literalsRead = 0;
121746 +    size_t lastLLSize;
121748 +    ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex];
121749 +    size_t i;
121750 +    repcodes_t updatedRepcodes;
121752 +    assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences);
121753 +    /* Ensure we have enough space for last literals "sequence" */
121754 +    assert(zc->seqCollector.maxSequences >= seqStoreSeqSize + 1);
121755 +    ZSTD_memcpy(updatedRepcodes.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
121756 +    for (i = 0; i < seqStoreSeqSize; ++i) {
121757 +        U32 rawOffset = seqStoreSeqs[i].offset - ZSTD_REP_NUM;
121758 +        outSeqs[i].litLength = seqStoreSeqs[i].litLength;
121759 +        outSeqs[i].matchLength = seqStoreSeqs[i].matchLength + MINMATCH;
121760 +        outSeqs[i].rep = 0;
121762 +        if (i == seqStore->longLengthPos) {
121763 +            if (seqStore->longLengthID == 1) {
121764 +                outSeqs[i].litLength += 0x10000;
121765 +            } else if (seqStore->longLengthID == 2) {
121766 +                outSeqs[i].matchLength += 0x10000;
121767 +            }
121768 +        }
121770 +        if (seqStoreSeqs[i].offset <= ZSTD_REP_NUM) {
121771 +            /* Derive the correct offset corresponding to a repcode */
121772 +            outSeqs[i].rep = seqStoreSeqs[i].offset;
121773 +            if (outSeqs[i].litLength != 0) {
121774 +                rawOffset = updatedRepcodes.rep[outSeqs[i].rep - 1];
121775 +            } else {
121776 +                if (outSeqs[i].rep == 3) {
121777 +                    rawOffset = updatedRepcodes.rep[0] - 1;
121778 +                } else {
121779 +                    rawOffset = updatedRepcodes.rep[outSeqs[i].rep];
121780 +                }
121781 +            }
121782 +        }
121783 +        outSeqs[i].offset = rawOffset;
121784 +        /* seqStoreSeqs[i].offset == offCode+1, and ZSTD_updateRep() expects offCode
121785 +           so we provide seqStoreSeqs[i].offset - 1 */
121786 +        updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep,
121787 +                                         seqStoreSeqs[i].offset - 1,
121788 +                                         seqStoreSeqs[i].litLength == 0);
121789 +        literalsRead += outSeqs[i].litLength;
121790 +    }
121791 +    /* Insert last literals (if any exist) in the block as a sequence with ml == off == 0.
121792 +     * If there are no last literals, then we'll emit (of: 0, ml: 0, ll: 0), which is a marker
121793 +     * for the block boundary, according to the API.
121794 +     */
121795 +    assert(seqStoreLiteralsSize >= literalsRead);
121796 +    lastLLSize = seqStoreLiteralsSize - literalsRead;
121797 +    outSeqs[i].litLength = (U32)lastLLSize;
121798 +    outSeqs[i].matchLength = outSeqs[i].offset = outSeqs[i].rep = 0;
121799 +    seqStoreSeqSize++;
121800 +    zc->seqCollector.seqIndex += seqStoreSeqSize;
121801 +}
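The repcode branch in ZSTD_copyBlockSequences() above follows zstd's repcode numbering; a worked example, assuming a current repcode history rep = {8, 4, 2}:

    /* offset field = 2 (repcode 2), litLength > 0 : rawOffset = rep[1] = 4
     * offset field = 3 (repcode 3), litLength == 0: rawOffset = rep[0] - 1 = 7
     * offset field > ZSTD_REP_NUM (== 3)          : a real match,
     *                               rawOffset = offset - ZSTD_REP_NUM */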
121803 +size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
121804 +                              size_t outSeqsSize, const void* src, size_t srcSize)
121805 +{
121806 +    const size_t dstCapacity = ZSTD_compressBound(srcSize);
121807 +    void* dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem);
121808 +    SeqCollector seqCollector;
121810 +    RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!");
121812 +    seqCollector.collectSequences = 1;
121813 +    seqCollector.seqStart = outSeqs;
121814 +    seqCollector.seqIndex = 0;
121815 +    seqCollector.maxSequences = outSeqsSize;
121816 +    zc->seqCollector = seqCollector;
121818 +    ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
121819 +    ZSTD_customFree(dst, ZSTD_defaultCMem);
121820 +    return zc->seqCollector.seqIndex;
121821 +}
121823 +size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize) {
121824 +    size_t in = 0;
121825 +    size_t out = 0;
121826 +    for (; in < seqsSize; ++in) {
121827 +        if (sequences[in].offset == 0 && sequences[in].matchLength == 0) {
121828 +            if (in != seqsSize - 1) {
121829 +                sequences[in+1].litLength += sequences[in].litLength;
121830 +            }
121831 +        } else {
121832 +            sequences[out] = sequences[in];
121833 +            ++out;
121834 +        }
121835 +    }
121836 +    return out;
121837 +}
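A worked example of the compaction above, where the middle sequence is a block delimiter (offset == 0 and matchLength == 0):

    /* in : {ll=5, ml=4, off=100}  {ll=2, ml=0, off=0}  {ll=3, ml=5, off=7}
     * out: {ll=5, ml=4, off=100}  {ll=5, ml=5, off=7}           returns 2
     * the delimiter's litLength (2) is folded into the following sequence */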
121839 +/* Unrolled loop reading four size_ts of input at a time. Returns 1 if the input is RLE, 0 if not. */
121840 +static int ZSTD_isRLE(const BYTE* src, size_t length) {
121841 +    const BYTE* ip = src;
121842 +    const BYTE value = ip[0];
121843 +    const size_t valueST = (size_t)((U64)value * 0x0101010101010101ULL);
121844 +    const size_t unrollSize = sizeof(size_t) * 4;
121845 +    const size_t unrollMask = unrollSize - 1;
121846 +    const size_t prefixLength = length & unrollMask;
121847 +    size_t i;
121848 +    size_t u;
121849 +    if (length == 1) return 1;
121850 +    /* Check if the prefix is RLE before using the unrolled loop */
121851 +    if (prefixLength && ZSTD_count(ip+1, ip, ip+prefixLength) != prefixLength-1) {
121852 +        return 0;
121853 +    }
121854 +    for (i = prefixLength; i != length; i += unrollSize) {
121855 +        for (u = 0; u < unrollSize; u += sizeof(size_t)) {
121856 +            if (MEM_readST(ip + i + u) != valueST) {
121857 +                return 0;
121858 +            }
121859 +        }
121860 +    }
121861 +    return 1;
121862 +}
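The unrolled check relies on broadcasting the first byte across a machine word so that sizeof(size_t) bytes compare at once. A standalone sketch of the same pattern (hypothetical name; assumes length is a multiple of sizeof(size_t), which ZSTD_isRLE() guarantees by peeling the prefix first):

    #include <stddef.h>
    #include <string.h>

    static int allBytesEqual_sketch(const unsigned char* p, size_t length,
                                    unsigned char value)
    {
        /* e.g. value 0xAB -> valueST 0xABABABABABABABAB on 64-bit */
        size_t const valueST = (size_t)(value * 0x0101010101010101ULL);
        size_t i;
        for (i = 0; i < length; i += sizeof(size_t)) {
            size_t v;
            memcpy(&v, p + i, sizeof(v));   /* alignment-safe load */
            if (v != valueST) return 0;
        }
        return 1;
    }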
121864 +/* Returns true if the given block may be RLE.
121865 + * This is just a heuristic based on the compressibility.
121866 + * It may return both false positives and false negatives.
121867 + */
121868 +static int ZSTD_maybeRLE(seqStore_t const* seqStore)
121869 +{
121870 +    size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
121871 +    size_t const nbLits = (size_t)(seqStore->lit - seqStore->litStart);
121873 +    return nbSeqs < 4 && nbLits < 10;
121874 +}
121876 +static void ZSTD_confirmRepcodesAndEntropyTables(ZSTD_CCtx* zc)
121877 +{
121878 +    ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
121879 +    zc->blockState.prevCBlock = zc->blockState.nextCBlock;
121880 +    zc->blockState.nextCBlock = tmp;
121881 +}
121883 +static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
121884 +                                        void* dst, size_t dstCapacity,
121885 +                                        const void* src, size_t srcSize, U32 frame)
121886 +{
121887 +    /* This is the upper bound for the length of an rle block.
121888 +     * This isn't the actual upper bound. Finding the real threshold
121889 +     * needs further investigation.
121890 +     */
121891 +    const U32 rleMaxLength = 25;
121892 +    size_t cSize;
121893 +    const BYTE* ip = (const BYTE*)src;
121894 +    BYTE* op = (BYTE*)dst;
121895 +    DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
121896 +                (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
121897 +                (unsigned)zc->blockState.matchState.nextToUpdate);
121899 +    {   const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
121900 +        FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
121901 +        if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; }
121902 +    }
121904 +    if (zc->seqCollector.collectSequences) {
121905 +        ZSTD_copyBlockSequences(zc);
121906 +        ZSTD_confirmRepcodesAndEntropyTables(zc);
121907 +        return 0;
121908 +    }
121910 +    /* encode sequences and literals */
121911 +    cSize = ZSTD_entropyCompressSequences(&zc->seqStore,
121912 +            &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
121913 +            &zc->appliedParams,
121914 +            dst, dstCapacity,
121915 +            srcSize,
121916 +            zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
121917 +            zc->bmi2);
121919 +    if (zc->seqCollector.collectSequences) {
121920 +        ZSTD_copyBlockSequences(zc);
121921 +        return 0;
121922 +    }
121925 +    if (frame &&
121926 +        /* We don't want to emit our first block as a RLE even if it qualifies because
121927 +         * doing so will cause the decoder (cli only) to throw a "should consume all input" error.
121928 +         * This is only an issue for zstd <= v1.4.3
121929 +         */
121930 +        !zc->isFirstBlock &&
121931 +        cSize < rleMaxLength &&
121932 +        ZSTD_isRLE(ip, srcSize))
121933 +    {
121934 +        cSize = 1;
121935 +        op[0] = ip[0];
121936 +    }
121938 +out:
121939 +    if (!ZSTD_isError(cSize) && cSize > 1) {
121940 +        ZSTD_confirmRepcodesAndEntropyTables(zc);
121941 +    }
121942 +    /* We check that dictionaries have offset codes available for the first
121943 +     * block. After the first block, the offcode table might not have large
121944 +     * enough codes to represent the offsets in the data.
121945 +     */
121946 +    if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
121947 +        zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
121949 +    return cSize;
121950 +}
121952 +static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,
121953 +                               void* dst, size_t dstCapacity,
121954 +                               const void* src, size_t srcSize,
121955 +                               const size_t bss, U32 lastBlock)
121956 +{
121957 +    DEBUGLOG(6, "Attempting ZSTD_compressSuperBlock()");
121958 +    if (bss == ZSTDbss_compress) {
121959 +        if (/* We don't want to emit our first block as a RLE even if it qualifies because
121960 +            * doing so will cause the decoder (cli only) to throw a "should consume all input" error.
121961 +            * This is only an issue for zstd <= v1.4.3
121962 +            */
121963 +            !zc->isFirstBlock &&
121964 +            ZSTD_maybeRLE(&zc->seqStore) &&
121965 +            ZSTD_isRLE((BYTE const*)src, srcSize))
121966 +        {
121967 +            return ZSTD_rleCompressBlock(dst, dstCapacity, *(BYTE const*)src, srcSize, lastBlock);
121968 +        }
121969 +        /* Attempt superblock compression.
121970 +         *
121971 +         * Note that compressed size of ZSTD_compressSuperBlock() is not bound by the
121972 +         * standard ZSTD_compressBound(). This is a problem, because even if we have
121973 +         * space now, taking an extra byte now could cause us to run out of space later
121974 +         * and violate ZSTD_compressBound().
121975 +         *
121976 +         * Define blockBound(blockSize) = blockSize + ZSTD_blockHeaderSize.
121977 +         *
121978 +         * In order to respect ZSTD_compressBound() we must attempt to emit a raw
121979 +         * uncompressed block in these cases:
121980 +         *   * cSize == 0: Return code for an uncompressed block.
121981 +         *   * cSize == dstSize_tooSmall: We may have expanded beyond blockBound(srcSize).
121982 +         *     ZSTD_noCompressBlock() will return dstSize_tooSmall if we are really out of
121983 +         *     output space.
121984 +         *   * cSize >= blockBound(srcSize): We have expanded the block too much so
121985 +         *     emit an uncompressed block.
121986 +         */
121987 +        {
121988 +            size_t const cSize = ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock);
121989 +            if (cSize != ERROR(dstSize_tooSmall)) {
121990 +                size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy);
121991 +                FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed");
121992 +                if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) {
121993 +                    ZSTD_confirmRepcodesAndEntropyTables(zc);
121994 +                    return cSize;
121995 +                }
121996 +            }
121997 +        }
121998 +    }
122000 +    DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()");
122001 +    /* Superblock compression failed: attempt to emit a single uncompressed block.
122002 +     * The decoder will be able to stream this block since it is uncompressed.
122003 +     */
122004 +    return ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock);
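/* A sketch of the three fall-back cases enumerated in the comment above. The
 * helper below is hypothetical (not part of zstd); it only restates the
 * decision logic: keep the superblock output unless it signalled no-compress
 * (cSize == 0), ran out of space (dstSize_tooSmall), or expanded to at least
 * blockBound(srcSize) = srcSize + ZSTD_blockHeaderSize (3 bytes). */
static int shouldEmitRawBlock_sketch(size_t cSize, size_t srcSize, int dstTooSmall)
{
    size_t const blockHeaderSize = 3;                 /* ZSTD_blockHeaderSize */
    if (cSize == 0) return 1;                         /* no-compress return code */
    if (dstTooSmall) return 1;                        /* may have expanded past capacity */
    if (cSize >= srcSize + blockHeaderSize) return 1; /* expanded beyond blockBound() */
    return 0;
}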
122007 +static size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc,
122008 +                               void* dst, size_t dstCapacity,
122009 +                               const void* src, size_t srcSize,
122010 +                               U32 lastBlock)
122012 +    size_t cSize = 0;
122013 +    const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
122014 +    DEBUGLOG(5, "ZSTD_compressBlock_targetCBlockSize (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u, srcSize=%zu)",
122015 +                (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate, srcSize);
122016 +    FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
122018 +    cSize = ZSTD_compressBlock_targetCBlockSize_body(zc, dst, dstCapacity, src, srcSize, bss, lastBlock);
122019 +    FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize_body failed");
122021 +    if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
122022 +        zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
122024 +    return cSize;
122027 +static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
122028 +                                         ZSTD_cwksp* ws,
122029 +                                         ZSTD_CCtx_params const* params,
122030 +                                         void const* ip,
122031 +                                         void const* iend)
122033 +    if (ZSTD_window_needOverflowCorrection(ms->window, iend)) {
122034 +        U32 const maxDist = (U32)1 << params->cParams.windowLog;
122035 +        U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy);
122036 +        U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
122037 +        ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
122038 +        ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
122039 +        ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
122040 +        ZSTD_cwksp_mark_tables_dirty(ws);
122041 +        ZSTD_reduceIndex(ms, params, correction);
122042 +        ZSTD_cwksp_mark_tables_clean(ws);
122043 +        if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
122044 +        else ms->nextToUpdate -= correction;
122045 +        /* invalidate dictionaries on overflow correction */
122046 +        ms->loadedDictEnd = 0;
122047 +        ms->dictMatchState = NULL;
122048 +    }
122051 +/*! ZSTD_compress_frameChunk() :
122052 +*   Compress a chunk of data into one or multiple blocks.
122053 +*   All blocks will be terminated, all input will be consumed.
122054 +*   Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
122055 +*   The frame is assumed to be already started (header already produced).
122056 +*   @return : compressed size, or an error code
122058 +static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
122059 +                                     void* dst, size_t dstCapacity,
122060 +                               const void* src, size_t srcSize,
122061 +                                     U32 lastFrameChunk)
122063 +    size_t blockSize = cctx->blockSize;
122064 +    size_t remaining = srcSize;
122065 +    const BYTE* ip = (const BYTE*)src;
122066 +    BYTE* const ostart = (BYTE*)dst;
122067 +    BYTE* op = ostart;
122068 +    U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
122070 +    assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);
122072 +    DEBUGLOG(4, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
122073 +    if (cctx->appliedParams.fParams.checksumFlag && srcSize)
122074 +        xxh64_update(&cctx->xxhState, src, srcSize);
122076 +    while (remaining) {
122077 +        ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
122078 +        U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
122080 +        RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,
122081 +                        dstSize_tooSmall,
122082 +                        "not enough space to store compressed block");
122083 +        if (remaining < blockSize) blockSize = remaining;
122085 +        ZSTD_overflowCorrectIfNeeded(
122086 +            ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize);
122087 +        ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
122089 +        /* Ensure hash/chain table insertion resumes no sooner than lowlimit */
122090 +        if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;
122092 +        {   size_t cSize;
122093 +            if (ZSTD_useTargetCBlockSize(&cctx->appliedParams)) {
122094 +                cSize = ZSTD_compressBlock_targetCBlockSize(cctx, op, dstCapacity, ip, blockSize, lastBlock);
122095 +                FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize failed");
122096 +                assert(cSize > 0);
122097 +                assert(cSize <= blockSize + ZSTD_blockHeaderSize);
122098 +            } else {
122099 +                cSize = ZSTD_compressBlock_internal(cctx,
122100 +                                        op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
122101 +                                        ip, blockSize, 1 /* frame */);
122102 +                FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_internal failed");
122104 +                if (cSize == 0) {  /* block is not compressible */
122105 +                    cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
122106 +                    FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
122107 +                } else {
122108 +                    U32 const cBlockHeader = cSize == 1 ?
122109 +                        lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
122110 +                        lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
122111 +                    MEM_writeLE24(op, cBlockHeader);
122112 +                    cSize += ZSTD_blockHeaderSize;
122113 +                }
122114 +            }
122117 +            ip += blockSize;
122118 +            assert(remaining >= blockSize);
122119 +            remaining -= blockSize;
122120 +            op += cSize;
122121 +            assert(dstCapacity >= cSize);
122122 +            dstCapacity -= cSize;
122123 +            cctx->isFirstBlock = 0;
122124 +            DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
122125 +                        (unsigned)cSize);
122126 +    }   }
122128 +    if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
122129 +    return (size_t)(op-ostart);
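/* The 3-byte block header written with MEM_writeLE24() above packs three
 * fields, per the zstd frame format: bit 0 = Last_Block flag, bits 1-2 =
 * Block_Type (bt_raw = 0, bt_rle = 1, bt_compressed = 2), bits 3-23 =
 * Block_Size. A minimal sketch of that packing: */
static unsigned makeBlockHeader_sketch(unsigned lastBlock, unsigned blockType, unsigned blockSize)
{
    return lastBlock + (blockType << 1) + (blockSize << 3);
}
/* e.g. the last empty raw block written by ZSTD_writeLastEmptyBlock() below
 * is the degenerate case makeBlockHeader_sketch(1, 0, 0) == 1. */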
122133 +static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
122134 +                                    const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID)
122135 +{   BYTE* const op = (BYTE*)dst;
122136 +    U32   const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536);   /* 0-3 */
122137 +    U32   const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength;   /* 0-3 */
122138 +    U32   const checksumFlag = params->fParams.checksumFlag>0;
122139 +    U32   const windowSize = (U32)1 << params->cParams.windowLog;
122140 +    U32   const singleSegment = params->fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
122141 +    BYTE  const windowLogByte = (BYTE)((params->cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
122142 +    U32   const fcsCode = params->fParams.contentSizeFlag ?
122143 +                     (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0;  /* 0-3 */
122144 +    BYTE  const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
122145 +    size_t pos=0;
122147 +    assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
122148 +    RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall,
122149 +                    "dst buf is too small to fit worst-case frame header size.");
122150 +    DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
122151 +                !params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
122152 +    if (params->format == ZSTD_f_zstd1) {
122153 +        MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
122154 +        pos = 4;
122155 +    }
122156 +    op[pos++] = frameHeaderDescriptionByte;
122157 +    if (!singleSegment) op[pos++] = windowLogByte;
122158 +    switch(dictIDSizeCode)
122159 +    {
122160 +        default:  assert(0); /* impossible */
122161 +        case 0 : break;
122162 +        case 1 : op[pos] = (BYTE)(dictID); pos++; break;
122163 +        case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
122164 +        case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
122165 +    }
122166 +    switch(fcsCode)
122167 +    {
122168 +        default:  assert(0); /* impossible */
122169 +        case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
122170 +        case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
122171 +        case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
122172 +        case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
122173 +    }
122174 +    return pos;
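/* The Frame_Header_Descriptor byte assembled above packs: Dictionary_ID_Flag
 * in bits 0-1, Content_Checksum_Flag in bit 2, Single_Segment_Flag in bit 5,
 * and Frame_Content_Size_Flag in bits 6-7. A sketch directly mirroring the
 * expression used in ZSTD_writeFrameHeader(): */
static unsigned char makeFHDByte_sketch(unsigned dictIDSizeCode, unsigned checksumFlag,
                                        unsigned singleSegment, unsigned fcsCode)
{
    return (unsigned char)(dictIDSizeCode + (checksumFlag << 2)
                           + (singleSegment << 5) + (fcsCode << 6));
}
/* e.g. checksum on, no dictID, single segment, 1-byte content-size field:
 * makeFHDByte_sketch(0, 1, 1, 0) == 0x24. */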
122177 +/* ZSTD_writeSkippableFrame() :
122178 + * Writes out a skippable frame with the specified magic number variant (16 are supported),
122179 + * from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15, and the desired source data.
122181 + * Returns the total number of bytes written, or a ZSTD error code.
122182 + */
122183 +size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
122184 +                                const void* src, size_t srcSize, unsigned magicVariant) {
122185 +    BYTE* op = (BYTE*)dst;
122186 +    RETURN_ERROR_IF(dstCapacity < srcSize + ZSTD_SKIPPABLEHEADERSIZE /* Skippable frame overhead */,
122187 +                    dstSize_tooSmall, "Not enough room for skippable frame");
122188 +    RETURN_ERROR_IF(srcSize > (unsigned)0xFFFFFFFF, srcSize_wrong, "Src size too large for skippable frame");
122189 +    RETURN_ERROR_IF(magicVariant > 15, parameter_outOfBound, "Skippable frame magic number variant not supported");
122191 +    MEM_writeLE32(op, (U32)(ZSTD_MAGIC_SKIPPABLE_START + magicVariant));
122192 +    MEM_writeLE32(op+4, (U32)srcSize);
122193 +    ZSTD_memcpy(op+8, src, srcSize);
122194 +    return srcSize + ZSTD_SKIPPABLEHEADERSIZE;
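/* Usage sketch for ZSTD_writeSkippableFrame() as defined above. The payload
 * and magic variant are illustrative; on success the return value is
 * srcSize + ZSTD_SKIPPABLEHEADERSIZE (4-byte magic + 4-byte length = 8). */
static size_t embedMetadata_sketch(void* dst, size_t dstCapacity)
{
    static const char meta[] = "hello";
    return ZSTD_writeSkippableFrame(dst, dstCapacity, meta, sizeof(meta) - 1,
                                    0 /* magic variant, 0-15 */);
}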
122197 +/* ZSTD_writeLastEmptyBlock() :
122198 + * Output an empty block with an end-of-frame mark to complete a frame.
122199 + * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
122200 + *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
122201 + */
122202 +size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
122204 +    RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall,
122205 +                    "dst buf is too small to write frame trailer empty block.");
122206 +    {   U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1);  /* 0 size */
122207 +        MEM_writeLE24(dst, cBlockHeader24);
122208 +        return ZSTD_blockHeaderSize;
122209 +    }
122212 +size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
122214 +    RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong,
122215 +                    "wrong cctx stage");
122216 +    RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm,
122217 +                    parameter_unsupported,
122218 +                    "incompatible with ldm");
122219 +    cctx->externSeqStore.seq = seq;
122220 +    cctx->externSeqStore.size = nbSeq;
122221 +    cctx->externSeqStore.capacity = nbSeq;
122222 +    cctx->externSeqStore.pos = 0;
122223 +    cctx->externSeqStore.posInSequence = 0;
122224 +    return 0;
122228 +static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
122229 +                              void* dst, size_t dstCapacity,
122230 +                        const void* src, size_t srcSize,
122231 +                               U32 frame, U32 lastFrameChunk)
122233 +    ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
122234 +    size_t fhSize = 0;
122236 +    DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
122237 +                cctx->stage, (unsigned)srcSize);
122238 +    RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
122239 +                    "missing init (ZSTD_compressBegin)");
122241 +    if (frame && (cctx->stage==ZSTDcs_init)) {
122242 +        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams,
122243 +                                       cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
122244 +        FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
122245 +        assert(fhSize <= dstCapacity);
122246 +        dstCapacity -= fhSize;
122247 +        dst = (char*)dst + fhSize;
122248 +        cctx->stage = ZSTDcs_ongoing;
122249 +    }
122251 +    if (!srcSize) return fhSize;  /* do not generate an empty block if no input */
122253 +    if (!ZSTD_window_update(&ms->window, src, srcSize)) {
122254 +        ms->nextToUpdate = ms->window.dictLimit;
122255 +    }
122256 +    if (cctx->appliedParams.ldmParams.enableLdm) {
122257 +        ZSTD_window_update(&cctx->ldmState.window, src, srcSize);
122258 +    }
122260 +    if (!frame) {
122261 +        /* overflow check and correction for block mode */
122262 +        ZSTD_overflowCorrectIfNeeded(
122263 +            ms, &cctx->workspace, &cctx->appliedParams,
122264 +            src, (BYTE const*)src + srcSize);
122265 +    }
122267 +    DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
122268 +    {   size_t const cSize = frame ?
122269 +                             ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
122270 +                             ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);
122271 +        FORWARD_IF_ERROR(cSize, "%s", frame ? "ZSTD_compress_frameChunk failed" : "ZSTD_compressBlock_internal failed");
122272 +        cctx->consumedSrcSize += srcSize;
122273 +        cctx->producedCSize += (cSize + fhSize);
122274 +        assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
122275 +        if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
122276 +            ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
122277 +            RETURN_ERROR_IF(
122278 +                cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne,
122279 +                srcSize_wrong,
122280 +                "error : pledgedSrcSize = %u, while realSrcSize >= %u",
122281 +                (unsigned)cctx->pledgedSrcSizePlusOne-1,
122282 +                (unsigned)cctx->consumedSrcSize);
122283 +        }
122284 +        return cSize + fhSize;
122285 +    }
122288 +size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
122289 +                              void* dst, size_t dstCapacity,
122290 +                        const void* src, size_t srcSize)
122292 +    DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);
122293 +    return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
122297 +size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
122299 +    ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;
122300 +    assert(!ZSTD_checkCParams(cParams));
122301 +    return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
122304 +size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
122306 +    DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize);
122307 +    { size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
122308 +      RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block"); }
122310 +    return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
122313 +/*! ZSTD_loadDictionaryContent() :
122314 + *  @return : 0, or an error code
122315 + */
122316 +static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
122317 +                                         ldmState_t* ls,
122318 +                                         ZSTD_cwksp* ws,
122319 +                                         ZSTD_CCtx_params const* params,
122320 +                                         const void* src, size_t srcSize,
122321 +                                         ZSTD_dictTableLoadMethod_e dtlm)
122323 +    const BYTE* ip = (const BYTE*) src;
122324 +    const BYTE* const iend = ip + srcSize;
122326 +    ZSTD_window_update(&ms->window, src, srcSize);
122327 +    ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
122329 +    if (params->ldmParams.enableLdm && ls != NULL) {
122330 +        ZSTD_window_update(&ls->window, src, srcSize);
122331 +        ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base);
122332 +    }
122334 +    /* Assert that the ms params match the params we're being given */
122335 +    ZSTD_assertEqualCParams(params->cParams, ms->cParams);
122337 +    if (srcSize <= HASH_READ_SIZE) return 0;
122339 +    while (iend - ip > HASH_READ_SIZE) {
122340 +        size_t const remaining = (size_t)(iend - ip);
122341 +        size_t const chunk = MIN(remaining, ZSTD_CHUNKSIZE_MAX);
122342 +        const BYTE* const ichunk = ip + chunk;
122344 +        ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, ichunk);
122346 +        if (params->ldmParams.enableLdm && ls != NULL)
122347 +            ZSTD_ldm_fillHashTable(ls, (const BYTE*)src, (const BYTE*)src + srcSize, &params->ldmParams);
122349 +        switch(params->cParams.strategy)
122350 +        {
122351 +        case ZSTD_fast:
122352 +            ZSTD_fillHashTable(ms, ichunk, dtlm);
122353 +            break;
122354 +        case ZSTD_dfast:
122355 +            ZSTD_fillDoubleHashTable(ms, ichunk, dtlm);
122356 +            break;
122358 +        case ZSTD_greedy:
122359 +        case ZSTD_lazy:
122360 +        case ZSTD_lazy2:
122361 +            if (chunk >= HASH_READ_SIZE && ms->dedicatedDictSearch) {
122362 +                assert(chunk == remaining); /* must load everything in one go */
122363 +                ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, ichunk-HASH_READ_SIZE);
122364 +            } else if (chunk >= HASH_READ_SIZE) {
122365 +                ZSTD_insertAndFindFirstIndex(ms, ichunk-HASH_READ_SIZE);
122366 +            }
122367 +            break;
122369 +        case ZSTD_btlazy2:   /* we want the dictionary table fully sorted */
122370 +        case ZSTD_btopt:
122371 +        case ZSTD_btultra:
122372 +        case ZSTD_btultra2:
122373 +            if (chunk >= HASH_READ_SIZE)
122374 +                ZSTD_updateTree(ms, ichunk-HASH_READ_SIZE, ichunk);
122375 +            break;
122377 +        default:
122378 +            assert(0);  /* not possible : not a valid strategy id */
122379 +        }
122381 +        ip = ichunk;
122382 +    }
122384 +    ms->nextToUpdate = (U32)(iend - ms->window.base);
122385 +    return 0;
122389 +/* Dictionaries that assign zero probability to symbols that do show up cause
122390 + * problems during FSE encoding. Mark dictionaries with zero-probability symbols
122391 + * as FSE_repeat_check; only dictionaries with 100% valid symbols can be assumed valid.
122392 + */
122393 +static FSE_repeat ZSTD_dictNCountRepeat(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
122395 +    U32 s;
122396 +    if (dictMaxSymbolValue < maxSymbolValue) {
122397 +        return FSE_repeat_check;
122398 +    }
122399 +    for (s = 0; s <= maxSymbolValue; ++s) {
122400 +        if (normalizedCounter[s] == 0) {
122401 +            return FSE_repeat_check;
122402 +        }
122403 +    }
122404 +    return FSE_repeat_valid;
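/* Worked example of the rule above: if a dictionary's normalized count table
 * assigns probability 0 to a symbol, data containing that symbol cannot be
 * encoded with the table as-is, so the table is only FSE_repeat_check. The
 * standalone check below restates the loop in ZSTD_dictNCountRepeat(): */
static int hasZeroProbSymbol_sketch(const short* normalizedCounter, unsigned maxSymbolValue)
{
    unsigned s;
    for (s = 0; s <= maxSymbolValue; s++) {
        if (normalizedCounter[s] == 0)
            return 1;  /* must be demoted to FSE_repeat_check */
    }
    return 0;          /* all symbols representable: FSE_repeat_valid */
}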
122407 +size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
122408 +                         const void* const dict, size_t dictSize)
122410 +    short offcodeNCount[MaxOff+1];
122411 +    unsigned offcodeMaxValue = MaxOff;
122412 +    const BYTE* dictPtr = (const BYTE*)dict;    /* skip magic num and dict ID */
122413 +    const BYTE* const dictEnd = dictPtr + dictSize;
122414 +    dictPtr += 8;
122415 +    bs->entropy.huf.repeatMode = HUF_repeat_check;
122417 +    {   unsigned maxSymbolValue = 255;
122418 +        unsigned hasZeroWeights = 1;
122419 +        size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr,
122420 +            dictEnd-dictPtr, &hasZeroWeights);
122422 +        /* We only set the loaded table as valid if all of its weights are
122423 +         * non-zero. Otherwise, we set it to check. */
122424 +        if (!hasZeroWeights)
122425 +            bs->entropy.huf.repeatMode = HUF_repeat_valid;
122427 +        RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, "");
122428 +        RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted, "");
122429 +        dictPtr += hufHeaderSize;
122430 +    }
122432 +    {   unsigned offcodeLog;
122433 +        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
122434 +        RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
122435 +        RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
122436 +        /* fill all offset symbols to avoid garbage at end of table */
122437 +        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
122438 +                bs->entropy.fse.offcodeCTable,
122439 +                offcodeNCount, MaxOff, offcodeLog,
122440 +                workspace, HUF_WORKSPACE_SIZE)),
122441 +            dictionary_corrupted, "");
122442 +        /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
122443 +        dictPtr += offcodeHeaderSize;
122444 +    }
122446 +    {   short matchlengthNCount[MaxML+1];
122447 +        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
122448 +        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
122449 +        RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
122450 +        RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
122451 +        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
122452 +                bs->entropy.fse.matchlengthCTable,
122453 +                matchlengthNCount, matchlengthMaxValue, matchlengthLog,
122454 +                workspace, HUF_WORKSPACE_SIZE)),
122455 +            dictionary_corrupted, "");
122456 +        bs->entropy.fse.matchlength_repeatMode = ZSTD_dictNCountRepeat(matchlengthNCount, matchlengthMaxValue, MaxML);
122457 +        dictPtr += matchlengthHeaderSize;
122458 +    }
122460 +    {   short litlengthNCount[MaxLL+1];
122461 +        unsigned litlengthMaxValue = MaxLL, litlengthLog;
122462 +        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
122463 +        RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
122464 +        RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
122465 +        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
122466 +                bs->entropy.fse.litlengthCTable,
122467 +                litlengthNCount, litlengthMaxValue, litlengthLog,
122468 +                workspace, HUF_WORKSPACE_SIZE)),
122469 +            dictionary_corrupted, "");
122470 +        bs->entropy.fse.litlength_repeatMode = ZSTD_dictNCountRepeat(litlengthNCount, litlengthMaxValue, MaxLL);
122471 +        dictPtr += litlengthHeaderSize;
122472 +    }
122474 +    RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
122475 +    bs->rep[0] = MEM_readLE32(dictPtr+0);
122476 +    bs->rep[1] = MEM_readLE32(dictPtr+4);
122477 +    bs->rep[2] = MEM_readLE32(dictPtr+8);
122478 +    dictPtr += 12;
122480 +    {   size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
122481 +        U32 offcodeMax = MaxOff;
122482 +        if (dictContentSize <= ((U32)-1) - 128 KB) {
122483 +            U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
122484 +            offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
122485 +        }
122486 +        /* All offset values <= dictContentSize + 128 KB must be representable for a valid table */
122487 +        bs->entropy.fse.offcode_repeatMode = ZSTD_dictNCountRepeat(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff));
122489 +        /* All repCodes must be <= dictContentSize and != 0 */
122490 +        {   U32 u;
122491 +            for (u=0; u<3; u++) {
122492 +                RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted, "");
122493 +                RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, "");
122494 +    }   }   }
122496 +    return dictPtr - (const BYTE*)dict;
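/* For reference, the byte layout parsed by ZSTD_loadCEntropy() above
 * (per the zstd dictionary format):
 *   bytes 0-3 : ZSTD_MAGIC_DICTIONARY
 *   bytes 4-7 : dictID
 *   then      : Huffman literals table, offcode FSE table,
 *               matchlength FSE table, litlength FSE table (variable sizes)
 *   then      : 3 x 4-byte repcodes, little-endian
 *   rest      : dictionary content
 * A sketch of the repcode read performed above with MEM_readLE32(): */
static void readRepcodes_sketch(const unsigned char* p, unsigned rep[3])
{
    int i;
    for (i = 0; i < 3; i++) {
        rep[i] = (unsigned)p[4*i]
               | ((unsigned)p[4*i + 1] << 8)
               | ((unsigned)p[4*i + 2] << 16)
               | ((unsigned)p[4*i + 3] << 24);
    }
}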
122499 +/* Dictionary format :
122500 + * See :
122501 + * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#dictionary-format
122502 + */
122503 +/*! ZSTD_loadZstdDictionary() :
122504 + * @return : dictID, or an error code
122505 + *  assumptions : magic number is assumed to be already checked,
122506 + *                and dictSize is assumed >= 8
122507 + */
122508 +static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
122509 +                                      ZSTD_matchState_t* ms,
122510 +                                      ZSTD_cwksp* ws,
122511 +                                      ZSTD_CCtx_params const* params,
122512 +                                      const void* dict, size_t dictSize,
122513 +                                      ZSTD_dictTableLoadMethod_e dtlm,
122514 +                                      void* workspace)
122516 +    const BYTE* dictPtr = (const BYTE*)dict;
122517 +    const BYTE* const dictEnd = dictPtr + dictSize;
122518 +    size_t dictID;
122519 +    size_t eSize;
122521 +    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
122522 +    assert(dictSize >= 8);
122523 +    assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);
122525 +    dictID = params->fParams.noDictIDFlag ? 0 :  MEM_readLE32(dictPtr + 4 /* skip magic number */ );
122526 +    eSize = ZSTD_loadCEntropy(bs, workspace, dict, dictSize);
122527 +    FORWARD_IF_ERROR(eSize, "ZSTD_loadCEntropy failed");
122528 +    dictPtr += eSize;
122530 +    {
122531 +        size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
122532 +        FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(
122533 +            ms, NULL, ws, params, dictPtr, dictContentSize, dtlm), "");
122534 +    }
122535 +    return dictID;
122538 +/** ZSTD_compress_insertDictionary() :
122539 +*   @return : dictID, or an error code */
122540 +static size_t
122541 +ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
122542 +                               ZSTD_matchState_t* ms,
122543 +                               ldmState_t* ls,
122544 +                               ZSTD_cwksp* ws,
122545 +                         const ZSTD_CCtx_params* params,
122546 +                         const void* dict, size_t dictSize,
122547 +                               ZSTD_dictContentType_e dictContentType,
122548 +                               ZSTD_dictTableLoadMethod_e dtlm,
122549 +                               void* workspace)
122551 +    DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
122552 +    if ((dict==NULL) || (dictSize<8)) {
122553 +        RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
122554 +        return 0;
122555 +    }
122557 +    ZSTD_reset_compressedBlockState(bs);
122559 +    /* dict restricted modes */
122560 +    if (dictContentType == ZSTD_dct_rawContent)
122561 +        return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm);
122563 +    if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
122564 +        if (dictContentType == ZSTD_dct_auto) {
122565 +            DEBUGLOG(4, "raw content dictionary detected");
122566 +            return ZSTD_loadDictionaryContent(
122567 +                ms, ls, ws, params, dict, dictSize, dtlm);
122568 +        }
122569 +        RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
122570 +        assert(0);   /* impossible */
122571 +    }
122573 +    /* dict as full zstd dictionary */
122574 +    return ZSTD_loadZstdDictionary(
122575 +        bs, ms, ws, params, dict, dictSize, dtlm, workspace);
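/* Sketch of the ZSTD_dct_auto detection above: a buffer that does not start
 * with the dictionary magic number is loaded as raw content. The constant
 * 0xEC30A437 is ZSTD_MAGIC_DICTIONARY from the zstd format. */
static int looksLikeZstdDictionary_sketch(const unsigned char* dict, size_t dictSize)
{
    unsigned magic;
    if (dictSize < 8) return 0;  /* treated as "no dictionary" above */
    magic = (unsigned)dict[0]
          | ((unsigned)dict[1] << 8)
          | ((unsigned)dict[2] << 16)
          | ((unsigned)dict[3] << 24);
    return magic == 0xEC30A437u; /* ZSTD_MAGIC_DICTIONARY */
}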
122578 +#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB)
122579 +#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL)
122581 +/*! ZSTD_compressBegin_internal() :
122582 + * @return : 0, or an error code */
122583 +static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
122584 +                                    const void* dict, size_t dictSize,
122585 +                                    ZSTD_dictContentType_e dictContentType,
122586 +                                    ZSTD_dictTableLoadMethod_e dtlm,
122587 +                                    const ZSTD_CDict* cdict,
122588 +                                    const ZSTD_CCtx_params* params, U64 pledgedSrcSize,
122589 +                                    ZSTD_buffered_policy_e zbuff)
122591 +    DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog);
122592 +    /* params are supposed to be fully validated at this point */
122593 +    assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
122594 +    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
122595 +    if ( (cdict)
122596 +      && (cdict->dictContentSize > 0)
122597 +      && ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
122598 +        || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
122599 +        || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
122600 +        || cdict->compressionLevel == 0)
122601 +      && (params->attachDictPref != ZSTD_dictForceLoad) ) {
122602 +        return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
122603 +    }
122605 +    FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, *params, pledgedSrcSize,
122606 +                                     ZSTDcrp_makeClean, zbuff) , "");
122607 +    {   size_t const dictID = cdict ?
122608 +                ZSTD_compress_insertDictionary(
122609 +                        cctx->blockState.prevCBlock, &cctx->blockState.matchState,
122610 +                        &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent,
122611 +                        cdict->dictContentSize, cdict->dictContentType, dtlm,
122612 +                        cctx->entropyWorkspace)
122613 +              : ZSTD_compress_insertDictionary(
122614 +                        cctx->blockState.prevCBlock, &cctx->blockState.matchState,
122615 +                        &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize,
122616 +                        dictContentType, dtlm, cctx->entropyWorkspace);
122617 +        FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
122618 +        assert(dictID <= UINT_MAX);
122619 +        cctx->dictID = (U32)dictID;
122620 +        cctx->dictContentSize = cdict ? cdict->dictContentSize : dictSize;
122621 +    }
122622 +    return 0;
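/* The cdict fast path above is taken when the dictionary's own parameters are
 * kept. A hypothetical restatement of that predicate: small sources (below
 * 128 KB, or below 6x the dictionary size), unknown-size sources, and
 * "level 0" cdicts all reuse the cdict's parameters. */
static int useCDictParams_sketch(unsigned long long pledgedSrcSize,
                                 unsigned long long dictContentSize,
                                 int cdictCompressionLevel)
{
    unsigned long long const srcSizeCutoff = 128 * 1024; /* ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF */
    unsigned long long const dictSizeMultiplier = 6;     /* ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER */
    unsigned long long const contentSizeUnknown = (unsigned long long)-1; /* ZSTD_CONTENTSIZE_UNKNOWN */
    return pledgedSrcSize < srcSizeCutoff
        || pledgedSrcSize < dictContentSize * dictSizeMultiplier
        || pledgedSrcSize == contentSizeUnknown
        || cdictCompressionLevel == 0;
}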
122625 +size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
122626 +                                    const void* dict, size_t dictSize,
122627 +                                    ZSTD_dictContentType_e dictContentType,
122628 +                                    ZSTD_dictTableLoadMethod_e dtlm,
122629 +                                    const ZSTD_CDict* cdict,
122630 +                                    const ZSTD_CCtx_params* params,
122631 +                                    unsigned long long pledgedSrcSize)
122633 +    DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params->cParams.windowLog);
122634 +    /* compression parameters verification and optimization */
122635 +    FORWARD_IF_ERROR( ZSTD_checkCParams(params->cParams) , "");
122636 +    return ZSTD_compressBegin_internal(cctx,
122637 +                                       dict, dictSize, dictContentType, dtlm,
122638 +                                       cdict,
122639 +                                       params, pledgedSrcSize,
122640 +                                       ZSTDb_not_buffered);
122643 +/*! ZSTD_compressBegin_advanced() :
122644 +*   @return : 0, or an error code */
122645 +size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
122646 +                             const void* dict, size_t dictSize,
122647 +                                   ZSTD_parameters params, unsigned long long pledgedSrcSize)
122649 +    ZSTD_CCtx_params cctxParams;
122650 +    ZSTD_CCtxParams_init_internal(&cctxParams, &params, ZSTD_NO_CLEVEL);
122651 +    return ZSTD_compressBegin_advanced_internal(cctx,
122652 +                                            dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
122653 +                                            NULL /*cdict*/,
122654 +                                            &cctxParams, pledgedSrcSize);
122657 +size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
122659 +    ZSTD_CCtx_params cctxParams;
122660 +    {
122661 +        ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict);
122662 +        ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel);
122663 +    }
122664 +    DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
122665 +    return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
122666 +                                       &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
122669 +size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
122671 +    return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
122675 +/*! ZSTD_writeEpilogue() :
122676 +*   Ends a frame.
122677 +*   @return : nb of bytes written into dst (or an error code) */
122678 +static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
122680 +    BYTE* const ostart = (BYTE*)dst;
122681 +    BYTE* op = ostart;
122682 +    size_t fhSize = 0;
122684 +    DEBUGLOG(4, "ZSTD_writeEpilogue");
122685 +    RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
122687 +    /* special case : empty frame */
122688 +    if (cctx->stage == ZSTDcs_init) {
122689 +        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
122690 +        FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
122691 +        dstCapacity -= fhSize;
122692 +        op += fhSize;
122693 +        cctx->stage = ZSTDcs_ongoing;
122694 +    }
122696 +    if (cctx->stage != ZSTDcs_ending) {
122697 +        /* write one last empty block, make it the "last" block */
122698 +        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
122699 +        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for epilogue");
122700 +        MEM_writeLE32(op, cBlockHeader24);
122701 +        op += ZSTD_blockHeaderSize;
122702 +        dstCapacity -= ZSTD_blockHeaderSize;
122703 +    }
122705 +    if (cctx->appliedParams.fParams.checksumFlag) {
122706 +        U32 const checksum = (U32) xxh64_digest(&cctx->xxhState);
122707 +        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
122708 +        DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
122709 +        MEM_writeLE32(op, checksum);
122710 +        op += 4;
122711 +    }
122713 +    cctx->stage = ZSTDcs_created;  /* return to "created but no init" status */
122714 +    return op-ostart;
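/* The epilogue written above is at most 3 + 4 bytes for a non-empty frame: a
 * last empty raw block header (LE24 value 1) if the stream is not already in
 * the ending stage, then the low 32 bits of the XXH64 digest (LE32) if
 * checksumFlag is set. A byte-level sketch under those assumptions: */
static size_t writeEpilogue_sketch(unsigned char* dst, unsigned checksum, int checksumFlag)
{
    size_t pos = 0;
    dst[pos++] = 1;  /* lastBlock=1, bt_raw, size 0 */
    dst[pos++] = 0;
    dst[pos++] = 0;
    if (checksumFlag) {
        dst[pos++] = (unsigned char)(checksum);
        dst[pos++] = (unsigned char)(checksum >> 8);
        dst[pos++] = (unsigned char)(checksum >> 16);
        dst[pos++] = (unsigned char)(checksum >> 24);
    }
    return pos;
}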
122717 +void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize)
122719 +    (void)cctx;
122720 +    (void)extraCSize;
122723 +size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
122724 +                         void* dst, size_t dstCapacity,
122725 +                   const void* src, size_t srcSize)
122727 +    size_t endResult;
122728 +    size_t const cSize = ZSTD_compressContinue_internal(cctx,
122729 +                                dst, dstCapacity, src, srcSize,
122730 +                                1 /* frame mode */, 1 /* last chunk */);
122731 +    FORWARD_IF_ERROR(cSize, "ZSTD_compressContinue_internal failed");
122732 +    endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
122733 +    FORWARD_IF_ERROR(endResult, "ZSTD_writeEpilogue failed");
122734 +    assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
122735 +    if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
122736 +        ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
122737 +        DEBUGLOG(4, "end of frame : controlling src size");
122738 +        RETURN_ERROR_IF(
122739 +            cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1,
122740 +            srcSize_wrong,
122741 +             "error : pledgedSrcSize = %u, while realSrcSize = %u",
122742 +            (unsigned)cctx->pledgedSrcSizePlusOne-1,
122743 +            (unsigned)cctx->consumedSrcSize);
122744 +    }
122745 +    ZSTD_CCtx_trace(cctx, endResult);
122746 +    return cSize + endResult;
122749 +size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
122750 +                               void* dst, size_t dstCapacity,
122751 +                         const void* src, size_t srcSize,
122752 +                         const void* dict,size_t dictSize,
122753 +                               ZSTD_parameters params)
122755 +    ZSTD_CCtx_params cctxParams;
122756 +    DEBUGLOG(4, "ZSTD_compress_advanced");
122757 +    FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), "");
122758 +    ZSTD_CCtxParams_init_internal(&cctxParams, &params, ZSTD_NO_CLEVEL);
122759 +    return ZSTD_compress_advanced_internal(cctx,
122760 +                                           dst, dstCapacity,
122761 +                                           src, srcSize,
122762 +                                           dict, dictSize,
122763 +                                           &cctxParams);
122766 +/* Internal */
122767 +size_t ZSTD_compress_advanced_internal(
122768 +        ZSTD_CCtx* cctx,
122769 +        void* dst, size_t dstCapacity,
122770 +        const void* src, size_t srcSize,
122771 +        const void* dict,size_t dictSize,
122772 +        const ZSTD_CCtx_params* params)
122774 +    DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
122775 +    FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
122776 +                         dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
122777 +                         params, srcSize, ZSTDb_not_buffered) , "");
122778 +    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
122781 +size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
122782 +                               void* dst, size_t dstCapacity,
122783 +                         const void* src, size_t srcSize,
122784 +                         const void* dict, size_t dictSize,
122785 +                               int compressionLevel)
122787 +    ZSTD_CCtx_params cctxParams;
122788 +    {
122789 +        ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0, ZSTD_cpm_noAttachDict);
122790 +        assert(params.fParams.contentSizeFlag == 1);
122791 +        ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT: compressionLevel);
122792 +    }
122793 +    DEBUGLOG(4, "ZSTD_compress_usingDict (srcSize=%u)", (unsigned)srcSize);
122794 +    return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctxParams);
122797 +size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
122798 +                         void* dst, size_t dstCapacity,
122799 +                   const void* src, size_t srcSize,
122800 +                         int compressionLevel)
122802 +    DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize);
122803 +    assert(cctx != NULL);
122804 +    return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
122807 +size_t ZSTD_compress(void* dst, size_t dstCapacity,
122808 +               const void* src, size_t srcSize,
122809 +                     int compressionLevel)
122811 +    size_t result;
122812 +    ZSTD_CCtx* cctx = ZSTD_createCCtx();
122813 +    RETURN_ERROR_IF(!cctx, memory_allocation, "ZSTD_createCCtx failed");
122814 +    result = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, compressionLevel);
122815 +    ZSTD_freeCCtx(cctx);
122816 +    return result;
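/* Usage sketch for the one-shot ZSTD_compress() defined above. Level 3 and
 * the error handling are illustrative; ZSTD_isError() distinguishes error
 * codes from valid compressed sizes. */
static size_t compressOnce_sketch(void* dst, size_t dstCapacity,
                                  const void* src, size_t srcSize)
{
    size_t const cSize = ZSTD_compress(dst, dstCapacity, src, srcSize, 3);
    if (ZSTD_isError(cSize))
        return 0;  /* caller-defined error handling */
    return cSize;
}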
122820 +/* =====  Dictionary API  ===== */
122822 +/*! ZSTD_estimateCDictSize_advanced() :
122823 + *  Estimate the amount of memory needed to create a dictionary with the following arguments
122824 +size_t ZSTD_estimateCDictSize_advanced(
122825 +        size_t dictSize, ZSTD_compressionParameters cParams,
122826 +        ZSTD_dictLoadMethod_e dictLoadMethod)
122828 +    DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
122829 +    return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
122830 +         + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
122831 +         + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
122832 +         + (dictLoadMethod == ZSTD_dlm_byRef ? 0
122833 +            : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *))));
122836 +size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
122838 +    ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
122839 +    return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
122842 +size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
122844 +    if (cdict==NULL) return 0;   /* support sizeof on NULL */
122845 +    DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict));
122846 +    /* cdict may be in the workspace */
122847 +    return (cdict->workspace.workspace == cdict ? 0 : sizeof(*cdict))
122848 +        + ZSTD_cwksp_sizeof(&cdict->workspace);
122851 +static size_t ZSTD_initCDict_internal(
122852 +                    ZSTD_CDict* cdict,
122853 +              const void* dictBuffer, size_t dictSize,
122854 +                    ZSTD_dictLoadMethod_e dictLoadMethod,
122855 +                    ZSTD_dictContentType_e dictContentType,
122856 +                    ZSTD_CCtx_params params)
122858 +    DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
122859 +    assert(!ZSTD_checkCParams(params.cParams));
122860 +    cdict->matchState.cParams = params.cParams;
122861 +    cdict->matchState.dedicatedDictSearch = params.enableDedicatedDictSearch;
122862 +    if (cdict->matchState.dedicatedDictSearch && dictSize > ZSTD_CHUNKSIZE_MAX) {
122863 +        cdict->matchState.dedicatedDictSearch = 0;
122864 +    }
122865 +    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
122866 +        cdict->dictContent = dictBuffer;
122867 +    } else {
122868 +        void* internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*)));
122869 +        RETURN_ERROR_IF(!internalBuffer, memory_allocation, "NULL pointer!");
122870 +        cdict->dictContent = internalBuffer;
122871 +        ZSTD_memcpy(internalBuffer, dictBuffer, dictSize);
122872 +    }
122873 +    cdict->dictContentSize = dictSize;
122874 +    cdict->dictContentType = dictContentType;
122876 +    cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE);
122879 +    /* Reset the state to no dictionary */
122880 +    ZSTD_reset_compressedBlockState(&cdict->cBlockState);
122881 +    FORWARD_IF_ERROR(ZSTD_reset_matchState(
122882 +        &cdict->matchState,
122883 +        &cdict->workspace,
122884 +        &params.cParams,
122885 +        ZSTDcrp_makeClean,
122886 +        ZSTDirp_reset,
122887 +        ZSTD_resetTarget_CDict), "");
122888 +    /* (Maybe) load the dictionary
122889 +     * Skips loading the dictionary if it is < 8 bytes.
122890 +     */
122891 +    {   params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
122892 +        params.fParams.contentSizeFlag = 1;
122893 +        {   size_t const dictID = ZSTD_compress_insertDictionary(
122894 +                    &cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace,
122895 +                    &params, cdict->dictContent, cdict->dictContentSize,
122896 +                    dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace);
122897 +            FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
122898 +            assert(dictID <= (size_t)(U32)-1);
122899 +            cdict->dictID = (U32)dictID;
122900 +        }
122901 +    }
122903 +    return 0;
122906 +static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
122907 +                                      ZSTD_dictLoadMethod_e dictLoadMethod,
122908 +                                      ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
122910 +    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
122912 +    {   size_t const workspaceSize =
122913 +            ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +
122914 +            ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) +
122915 +            ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0) +
122916 +            (dictLoadMethod == ZSTD_dlm_byRef ? 0
122917 +             : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))));
122918 +        void* const workspace = ZSTD_customMalloc(workspaceSize, customMem);
122919 +        ZSTD_cwksp ws;
122920 +        ZSTD_CDict* cdict;
122922 +        if (!workspace) {
122923 +            ZSTD_customFree(workspace, customMem);
122924 +            return NULL;
122925 +        }
122927 +        ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_dynamic_alloc);
122929 +        cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
122930 +        assert(cdict != NULL);
122931 +        ZSTD_cwksp_move(&cdict->workspace, &ws);
122932 +        cdict->customMem = customMem;
122933 +        cdict->compressionLevel = ZSTD_NO_CLEVEL; /* signals advanced API usage */
122935 +        return cdict;
122936 +    }
122939 +ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
122940 +                                      ZSTD_dictLoadMethod_e dictLoadMethod,
122941 +                                      ZSTD_dictContentType_e dictContentType,
122942 +                                      ZSTD_compressionParameters cParams,
122943 +                                      ZSTD_customMem customMem)
122945 +    ZSTD_CCtx_params cctxParams;
122946 +    ZSTD_memset(&cctxParams, 0, sizeof(cctxParams));
122947 +    ZSTD_CCtxParams_init(&cctxParams, 0);
122948 +    cctxParams.cParams = cParams;
122949 +    cctxParams.customMem = customMem;
122950 +    return ZSTD_createCDict_advanced2(
122951 +        dictBuffer, dictSize,
122952 +        dictLoadMethod, dictContentType,
122953 +        &cctxParams, customMem);
122956 +ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced2(
122957 +        const void* dict, size_t dictSize,
122958 +        ZSTD_dictLoadMethod_e dictLoadMethod,
122959 +        ZSTD_dictContentType_e dictContentType,
122960 +        const ZSTD_CCtx_params* originalCctxParams,
122961 +        ZSTD_customMem customMem)
122963 +    ZSTD_CCtx_params cctxParams = *originalCctxParams;
122964 +    ZSTD_compressionParameters cParams;
122965 +    ZSTD_CDict* cdict;
122967 +    DEBUGLOG(3, "ZSTD_createCDict_advanced2, mode %u", (unsigned)dictContentType);
122968 +    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
122970 +    if (cctxParams.enableDedicatedDictSearch) {
122971 +        cParams = ZSTD_dedicatedDictSearch_getCParams(
122972 +            cctxParams.compressionLevel, dictSize);
122973 +        ZSTD_overrideCParams(&cParams, &cctxParams.cParams);
122974 +    } else {
122975 +        cParams = ZSTD_getCParamsFromCCtxParams(
122976 +            &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
122977 +    }
122979 +    if (!ZSTD_dedicatedDictSearch_isSupported(&cParams)) {
122980 +        /* Fall back to non-DDSS params */
122981 +        cctxParams.enableDedicatedDictSearch = 0;
122982 +        cParams = ZSTD_getCParamsFromCCtxParams(
122983 +            &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
122984 +    }
122986 +    cctxParams.cParams = cParams;
122988 +    cdict = ZSTD_createCDict_advanced_internal(dictSize,
122989 +                        dictLoadMethod, cctxParams.cParams,
122990 +                        customMem);
122992 +    if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
122993 +                                    dict, dictSize,
122994 +                                    dictLoadMethod, dictContentType,
122995 +                                    cctxParams) )) {
122996 +        ZSTD_freeCDict(cdict);
122997 +        return NULL;
122998 +    }
123000 +    return cdict;
123003 +ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
123005 +    ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
123006 +    ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
123007 +                                                  ZSTD_dlm_byCopy, ZSTD_dct_auto,
123008 +                                                  cParams, ZSTD_defaultCMem);
123009 +    if (cdict)
123010 +        cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
123011 +    return cdict;
123014 +ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
123016 +    ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
123017 +    ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
123018 +                                     ZSTD_dlm_byRef, ZSTD_dct_auto,
123019 +                                     cParams, ZSTD_defaultCMem);
123020 +    if (cdict)
123021 +        cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
123022 +    return cdict;
123025 +size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
123027 +    if (cdict==NULL) return 0;   /* support free on NULL */
123028 +    {   ZSTD_customMem const cMem = cdict->customMem;
123029 +        int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict);
123030 +        ZSTD_cwksp_free(&cdict->workspace, cMem);
123031 +        if (!cdictInWorkspace) {
123032 +            ZSTD_customFree(cdict, cMem);
123033 +        }
123034 +        return 0;
123035 +    }
123038 +/*! ZSTD_initStaticCDict_advanced() :
123039 + *  Generate a digested dictionary in provided memory area.
123040 + *  workspace: The memory area to emplace the dictionary into.
123041 + *             The provided pointer must be 8-byte aligned.
123042 + *             It must outlive dictionary usage.
123043 + *  workspaceSize: Use ZSTD_estimateCDictSize()
123044 + *                 to determine how large workspace must be.
123045 + *  cParams : use ZSTD_getCParams() to transform a compression level
123046 + *            into its relevant cParams.
123047 + * @return : pointer to ZSTD_CDict, or NULL on error (size too small)
123048 + *  Note : there is no corresponding "free" function.
123049 + *         Since workspace was allocated externally, it must be freed externally.
123050 + */
123051 +const ZSTD_CDict* ZSTD_initStaticCDict(
123052 +                                 void* workspace, size_t workspaceSize,
123053 +                           const void* dict, size_t dictSize,
123054 +                                 ZSTD_dictLoadMethod_e dictLoadMethod,
123055 +                                 ZSTD_dictContentType_e dictContentType,
123056 +                                 ZSTD_compressionParameters cParams)
123058 +    size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
123059 +    size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
123060 +                            + (dictLoadMethod == ZSTD_dlm_byRef ? 0
123061 +                               : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))))
123062 +                            + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
123063 +                            + matchStateSize;
123064 +    ZSTD_CDict* cdict;
123065 +    ZSTD_CCtx_params params;
123067 +    if ((size_t)workspace & 7) return NULL;  /* must be 8-byte aligned */
123069 +    {
123070 +        ZSTD_cwksp ws;
123071 +        ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
123072 +        cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
123073 +        if (cdict == NULL) return NULL;
123074 +        ZSTD_cwksp_move(&cdict->workspace, &ws);
123075 +    }
123077 +    DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
123078 +        (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
123079 +    if (workspaceSize < neededSize) return NULL;
123081 +    ZSTD_CCtxParams_init(&params, 0);
123082 +    params.cParams = cParams;
123084 +    if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
123085 +                                              dict, dictSize,
123086 +                                              dictLoadMethod, dictContentType,
123087 +                                              params) ))
123088 +        return NULL;
123090 +    return cdict;
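/* Usage sketch for ZSTD_initStaticCDict(): an allocation-free digested
 * dictionary placed in caller-provided, 8-byte-aligned storage. The level and
 * sizing are illustrative; ZSTD_estimateCDictSize_advanced() (defined earlier
 * in this file) gives the required workspace size. */
static const ZSTD_CDict* makeStaticCDict_sketch(void* wksp, size_t wkspSize,
                                                const void* dict, size_t dictSize)
{
    ZSTD_compressionParameters const cParams = ZSTD_getCParams(3, 0, dictSize);
    if (wkspSize < ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy))
        return NULL;
    return ZSTD_initStaticCDict(wksp, wkspSize, dict, dictSize,
                                ZSTD_dlm_byCopy, ZSTD_dct_auto, cParams);
}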
123093 +ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
123095 +    assert(cdict != NULL);
123096 +    return cdict->matchState.cParams;
123099 +/*! ZSTD_getDictID_fromCDict() :
123100 + *  Provides the dictID of the dictionary loaded into `cdict`.
123101 + *  If @return == 0, the dictionary is not conformant to the Zstandard specification, or empty.
123102 + *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
123103 +unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict)
123105 +    if (cdict==NULL) return 0;
123106 +    return cdict->dictID;
123110 +/* ZSTD_compressBegin_usingCDict_advanced() :
123111 + * cdict must be != NULL */
123112 +size_t ZSTD_compressBegin_usingCDict_advanced(
123113 +    ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
123114 +    ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
123116 +    ZSTD_CCtx_params cctxParams;
123117 +    DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced");
123118 +    RETURN_ERROR_IF(cdict==NULL, dictionary_wrong, "NULL pointer!");
123119 +    /* Initialize the cctxParams from the cdict */
123120 +    {
123121 +        ZSTD_parameters params;
123122 +        params.fParams = fParams;
123123 +        params.cParams = ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
123124 +                        || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
123125 +                        || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
123126 +                        || cdict->compressionLevel == 0 ) ?
123127 +                ZSTD_getCParamsFromCDict(cdict)
123128 +              : ZSTD_getCParams(cdict->compressionLevel,
123129 +                                pledgedSrcSize,
123130 +                                cdict->dictContentSize);
123131 +        ZSTD_CCtxParams_init_internal(&cctxParams, &params, cdict->compressionLevel);
123132 +    }
123133 +    /* Increase window log to fit the entire dictionary and source if the
123134 +     * source size is known. Limit the increase to 19, which is the
123135 +     * window log for compression level 1 with the largest source size.
123136 +     */
123137 +    if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
123138 +        U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
123139 +        U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
123140 +        cctxParams.cParams.windowLog = MAX(cctxParams.cParams.windowLog, limitedSrcLog);
123141 +    }
123142 +    return ZSTD_compressBegin_internal(cctx,
123143 +                                        NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
123144 +                                        cdict,
123145 +                                        &cctxParams, pledgedSrcSize,
123146 +                                        ZSTDb_not_buffered);
123149 +/* ZSTD_compressBegin_usingCDict() :
123150 + * pledgedSrcSize=0 means "unknown"
123151 + * if pledgedSrcSize>0, it will enable contentSizeFlag */
123152 +size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
123154 +    ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
123155 +    DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag);
123156 +    return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
123159 +size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
123160 +                                void* dst, size_t dstCapacity,
123161 +                                const void* src, size_t srcSize,
123162 +                                const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
123164 +    FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize), "");   /* will check if cdict != NULL */
123165 +    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
123168 +/*! ZSTD_compress_usingCDict() :
123169 + *  Compression using a digested Dictionary.
123170 + *  Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
123171 + *  Note that compression parameters are decided at CDict creation time
123172 + *  while frame parameters are hardcoded */
123173 +size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
123174 +                                void* dst, size_t dstCapacity,
123175 +                                const void* src, size_t srcSize,
123176 +                                const ZSTD_CDict* cdict)
123178 +    ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
123179 +    return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
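+/*
+ * Sketch: amortizing the dictionary cost across many frames, the use case the
+ * comment above recommends (illustrative only; the helper name and buffer
+ * layout are assumptions):
+ */
+static size_t example_compressMany(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict,
+                                   void* dst, size_t dstCapacity,
+                                   const void* srcs[], const size_t srcSizes[],
+                                   size_t nbFrames)
+{
+    size_t i;
+    for (i = 0; i < nbFrames; i++) {
+        /* the CDict is digested once; each call only resets the session */
+        size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
+                                                      srcs[i], srcSizes[i], cdict);
+        if (ZSTD_isError(cSize)) return cSize;
+        /* ... hand off dst[0..cSize) before reusing the buffer ... */
+    }
+    return 0;
+}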
123184 +/* ******************************************************************
123185 +*  Streaming
123186 +********************************************************************/
123188 +ZSTD_CStream* ZSTD_createCStream(void)
123190 +    DEBUGLOG(3, "ZSTD_createCStream");
123191 +    return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
123194 +ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)
123196 +    return ZSTD_initStaticCCtx(workspace, workspaceSize);
123199 +ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
123200 +{   /* CStream and CCtx are now the same object */
123201 +    return ZSTD_createCCtx_advanced(customMem);
123204 +size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
123206 +    return ZSTD_freeCCtx(zcs);   /* same object */
123211 +/*======   Initialization   ======*/
123213 +size_t ZSTD_CStreamInSize(void)  { return ZSTD_BLOCKSIZE_MAX; }
123215 +size_t ZSTD_CStreamOutSize(void)
123217 +    return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;
123220 +static ZSTD_cParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize)
123222 +    if (cdict != NULL && ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize))
123223 +        return ZSTD_cpm_attachDict;
123224 +    else
123225 +        return ZSTD_cpm_noAttachDict;
123228 +/* ZSTD_resetCStream():
123229 + * pledgedSrcSize == 0 means "unknown" */
123230 +size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss)
123232 +    /* temporary : 0 interpreted as "unknown" during transition period.
123233 +     * Users wishing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
123234 +     * 0 will be interpreted as "empty" in the future.
123235 +     */
123236 +    U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
123237 +    DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize);
123238 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
123239 +    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
123240 +    return 0;
123243 +/*! ZSTD_initCStream_internal() :
123244 + *  Note : for lib/compress only. Used by zstdmt_compress.c.
123245 + *  Assumption 1 : params are valid
123246 + *  Assumption 2 : either dict, or cdict, is defined, not both */
123247 +size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
123248 +                    const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
123249 +                    const ZSTD_CCtx_params* params,
123250 +                    unsigned long long pledgedSrcSize)
123252 +    DEBUGLOG(4, "ZSTD_initCStream_internal");
123253 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
123254 +    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
123255 +    assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
123256 +    zcs->requestedParams = *params;
123257 +    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
123258 +    if (dict) {
123259 +        FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
123260 +    } else {
123261 +        /* Dictionary is cleared if !cdict */
123262 +        FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
123263 +    }
123264 +    return 0;
123267 +/* ZSTD_initCStream_usingCDict_advanced() :
123268 + * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
123269 +size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
123270 +                                            const ZSTD_CDict* cdict,
123271 +                                            ZSTD_frameParameters fParams,
123272 +                                            unsigned long long pledgedSrcSize)
123274 +    DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
123275 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
123276 +    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
123277 +    zcs->requestedParams.fParams = fParams;
123278 +    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
123279 +    return 0;
123282 +/* note : cdict must outlive compression session */
123283 +size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
123285 +    DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
123286 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
123287 +    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
123288 +    return 0;
123292 +/* ZSTD_initCStream_advanced() :
123293 + * pledgedSrcSize must be exact.
123294 + * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
123295 + * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */
123296 +size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
123297 +                                 const void* dict, size_t dictSize,
123298 +                                 ZSTD_parameters params, unsigned long long pss)
123300 +    /* for compatibility with older programs relying on this behavior.
123301 +     * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN.
123302 +     * This line will be removed in the future.
123303 +     */
123304 +    U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
123305 +    DEBUGLOG(4, "ZSTD_initCStream_advanced");
123306 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
123307 +    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
123308 +    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
123309 +    ZSTD_CCtxParams_setZstdParams(&zcs->requestedParams, &params);
123310 +    FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
123311 +    return 0;
123314 +size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
123316 +    DEBUGLOG(4, "ZSTD_initCStream_usingDict");
123317 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
123318 +    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
123319 +    FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
123320 +    return 0;
123323 +size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
123325 +    /* temporary : 0 interpreted as "unknown" during transition period.
123326 +     * Users wishing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
123327 +     * 0 will be interpreted as "empty" in the future.
123328 +     */
123329 +    U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
123330 +    DEBUGLOG(4, "ZSTD_initCStream_srcSize");
123331 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
123332 +    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
123333 +    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
123334 +    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
123335 +    return 0;
123338 +size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
123340 +    DEBUGLOG(4, "ZSTD_initCStream");
123341 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
123342 +    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
123343 +    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
123344 +    return 0;
123347 +/*======   Compression   ======*/
123349 +static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)
123351 +    size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;
123352 +    if (hintInSize==0) hintInSize = cctx->blockSize;
123353 +    return hintInSize;
123356 +/** ZSTD_compressStream_generic():
123357 + *  internal function for all *compressStream*() variants
123358 + *  static in this port; upstream it is non-static so that zstdmt_compress.c can call it
123359 + * @return : hint size for next input */
123360 +static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
123361 +                                          ZSTD_outBuffer* output,
123362 +                                          ZSTD_inBuffer* input,
123363 +                                          ZSTD_EndDirective const flushMode)
123365 +    const char* const istart = (const char*)input->src;
123366 +    const char* const iend = input->size != 0 ? istart + input->size : istart;
123367 +    const char* ip = input->pos != 0 ? istart + input->pos : istart;
123368 +    char* const ostart = (char*)output->dst;
123369 +    char* const oend = output->size != 0 ? ostart + output->size : ostart;
123370 +    char* op = output->pos != 0 ? ostart + output->pos : ostart;
123371 +    U32 someMoreWork = 1;
123373 +    /* check expectations */
123374 +    DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode);
123375 +    if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
123376 +        assert(zcs->inBuff != NULL);
123377 +        assert(zcs->inBuffSize > 0);
123378 +    }
123379 +    if (zcs->appliedParams.outBufferMode == ZSTD_bm_buffered) {
123380 +        assert(zcs->outBuff !=  NULL);
123381 +        assert(zcs->outBuffSize > 0);
123382 +    }
123383 +    assert(output->pos <= output->size);
123384 +    assert(input->pos <= input->size);
123385 +    assert((U32)flushMode <= (U32)ZSTD_e_end);
123387 +    while (someMoreWork) {
123388 +        switch(zcs->streamStage)
123389 +        {
123390 +        case zcss_init:
123391 +            RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!");
123393 +        case zcss_load:
123394 +            if ( (flushMode == ZSTD_e_end)
123395 +              && ( (size_t)(oend-op) >= ZSTD_compressBound(iend-ip)     /* Enough output space */
123396 +                || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)  /* OR we are allowed to return dstSizeTooSmall */
123397 +              && (zcs->inBuffPos == 0) ) {
123398 +                /* shortcut to compression pass directly into output buffer */
123399 +                size_t const cSize = ZSTD_compressEnd(zcs,
123400 +                                                op, oend-op, ip, iend-ip);
123401 +                DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
123402 +                FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed");
123403 +                ip = iend;
123404 +                op += cSize;
123405 +                zcs->frameEnded = 1;
123406 +                ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
123407 +                someMoreWork = 0; break;
123408 +            }
123409 +            /* complete loading into inBuffer in buffered mode */
123410 +            if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
123411 +                size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
123412 +                size_t const loaded = ZSTD_limitCopy(
123413 +                                        zcs->inBuff + zcs->inBuffPos, toLoad,
123414 +                                        ip, iend-ip);
123415 +                zcs->inBuffPos += loaded;
123416 +                if (loaded != 0)
123417 +                    ip += loaded;
123418 +                if ( (flushMode == ZSTD_e_continue)
123419 +                  && (zcs->inBuffPos < zcs->inBuffTarget) ) {
123420 +                    /* not enough input to fill full block : stop here */
123421 +                    someMoreWork = 0; break;
123422 +                }
123423 +                if ( (flushMode == ZSTD_e_flush)
123424 +                  && (zcs->inBuffPos == zcs->inToCompress) ) {
123425 +                    /* empty */
123426 +                    someMoreWork = 0; break;
123427 +                }
123428 +            }
123429 +            /* compress current block (note : this stage cannot be stopped in the middle) */
123430 +            DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
123431 +            {   int const inputBuffered = (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered);
123432 +                void* cDst;
123433 +                size_t cSize;
123434 +                size_t oSize = oend-op;
123435 +                size_t const iSize = inputBuffered
123436 +                    ? zcs->inBuffPos - zcs->inToCompress
123437 +                    : MIN((size_t)(iend - ip), zcs->blockSize);
123438 +                if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)
123439 +                    cDst = op;   /* compress into output buffer, to skip flush stage */
123440 +                else
123441 +                    cDst = zcs->outBuff, oSize = zcs->outBuffSize;
123442 +                if (inputBuffered) {
123443 +                    unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
123444 +                    cSize = lastBlock ?
123445 +                            ZSTD_compressEnd(zcs, cDst, oSize,
123446 +                                        zcs->inBuff + zcs->inToCompress, iSize) :
123447 +                            ZSTD_compressContinue(zcs, cDst, oSize,
123448 +                                        zcs->inBuff + zcs->inToCompress, iSize);
123449 +                    FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
123450 +                    zcs->frameEnded = lastBlock;
123451 +                    /* prepare next block */
123452 +                    zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
123453 +                    if (zcs->inBuffTarget > zcs->inBuffSize)
123454 +                        zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
123455 +                    DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
123456 +                            (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
123457 +                    if (!lastBlock)
123458 +                        assert(zcs->inBuffTarget <= zcs->inBuffSize);
123459 +                    zcs->inToCompress = zcs->inBuffPos;
123460 +                } else {
123461 +                    unsigned const lastBlock = (ip + iSize == iend);
123462 +                    assert(flushMode == ZSTD_e_end /* Already validated */);
123463 +                    cSize = lastBlock ?
123464 +                            ZSTD_compressEnd(zcs, cDst, oSize, ip, iSize) :
123465 +                            ZSTD_compressContinue(zcs, cDst, oSize, ip, iSize);
123466 +                    /* Consume the input prior to error checking to mirror buffered mode. */
123467 +                    if (iSize > 0)
123468 +                        ip += iSize;
123469 +                    FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
123470 +                    zcs->frameEnded = lastBlock;
123471 +                    if (lastBlock)
123472 +                        assert(ip == iend);
123473 +                }
123474 +                if (cDst == op) {  /* no need to flush */
123475 +                    op += cSize;
123476 +                    if (zcs->frameEnded) {
123477 +                        DEBUGLOG(5, "Frame completed directly in outBuffer");
123478 +                        someMoreWork = 0;
123479 +                        ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
123480 +                    }
123481 +                    break;
123482 +                }
123483 +                zcs->outBuffContentSize = cSize;
123484 +                zcs->outBuffFlushedSize = 0;
123485 +                zcs->streamStage = zcss_flush; /* pass-through to flush stage */
123486 +            }
123487 +           /* fall-through */
123488 +        case zcss_flush:
123489 +            DEBUGLOG(5, "flush stage");
123490 +            assert(zcs->appliedParams.outBufferMode == ZSTD_bm_buffered);
123491 +            {   size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
123492 +                size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op),
123493 +                            zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
123494 +                DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
123495 +                            (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed);
123496 +                if (flushed)
123497 +                    op += flushed;
123498 +                zcs->outBuffFlushedSize += flushed;
123499 +                if (toFlush!=flushed) {
123500 +                    /* flush not fully completed, presumably because dst is too small */
123501 +                    assert(op==oend);
123502 +                    someMoreWork = 0;
123503 +                    break;
123504 +                }
123505 +                zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
123506 +                if (zcs->frameEnded) {
123507 +                    DEBUGLOG(5, "Frame completed on flush");
123508 +                    someMoreWork = 0;
123509 +                    ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
123510 +                    break;
123511 +                }
123512 +                zcs->streamStage = zcss_load;
123513 +                break;
123514 +            }
123516 +        default: /* impossible */
123517 +            assert(0);
123518 +        }
123519 +    }
123521 +    input->pos = ip - istart;
123522 +    output->pos = op - ostart;
123523 +    if (zcs->frameEnded) return 0;
123524 +    return ZSTD_nextInputSizeHint(zcs);
123527 +static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx)
123529 +    return ZSTD_nextInputSizeHint(cctx);
123533 +size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
123535 +    FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) , "");
123536 +    return ZSTD_nextInputSizeHint_MTorST(zcs);
123539 +/* After a compression call, set the expected input/output buffers.
123540 + * This is validated at the start of the next compression call.
123541 + */
123542 +static void ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, ZSTD_outBuffer const* output, ZSTD_inBuffer const* input)
123544 +    if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
123545 +        cctx->expectedInBuffer = *input;
123546 +    }
123547 +    if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
123548 +        cctx->expectedOutBufferSize = output->size - output->pos;
123549 +    }
123552 +/* Validate that the input/output buffers match the expectations set by
123553 + * ZSTD_setBufferExpectations.
123554 + */
123555 +static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx,
123556 +                                        ZSTD_outBuffer const* output,
123557 +                                        ZSTD_inBuffer const* input,
123558 +                                        ZSTD_EndDirective endOp)
123560 +    if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
123561 +        ZSTD_inBuffer const expect = cctx->expectedInBuffer;
123562 +        if (expect.src != input->src || expect.pos != input->pos || expect.size != input->size)
123563 +            RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer enabled but input differs!");
123564 +        if (endOp != ZSTD_e_end)
123565 +            RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer can only be used with ZSTD_e_end!");
123566 +    }
123567 +    if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
123568 +        size_t const outBufferSize = output->size - output->pos;
123569 +        if (cctx->expectedOutBufferSize != outBufferSize)
123570 +            RETURN_ERROR(dstBuffer_wrong, "ZSTD_c_stableOutBuffer enabled but output size differs!");
123571 +    }
123572 +    return 0;
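+/*
+ * Illustrative consequence of the checks above (assuming ZSTD_c_stableInBuffer
+ * and/or ZSTD_c_stableOutBuffer were enabled): the caller must keep passing
+ * the exact same ZSTD_inBuffer (same .src and .size, with .pos exactly where
+ * the previous call left it) together with ZSTD_e_end, and an output buffer
+ * whose free space (size - pos) never changes between calls; any deviation
+ * fails with srcBuffer_wrong / dstBuffer_wrong.
+ */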
123575 +static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
123576 +                                             ZSTD_EndDirective endOp,
123577 +                                             size_t inSize) {
123578 +    ZSTD_CCtx_params params = cctx->requestedParams;
123579 +    ZSTD_prefixDict const prefixDict = cctx->prefixDict;
123580 +    FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. */
123581 +    ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));   /* single usage */
123582 +    assert(prefixDict.dict==NULL || cctx->cdict==NULL);    /* only one can be set */
123583 +    if (cctx->cdict)
123584 +        params.compressionLevel = cctx->cdict->compressionLevel; /* let cdict take priority in terms of compression level */
123585 +    DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
123586 +    if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1;  /* auto-fix pledgedSrcSize */
123587 +    {
123588 +        size_t const dictSize = prefixDict.dict
123589 +                ? prefixDict.dictSize
123590 +                : (cctx->cdict ? cctx->cdict->dictContentSize : 0);
123591 +        ZSTD_cParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, &params, cctx->pledgedSrcSizePlusOne - 1);
123592 +        params.cParams = ZSTD_getCParamsFromCCtxParams(
123593 +                &params, cctx->pledgedSrcSizePlusOne-1,
123594 +                dictSize, mode);
123595 +    }
123597 +    if (ZSTD_CParams_shouldEnableLdm(&params.cParams)) {
123598 +        /* Enable LDM by default for optimal parser and window size >= 128MB */
123599 +        DEBUGLOG(4, "LDM enabled by default (window size >= 128MB, strategy >= btopt)");
123600 +        params.ldmParams.enableLdm = 1;
123601 +    }
123603 +    {   U64 const pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1;
123604 +        assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
123605 +        FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
123606 +                prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, ZSTD_dtlm_fast,
123607 +                cctx->cdict,
123608 +                &params, pledgedSrcSize,
123609 +                ZSTDb_buffered) , "");
123610 +        assert(cctx->appliedParams.nbWorkers == 0);
123611 +        cctx->inToCompress = 0;
123612 +        cctx->inBuffPos = 0;
123613 +        if (cctx->appliedParams.inBufferMode == ZSTD_bm_buffered) {
123614 +            /* for small input: avoid automatic flush on reaching end of block, since
123615 +             * it would require adding a 3-byte null block to end the frame
123616 +             */
123617 +            cctx->inBuffTarget = cctx->blockSize + (cctx->blockSize == pledgedSrcSize);
123618 +        } else {
123619 +            cctx->inBuffTarget = 0;
123620 +        }
123621 +        cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0;
123622 +        cctx->streamStage = zcss_load;
123623 +        cctx->frameEnded = 0;
123624 +    }
123625 +    return 0;
123628 +size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
123629 +                             ZSTD_outBuffer* output,
123630 +                             ZSTD_inBuffer* input,
123631 +                             ZSTD_EndDirective endOp)
123633 +    DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
123634 +    /* check conditions */
123635 +    RETURN_ERROR_IF(output->pos > output->size, dstSize_tooSmall, "invalid output buffer");
123636 +    RETURN_ERROR_IF(input->pos  > input->size, srcSize_wrong, "invalid input buffer");
123637 +    RETURN_ERROR_IF((U32)endOp > (U32)ZSTD_e_end, parameter_outOfBound, "invalid endDirective");
123638 +    assert(cctx != NULL);
123640 +    /* transparent initialization stage */
123641 +    if (cctx->streamStage == zcss_init) {
123642 +        FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, input->size), "CompressStream2 initialization failed");
123643 +        ZSTD_setBufferExpectations(cctx, output, input);    /* Set initial buffer expectations now that we've initialized */
123644 +    }
123645 +    /* end of transparent initialization stage */
123647 +    FORWARD_IF_ERROR(ZSTD_checkBufferStability(cctx, output, input, endOp), "invalid buffers");
123648 +    /* compression stage */
123649 +    FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) , "");
123650 +    DEBUGLOG(5, "completed ZSTD_compressStream2");
123651 +    ZSTD_setBufferExpectations(cctx, output, input);
123652 +    return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
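+/*
+ * Sketch of the canonical ZSTD_compressStream2() loop (illustrative only, not
+ * part of this patch; read_more()/write_out() are hypothetical I/O helpers,
+ * and inBuf/outBuf are assumed to hold ZSTD_CStreamInSize() and
+ * ZSTD_CStreamOutSize() bytes respectively):
+ */
+extern size_t read_more(void* buf, size_t cap, int* lastChunk);  /* assumption */
+extern void write_out(const void* buf, size_t size);             /* assumption */
+static size_t example_streamLoop(ZSTD_CCtx* cctx, void* inBuf, void* outBuf)
+{
+    int lastChunk = 0;
+    while (!lastChunk) {
+        size_t const readSize = read_more(inBuf, ZSTD_CStreamInSize(), &lastChunk);
+        ZSTD_EndDirective const mode = lastChunk ? ZSTD_e_end : ZSTD_e_continue;
+        ZSTD_inBuffer input = { inBuf, readSize, 0 };
+        int finished = 0;
+        do {
+            ZSTD_outBuffer output = { outBuf, ZSTD_CStreamOutSize(), 0 };
+            size_t const remaining = ZSTD_compressStream2(cctx, &output, &input, mode);
+            if (ZSTD_isError(remaining)) return remaining;
+            write_out(outBuf, output.pos);
+            /* on ZSTD_e_end, loop until the frame is fully flushed (remaining == 0) */
+            finished = lastChunk ? (remaining == 0) : (input.pos == input.size);
+        } while (!finished);
+    }
+    return 0;
+}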
123655 +size_t ZSTD_compressStream2_simpleArgs (
123656 +                            ZSTD_CCtx* cctx,
123657 +                            void* dst, size_t dstCapacity, size_t* dstPos,
123658 +                      const void* src, size_t srcSize, size_t* srcPos,
123659 +                            ZSTD_EndDirective endOp)
123661 +    ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
123662 +    ZSTD_inBuffer  input  = { src, srcSize, *srcPos };
123663 +    /* ZSTD_compressStream2() will check validity of dstPos and srcPos */
123664 +    size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
123665 +    *dstPos = output.pos;
123666 +    *srcPos = input.pos;
123667 +    return cErr;
123670 +size_t ZSTD_compress2(ZSTD_CCtx* cctx,
123671 +                      void* dst, size_t dstCapacity,
123672 +                      const void* src, size_t srcSize)
123674 +    ZSTD_bufferMode_e const originalInBufferMode = cctx->requestedParams.inBufferMode;
123675 +    ZSTD_bufferMode_e const originalOutBufferMode = cctx->requestedParams.outBufferMode;
123676 +    DEBUGLOG(4, "ZSTD_compress2 (srcSize=%u)", (unsigned)srcSize);
123677 +    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
123678 +    /* Enable stable input/output buffers. */
123679 +    cctx->requestedParams.inBufferMode = ZSTD_bm_stable;
123680 +    cctx->requestedParams.outBufferMode = ZSTD_bm_stable;
123681 +    {   size_t oPos = 0;
123682 +        size_t iPos = 0;
123683 +        size_t const result = ZSTD_compressStream2_simpleArgs(cctx,
123684 +                                        dst, dstCapacity, &oPos,
123685 +                                        src, srcSize, &iPos,
123686 +                                        ZSTD_e_end);
123687 +        /* Reset to the original values. */
123688 +        cctx->requestedParams.inBufferMode = originalInBufferMode;
123689 +        cctx->requestedParams.outBufferMode = originalOutBufferMode;
123690 +        FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed");
123691 +        if (result != 0) {  /* compression not completed, due to lack of output space */
123692 +            assert(oPos == dstCapacity);
123693 +            RETURN_ERROR(dstSize_tooSmall, "");
123694 +        }
123695 +        assert(iPos == srcSize);   /* all input is expected to be consumed */
123696 +        return oPos;
123697 +    }
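+/*
+ * Sketch: one-shot compression with advanced parameters via ZSTD_compress2()
+ * (illustrative only; the level and checksum choices are assumptions):
+ */
+static size_t example_compress2(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity,
+                                const void* src, size_t srcSize)
+{
+    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters);
+    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 5);
+    ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);   /* append a 32-bit checksum */
+    return ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
+}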
123700 +typedef struct {
123701 +    U32 idx;             /* Index in array of ZSTD_Sequence */
123702 +    U32 posInSequence;   /* Position within sequence at idx */
123703 +    size_t posInSrc;        /* Number of bytes given by sequences provided so far */
123704 +} ZSTD_sequencePosition;
123706 +/* Returns a ZSTD error code if the sequence is not valid */
123707 +static size_t ZSTD_validateSequence(U32 offCode, U32 matchLength,
123708 +                                    size_t posInSrc, U32 windowLog, size_t dictSize, U32 minMatch) {
123709 +    size_t offsetBound;
123710 +    U32 const windowSize = 1u << windowLog;   /* 1u avoids undefined behavior when windowLog == 31 */
123711 +    /* posInSrc represents the amount of data the decoder would decode up to this point.
123712 +     * As long as the amount of data decoded is less than or equal to window size, offsets may be
123713 +     * larger than the total length of output decoded in order to reference the dict, even larger than
123714 +     * window size. After output surpasses windowSize, we're limited to windowSize offsets again.
123715 +     */
123716 +    offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize;
123717 +    RETURN_ERROR_IF(offCode > offsetBound + ZSTD_REP_MOVE, corruption_detected, "Offset too large!");
123718 +    RETURN_ERROR_IF(matchLength < minMatch, corruption_detected, "Matchlength too small");
123719 +    return 0;
123722 +/* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */
123723 +static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0) {
123724 +    U32 offCode = rawOffset + ZSTD_REP_MOVE;
123725 +    U32 repCode = 0;
123727 +    if (!ll0 && rawOffset == rep[0]) {
123728 +        repCode = 1;
123729 +    } else if (rawOffset == rep[1]) {
123730 +        repCode = 2 - ll0;
123731 +    } else if (rawOffset == rep[2]) {
123732 +        repCode = 3 - ll0;
123733 +    } else if (ll0 && rawOffset == rep[0] - 1) {
123734 +        repCode = 3;
123735 +    }
123736 +    if (repCode) {
123737 +        /* ZSTD_storeSeq expects a number in the range [0, 2] to represent a repcode */
123738 +        offCode = repCode - 1;
123739 +    }
123740 +    return offCode;
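+/*
+ * Worked example for the mapping above (illustrative values): with
+ * rep = {8, 4, 12}:
+ *   ll0 == 0, rawOffset == 8  -> matches rep[0]   -> repCode 1 -> offCode 0
+ *   ll0 == 0, rawOffset == 4  -> matches rep[1]   -> repCode 2 -> offCode 1
+ *   ll0 == 1, rawOffset == 7  -> matches rep[0]-1 -> repCode 3 -> offCode 2
+ *   any other rawOffset R     -> offCode R + ZSTD_REP_MOVE
+ */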
123743 +/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of
123744 + * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter.
123745 + */
123746 +static size_t ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
123747 +                                                             const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
123748 +                                                             const void* src, size_t blockSize) {
123749 +    U32 idx = seqPos->idx;
123750 +    BYTE const* ip = (BYTE const*)(src);
123751 +    const BYTE* const iend = ip + blockSize;
123752 +    repcodes_t updatedRepcodes;
123753 +    U32 dictSize;
123754 +    U32 litLength;
123755 +    U32 matchLength;
123756 +    U32 ll0;
123757 +    U32 offCode;
123759 +    if (cctx->cdict) {
123760 +        dictSize = (U32)cctx->cdict->dictContentSize;
123761 +    } else if (cctx->prefixDict.dict) {
123762 +        dictSize = (U32)cctx->prefixDict.dictSize;
123763 +    } else {
123764 +        dictSize = 0;
123765 +    }
123766 +    ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
123767 +    for (; idx < inSeqsSize && (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0); ++idx) {
123768 +        litLength = inSeqs[idx].litLength;
123769 +        matchLength = inSeqs[idx].matchLength;
123770 +        ll0 = litLength == 0;
123771 +        offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
123772 +        updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
123774 +        DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
123775 +        if (cctx->appliedParams.validateSequences) {
123776 +            seqPos->posInSrc += litLength + matchLength;
123777 +            FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
123778 +                                                cctx->appliedParams.cParams.windowLog, dictSize,
123779 +                                                cctx->appliedParams.cParams.minMatch),
123780 +                                                "Sequence validation failed");
123781 +        }
123782 +        RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
123783 +                        "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
123784 +        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength - MINMATCH);
123785 +        ip += matchLength + litLength;
123786 +    }
123787 +    ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
123789 +    if (inSeqs[idx].litLength) {
123790 +        DEBUGLOG(6, "Storing last literals of size: %u", inSeqs[idx].litLength);
123791 +        ZSTD_storeLastLiterals(&cctx->seqStore, ip, inSeqs[idx].litLength);
123792 +        ip += inSeqs[idx].litLength;
123793 +        seqPos->posInSrc += inSeqs[idx].litLength;
123794 +    }
123795 +    RETURN_ERROR_IF(ip != iend, corruption_detected, "Blocksize doesn't agree with block delimiter!");
123796 +    seqPos->idx = idx+1;
123797 +    return 0;
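+/*
+ * Illustrative input for the explicit-delimiter format handled above (the
+ * values are made up): each block's sequences end with an entry whose offset
+ * and matchLength are 0 and whose litLength carries the trailing literals:
+ *   { .offset = 5, .litLength = 10, .matchLength = 6 }   -> ordinary sequence
+ *   { .offset = 0, .litLength = 4,  .matchLength = 0 }   -> block delimiter, 4 trailing literal bytes
+ */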
123800 +/* Returns the number of bytes to move the current read position back by. This is only non-zero
123801 + * if we ended up splitting a sequence. It may instead return a ZSTD error if something
123802 + * went wrong.
123804 + * This function will attempt to scan through blockSize bytes represented by the sequences
123805 + * in inSeqs, storing any (partial) sequences.
123807 + * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to
123808 + * avoid splitting a match, or to avoid splitting a match such that it would produce a match
123809 + * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block.
123810 + */
123811 +static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
123812 +                                                       const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
123813 +                                                       const void* src, size_t blockSize) {
123814 +    U32 idx = seqPos->idx;
123815 +    U32 startPosInSequence = seqPos->posInSequence;
123816 +    U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize;
123817 +    size_t dictSize;
123818 +    BYTE const* ip = (BYTE const*)(src);
123819 +    BYTE const* iend = ip + blockSize;  /* May be adjusted if we decide to process fewer than blockSize bytes */
123820 +    repcodes_t updatedRepcodes;
123821 +    U32 bytesAdjustment = 0;
123822 +    U32 finalMatchSplit = 0;
123823 +    U32 litLength;
123824 +    U32 matchLength;
123825 +    U32 rawOffset;
123826 +    U32 offCode;
123828 +    if (cctx->cdict) {
123829 +        dictSize = cctx->cdict->dictContentSize;
123830 +    } else if (cctx->prefixDict.dict) {
123831 +        dictSize = cctx->prefixDict.dictSize;
123832 +    } else {
123833 +        dictSize = 0;
123834 +    }
123835 +    DEBUGLOG(5, "ZSTD_copySequencesToSeqStore: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize);
123836 +    DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
123837 +    ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
123838 +    while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) {
123839 +        const ZSTD_Sequence currSeq = inSeqs[idx];
123840 +        litLength = currSeq.litLength;
123841 +        matchLength = currSeq.matchLength;
123842 +        rawOffset = currSeq.offset;
123844 +        /* Modify the sequence depending on where endPosInSequence lies */
123845 +        if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) {
123846 +            if (startPosInSequence >= litLength) {
123847 +                startPosInSequence -= litLength;
123848 +                litLength = 0;
123849 +                matchLength -= startPosInSequence;
123850 +            } else {
123851 +                litLength -= startPosInSequence;
123852 +            }
123853 +            /* Move to the next sequence */
123854 +            endPosInSequence -= currSeq.litLength + currSeq.matchLength;
123855 +            startPosInSequence = 0;
123856 +            idx++;
123857 +        } else {
123858 +            /* This is the final (partial) sequence we're adding from inSeqs, and endPosInSequence
123859 +               does not reach the end of the match. So, we have to split the sequence */
123860 +            DEBUGLOG(6, "Require a split: diff: %u, idx: %u PIS: %u",
123861 +                     currSeq.litLength + currSeq.matchLength - endPosInSequence, idx, endPosInSequence);
123862 +            if (endPosInSequence > litLength) {
123863 +                U32 firstHalfMatchLength;
123864 +                litLength = startPosInSequence >= litLength ? 0 : litLength - startPosInSequence;
123865 +                firstHalfMatchLength = endPosInSequence - startPosInSequence - litLength;
123866 +                if (matchLength > blockSize && firstHalfMatchLength >= cctx->appliedParams.cParams.minMatch) {
123867 +                    /* Only ever split the match if it is larger than the block size */
123868 +                    U32 secondHalfMatchLength = currSeq.matchLength + currSeq.litLength - endPosInSequence;
123869 +                    if (secondHalfMatchLength < cctx->appliedParams.cParams.minMatch) {
123870 +                        /* Move the endPosInSequence backward so that it creates a match of minMatch length */
123871 +                        endPosInSequence -= cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
123872 +                        bytesAdjustment = cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
123873 +                        firstHalfMatchLength -= bytesAdjustment;
123874 +                    }
123875 +                    matchLength = firstHalfMatchLength;
123876 +                    /* Flag that we split the last match - after storing the sequence, exit the loop,
123877 +                       but keep the value of endPosInSequence */
123878 +                    finalMatchSplit = 1;
123879 +                } else {
123880 +                    /* Move the position in sequence backwards so that we don't split the match, and break to store
123881 +                     * the last literals. We use the original currSeq.litLength as a marker for where endPosInSequence
123882 +                     * should go. We prefer to do this whenever it is not necessary to split the match, or if doing so
123883 +                     * would cause the first half of the match to be too small.
123884 +                     */
123885 +                    bytesAdjustment = endPosInSequence - currSeq.litLength;
123886 +                    endPosInSequence = currSeq.litLength;
123887 +                    break;
123888 +                }
123889 +            } else {
123890 +                /* This sequence ends inside the literals, break to store the last literals */
123891 +                break;
123892 +            }
123893 +        }
123894 +        /* Check if this offset can be represented with a repcode */
123895 +        {   U32 ll0 = (litLength == 0);
123896 +            offCode = ZSTD_finalizeOffCode(rawOffset, updatedRepcodes.rep, ll0);
123897 +            updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
123898 +        }
123900 +        if (cctx->appliedParams.validateSequences) {
123901 +            seqPos->posInSrc += litLength + matchLength;
123902 +            FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
123903 +                                                   cctx->appliedParams.cParams.windowLog, dictSize,
123904 +                                                   cctx->appliedParams.cParams.minMatch),
123905 +                                                   "Sequence validation failed");
123906 +        }
123907 +        DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
123908 +        RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
123909 +                        "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
123910 +        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength - MINMATCH);
123911 +        ip += matchLength + litLength;
123912 +    }
123913 +    DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
123914 +    assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength);
123915 +    seqPos->idx = idx;
123916 +    seqPos->posInSequence = endPosInSequence;
123917 +    ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
123919 +    iend -= bytesAdjustment;
123920 +    if (ip != iend) {
123921 +        /* Store any last literals */
123922 +        U32 lastLLSize = (U32)(iend - ip);
123923 +        assert(ip <= iend);
123924 +        DEBUGLOG(6, "Storing last literals of size: %u", lastLLSize);
123925 +        ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize);
123926 +        seqPos->posInSrc += lastLLSize;
123927 +    }
123929 +    return bytesAdjustment;
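+/*
+ * Worked example of a split in the function above (illustrative numbers,
+ * minMatch == 3): blockSize == 100 and the current sequence is
+ * {litLength 20, matchLength 200}. endPosInSequence (100) exceeds litLength
+ * but not litLength + matchLength, and matchLength > blockSize, so the match
+ * is split: this block stores {ll 20, ml 80}, and posInSequence == 100
+ * carries the remaining 120 match bytes into the next block.
+ */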
123932 +typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
123933 +                                       const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
123934 +                                       const void* src, size_t blockSize);
123935 +static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) {
123936 +    ZSTD_sequenceCopier sequenceCopier = NULL;
123937 +    assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, mode));
123938 +    if (mode == ZSTD_sf_explicitBlockDelimiters) {
123939 +        return ZSTD_copySequencesToSeqStoreExplicitBlockDelim;
123940 +    } else if (mode == ZSTD_sf_noBlockDelimiters) {
123941 +        return ZSTD_copySequencesToSeqStoreNoBlockDelim;
123942 +    }
123943 +    assert(sequenceCopier != NULL);
123944 +    return sequenceCopier;
123947 +/* Compress, block-by-block, all of the sequences given.
123949 + * Returns the cumulative size of all compressed blocks (including their headers), otherwise a ZSTD error.
123950 + */
123951 +static size_t ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
123952 +                                              void* dst, size_t dstCapacity,
123953 +                                              const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
123954 +                                              const void* src, size_t srcSize) {
123955 +    size_t cSize = 0;
123956 +    U32 lastBlock;
123957 +    size_t blockSize;
123958 +    size_t compressedSeqsSize;
123959 +    size_t remaining = srcSize;
123960 +    ZSTD_sequencePosition seqPos = {0, 0, 0};
123962 +    BYTE const* ip = (BYTE const*)src;
123963 +    BYTE* op = (BYTE*)dst;
123964 +    ZSTD_sequenceCopier sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);
123966 +    DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize);
123967 +    /* Special case: empty frame */
123968 +    if (remaining == 0) {
123969 +        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1);
123970 +        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "No room for empty frame block header");
123971 +        MEM_writeLE32(op, cBlockHeader24);
123972 +        op += ZSTD_blockHeaderSize;
123973 +        dstCapacity -= ZSTD_blockHeaderSize;
123974 +        cSize += ZSTD_blockHeaderSize;
123975 +    }
123977 +    while (remaining) {
123978 +        size_t cBlockSize;
123979 +        size_t additionalByteAdjustment;
123980 +        lastBlock = remaining <= cctx->blockSize;
123981 +        blockSize = lastBlock ? (U32)remaining : (U32)cctx->blockSize;
123982 +        ZSTD_resetSeqStore(&cctx->seqStore);
123983 +        DEBUGLOG(4, "Working on new block. Blocksize: %zu", blockSize);
123985 +        additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize);
123986 +        FORWARD_IF_ERROR(additionalByteAdjustment, "Bad sequence copy");
123987 +        blockSize -= additionalByteAdjustment;
123989 +        /* If blocks are too small, emit as a nocompress block */
123990 +        if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
123991 +            cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
123992 +            FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
123993 +            DEBUGLOG(4, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize);
123994 +            cSize += cBlockSize;
123995 +            ip += blockSize;
123996 +            op += cBlockSize;
123997 +            remaining -= blockSize;
123998 +            dstCapacity -= cBlockSize;
123999 +            continue;
124000 +        }
124002 +        compressedSeqsSize = ZSTD_entropyCompressSequences(&cctx->seqStore,
124003 +                                &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy,
124004 +                                &cctx->appliedParams,
124005 +                                op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize,
124006 +                                blockSize,
124007 +                                cctx->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
124008 +                                cctx->bmi2);
124009 +        FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed");
124010 +        DEBUGLOG(4, "Compressed sequences size: %zu", compressedSeqsSize);
124012 +        if (!cctx->isFirstBlock &&
124013 +            ZSTD_maybeRLE(&cctx->seqStore) &&
124014 +            ZSTD_isRLE((BYTE const*)src, srcSize)) {
124015 +            /* We don't want to emit our first block as an RLE even if it qualifies, because
124016 +             * doing so would cause the decoder (CLI only) to throw a "should consume all input" error.
124017 +             * This is only an issue for zstd <= v1.4.3.
124018 +             */
124019 +            compressedSeqsSize = 1;
124020 +        }
124022 +        if (compressedSeqsSize == 0) {
124023 +            /* ZSTD_noCompressBlock writes the block header as well */
124024 +            cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
124025 +            FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
124026 +            DEBUGLOG(4, "Writing out nocompress block, size: %zu", cBlockSize);
124027 +        } else if (compressedSeqsSize == 1) {
124028 +            cBlockSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, blockSize, lastBlock);
124029 +            FORWARD_IF_ERROR(cBlockSize, "RLE compress block failed");
124030 +            DEBUGLOG(4, "Writing out RLE block, size: %zu", cBlockSize);
124031 +        } else {
124032 +            U32 cBlockHeader;
124033 +            /* Error checking and repcodes update */
124034 +            ZSTD_confirmRepcodesAndEntropyTables(cctx);
124035 +            if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
124036 +                cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
124038 +            /* Write block header into the beginning of the block */
124039 +            cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3);
124040 +            MEM_writeLE24(op, cBlockHeader);
124041 +            cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize;
124042 +            DEBUGLOG(4, "Writing out compressed block, size: %zu", cBlockSize);
124043 +        }
124045 +        cSize += cBlockSize;
124046 +        DEBUGLOG(4, "cSize running total: %zu", cSize);
124048 +        if (lastBlock) {
124049 +            break;
124050 +        } else {
124051 +            ip += blockSize;
124052 +            op += cBlockSize;
124053 +            remaining -= blockSize;
124054 +            dstCapacity -= cBlockSize;
124055 +            cctx->isFirstBlock = 0;
124056 +        }
124057 +    }
124059 +    return cSize;
124062 +size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapacity,
124063 +                              const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
124064 +                              const void* src, size_t srcSize) {
124065 +    BYTE* op = (BYTE*)dst;
124066 +    size_t cSize = 0;
124067 +    size_t compressedBlocksSize = 0;
124068 +    size_t frameHeaderSize = 0;
124070 +    /* Transparent initialization stage, same as compressStream2() */
124071 +    DEBUGLOG(3, "ZSTD_compressSequences()");
124072 +    assert(cctx != NULL);
124073 +    FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, srcSize), "CCtx initialization failed");
124074 +    /* Begin writing output, starting with frame header */
124075 +    frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, &cctx->appliedParams, srcSize, cctx->dictID);
124076 +    op += frameHeaderSize;
124077 +    dstCapacity -= frameHeaderSize;
124078 +    cSize += frameHeaderSize;
124079 +    if (cctx->appliedParams.fParams.checksumFlag && srcSize) {
124080 +        xxh64_update(&cctx->xxhState, src, srcSize);
124081 +    }
124082 +    /* cSize includes block header size and compressed sequences size */
124083 +    compressedBlocksSize = ZSTD_compressSequences_internal(cctx,
124084 +                                                           op, dstCapacity,
124085 +                                                           inSeqs, inSeqsSize,
124086 +                                                           src, srcSize);
124087 +    FORWARD_IF_ERROR(compressedBlocksSize, "Compressing blocks failed!");
124088 +    cSize += compressedBlocksSize;
124089 +    dstCapacity -= compressedBlocksSize;
124091 +    if (cctx->appliedParams.fParams.checksumFlag) {
124092 +        U32 const checksum = (U32) xxh64_digest(&cctx->xxhState);
124093 +        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
124094 +        DEBUGLOG(4, "Write checksum : %08X", (unsigned)checksum);
124095 +        MEM_writeLE32((char*)dst + cSize, checksum);
124096 +        cSize += 4;
124097 +    }
124099 +    DEBUGLOG(3, "Final compressed size: %zu", cSize);
124100 +    return cSize;
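+/*
+ * Sketch: driving ZSTD_compressSequences() with externally produced sequences
+ * (illustrative only; that seqs/nbSeqs come from an external matchfinder is
+ * an assumption):
+ */
+static size_t example_compressSequences(ZSTD_CCtx* cctx,
+                                        void* dst, size_t dstCapacity,
+                                        const ZSTD_Sequence* seqs, size_t nbSeqs,
+                                        const void* src, size_t srcSize)
+{
+    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
+    /* declare the explicit-delimiter format, and have the encoder validate
+     * offsets/match lengths rather than trusting the producer */
+    ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
+    ZSTD_CCtx_setParameter(cctx, ZSTD_c_validateSequences, 1);
+    return ZSTD_compressSequences(cctx, dst, dstCapacity, seqs, nbSeqs, src, srcSize);
+}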
124103 +/*======   Finalize   ======*/
124105 +/*! ZSTD_flushStream() :
124106 + * @return : amount of data remaining to flush */
124107 +size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
124109 +    ZSTD_inBuffer input = { NULL, 0, 0 };
124110 +    return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush);
124114 +size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
124116 +    ZSTD_inBuffer input = { NULL, 0, 0 };
124117 +    size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
124118 +    FORWARD_IF_ERROR( remainingToFlush , "ZSTD_compressStream2 failed");
124119 +    if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush;   /* minimal estimation */
124120 +    /* single thread mode : attempt to calculate remaining to flush more precisely */
124121 +    {   size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
124122 +        size_t const checksumSize = (size_t)(zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4);
124123 +        size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize;
124124 +        DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush);
124125 +        return toFlush;
124126 +    }
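+/*
+ * Sketch: draining the frame epilogue with ZSTD_endStream() (illustrative
+ * only; dst/dstCapacity and write_out() are assumptions):
+ */
+extern void write_out(const void* buf, size_t size);   /* hypothetical sink */
+static size_t example_finish(ZSTD_CStream* zcs, void* dst, size_t dstCapacity)
+{
+    size_t remaining;
+    do {
+        ZSTD_outBuffer out = { dst, dstCapacity, 0 };
+        remaining = ZSTD_endStream(zcs, &out);          /* 0 => frame fully flushed */
+        if (ZSTD_isError(remaining)) return remaining;
+        write_out(dst, out.pos);
+    } while (remaining != 0);
+    return 0;
+}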
124130 +/*-=====  Pre-defined compression levels  =====-*/
124132 +#define ZSTD_MAX_CLEVEL     22
124133 +int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
124134 +int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
124136 +static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
124137 +{   /* "default" - for any srcSize > 256 KB */
124138 +    /* W,  C,  H,  S,  L, TL, strat */
124139 +    { 19, 12, 13,  1,  6,  1, ZSTD_fast    },  /* base for negative levels */
124140 +    { 19, 13, 14,  1,  7,  0, ZSTD_fast    },  /* level  1 */
124141 +    { 20, 15, 16,  1,  6,  0, ZSTD_fast    },  /* level  2 */
124142 +    { 21, 16, 17,  1,  5,  0, ZSTD_dfast   },  /* level  3 */
124143 +    { 21, 18, 18,  1,  5,  0, ZSTD_dfast   },  /* level  4 */
124144 +    { 21, 18, 19,  2,  5,  2, ZSTD_greedy  },  /* level  5 */
124145 +    { 21, 19, 19,  3,  5,  4, ZSTD_greedy  },  /* level  6 */
124146 +    { 21, 19, 19,  3,  5,  8, ZSTD_lazy    },  /* level  7 */
124147 +    { 21, 19, 19,  3,  5, 16, ZSTD_lazy2   },  /* level  8 */
124148 +    { 21, 19, 20,  4,  5, 16, ZSTD_lazy2   },  /* level  9 */
124149 +    { 22, 20, 21,  4,  5, 16, ZSTD_lazy2   },  /* level 10 */
124150 +    { 22, 21, 22,  4,  5, 16, ZSTD_lazy2   },  /* level 11 */
124151 +    { 22, 21, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 12 */
124152 +    { 22, 21, 22,  5,  5, 32, ZSTD_btlazy2 },  /* level 13 */
124153 +    { 22, 22, 23,  5,  5, 32, ZSTD_btlazy2 },  /* level 14 */
124154 +    { 22, 23, 23,  6,  5, 32, ZSTD_btlazy2 },  /* level 15 */
124155 +    { 22, 22, 22,  5,  5, 48, ZSTD_btopt   },  /* level 16 */
124156 +    { 23, 23, 22,  5,  4, 64, ZSTD_btopt   },  /* level 17 */
124157 +    { 23, 23, 22,  6,  3, 64, ZSTD_btultra },  /* level 18 */
124158 +    { 23, 24, 22,  7,  3,256, ZSTD_btultra2},  /* level 19 */
124159 +    { 25, 25, 23,  7,  3,256, ZSTD_btultra2},  /* level 20 */
124160 +    { 26, 26, 24,  7,  3,512, ZSTD_btultra2},  /* level 21 */
124161 +    { 27, 27, 25,  9,  3,999, ZSTD_btultra2},  /* level 22 */
124163 +{   /* for srcSize <= 256 KB */
124164 +    /* W,  C,  H,  S,  L,  T, strat */
124165 +    { 18, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
124166 +    { 18, 13, 14,  1,  6,  0, ZSTD_fast    },  /* level  1 */
124167 +    { 18, 14, 14,  1,  5,  0, ZSTD_dfast   },  /* level  2 */
124168 +    { 18, 16, 16,  1,  4,  0, ZSTD_dfast   },  /* level  3 */
124169 +    { 18, 16, 17,  2,  5,  2, ZSTD_greedy  },  /* level  4.*/
124170 +    { 18, 18, 18,  3,  5,  2, ZSTD_greedy  },  /* level  5.*/
124171 +    { 18, 18, 19,  3,  5,  4, ZSTD_lazy    },  /* level  6.*/
124172 +    { 18, 18, 19,  4,  4,  4, ZSTD_lazy    },  /* level  7 */
124173 +    { 18, 18, 19,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
124174 +    { 18, 18, 19,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
124175 +    { 18, 18, 19,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
124176 +    { 18, 18, 19,  5,  4, 12, ZSTD_btlazy2 },  /* level 11.*/
124177 +    { 18, 19, 19,  7,  4, 12, ZSTD_btlazy2 },  /* level 12.*/
124178 +    { 18, 18, 19,  4,  4, 16, ZSTD_btopt   },  /* level 13 */
124179 +    { 18, 18, 19,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/
124180 +    { 18, 18, 19,  6,  3,128, ZSTD_btopt   },  /* level 15.*/
124181 +    { 18, 19, 19,  6,  3,128, ZSTD_btultra },  /* level 16.*/
124182 +    { 18, 19, 19,  8,  3,256, ZSTD_btultra },  /* level 17.*/
124183 +    { 18, 19, 19,  6,  3,128, ZSTD_btultra2},  /* level 18.*/
124184 +    { 18, 19, 19,  8,  3,256, ZSTD_btultra2},  /* level 19.*/
124185 +    { 18, 19, 19, 10,  3,512, ZSTD_btultra2},  /* level 20.*/
124186 +    { 18, 19, 19, 12,  3,512, ZSTD_btultra2},  /* level 21.*/
124187 +    { 18, 19, 19, 13,  3,999, ZSTD_btultra2},  /* level 22.*/
124189 +{   /* for srcSize <= 128 KB */
124190 +    /* W,  C,  H,  S,  L,  T, strat */
124191 +    { 17, 12, 12,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
124192 +    { 17, 12, 13,  1,  6,  0, ZSTD_fast    },  /* level  1 */
124193 +    { 17, 13, 15,  1,  5,  0, ZSTD_fast    },  /* level  2 */
124194 +    { 17, 15, 16,  2,  5,  0, ZSTD_dfast   },  /* level  3 */
124195 +    { 17, 17, 17,  2,  4,  0, ZSTD_dfast   },  /* level  4 */
124196 +    { 17, 16, 17,  3,  4,  2, ZSTD_greedy  },  /* level  5 */
124197 +    { 17, 17, 17,  3,  4,  4, ZSTD_lazy    },  /* level  6 */
124198 +    { 17, 17, 17,  3,  4,  8, ZSTD_lazy2   },  /* level  7 */
124199 +    { 17, 17, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
124200 +    { 17, 17, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
124201 +    { 17, 17, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
124202 +    { 17, 17, 17,  5,  4,  8, ZSTD_btlazy2 },  /* level 11 */
124203 +    { 17, 18, 17,  7,  4, 12, ZSTD_btlazy2 },  /* level 12 */
124204 +    { 17, 18, 17,  3,  4, 12, ZSTD_btopt   },  /* level 13.*/
124205 +    { 17, 18, 17,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/
124206 +    { 17, 18, 17,  6,  3,256, ZSTD_btopt   },  /* level 15.*/
124207 +    { 17, 18, 17,  6,  3,128, ZSTD_btultra },  /* level 16.*/
124208 +    { 17, 18, 17,  8,  3,256, ZSTD_btultra },  /* level 17.*/
124209 +    { 17, 18, 17, 10,  3,512, ZSTD_btultra },  /* level 18.*/
124210 +    { 17, 18, 17,  5,  3,256, ZSTD_btultra2},  /* level 19.*/
124211 +    { 17, 18, 17,  7,  3,512, ZSTD_btultra2},  /* level 20.*/
124212 +    { 17, 18, 17,  9,  3,512, ZSTD_btultra2},  /* level 21.*/
124213 +    { 17, 18, 17, 11,  3,999, ZSTD_btultra2},  /* level 22.*/
124215 +{   /* for srcSize <= 16 KB */
124216 +    /* W,  C,  H,  S,  L,  T, strat */
124217 +    { 14, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
124218 +    { 14, 14, 15,  1,  5,  0, ZSTD_fast    },  /* level  1 */
124219 +    { 14, 14, 15,  1,  4,  0, ZSTD_fast    },  /* level  2 */
124220 +    { 14, 14, 15,  2,  4,  0, ZSTD_dfast   },  /* level  3 */
124221 +    { 14, 14, 14,  4,  4,  2, ZSTD_greedy  },  /* level  4 */
124222 +    { 14, 14, 14,  3,  4,  4, ZSTD_lazy    },  /* level  5.*/
124223 +    { 14, 14, 14,  4,  4,  8, ZSTD_lazy2   },  /* level  6 */
124224 +    { 14, 14, 14,  6,  4,  8, ZSTD_lazy2   },  /* level  7 */
124225 +    { 14, 14, 14,  8,  4,  8, ZSTD_lazy2   },  /* level  8.*/
124226 +    { 14, 15, 14,  5,  4,  8, ZSTD_btlazy2 },  /* level  9.*/
124227 +    { 14, 15, 14,  9,  4,  8, ZSTD_btlazy2 },  /* level 10.*/
124228 +    { 14, 15, 14,  3,  4, 12, ZSTD_btopt   },  /* level 11.*/
124229 +    { 14, 15, 14,  4,  3, 24, ZSTD_btopt   },  /* level 12.*/
124230 +    { 14, 15, 14,  5,  3, 32, ZSTD_btultra },  /* level 13.*/
124231 +    { 14, 15, 15,  6,  3, 64, ZSTD_btultra },  /* level 14.*/
124232 +    { 14, 15, 15,  7,  3,256, ZSTD_btultra },  /* level 15.*/
124233 +    { 14, 15, 15,  5,  3, 48, ZSTD_btultra2},  /* level 16.*/
124234 +    { 14, 15, 15,  6,  3,128, ZSTD_btultra2},  /* level 17.*/
124235 +    { 14, 15, 15,  7,  3,256, ZSTD_btultra2},  /* level 18.*/
124236 +    { 14, 15, 15,  8,  3,256, ZSTD_btultra2},  /* level 19.*/
124237 +    { 14, 15, 15,  8,  3,512, ZSTD_btultra2},  /* level 20.*/
124238 +    { 14, 15, 15,  9,  3,512, ZSTD_btultra2},  /* level 21.*/
124239 +    { 14, 15, 15, 10,  3,999, ZSTD_btultra2},  /* level 22.*/
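For orientation, the columns of these tables fill ZSTD_compressionParameters positionally. A sketch of the correspondence, using level 3 of the "default" table (the variable name is illustrative only):

/* W -> windowLog, C -> chainLog, H -> hashLog,
 * S -> searchLog, L -> minMatch, TL/T -> targetLength, strat -> strategy */
static const ZSTD_compressionParameters level3_default =
    { 21, 16, 17, 1, 5, 0, ZSTD_dfast };   /* windowLog=21 ... strategy=ZSTD_dfast */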
124243 +static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(int const compressionLevel, size_t const dictSize)
124245 +    ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, 0, dictSize, ZSTD_cpm_createCDict);
124246 +    switch (cParams.strategy) {
124247 +        case ZSTD_fast:
124248 +        case ZSTD_dfast:
124249 +            break;
124250 +        case ZSTD_greedy:
124251 +        case ZSTD_lazy:
124252 +        case ZSTD_lazy2:
124253 +            cParams.hashLog += ZSTD_LAZY_DDSS_BUCKET_LOG;
124254 +            break;
124255 +        case ZSTD_btlazy2:
124256 +        case ZSTD_btopt:
124257 +        case ZSTD_btultra:
124258 +        case ZSTD_btultra2:
124259 +            break;
124260 +    }
124261 +    return cParams;
124264 +static int ZSTD_dedicatedDictSearch_isSupported(
124265 +        ZSTD_compressionParameters const* cParams)
124267 +    return (cParams->strategy >= ZSTD_greedy)
124268 +        && (cParams->strategy <= ZSTD_lazy2)
124269 +        && (cParams->hashLog >= cParams->chainLog)
124270 +        && (cParams->chainLog <= 24);
124274 + * Reverses the adjustment applied to cparams when enabling dedicated dict
124275 + * search. This is used to recover the set of params to be used in the working
124276 + * context. (Otherwise, those tables would also grow.)
124277 + */
124278 +static void ZSTD_dedicatedDictSearch_revertCParams(
124279 +        ZSTD_compressionParameters* cParams) {
124280 +    switch (cParams->strategy) {
124281 +        case ZSTD_fast:
124282 +        case ZSTD_dfast:
124283 +            break;
124284 +        case ZSTD_greedy:
124285 +        case ZSTD_lazy:
124286 +        case ZSTD_lazy2:
124287 +            cParams->hashLog -= ZSTD_LAZY_DDSS_BUCKET_LOG;
124288 +            break;
124289 +        case ZSTD_btlazy2:
124290 +        case ZSTD_btopt:
124291 +        case ZSTD_btultra:
124292 +        case ZSTD_btultra2:
124293 +            break;
124294 +    }
124297 +static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
124299 +    switch (mode) {
124300 +    case ZSTD_cpm_unknown:
124301 +    case ZSTD_cpm_noAttachDict:
124302 +    case ZSTD_cpm_createCDict:
124303 +        break;
124304 +    case ZSTD_cpm_attachDict:
124305 +        dictSize = 0;
124306 +        break;
124307 +    default:
124308 +        assert(0);
124309 +        break;
124310 +    }
124311 +    {   int const unknown = srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN;
124312 +        size_t const addedSize = unknown && dictSize > 0 ? 500 : 0;
124313 +        return unknown && dictSize == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : srcSizeHint+dictSize+addedSize;
124314 +    }
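One subtlety worth making concrete: ZSTD_CONTENTSIZE_UNKNOWN is (0ULL - 1), so when the source size is unknown but a dictionary is present, the unsigned addition wraps and the row size works out to dictSize + 499, i.e. roughly the 500-byte pad. A standalone arithmetic check (not part of the patch):

#include <assert.h>
int main(void)
{
    unsigned long long const unknown = 0ULL - 1;            /* ZSTD_CONTENTSIZE_UNKNOWN */
    unsigned long long const rSize = unknown + 4096 + 500;  /* wraps to 4595 */
    assert(rSize == 4595);   /* 4595 <= 16 KB -> the "<= 16 KB" table is selected */
    return 0;
}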
124317 +/*! ZSTD_getCParams_internal() :
124318 + * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
124319 + *  Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown.
124320 + *        Use dictSize == 0 for unknown or unused.
124321 + *  Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_cParamMode_e`. */
124322 +static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
124324 +    U64 const rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode);
124325 +    U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
124326 +    int row;
124327 +    DEBUGLOG(5, "ZSTD_getCParams_internal (cLevel=%i)", compressionLevel);
124329 +    /* row */
124330 +    if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT;   /* 0 == default */
124331 +    else if (compressionLevel < 0) row = 0;   /* entry 0 is baseline for fast mode */
124332 +    else if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
124333 +    else row = compressionLevel;
124335 +    {   ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
124336 +        /* acceleration factor */
124337 +        if (compressionLevel < 0) {
124338 +            int const clampedCompressionLevel = MAX(ZSTD_minCLevel(), compressionLevel);
124339 +            cp.targetLength = (unsigned)(-clampedCompressionLevel);
124340 +        }
124341 +        /* refine parameters based on srcSize & dictSize */
124342 +        return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode);
124343 +    }
124346 +/*! ZSTD_getCParams() :
124347 + * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
124348 + *  Size values are optional, provide 0 if not known or unused */
124349 +ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
124351 +    if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
124352 +    return ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
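Outside callers would use it along these lines; a hedged fragment with illustrative sizes:

/* Parameters for level 19 on a 1 MiB input, no dictionary: */
ZSTD_compressionParameters const cp = ZSTD_getCParams(19, 1 << 20, 0);
/* cp can then be sanity-checked with ZSTD_checkCParams(cp) before use. */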
124355 +/*! ZSTD_getParams_internal() :
124356 + *  same idea as ZSTD_getCParams()
124357 + * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
124358 + *  Fields of `ZSTD_frameParameters` are set to default values */
124359 +static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) {
124360 +    ZSTD_parameters params;
124361 +    ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode);
124362 +    DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
124363 +    ZSTD_memset(&params, 0, sizeof(params));
124364 +    params.cParams = cParams;
124365 +    params.fParams.contentSizeFlag = 1;
124366 +    return params;
124369 +/*! ZSTD_getParams() :
124370 + *  same idea as ZSTD_getCParams()
124371 + * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
124372 + *  Fields of `ZSTD_frameParameters` are set to default values */
124373 +ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
124374 +    if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
124375 +    return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
124377 diff --git a/lib/zstd/compress/zstd_compress_internal.h b/lib/zstd/compress/zstd_compress_internal.h
124378 new file mode 100644
124379 index 000000000000..b56c482322ba
124380 --- /dev/null
124381 +++ b/lib/zstd/compress/zstd_compress_internal.h
124382 @@ -0,0 +1,1188 @@
124384 + * Copyright (c) Yann Collet, Facebook, Inc.
124385 + * All rights reserved.
124387 + * This source code is licensed under both the BSD-style license (found in the
124388 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
124389 + * in the COPYING file in the root directory of this source tree).
124390 + * You may select, at your option, one of the above-listed licenses.
124391 + */
124393 +/* This header contains definitions
124394 + * that shall **only** be used by modules within lib/compress.
124395 + */
124397 +#ifndef ZSTD_COMPRESS_H
124398 +#define ZSTD_COMPRESS_H
124400 +/*-*************************************
124401 +*  Dependencies
124402 +***************************************/
124403 +#include "../common/zstd_internal.h"
124404 +#include "zstd_cwksp.h"
124407 +/*-*************************************
124408 +*  Constants
124409 +***************************************/
124410 +#define kSearchStrength      8
124411 +#define HASH_READ_SIZE       8
124412 +#define ZSTD_DUBT_UNSORTED_MARK 1   /* For btlazy2 strategy, index ZSTD_DUBT_UNSORTED_MARK==1 means "unsorted".
124413 +                                       It could be confused for a real successor at index "1", if sorted as larger than its predecessor.
124414 +                                       It's not a big deal though : candidate will just be sorted again.
124415 +                                       Additionally, candidate position 1 will be lost.
124416 +                                       But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
124417 +                                       The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy.
124418 +                                       This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */
124421 +/*-*************************************
124422 +*  Context memory management
124423 +***************************************/
124424 +typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
124425 +typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage;
124427 +typedef struct ZSTD_prefixDict_s {
124428 +    const void* dict;
124429 +    size_t dictSize;
124430 +    ZSTD_dictContentType_e dictContentType;
124431 +} ZSTD_prefixDict;
124433 +typedef struct {
124434 +    void* dictBuffer;
124435 +    void const* dict;
124436 +    size_t dictSize;
124437 +    ZSTD_dictContentType_e dictContentType;
124438 +    ZSTD_CDict* cdict;
124439 +} ZSTD_localDict;
124441 +typedef struct {
124442 +    HUF_CElt CTable[HUF_CTABLE_SIZE_U32(255)];
124443 +    HUF_repeat repeatMode;
124444 +} ZSTD_hufCTables_t;
124446 +typedef struct {
124447 +    FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
124448 +    FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
124449 +    FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
124450 +    FSE_repeat offcode_repeatMode;
124451 +    FSE_repeat matchlength_repeatMode;
124452 +    FSE_repeat litlength_repeatMode;
124453 +} ZSTD_fseCTables_t;
124455 +typedef struct {
124456 +    ZSTD_hufCTables_t huf;
124457 +    ZSTD_fseCTables_t fse;
124458 +} ZSTD_entropyCTables_t;
124460 +typedef struct {
124461 +    U32 off;            /* Offset code (offset + ZSTD_REP_MOVE) for the match */
124462 +    U32 len;            /* Raw length of match */
124463 +} ZSTD_match_t;
124465 +typedef struct {
124466 +    U32 offset;         /* Offset of sequence */
124467 +    U32 litLength;      /* Length of literals prior to match */
124468 +    U32 matchLength;    /* Raw length of match */
124469 +} rawSeq;
124471 +typedef struct {
124472 +  rawSeq* seq;          /* The start of the sequences */
124473 +  size_t pos;           /* The index in seq where reading stopped. pos <= size. */
124474 +  size_t posInSequence; /* The position within the sequence at seq[pos] where reading
124475 +                           stopped. posInSequence <= seq[pos].litLength + seq[pos].matchLength */
124476 +  size_t size;          /* The number of sequences. <= capacity. */
124477 +  size_t capacity;      /* The capacity starting from `seq` pointer */
124478 +} rawSeqStore_t;
124480 +UNUSED_ATTR static const rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0};
124482 +typedef struct {
124483 +    int price;
124484 +    U32 off;
124485 +    U32 mlen;
124486 +    U32 litlen;
124487 +    U32 rep[ZSTD_REP_NUM];
124488 +} ZSTD_optimal_t;
124490 +typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e;
124492 +typedef struct {
124493 +    /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */
124494 +    unsigned* litFreq;           /* table of literals statistics, of size 256 */
124495 +    unsigned* litLengthFreq;     /* table of litLength statistics, of size (MaxLL+1) */
124496 +    unsigned* matchLengthFreq;   /* table of matchLength statistics, of size (MaxML+1) */
124497 +    unsigned* offCodeFreq;       /* table of offCode statistics, of size (MaxOff+1) */
124498 +    ZSTD_match_t* matchTable;    /* list of found matches, of size ZSTD_OPT_NUM+1 */
124499 +    ZSTD_optimal_t* priceTable;  /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */
124501 +    U32  litSum;                 /* nb of literals */
124502 +    U32  litLengthSum;           /* nb of litLength codes */
124503 +    U32  matchLengthSum;         /* nb of matchLength codes */
124504 +    U32  offCodeSum;             /* nb of offset codes */
124505 +    U32  litSumBasePrice;        /* to compare to log2(litfreq) */
124506 +    U32  litLengthSumBasePrice;  /* to compare to log2(llfreq)  */
124507 +    U32  matchLengthSumBasePrice;/* to compare to log2(mlfreq)  */
124508 +    U32  offCodeSumBasePrice;    /* to compare to log2(offreq)  */
124509 +    ZSTD_OptPrice_e priceType;   /* prices can be determined dynamically, or follow a pre-defined cost structure */
124510 +    const ZSTD_entropyCTables_t* symbolCosts;  /* pre-calculated dictionary statistics */
124511 +    ZSTD_literalCompressionMode_e literalCompressionMode;
124512 +} optState_t;
124514 +typedef struct {
124515 +  ZSTD_entropyCTables_t entropy;
124516 +  U32 rep[ZSTD_REP_NUM];
124517 +} ZSTD_compressedBlockState_t;
124519 +typedef struct {
124520 +    BYTE const* nextSrc;    /* next block here to continue on current prefix */
124521 +    BYTE const* base;       /* All regular indexes relative to this position */
124522 +    BYTE const* dictBase;   /* extDict indexes relative to this position */
124523 +    U32 dictLimit;          /* below that point, need extDict */
124524 +    U32 lowLimit;           /* below that point, no more valid data */
124525 +} ZSTD_window_t;
124527 +typedef struct ZSTD_matchState_t ZSTD_matchState_t;
124528 +struct ZSTD_matchState_t {
124529 +    ZSTD_window_t window;   /* State for window round buffer management */
124530 +    U32 loadedDictEnd;      /* index of end of dictionary, within context's referential.
124531 +                             * When loadedDictEnd != 0, a dictionary is in use, and still valid.
124532 +                             * This relies on a mechanism to set loadedDictEnd=0 when dictionary is no longer within distance.
124533 +                             * Such mechanism is provided within ZSTD_window_enforceMaxDist() and ZSTD_checkDictValidity().
124534 +                             * When dict referential is copied into active context (i.e. not attached),
124535 +                             * loadedDictEnd == dictSize, since referential starts from zero.
124536 +                             */
124537 +    U32 nextToUpdate;       /* index from which to continue table update */
124538 +    U32 hashLog3;           /* dispatch table for matches of len==3 : larger == faster, more memory */
124539 +    U32* hashTable;
124540 +    U32* hashTable3;
124541 +    U32* chainTable;
124542 +    int dedicatedDictSearch;  /* Indicates whether this matchState is using the
124543 +                               * dedicated dictionary search structure.
124544 +                               */
124545 +    optState_t opt;         /* optimal parser state */
124546 +    const ZSTD_matchState_t* dictMatchState;
124547 +    ZSTD_compressionParameters cParams;
124548 +    const rawSeqStore_t* ldmSeqStore;
124551 +typedef struct {
124552 +    ZSTD_compressedBlockState_t* prevCBlock;
124553 +    ZSTD_compressedBlockState_t* nextCBlock;
124554 +    ZSTD_matchState_t matchState;
124555 +} ZSTD_blockState_t;
124557 +typedef struct {
124558 +    U32 offset;
124559 +    U32 checksum;
124560 +} ldmEntry_t;
124562 +typedef struct {
124563 +    BYTE const* split;
124564 +    U32 hash;
124565 +    U32 checksum;
124566 +    ldmEntry_t* bucket;
124567 +} ldmMatchCandidate_t;
124569 +#define LDM_BATCH_SIZE 64
124571 +typedef struct {
124572 +    ZSTD_window_t window;   /* State for the window round buffer management */
124573 +    ldmEntry_t* hashTable;
124574 +    U32 loadedDictEnd;
124575 +    BYTE* bucketOffsets;    /* Next position in bucket to insert entry */
124576 +    size_t splitIndices[LDM_BATCH_SIZE];
124577 +    ldmMatchCandidate_t matchCandidates[LDM_BATCH_SIZE];
124578 +} ldmState_t;
124580 +typedef struct {
124581 +    U32 enableLdm;          /* 1 if long distance matching is enabled */
124582 +    U32 hashLog;            /* Log size of hashTable */
124583 +    U32 bucketSizeLog;      /* Log bucket size for collision resolution, at most 8 */
124584 +    U32 minMatchLength;     /* Minimum match length */
124585 +    U32 hashRateLog;        /* Log number of entries to skip */
124586 +    U32 windowLog;          /* Window log for the LDM */
124587 +} ldmParams_t;
124589 +typedef struct {
124590 +    int collectSequences;
124591 +    ZSTD_Sequence* seqStart;
124592 +    size_t seqIndex;
124593 +    size_t maxSequences;
124594 +} SeqCollector;
124596 +struct ZSTD_CCtx_params_s {
124597 +    ZSTD_format_e format;
124598 +    ZSTD_compressionParameters cParams;
124599 +    ZSTD_frameParameters fParams;
124601 +    int compressionLevel;
124602 +    int forceWindow;           /* force back-references to respect limit of
124603 +                                * 1<<wLog, even for dictionary */
124604 +    size_t targetCBlockSize;   /* Tries to fit compressed block size to be around targetCBlockSize.
124605 +                                * No target when targetCBlockSize == 0.
124606 +                                * There is no guarantee on compressed block size */
124607 +    int srcSizeHint;           /* User's best guess of source size.
124608 +                                * Hint is not valid when srcSizeHint == 0.
124609 +                                * There is no guarantee that hint is close to actual source size */
124611 +    ZSTD_dictAttachPref_e attachDictPref;
124612 +    ZSTD_literalCompressionMode_e literalCompressionMode;
124614 +    /* Multithreading: used to pass parameters to mtctx */
124615 +    int nbWorkers;
124616 +    size_t jobSize;
124617 +    int overlapLog;
124618 +    int rsyncable;
124620 +    /* Long distance matching parameters */
124621 +    ldmParams_t ldmParams;
124623 +    /* Dedicated dict search algorithm trigger */
124624 +    int enableDedicatedDictSearch;
124626 +    /* Input/output buffer modes */
124627 +    ZSTD_bufferMode_e inBufferMode;
124628 +    ZSTD_bufferMode_e outBufferMode;
124630 +    /* Sequence compression API */
124631 +    ZSTD_sequenceFormat_e blockDelimiters;
124632 +    int validateSequences;
124634 +    /* Internal use, for createCCtxParams() and freeCCtxParams() only */
124635 +    ZSTD_customMem customMem;
124636 +};  /* typedef'd to ZSTD_CCtx_params within "zstd.h" */
124638 +#define COMPRESS_SEQUENCES_WORKSPACE_SIZE (sizeof(unsigned) * (MaxSeq + 2))
124639 +#define ENTROPY_WORKSPACE_SIZE (HUF_WORKSPACE_SIZE + COMPRESS_SEQUENCES_WORKSPACE_SIZE)
124642 + * Indicates whether this compression proceeds directly from user-provided
124643 + * source buffer to user-provided destination buffer (ZSTDb_not_buffered), or
124644 + * whether the context needs to buffer the input/output (ZSTDb_buffered).
124645 + */
124646 +typedef enum {
124647 +    ZSTDb_not_buffered,
124648 +    ZSTDb_buffered
124649 +} ZSTD_buffered_policy_e;
124651 +struct ZSTD_CCtx_s {
124652 +    ZSTD_compressionStage_e stage;
124653 +    int cParamsChanged;                  /* == 1 if cParams(except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. */
124654 +    int bmi2;                            /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
124655 +    ZSTD_CCtx_params requestedParams;
124656 +    ZSTD_CCtx_params appliedParams;
124657 +    U32   dictID;
124658 +    size_t dictContentSize;
124660 +    ZSTD_cwksp workspace; /* manages buffer for dynamic allocations */
124661 +    size_t blockSize;
124662 +    unsigned long long pledgedSrcSizePlusOne;  /* this way, 0 (default) == unknown */
124663 +    unsigned long long consumedSrcSize;
124664 +    unsigned long long producedCSize;
124665 +    struct xxh64_state xxhState;
124666 +    ZSTD_customMem customMem;
124667 +    ZSTD_threadPool* pool;
124668 +    size_t staticSize;
124669 +    SeqCollector seqCollector;
124670 +    int isFirstBlock;
124671 +    int initialized;
124673 +    seqStore_t seqStore;      /* sequences storage ptrs */
124674 +    ldmState_t ldmState;      /* long distance matching state */
124675 +    rawSeq* ldmSequences;     /* Storage for the ldm output sequences */
124676 +    size_t maxNbLdmSequences;
124677 +    rawSeqStore_t externSeqStore; /* Mutable reference to external sequences */
124678 +    ZSTD_blockState_t blockState;
124679 +    U32* entropyWorkspace;  /* entropy workspace of ENTROPY_WORKSPACE_SIZE bytes */
124681 +    /* Whether we are streaming or not */
124682 +    ZSTD_buffered_policy_e bufferedPolicy;
124684 +    /* streaming */
124685 +    char*  inBuff;
124686 +    size_t inBuffSize;
124687 +    size_t inToCompress;
124688 +    size_t inBuffPos;
124689 +    size_t inBuffTarget;
124690 +    char*  outBuff;
124691 +    size_t outBuffSize;
124692 +    size_t outBuffContentSize;
124693 +    size_t outBuffFlushedSize;
124694 +    ZSTD_cStreamStage streamStage;
124695 +    U32    frameEnded;
124697 +    /* Stable in/out buffer verification */
124698 +    ZSTD_inBuffer expectedInBuffer;
124699 +    size_t expectedOutBufferSize;
124701 +    /* Dictionary */
124702 +    ZSTD_localDict localDict;
124703 +    const ZSTD_CDict* cdict;
124704 +    ZSTD_prefixDict prefixDict;   /* single-usage dictionary */
124706 +    /* Multi-threading */
124708 +    /* Tracing */
124711 +typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;
124713 +typedef enum {
124714 +    ZSTD_noDict = 0,
124715 +    ZSTD_extDict = 1,
124716 +    ZSTD_dictMatchState = 2,
124717 +    ZSTD_dedicatedDictSearch = 3
124718 +} ZSTD_dictMode_e;
124720 +typedef enum {
124721 +    ZSTD_cpm_noAttachDict = 0,  /* Compression with ZSTD_noDict or ZSTD_extDict.
124722 +                                 * In this mode we use both the srcSize and the dictSize
124723 +                                 * when selecting and adjusting parameters.
124724 +                                 */
124725 +    ZSTD_cpm_attachDict = 1,    /* Compression with ZSTD_dictMatchState or ZSTD_dedicatedDictSearch.
124726 +                                 * In this mode we only take the srcSize into account when selecting
124727 +                                 * and adjusting parameters.
124728 +                                 */
124729 +    ZSTD_cpm_createCDict = 2,   /* Creating a CDict.
124730 +                                 * In this mode we take both the source size and the dictionary size
124731 +                                 * into account when selecting and adjusting the parameters.
124732 +                                 */
124733 +    ZSTD_cpm_unknown = 3,       /* ZSTD_getCParams, ZSTD_getParams, ZSTD_adjustParams.
124734 +                                 * We don't know what these parameters are for. We default to the legacy
124735 +                                 * behavior of taking both the source size and the dict size into account
124736 +                                 * when selecting and adjusting parameters.
124737 +                                 */
124738 +} ZSTD_cParamMode_e;
124740 +typedef size_t (*ZSTD_blockCompressor) (
124741 +        ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124742 +        void const* src, size_t srcSize);
124743 +ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode);
124746 +MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
124748 +    static const BYTE LL_Code[64] = {  0,  1,  2,  3,  4,  5,  6,  7,
124749 +                                       8,  9, 10, 11, 12, 13, 14, 15,
124750 +                                      16, 16, 17, 17, 18, 18, 19, 19,
124751 +                                      20, 20, 20, 20, 21, 21, 21, 21,
124752 +                                      22, 22, 22, 22, 22, 22, 22, 22,
124753 +                                      23, 23, 23, 23, 23, 23, 23, 23,
124754 +                                      24, 24, 24, 24, 24, 24, 24, 24,
124755 +                                      24, 24, 24, 24, 24, 24, 24, 24 };
124756 +    static const U32 LL_deltaCode = 19;
124757 +    return (litLength > 63) ? ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
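A few worked values may help (derived directly from the table and formula above):

/* litLength = 10  -> LL_Code[10]              = 10 (short lengths map 1:1)
 * litLength = 40  -> LL_Code[40]              = 23
 * litLength = 100 -> ZSTD_highbit32(100) + 19 = 6 + 19 = 25 (log2 bucket) */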
124760 +/* ZSTD_MLcode() :
124761 + * note : mlBase = matchLength - MINMATCH;
124762 + *        because that's the format in which it's stored in seqStore->sequences */
124763 +MEM_STATIC U32 ZSTD_MLcode(U32 mlBase)
124765 +    static const BYTE ML_Code[128] = { 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
124766 +                                      16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
124767 +                                      32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
124768 +                                      38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
124769 +                                      40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
124770 +                                      41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
124771 +                                      42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
124772 +                                      42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 };
124773 +    static const U32 ML_deltaCode = 36;
124774 +    return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase];
124777 +typedef struct repcodes_s {
124778 +    U32 rep[3];
124779 +} repcodes_t;
124781 +MEM_STATIC repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
124783 +    repcodes_t newReps;
124784 +    if (offset >= ZSTD_REP_NUM) {  /* full offset */
124785 +        newReps.rep[2] = rep[1];
124786 +        newReps.rep[1] = rep[0];
124787 +        newReps.rep[0] = offset - ZSTD_REP_MOVE;
124788 +    } else {   /* repcode */
124789 +        U32 const repCode = offset + ll0;
124790 +        if (repCode > 0) {  /* note : if repCode==0, no change */
124791 +            U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
124792 +            newReps.rep[2] = (repCode >= 2) ? rep[1] : rep[2];
124793 +            newReps.rep[1] = rep[0];
124794 +            newReps.rep[0] = currentOffset;
124795 +        } else {   /* repCode == 0 */
124796 +            ZSTD_memcpy(&newReps, rep, sizeof(newReps));
124797 +        }
124798 +    }
124799 +    return newReps;
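To make the update rule concrete, worked examples with rep = {8, 16, 32} (illustrative values; ZSTD_REP_NUM == 3, ZSTD_REP_MOVE == 2):

/* offset = 5 (full offset)        -> {5 - 2, 8, 16} = {3, 8, 16}
 * offset = 1, ll0 = 0 (repcode 1) -> {16, 8, 32}  (rep[1] moves to front)
 * offset = 2, ll0 = 1 (repcode 3) -> {7, 8, 16}   (special case rep[0] - 1)
 * offset = 0, ll0 = 0 (repcode 0) -> {8, 16, 32}  (unchanged) */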
124802 +/* ZSTD_cParam_withinBounds:
124803 + * @return 1 if value is within cParam bounds,
124804 + * 0 otherwise */
124805 +MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
124807 +    ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
124808 +    if (ZSTD_isError(bounds.error)) return 0;
124809 +    if (value < bounds.lowerBound) return 0;
124810 +    if (value > bounds.upperBound) return 0;
124811 +    return 1;
124814 +/* ZSTD_noCompressBlock() :
124815 + * Writes uncompressed block to dst buffer from given src.
124816 + * Returns the size of the block */
124817 +MEM_STATIC size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
124819 +    U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
124820 +    RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,
124821 +                    dstSize_tooSmall, "dst buf too small for uncompressed block");
124822 +    MEM_writeLE24(dst, cBlockHeader24);
124823 +    ZSTD_memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
124824 +    return ZSTD_blockHeaderSize + srcSize;
124827 +MEM_STATIC size_t ZSTD_rleCompressBlock (void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock)
124829 +    BYTE* const op = (BYTE*)dst;
124830 +    U32 const cBlockHeader = lastBlock + (((U32)bt_rle)<<1) + (U32)(srcSize << 3);
124831 +    RETURN_ERROR_IF(dstCapacity < 4, dstSize_tooSmall, "");
124832 +    MEM_writeLE24(op, cBlockHeader);
124833 +    op[3] = src;
124834 +    return 4;
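Both helpers assemble the standard 3-byte zstd block header: bit 0 is the last-block flag, bits 1-2 the block type, bits 3-23 the regenerated size. A worked example for the RLE case:

/* lastBlock = 1, srcSize = 100:
 *   header = 1 + (bt_rle << 1) + (100 << 3) = 1 + 2 + 800 = 803 = 0x000323
 * MEM_writeLE24 emits 0x23 0x03 0x00, then the single repeated byte
 * follows, giving the 4 bytes returned above. */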
124838 +/* ZSTD_minGain() :
124839 + * minimum compression required
124840 + * to generate a compress block or a compressed literals section.
124841 + * note : use same formula for both situations */
124842 +MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
124844 +    U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6;
124845 +    ZSTD_STATIC_ASSERT(ZSTD_btultra == 8);
124846 +    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
124847 +    return (srcSize >> minlog) + 2;
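Concretely, for srcSize = 10000 (values derived from the formula above):

/* strat = ZSTD_lazy2    -> minlog = 6 -> minGain = (10000 >> 6) + 2 = 158
 * strat = ZSTD_btultra2 -> minlog = 8 -> minGain = (10000 >> 8) + 2 = 41
 * i.e. stronger strategies accept smaller savings before emitting
 * a compressed block or a compressed literals section. */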
124850 +MEM_STATIC int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParams)
124852 +    switch (cctxParams->literalCompressionMode) {
124853 +    case ZSTD_lcm_huffman:
124854 +        return 0;
124855 +    case ZSTD_lcm_uncompressed:
124856 +        return 1;
124857 +    default:
124858 +        assert(0 /* impossible: pre-validated */);
124859 +        /* fall-through */
124860 +    case ZSTD_lcm_auto:
124861 +        return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
124862 +    }
124865 +/*! ZSTD_safecopyLiterals() :
124866 + *  memcpy() function that won't read more than WILDCOPY_OVERLENGTH bytes past ilimit_w.
124867 + *  Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single
124868 + *  large copies.
124869 + */
124870 +static void ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w) {
124871 +    assert(iend > ilimit_w);
124872 +    if (ip <= ilimit_w) {
124873 +        ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap);
124874 +        op += ilimit_w - ip;
124875 +        ip = ilimit_w;
124876 +    }
124877 +    while (ip < iend) *op++ = *ip++;
124880 +/*! ZSTD_storeSeq() :
124881 + *  Store a sequence (litlen, litPtr, offCode and mlBase) into seqStore_t.
124882 + *  `offCode` : distance to match + ZSTD_REP_MOVE (values <= ZSTD_REP_MOVE are repCodes).
124883 + *  `mlBase` : matchLength - MINMATCH
124884 + *  Allowed to overread literals up to litLimit.
124886 +HINT_INLINE UNUSED_ATTR
124887 +void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, const BYTE* litLimit, U32 offCode, size_t mlBase)
124889 +    BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH;
124890 +    BYTE const* const litEnd = literals + litLength;
124891 +#if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6)
124892 +    static const BYTE* g_start = NULL;
124893 +    if (g_start==NULL) g_start = (const BYTE*)literals;  /* note : index only works for compression within a single segment */
124894 +    {   U32 const pos = (U32)((const BYTE*)literals - g_start);
124895 +        DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u",
124896 +               pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offCode);
124897 +    }
124898 +#endif
124899 +    assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
124900 +    /* copy Literals */
124901 +    assert(seqStorePtr->maxNbLit <= 128 KB);
124902 +    assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit);
124903 +    assert(literals + litLength <= litLimit);
124904 +    if (litEnd <= litLimit_w) {
124905 +        /* Common case we can use wildcopy.
124906 +        * First copy 16 bytes, because literals are likely short.
124907 +        */
124908 +        assert(WILDCOPY_OVERLENGTH >= 16);
124909 +        ZSTD_copy16(seqStorePtr->lit, literals);
124910 +        if (litLength > 16) {
124911 +            ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap);
124912 +        }
124913 +    } else {
124914 +        ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w);
124915 +    }
124916 +    seqStorePtr->lit += litLength;
124918 +    /* literal Length */
124919 +    if (litLength>0xFFFF) {
124920 +        assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
124921 +        seqStorePtr->longLengthID = 1;
124922 +        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
124923 +    }
124924 +    seqStorePtr->sequences[0].litLength = (U16)litLength;
124926 +    /* match offset */
124927 +    seqStorePtr->sequences[0].offset = offCode + 1;
124929 +    /* match Length */
124930 +    if (mlBase>0xFFFF) {
124931 +        assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
124932 +        seqStorePtr->longLengthID = 2;
124933 +        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
124934 +    }
124935 +    seqStorePtr->sequences[0].matchLength = (U16)mlBase;
124937 +    seqStorePtr->sequences++;
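To make the offCode convention concrete, a hedged fragment showing two calls as a block compressor might issue them (pointers assumed valid; matchLength 9, and MINMATCH == 3):

/* full offset: a match at distance 1000 */
ZSTD_storeSeq(seqStorePtr, 4, ip, iend, 1000 + ZSTD_REP_MOVE, 9 - MINMATCH);
/* repcode: offCode values 0..ZSTD_REP_MOVE select repeat offsets 1..3 */
ZSTD_storeSeq(seqStorePtr, 4, ip, iend, 0, 9 - MINMATCH);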
124941 +/*-*************************************
124942 +*  Match length counter
124943 +***************************************/
124944 +static unsigned ZSTD_NbCommonBytes (size_t val)
124946 +    if (MEM_isLittleEndian()) {
124947 +        if (MEM_64bits()) {
124948 +#       if (__GNUC__ >= 4)
124949 +            return (__builtin_ctzll((U64)val) >> 3);
124950 +#       else
124951 +            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
124952 +                                                     0, 3, 1, 3, 1, 4, 2, 7,
124953 +                                                     0, 2, 3, 6, 1, 5, 3, 5,
124954 +                                                     1, 3, 4, 4, 2, 5, 6, 7,
124955 +                                                     7, 0, 1, 2, 3, 3, 4, 6,
124956 +                                                     2, 6, 5, 5, 3, 4, 5, 6,
124957 +                                                     7, 1, 2, 4, 6, 4, 4, 5,
124958 +                                                     7, 2, 6, 5, 7, 6, 7, 7 };
124959 +            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
124960 +#       endif
124961 +        } else { /* 32 bits */
124962 +#       if (__GNUC__ >= 3)
124963 +            return (__builtin_ctz((U32)val) >> 3);
124964 +#       else
124965 +            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
124966 +                                                     3, 2, 2, 1, 3, 2, 0, 1,
124967 +                                                     3, 3, 1, 2, 2, 2, 2, 0,
124968 +                                                     3, 1, 2, 0, 1, 0, 1, 1 };
124969 +            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
124970 +#       endif
124971 +        }
124972 +    } else {  /* Big Endian CPU */
124973 +        if (MEM_64bits()) {
124974 +#       if (__GNUC__ >= 4)
124975 +            return (__builtin_clzll(val) >> 3);
124976 +#       else
124977 +            unsigned r;
124978 +            const unsigned n32 = sizeof(size_t)*4;   /* calculate this way due to compiler complaining in 32-bit mode */
124979 +            if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
124980 +            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
124981 +            r += (!val);
124982 +            return r;
124983 +#       endif
124984 +        } else { /* 32 bits */
124985 +#       if (__GNUC__ >= 3)
124986 +            return (__builtin_clz((U32)val) >> 3);
124987 +#       else
124988 +            unsigned r;
124989 +            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
124990 +            r += (!val);
124991 +            return r;
124992 +#       endif
124993 +    }   }
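The little-endian 64-bit path is easiest to see with bytes written out (a worked example, not part of the patch):

/* pIn    bytes: 41 42 43 44 ...
 * pMatch bytes: 41 42 45 44 ...
 * diff = read64(pMatch) ^ read64(pIn) has its lowest set bit inside byte 2
 * (0x43 ^ 0x45 = 0x06), so __builtin_ctzll(diff) lies in [16, 24) and
 * (ctz >> 3) == 2: the first two bytes match. */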
124997 +MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
124999 +    const BYTE* const pStart = pIn;
125000 +    const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);
125002 +    if (pIn < pInLoopLimit) {
125003 +        { size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
125004 +          if (diff) return ZSTD_NbCommonBytes(diff); }
125005 +        pIn+=sizeof(size_t); pMatch+=sizeof(size_t);
125006 +        while (pIn < pInLoopLimit) {
125007 +            size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
125008 +            if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
125009 +            pIn += ZSTD_NbCommonBytes(diff);
125010 +            return (size_t)(pIn - pStart);
125011 +    }   }
125012 +    if (MEM_64bits() && (pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }
125013 +    if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }
125014 +    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
125015 +    return (size_t)(pIn - pStart);
125018 +/** ZSTD_count_2segments() :
125019 + *  can count match length with `ip` & `match` in 2 different segments.
125020 + *  convention : on reaching mEnd, the match count continues, starting from iStart
125021 + */
125022 +MEM_STATIC size_t
125023 +ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
125024 +                     const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
125026 +    const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
125027 +    size_t const matchLength = ZSTD_count(ip, match, vEnd);
125028 +    if (match + matchLength != mEnd) return matchLength;
125029 +    DEBUGLOG(7, "ZSTD_count_2segments: found a 2-parts match (current length==%zu)", matchLength);
125030 +    DEBUGLOG(7, "distance from match beginning to end of dictionary = %zi", mEnd - match);
125031 +    DEBUGLOG(7, "distance from current pos to end of buffer = %zi", iEnd - ip);
125032 +    DEBUGLOG(7, "next byte : ip==%02X, istart==%02X", ip[matchLength], *iStart);
125033 +    DEBUGLOG(7, "final match length = %zu", matchLength + ZSTD_count(ip+matchLength, iStart, iEnd));
125034 +    return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
125038 +/*-*************************************
125039 + *  Hashes
125040 + ***************************************/
125041 +static const U32 prime3bytes = 506832829U;
125042 +static U32    ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes)  >> (32-h) ; }
125043 +MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */
125045 +static const U32 prime4bytes = 2654435761U;
125046 +static U32    ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
125047 +static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }
125049 +static const U64 prime5bytes = 889523592379ULL;
125050 +static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u  << (64-40)) * prime5bytes) >> (64-h)) ; }
125051 +static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }
125053 +static const U64 prime6bytes = 227718039650203ULL;
125054 +static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u  << (64-48)) * prime6bytes) >> (64-h)) ; }
125055 +static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
125057 +static const U64 prime7bytes = 58295818150454627ULL;
125058 +static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u  << (64-56)) * prime7bytes) >> (64-h)) ; }
125059 +static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }
125061 +static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
125062 +static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
125063 +static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
125065 +MEM_STATIC FORCE_INLINE_ATTR
125066 +size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
125068 +    switch(mls)
125069 +    {
125070 +    default:
125071 +    case 4: return ZSTD_hash4Ptr(p, hBits);
125072 +    case 5: return ZSTD_hash5Ptr(p, hBits);
125073 +    case 6: return ZSTD_hash6Ptr(p, hBits);
125074 +    case 7: return ZSTD_hash7Ptr(p, hBits);
125075 +    case 8: return ZSTD_hash8Ptr(p, hBits);
125076 +    }
125079 +/** ZSTD_ipow() :
125080 + * Return base^exponent.
125081 + */
125082 +static U64 ZSTD_ipow(U64 base, U64 exponent)
125084 +    U64 power = 1;
125085 +    while (exponent) {
125086 +      if (exponent & 1) power *= base;
125087 +      exponent >>= 1;
125088 +      base *= base;
125089 +    }
125090 +    return power;
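A short square-and-multiply trace (pure arithmetic, mod 2^64):

/* ZSTD_ipow(3, 5), 5 = 0b101:
 *   bit 0 set   -> power = 1 * 3  = 3;    base = 3^2  = 9
 *   bit 1 clear ->                        base = 9^2  = 81
 *   bit 2 set   -> power = 3 * 81 = 243;  base = 81^2 = 6561
 * result: 243 == 3^5, in O(log exponent) multiplications. */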
125093 +#define ZSTD_ROLL_HASH_CHAR_OFFSET 10
125095 +/** ZSTD_rollingHash_append() :
125096 + * Add the buffer to the hash value.
125097 + */
125098 +static U64 ZSTD_rollingHash_append(U64 hash, void const* buf, size_t size)
125100 +    BYTE const* istart = (BYTE const*)buf;
125101 +    size_t pos;
125102 +    for (pos = 0; pos < size; ++pos) {
125103 +        hash *= prime8bytes;
125104 +        hash += istart[pos] + ZSTD_ROLL_HASH_CHAR_OFFSET;
125105 +    }
125106 +    return hash;
125109 +/** ZSTD_rollingHash_compute() :
125110 + * Compute the rolling hash value of the buffer.
125111 + */
125112 +MEM_STATIC U64 ZSTD_rollingHash_compute(void const* buf, size_t size)
125114 +    return ZSTD_rollingHash_append(0, buf, size);
125117 +/** ZSTD_rollingHash_primePower() :
125118 + * Compute the primePower to be passed to ZSTD_rollingHash_rotate() for a hash
125119 + * over a window of length bytes.
125120 + */
125121 +MEM_STATIC U64 ZSTD_rollingHash_primePower(U32 length)
125123 +    return ZSTD_ipow(prime8bytes, length - 1);
125126 +/** ZSTD_rollingHash_rotate() :
125127 + * Rotate the rolling hash by one byte.
125128 + */
125129 +MEM_STATIC U64 ZSTD_rollingHash_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64 primePower)
125131 +    hash -= (toRemove + ZSTD_ROLL_HASH_CHAR_OFFSET) * primePower;
125132 +    hash *= prime8bytes;
125133 +    hash += toAdd + ZSTD_ROLL_HASH_CHAR_OFFSET;
125134 +    return hash;
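Together the three helpers satisfy a sliding-window invariant: rotating out the oldest byte and rotating in the next one equals recomputing the hash of the shifted window. A sketch of the property (the helper name is hypothetical; buf must hold n + 1 bytes):

static void rollingHash_propertyCheck(const BYTE* buf, size_t n)
{
    U64 const pp = ZSTD_rollingHash_primePower((U32)n);
    U64 const h0 = ZSTD_rollingHash_compute(buf, n);
    U64 const h1 = ZSTD_rollingHash_rotate(h0, buf[0], buf[n], pp);
    assert(h1 == ZSTD_rollingHash_compute(buf + 1, n));
}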
125137 +/*-*************************************
125138 +*  Round buffer management
125139 +***************************************/
125140 +#if (ZSTD_WINDOWLOG_MAX_64 > 31)
125141 +# error "ZSTD_WINDOWLOG_MAX is too large : would overflow ZSTD_CURRENT_MAX"
125142 +#endif
125143 +/* Max current allowed */
125144 +#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX))
125145 +/* Maximum chunk size before overflow correction needs to be called again */
125146 +#define ZSTD_CHUNKSIZE_MAX                                                     \
125147 +    ( ((U32)-1)                  /* Maximum ending current index */            \
125148 +    - ZSTD_CURRENT_MAX)          /* Maximum beginning lowLimit */
125151 + * ZSTD_window_clear():
125152 + * Clears the window containing the history by simply setting it to empty.
125153 + */
125154 +MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window)
125156 +    size_t const endT = (size_t)(window->nextSrc - window->base);
125157 +    U32 const end = (U32)endT;
125159 +    window->lowLimit = end;
125160 +    window->dictLimit = end;
125164 + * ZSTD_window_hasExtDict():
125165 + * Returns non-zero if the window has a non-empty extDict.
125166 + */
125167 +MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window)
125169 +    return window.lowLimit < window.dictLimit;
125173 + * ZSTD_matchState_dictMode():
125174 + * Inspects the provided matchState and figures out what dictMode should be
125175 + * passed to the compressor.
125176 + */
125177 +MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms)
125179 +    return ZSTD_window_hasExtDict(ms->window) ?
125180 +        ZSTD_extDict :
125181 +        ms->dictMatchState != NULL ?
125182 +            (ms->dictMatchState->dedicatedDictSearch ? ZSTD_dedicatedDictSearch : ZSTD_dictMatchState) :
125183 +            ZSTD_noDict;
125187 + * ZSTD_window_needOverflowCorrection():
125188 + * Returns non-zero if the indices are getting too large and need overflow
125189 + * protection.
125190 + */
125191 +MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
125192 +                                                  void const* srcEnd)
125194 +    U32 const curr = (U32)((BYTE const*)srcEnd - window.base);
125195 +    return curr > ZSTD_CURRENT_MAX;
125199 + * ZSTD_window_correctOverflow():
125200 + * Reduces the indices to protect from index overflow.
125201 + * Returns the correction made to the indices, which must be applied to every
125202 + * stored index.
125204 + * The least significant cycleLog bits of the indices must remain the same,
125205 + * which may be 0. Every index up to maxDist in the past must be valid.
125206 + * NOTE: (maxDist & cycleMask) must be zero.
125207 + */
125208 +MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
125209 +                                           U32 maxDist, void const* src)
125211 +    /* preemptive overflow correction:
125212 +     * 1. correction is large enough:
125213 +     *    lowLimit > (3<<29) ==> current > 3<<29 + 1<<windowLog
125214 +     *    1<<windowLog <= newCurrent < 1<<chainLog + 1<<windowLog
125215 +     *
125216 +     *    current - newCurrent
125217 +     *    > (3<<29 + 1<<windowLog) - (1<<windowLog + 1<<chainLog)
125218 +     *    > (3<<29) - (1<<chainLog)
125219 +     *    > (3<<29) - (1<<30)             (NOTE: chainLog <= 30)
125220 +     *    > 1<<29
125221 +     *
125222 +     * 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow:
125223 +     *    After correction, current is less than (1<<chainLog + 1<<windowLog).
125224 +     *    In 64-bit mode we are safe, because we have 64-bit ptrdiff_t.
125225 +     *    In 32-bit mode we are safe, because (chainLog <= 29), so
125226 +     *    ip+ZSTD_CHUNKSIZE_MAX - cctx->base < 1<<32.
125227 +     * 3. (cctx->lowLimit + 1<<windowLog) < 1<<32:
125228 +     *    windowLog <= 31 ==> 3<<29 + 1<<windowLog < 7<<29 < 1<<32.
125229 +     */
125230 +    U32 const cycleMask = (1U << cycleLog) - 1;
125231 +    U32 const curr = (U32)((BYTE const*)src - window->base);
125232 +    U32 const currentCycle0 = curr & cycleMask;
125233 +    /* Exclude zero so that newCurrent - maxDist >= 1. */
125234 +    U32 const currentCycle1 = currentCycle0 == 0 ? (1U << cycleLog) : currentCycle0;
125235 +    U32 const newCurrent = currentCycle1 + maxDist;
125236 +    U32 const correction = curr - newCurrent;
125237 +    assert((maxDist & cycleMask) == 0);
125238 +    assert(curr > newCurrent);
125239 +    /* Loose bound, should be around 1<<29 (see above) */
125240 +    assert(correction > 1<<28);
125242 +    window->base += correction;
125243 +    window->dictBase += correction;
125244 +    if (window->lowLimit <= correction) window->lowLimit = 1;
125245 +    else window->lowLimit -= correction;
125246 +    if (window->dictLimit <= correction) window->dictLimit = 1;
125247 +    else window->dictLimit -= correction;
125249 +    /* Ensure we can still reference the full window. */
125250 +    assert(newCurrent >= maxDist);
125251 +    assert(newCurrent - maxDist >= 1);
125252 +    /* Ensure that lowLimit and dictLimit didn't underflow. */
125253 +    assert(window->lowLimit <= newCurrent);
125254 +    assert(window->dictLimit <= newCurrent);
125256 +    DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction,
125257 +             window->lowLimit);
125258 +    return correction;
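A worked instance of the correction arithmetic (values illustrative): take cycleLog = 20 and maxDist = 1 << 20, so maxDist & cycleMask == 0 as required:

/* curr          = 0xC0000123
 * cycleMask     = (1U << 20) - 1          = 0x000FFFFF
 * currentCycle0 = curr & cycleMask        = 0x00123  (non-zero, used as-is)
 * newCurrent    = 0x00123 + (1U << 20)    = 0x00100123
 * correction    = 0xC0000123 - 0x00100123 = 0xBFF00000  (> 1<<28, as asserted)
 * The low 20 bits of every index are preserved (0x123), and the window
 * [newCurrent - maxDist, newCurrent) stays addressable. */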
125262 + * ZSTD_window_enforceMaxDist():
125263 + * Updates lowLimit so that:
125264 + *    (srcEnd - base) - lowLimit == maxDist + loadedDictEnd
125266 + * It ensures index is valid as long as index >= lowLimit.
125267 + * This must be called before a block compression call.
125269 + * loadedDictEnd is only defined if a dictionary is in use for current compression.
125270 + * As the name implies, loadedDictEnd represents the index at end of dictionary.
125271 + * The value lies within the context's referential, so it can be directly compared to blockEndIdx.
125273 + * If loadedDictEndPtr is NULL, no dictionary is in use, and we use loadedDictEnd == 0.
125274 + * If loadedDictEndPtr is not NULL, we set it to zero after updating lowLimit.
125275 + * This is because dictionaries are allowed to be referenced fully
125276 + * as long as the last byte of the dictionary is in the window.
125277 + * Once input has progressed beyond window size, dictionary cannot be referenced anymore.
125279 + * In normal dict mode, the dictionary lies between lowLimit and dictLimit.
125280 + * In dictMatchState mode, lowLimit and dictLimit are the same,
125281 + * and the dictionary is below them.
125282 + * forceWindow and dictMatchState are therefore incompatible.
125283 + */
125284 +MEM_STATIC void
125285 +ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
125286 +                     const void* blockEnd,
125287 +                           U32   maxDist,
125288 +                           U32*  loadedDictEndPtr,
125289 +                     const ZSTD_matchState_t** dictMatchStatePtr)
125291 +    U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
125292 +    U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
125293 +    DEBUGLOG(5, "ZSTD_window_enforceMaxDist: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
125294 +                (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
125296 +    /* - When there is no dictionary : loadedDictEnd == 0.
125297 +         In which case, the test (blockEndIdx > maxDist) is merely to avoid
125298 +         overflowing next operation `newLowLimit = blockEndIdx - maxDist`.
125299 +       - When there is a standard dictionary :
125300 +         Index referential is copied from the dictionary,
125301 +         which means it starts from 0.
125302 +         In which case, loadedDictEnd == dictSize,
125303 +         and it makes sense to compare `blockEndIdx > maxDist + dictSize`
125304 +         since `blockEndIdx` also starts from zero.
125305 +       - When there is an attached dictionary :
125306 +         loadedDictEnd is expressed within the referential of the context,
125307 +         so it can be directly compared against blockEndIdx.
125308 +    */
125309 +    if (blockEndIdx > maxDist + loadedDictEnd) {
125310 +        U32 const newLowLimit = blockEndIdx - maxDist;
125311 +        if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit;
125312 +        if (window->dictLimit < window->lowLimit) {
125313 +            DEBUGLOG(5, "Update dictLimit to match lowLimit, from %u to %u",
125314 +                        (unsigned)window->dictLimit, (unsigned)window->lowLimit);
125315 +            window->dictLimit = window->lowLimit;
125316 +        }
125317 +        /* On reaching window size, dictionaries are invalidated */
125318 +        if (loadedDictEndPtr) *loadedDictEndPtr = 0;
125319 +        if (dictMatchStatePtr) *dictMatchStatePtr = NULL;
125320 +    }
125323 +/* Similar to ZSTD_window_enforceMaxDist(),
125324 + * but only invalidates dictionary
125325 + * when input progresses beyond window size.
125326 + * assumption : loadedDictEndPtr and dictMatchStatePtr are valid (non NULL)
125327 + *              loadedDictEnd uses same referential as window->base
125328 + *              maxDist is the window size */
125329 +MEM_STATIC void
125330 +ZSTD_checkDictValidity(const ZSTD_window_t* window,
125331 +                       const void* blockEnd,
125332 +                             U32   maxDist,
125333 +                             U32*  loadedDictEndPtr,
125334 +                       const ZSTD_matchState_t** dictMatchStatePtr)
125336 +    assert(loadedDictEndPtr != NULL);
125337 +    assert(dictMatchStatePtr != NULL);
125338 +    {   U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
125339 +        U32 const loadedDictEnd = *loadedDictEndPtr;
125340 +        DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
125341 +                    (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
125342 +        assert(blockEndIdx >= loadedDictEnd);
125344 +        if (blockEndIdx > loadedDictEnd + maxDist) {
125345 +            /* On reaching window size, dictionaries are invalidated.
125346 +             * For simplification, if window size is reached anywhere within next block,
125347 +             * the dictionary is invalidated for the full block.
125348 +             */
125349 +            DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)");
125350 +            *loadedDictEndPtr = 0;
125351 +            *dictMatchStatePtr = NULL;
125352 +        } else {
125353 +            if (*loadedDictEndPtr != 0) {
125354 +                DEBUGLOG(6, "dictionary considered valid for current block");
125355 +    }   }   }
125358 +MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) {
125359 +    ZSTD_memset(window, 0, sizeof(*window));
125360 +    window->base = (BYTE const*)"";
125361 +    window->dictBase = (BYTE const*)"";
125362 +    window->dictLimit = 1;    /* start from 1, so that 1st position is valid */
125363 +    window->lowLimit = 1;     /* it ensures first and later CCtx usages compress the same */
125364 +    window->nextSrc = window->base + 1;   /* see issue #1241 */
125368 + * ZSTD_window_update():
125369 + * Updates the window by appending [src, src + srcSize) to the window.
125370 + * If it is not contiguous, the current prefix becomes the extDict, and we
125371 + * forget about the old extDict. Handles overlap of the prefix and extDict.
125372 + * Returns non-zero if the segment is contiguous.
125373 + */
125374 +MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
125375 +                                  void const* src, size_t srcSize)
125377 +    BYTE const* const ip = (BYTE const*)src;
125378 +    U32 contiguous = 1;
125379 +    DEBUGLOG(5, "ZSTD_window_update");
125380 +    if (srcSize == 0)
125381 +        return contiguous;
125382 +    assert(window->base != NULL);
125383 +    assert(window->dictBase != NULL);
125384 +    /* Check if blocks follow each other */
125385 +    if (src != window->nextSrc) {
125386 +        /* not contiguous */
125387 +        size_t const distanceFromBase = (size_t)(window->nextSrc - window->base);
125388 +        DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u", window->dictLimit);
125389 +        window->lowLimit = window->dictLimit;
125390 +        assert(distanceFromBase == (size_t)(U32)distanceFromBase);  /* should never overflow */
125391 +        window->dictLimit = (U32)distanceFromBase;
125392 +        window->dictBase = window->base;
125393 +        window->base = ip - distanceFromBase;
125394 +        /* ms->nextToUpdate = window->dictLimit; */
125395 +        if (window->dictLimit - window->lowLimit < HASH_READ_SIZE) window->lowLimit = window->dictLimit;   /* too small extDict */
125396 +        contiguous = 0;
125397 +    }
125398 +    window->nextSrc = ip + srcSize;
125399 +    /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
125400 +    if ( (ip+srcSize > window->dictBase + window->lowLimit)
125401 +       & (ip < window->dictBase + window->dictLimit)) {
125402 +        ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase;
125403 +        U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
125404 +        window->lowLimit = lowLimitMax;
125405 +        DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit);
125406 +    }
125407 +    return contiguous;
125408 +}
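/* Editor's aside (hypothetical values, not part of the patch): suppose the
 * previous segment ended at index 1000 (nextSrc - base == 1000) and a new,
 * non-contiguous buffer ip arrives.  The rebase sets dictLimit = 1000,
 * dictBase = base, base = ip - 1000, so ip itself gets index 1000 and indices
 * stay monotonic across segments.  Afterwards:
 *   idx = 500  -> extDict  (lowLimit <= idx < dictLimit), read via dictBase
 *   idx = 1200 -> prefix   (idx >= dictLimit),            read via base
 */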
125411 + * Returns the lowest allowed match index. It may either be in the ext-dict or the prefix.
125412 + */
125413 +MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
125415 +    U32    const maxDistance = 1U << windowLog;
125416 +    U32    const lowestValid = ms->window.lowLimit;
125417 +    U32    const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
125418 +    U32    const isDictionary = (ms->loadedDictEnd != 0);
125419 +    /* When using a dictionary the entire dictionary is valid if a single byte of the dictionary
125420 +     * is within the window. We invalidate the dictionary (and set loadedDictEnd to 0) when it isn't
125421 +     * valid for the entire block. So this check is sufficient to find the lowest valid match index.
125422 +     */
125423 +    U32    const matchLowest = isDictionary ? lowestValid : withinWindow;
125424 +    return matchLowest;
125428 + * Returns the lowest allowed match index in the prefix.
125429 + */
125430 +MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
125432 +    U32    const maxDistance = 1U << windowLog;
125433 +    U32    const lowestValid = ms->window.dictLimit;
125434 +    U32    const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
125435 +    U32    const isDictionary = (ms->loadedDictEnd != 0);
125436 +    /* When computing the lowest prefix index we need to take the dictionary into account to handle
125437 +     * the edge case where the dictionary and the source are contiguous in memory.
125438 +     */
125439 +    U32    const matchLowest = isDictionary ? lowestValid : withinWindow;
125440 +    return matchLowest;
125441 +}
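/* Editor's sketch of the clamp shared by the two helpers above (standalone,
 * hypothetical name, not part of the patch): the lowest usable index is
 * either the window-clamped bound or, while a dictionary is attached,
 * lowestValid itself. */
static unsigned lowest_index_sketch(unsigned curr, unsigned lowestValid,
                                    unsigned windowLog, int isDictionary)
{
    unsigned const maxDistance = 1u << windowLog;
    unsigned const withinWindow =
        (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
    return isDictionary ? lowestValid : withinWindow;
    /* e.g. (200000, 1, 17, 0) -> 68928 ;  (200000, 1, 17, 1) -> 1 */
}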
125445 +/* debug functions */
125446 +#if (DEBUGLEVEL>=2)
125448 +MEM_STATIC double ZSTD_fWeight(U32 rawStat)
125450 +    U32 const fp_accuracy = 8;
125451 +    U32 const fp_multiplier = (1 << fp_accuracy);
125452 +    U32 const newStat = rawStat + 1;
125453 +    U32 const hb = ZSTD_highbit32(newStat);
125454 +    U32 const BWeight = hb * fp_multiplier;
125455 +    U32 const FWeight = (newStat << fp_accuracy) >> hb;
125456 +    U32 const weight = BWeight + FWeight;
125457 +    assert(hb + fp_accuracy < 31);
125458 +    return (double)weight / fp_multiplier;
125459 +}
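/* Editor's aside: the weight above is log2(rawStat + 1) + 1 in 8.8 fixed
 * point -- hb supplies the integer part, (newStat << 8) >> hb a linear
 * fractional part in [256, 512).  A portable, standalone sketch (hypothetical
 * helpers, not part of the patch):
 */
static unsigned highbit32_sketch(unsigned v)  /* floor(log2(v)), v != 0 */
{
    unsigned r = 0;
    while (v >>= 1) r++;
    return r;
}
static double fweight_sketch(unsigned rawStat)
{
    unsigned const x = rawStat + 1;
    unsigned const hb = highbit32_sketch(x);
    return (double)((hb << 8) + ((x << 8) >> hb)) / 256.0;
    /* x = 256 -> 9.0 ; x = 384 -> 9.5 (exact log2+1: ~9.585) */
}
/* The debug table printer below then uses fWeight(sum) - fWeight(count),
 * which approximates log2(sum/count): each symbol's Shannon bit cost. */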
125461 +/* display a table content,
125462 + * listing each element, its frequency, and its predicted bit cost */
125463 +MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
125465 +    unsigned u, sum;
125466 +    for (u=0, sum=0; u<=max; u++) sum += table[u];
125467 +    DEBUGLOG(2, "total nb elts: %u", sum);
125468 +    for (u=0; u<=max; u++) {
125469 +        DEBUGLOG(2, "%2u: %5u  (%.2f)",
125470 +                u, table[u], ZSTD_fWeight(sum) - ZSTD_fWeight(table[u]) );
125471 +    }
125474 +#endif
125478 +/* ===============================================================
125479 + * Shared internal declarations
125480 + * These prototypes may be called from sources not in lib/compress
125481 + * =============================================================== */
125483 +/* ZSTD_loadCEntropy() :
125484 + * dict : must point at beginning of a valid zstd dictionary.
125485 + * return : size of dictionary header (size of magic number + dict ID + entropy tables)
125486 + * assumptions : magic number supposed already checked
125487 + *               and dictSize >= 8 */
125488 +size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
125489 +                         const void* const dict, size_t dictSize);
125491 +void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs);
125493 +/* ==============================================================
125494 + * Private declarations
125495 + * These prototypes shall only be called from within lib/compress
125496 + * ============================================================== */
125498 +/* ZSTD_getCParamsFromCCtxParams() :
125499 + * cParams are built depending on compressionLevel, src size hints,
125500 + * LDM and manually set compression parameters.
125501 + * Note: srcSizeHint == 0 means 0!
125502 + */
125503 +ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
125504 +        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
125506 +/*! ZSTD_initCStream_internal() :
125507 + *  Private use only. Init streaming operation.
125508 + *  expects params to be valid.
125509 + *  must receive dict, or cdict, or none, but not both.
125510 + *  @return : 0, or an error code */
125511 +size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
125512 +                     const void* dict, size_t dictSize,
125513 +                     const ZSTD_CDict* cdict,
125514 +                     const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize);
125516 +void ZSTD_resetSeqStore(seqStore_t* ssPtr);
125518 +/*! ZSTD_getCParamsFromCDict() :
125519 + *  as the name implies */
125520 +ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);
125522 +/* ZSTD_compressBegin_advanced_internal() :
125523 + * Private use only. To be called from zstdmt_compress.c. */
125524 +size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
125525 +                                    const void* dict, size_t dictSize,
125526 +                                    ZSTD_dictContentType_e dictContentType,
125527 +                                    ZSTD_dictTableLoadMethod_e dtlm,
125528 +                                    const ZSTD_CDict* cdict,
125529 +                                    const ZSTD_CCtx_params* params,
125530 +                                    unsigned long long pledgedSrcSize);
125532 +/* ZSTD_compress_advanced_internal() :
125533 + * Private use only. To be called from zstdmt_compress.c. */
125534 +size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
125535 +                                       void* dst, size_t dstCapacity,
125536 +                                 const void* src, size_t srcSize,
125537 +                                 const void* dict,size_t dictSize,
125538 +                                 const ZSTD_CCtx_params* params);
125541 +/* ZSTD_writeLastEmptyBlock() :
125542 + * output an empty Block with end-of-frame mark to complete a frame
125543 + * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
125544 + *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
125545 + */
125546 +size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);
125549 +/* ZSTD_referenceExternalSequences() :
125550 + * Must be called before starting a compression operation.
125551 + * seqs must parse a prefix of the source.
125552 + * This cannot be used when long range matching is enabled.
125553 + * Zstd will use these sequences, and pass the literals to a secondary block
125554 + * compressor.
125555 + * @return : An error code on failure.
125556 + * NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory
125557 + * access and data corruption.
125558 + */
125559 +size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);
125561 +/** ZSTD_cycleLog() :
125562 + *  condition for correct operation : hashLog > 1 */
125563 +U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat);
125565 +/** ZSTD_CCtx_trace() :
125566 + *  Trace the end of a compression call.
125567 + */
125568 +void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize);
125570 +#endif /* ZSTD_COMPRESS_H */
125571 diff --git a/lib/zstd/compress/zstd_compress_literals.c b/lib/zstd/compress/zstd_compress_literals.c
125572 new file mode 100644
125573 index 000000000000..655bcda4d1f1
125574 --- /dev/null
125575 +++ b/lib/zstd/compress/zstd_compress_literals.c
125576 @@ -0,0 +1,158 @@
125578 + * Copyright (c) Yann Collet, Facebook, Inc.
125579 + * All rights reserved.
125581 + * This source code is licensed under both the BSD-style license (found in the
125582 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
125583 + * in the COPYING file in the root directory of this source tree).
125584 + * You may select, at your option, one of the above-listed licenses.
125585 + */
125587 + /*-*************************************
125588 + *  Dependencies
125589 + ***************************************/
125590 +#include "zstd_compress_literals.h"
125592 +size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
125594 +    BYTE* const ostart = (BYTE*)dst;
125595 +    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
125597 +    RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, "");
125599 +    switch(flSize)
125600 +    {
125601 +        case 1: /* 2 - 1 - 5 */
125602 +            ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
125603 +            break;
125604 +        case 2: /* 2 - 2 - 12 */
125605 +            MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
125606 +            break;
125607 +        case 3: /* 2 - 2 - 20 */
125608 +            MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
125609 +            break;
125610 +        default:   /* not necessary : flSize is {1,2,3} */
125611 +            assert(0);
125612 +    }
125614 +    ZSTD_memcpy(ostart + flSize, src, srcSize);
125615 +    DEBUGLOG(5, "Raw literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize));
125616 +    return srcSize + flSize;
125617 +}
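/* Editor's worked example (hypothetical sizes, not part of the patch):
 * srcSize = 20 raw literals.
 *   flSize    = 1 + (20 > 31) + (20 > 4095) = 1     -> 1-byte header
 *   ostart[0] = set_basic + (20 << 3) = 0xA0        (set_basic == 0 upstream)
 * Layout "2 - 1 - 5": 2 bits block type, 1 bit size format, 5 bits of
 * regenerated size; total output = 1 + 20 = 21 bytes.
 */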
125619 +size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
125621 +    BYTE* const ostart = (BYTE*)dst;
125622 +    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
125624 +    (void)dstCapacity;  /* dstCapacity already guaranteed to be >=4, hence large enough */
125626 +    switch(flSize)
125627 +    {
125628 +        case 1: /* 2 - 1 - 5 */
125629 +            ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
125630 +            break;
125631 +        case 2: /* 2 - 2 - 12 */
125632 +            MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
125633 +            break;
125634 +        case 3: /* 2 - 2 - 20 */
125635 +            MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
125636 +            break;
125637 +        default:   /* not necessary : flSize is {1,2,3} */
125638 +            assert(0);
125639 +    }
125641 +    ostart[flSize] = *(const BYTE*)src;
125642 +    DEBUGLOG(5, "RLE literals: %u -> %u", (U32)srcSize, (U32)flSize + 1);
125643 +    return flSize+1;
125646 +size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
125647 +                              ZSTD_hufCTables_t* nextHuf,
125648 +                              ZSTD_strategy strategy, int disableLiteralCompression,
125649 +                              void* dst, size_t dstCapacity,
125650 +                        const void* src, size_t srcSize,
125651 +                              void* entropyWorkspace, size_t entropyWorkspaceSize,
125652 +                        const int bmi2)
125654 +    size_t const minGain = ZSTD_minGain(srcSize, strategy);
125655 +    size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
125656 +    BYTE*  const ostart = (BYTE*)dst;
125657 +    U32 singleStream = srcSize < 256;
125658 +    symbolEncodingType_e hType = set_compressed;
125659 +    size_t cLitSize;
125661 +    DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i srcSize=%u)",
125662 +                disableLiteralCompression, (U32)srcSize);
125664 +    /* Prepare nextEntropy, assuming reuse of the existing table */
125665 +    ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
125667 +    if (disableLiteralCompression)
125668 +        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
125670 +    /* small ? don't even attempt compression (speed opt) */
125671 +#   define COMPRESS_LITERALS_SIZE_MIN 63
125672 +    {   size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
125673 +        if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
125674 +    }
125676 +    RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression");
125677 +    {   HUF_repeat repeat = prevHuf->repeatMode;
125678 +        int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
125679 +        if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
125680 +        cLitSize = singleStream ?
125681 +            HUF_compress1X_repeat(
125682 +                ostart+lhSize, dstCapacity-lhSize, src, srcSize,
125683 +                HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
125684 +                (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2) :
125685 +            HUF_compress4X_repeat(
125686 +                ostart+lhSize, dstCapacity-lhSize, src, srcSize,
125687 +                HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
125688 +                (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
125689 +        if (repeat != HUF_repeat_none) {
125690 +            /* reused the existing table */
125691 +            DEBUGLOG(5, "Reusing previous huffman table");
125692 +            hType = set_repeat;
125693 +        }
125694 +    }
125696 +    if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {
125697 +        ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
125698 +        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
125699 +    }
125700 +    if (cLitSize==1) {
125701 +        ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
125702 +        return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
125703 +    }
125705 +    if (hType == set_compressed) {
125706 +        /* using a newly constructed table */
125707 +        nextHuf->repeatMode = HUF_repeat_check;
125708 +    }
125710 +    /* Build header */
125711 +    switch(lhSize)
125712 +    {
125713 +    case 3: /* 2 - 2 - 10 - 10 */
125714 +        {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
125715 +            MEM_writeLE24(ostart, lhc);
125716 +            break;
125717 +        }
125718 +    case 4: /* 2 - 2 - 14 - 14 */
125719 +        {   U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
125720 +            MEM_writeLE32(ostart, lhc);
125721 +            break;
125722 +        }
125723 +    case 5: /* 2 - 2 - 18 - 18 */
125724 +        {   U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
125725 +            MEM_writeLE32(ostart, lhc);
125726 +            ostart[4] = (BYTE)(cLitSize >> 10);
125727 +            break;
125728 +        }
125729 +    default:  /* not possible : lhSize is {3,4,5} */
125730 +        assert(0);
125731 +    }
125732 +    DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)srcSize, (U32)(lhSize+cLitSize));
125733 +    return lhSize+cLitSize;
125734 +}
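/* Editor's worked example for the 3-byte header (hypothetical sizes, not part
 * of the patch): single stream, srcSize = 1000, cLitSize = 600, and
 * hType = set_compressed (== 2 upstream).  Layout "2 - 2 - 10 - 10":
 *   lhc = 2 + (0 << 2) + (1000 << 4) + (600 << 14)
 *       = 2 + 16000 + 9830400 = 9846402, written little-endian over 3 bytes.
 * Both 10-bit fields fit: lhSize == 3 implies srcSize < 1 KB, and a
 * successful compression implies cLitSize < srcSize.
 */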
125735 diff --git a/lib/zstd/compress/zstd_compress_literals.h b/lib/zstd/compress/zstd_compress_literals.h
125736 new file mode 100644
125737 index 000000000000..9904c0cd30a0
125738 --- /dev/null
125739 +++ b/lib/zstd/compress/zstd_compress_literals.h
125740 @@ -0,0 +1,29 @@
125742 + * Copyright (c) Yann Collet, Facebook, Inc.
125743 + * All rights reserved.
125745 + * This source code is licensed under both the BSD-style license (found in the
125746 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
125747 + * in the COPYING file in the root directory of this source tree).
125748 + * You may select, at your option, one of the above-listed licenses.
125749 + */
125751 +#ifndef ZSTD_COMPRESS_LITERALS_H
125752 +#define ZSTD_COMPRESS_LITERALS_H
125754 +#include "zstd_compress_internal.h" /* ZSTD_hufCTables_t, ZSTD_minGain() */
125757 +size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
125759 +size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
125761 +size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
125762 +                              ZSTD_hufCTables_t* nextHuf,
125763 +                              ZSTD_strategy strategy, int disableLiteralCompression,
125764 +                              void* dst, size_t dstCapacity,
125765 +                        const void* src, size_t srcSize,
125766 +                              void* entropyWorkspace, size_t entropyWorkspaceSize,
125767 +                        const int bmi2);
125769 +#endif /* ZSTD_COMPRESS_LITERALS_H */
125770 diff --git a/lib/zstd/compress/zstd_compress_sequences.c b/lib/zstd/compress/zstd_compress_sequences.c
125771 new file mode 100644
125772 index 000000000000..08a5b89019dd
125773 --- /dev/null
125774 +++ b/lib/zstd/compress/zstd_compress_sequences.c
125775 @@ -0,0 +1,439 @@
125777 + * Copyright (c) Yann Collet, Facebook, Inc.
125778 + * All rights reserved.
125780 + * This source code is licensed under both the BSD-style license (found in the
125781 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
125782 + * in the COPYING file in the root directory of this source tree).
125783 + * You may select, at your option, one of the above-listed licenses.
125784 + */
125786 + /*-*************************************
125787 + *  Dependencies
125788 + ***************************************/
125789 +#include "zstd_compress_sequences.h"
125792 + * -log2(x / 256) lookup table for x in [0, 256).
125793 + * If x == 0: Return 0
125794 + * Else: Return floor(-log2(x / 256) * 256)
125795 + */
125796 +static unsigned const kInverseProbabilityLog256[256] = {
125797 +    0,    2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,
125798 +    1130, 1100, 1073, 1047, 1024, 1001, 980,  960,  941,  923,  906,  889,
125799 +    874,  859,  844,  830,  817,  804,  791,  779,  768,  756,  745,  734,
125800 +    724,  714,  704,  694,  685,  676,  667,  658,  650,  642,  633,  626,
125801 +    618,  610,  603,  595,  588,  581,  574,  567,  561,  554,  548,  542,
125802 +    535,  529,  523,  517,  512,  506,  500,  495,  489,  484,  478,  473,
125803 +    468,  463,  458,  453,  448,  443,  438,  434,  429,  424,  420,  415,
125804 +    411,  407,  402,  398,  394,  390,  386,  382,  377,  373,  370,  366,
125805 +    362,  358,  354,  350,  347,  343,  339,  336,  332,  329,  325,  322,
125806 +    318,  315,  311,  308,  305,  302,  298,  295,  292,  289,  286,  282,
125807 +    279,  276,  273,  270,  267,  264,  261,  258,  256,  253,  250,  247,
125808 +    244,  241,  239,  236,  233,  230,  228,  225,  222,  220,  217,  215,
125809 +    212,  209,  207,  204,  202,  199,  197,  194,  192,  190,  187,  185,
125810 +    182,  180,  178,  175,  173,  171,  168,  166,  164,  162,  159,  157,
125811 +    155,  153,  151,  149,  146,  144,  142,  140,  138,  136,  134,  132,
125812 +    130,  128,  126,  123,  121,  119,  117,  115,  114,  112,  110,  108,
125813 +    106,  104,  102,  100,  98,   96,   94,   93,   91,   89,   87,   85,
125814 +    83,   82,   80,   78,   76,   74,   73,   71,   69,   67,   66,   64,
125815 +    62,   61,   59,   57,   55,   54,   52,   50,   49,   47,   46,   44,
125816 +    42,   41,   39,   37,   36,   34,   33,   31,   30,   28,   26,   25,
125817 +    23,   22,   20,   19,   17,   16,   14,   13,   11,   10,   8,    7,
125818 +    5,    4,    2,    1,
125819 +};
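/* Editor's sketch: the table can be regenerated host-side (the kernel build
 * has no libm; helper name hypothetical) as floor(-log2(x/256) * 256): */
#include <math.h>
static unsigned inv_prob_log256_sketch(unsigned x)   /* x in [1, 255] */
{
    return (unsigned)floor(-log2((double)x / 256.0) * 256.0);
    /* x = 1 -> 2048 (8 bits), x = 64 -> 512 (2 bits), x = 128 -> 256 (1 bit),
     * matching kInverseProbabilityLog256[1], [64] and [128] above. */
}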
125821 +static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {
125822 +  void const* ptr = ctable;
125823 +  U16 const* u16ptr = (U16 const*)ptr;
125824 +  U32 const maxSymbolValue = MEM_read16(u16ptr + 1);
125825 +  return maxSymbolValue;
125829 + * Returns true if we should use ncount=-1 for low-probability symbols,
125830 + * else ncount=1 (rounding their probability up instead).
125831 + */
125832 +static unsigned ZSTD_useLowProbCount(size_t const nbSeq)
125834 +    /* Heuristic: This should cover most blocks <= 16K and
125835 +     * start to fade out after 16K to about 32K depending on
125836 +     * compressibility.
125837 +     */
125838 +    return nbSeq >= 2048;
125842 + * Returns the cost in bytes of encoding the normalized count header.
125843 + * Returns an error if any of the helper functions return an error.
125844 + */
125845 +static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
125846 +                              size_t const nbSeq, unsigned const FSELog)
125848 +    BYTE wksp[FSE_NCOUNTBOUND];
125849 +    S16 norm[MaxSeq + 1];
125850 +    const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
125851 +    FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max, ZSTD_useLowProbCount(nbSeq)), "");
125852 +    return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
125856 + * Returns the cost in bits of encoding the distribution described by count
125857 + * using the entropy bound.
125858 + */
125859 +static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total)
125861 +    unsigned cost = 0;
125862 +    unsigned s;
125863 +    for (s = 0; s <= max; ++s) {
125864 +        unsigned norm = (unsigned)((256 * count[s]) / total);
125865 +        if (count[s] != 0 && norm == 0)
125866 +            norm = 1;
125867 +        assert(count[s] < total);
125868 +        cost += count[s] * kInverseProbabilityLog256[norm];
125869 +    }
125870 +    return cost >> 8;
125871 +}
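/* Editor's worked example (hypothetical counts, not part of the patch): for a
 * flat histogram of 256 symbols with count[s] == 1 and total == 256, every
 * norm == 1 and kInverseProbabilityLog256[1] == 2048 (8 bits in 8.8 fixed
 * point), so cost = (256 * 2048) >> 8 = 2048 bits -- exactly 8 bits per
 * symbol, the Shannon bound for a uniform distribution over 256 values. */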
125874 + * Returns the cost in bits of encoding the distribution in count using ctable.
125875 + * Returns an error if ctable cannot represent all the symbols in count.
125876 + */
125877 +size_t ZSTD_fseBitCost(
125878 +    FSE_CTable const* ctable,
125879 +    unsigned const* count,
125880 +    unsigned const max)
125882 +    unsigned const kAccuracyLog = 8;
125883 +    size_t cost = 0;
125884 +    unsigned s;
125885 +    FSE_CState_t cstate;
125886 +    FSE_initCState(&cstate, ctable);
125887 +    if (ZSTD_getFSEMaxSymbolValue(ctable) < max) {
125888 +        DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u",
125889 +                    ZSTD_getFSEMaxSymbolValue(ctable), max);
125890 +        return ERROR(GENERIC);
125891 +    }
125892 +    for (s = 0; s <= max; ++s) {
125893 +        unsigned const tableLog = cstate.stateLog;
125894 +        unsigned const badCost = (tableLog + 1) << kAccuracyLog;
125895 +        unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
125896 +        if (count[s] == 0)
125897 +            continue;
125898 +        if (bitCost >= badCost) {
125899 +            DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s);
125900 +            return ERROR(GENERIC);
125901 +        }
125902 +        cost += (size_t)count[s] * bitCost;
125903 +    }
125904 +    return cost >> kAccuracyLog;
125908 + * Returns the cost in bits of encoding the distribution in count using the
125909 + * table described by norm. The max symbol supported by norm is assumed >= max.
125910 + * norm must be valid for every symbol with non-zero probability in count.
125911 + */
125912 +size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
125913 +                             unsigned const* count, unsigned const max)
125915 +    unsigned const shift = 8 - accuracyLog;
125916 +    size_t cost = 0;
125917 +    unsigned s;
125918 +    assert(accuracyLog <= 8);
125919 +    for (s = 0; s <= max; ++s) {
125920 +        unsigned const normAcc = (norm[s] != -1) ? (unsigned)norm[s] : 1;
125921 +        unsigned const norm256 = normAcc << shift;
125922 +        assert(norm256 > 0);
125923 +        assert(norm256 < 256);
125924 +        cost += count[s] * kInverseProbabilityLog256[norm256];
125925 +    }
125926 +    return cost >> 8;
125929 +symbolEncodingType_e
125930 +ZSTD_selectEncodingType(
125931 +        FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
125932 +        size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
125933 +        FSE_CTable const* prevCTable,
125934 +        short const* defaultNorm, U32 defaultNormLog,
125935 +        ZSTD_defaultPolicy_e const isDefaultAllowed,
125936 +        ZSTD_strategy const strategy)
125938 +    ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
125939 +    if (mostFrequent == nbSeq) {
125940 +        *repeatMode = FSE_repeat_none;
125941 +        if (isDefaultAllowed && nbSeq <= 2) {
125942 +            /* Prefer set_basic over set_rle when there are 2 or fewer symbols,
125943 +             * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
125944 +             * If basic encoding isn't possible, always choose RLE.
125945 +             */
125946 +            DEBUGLOG(5, "Selected set_basic");
125947 +            return set_basic;
125948 +        }
125949 +        DEBUGLOG(5, "Selected set_rle");
125950 +        return set_rle;
125951 +    }
125952 +    if (strategy < ZSTD_lazy) {
125953 +        if (isDefaultAllowed) {
125954 +            size_t const staticFse_nbSeq_max = 1000;
125955 +            size_t const mult = 10 - strategy;
125956 +            size_t const baseLog = 3;
125957 +            size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog;  /* 28-36 for offset, 56-72 for lengths */
125958 +            assert(defaultNormLog >= 5 && defaultNormLog <= 6);  /* xx_DEFAULTNORMLOG */
125959 +            assert(mult <= 9 && mult >= 7);
125960 +            if ( (*repeatMode == FSE_repeat_valid)
125961 +              && (nbSeq < staticFse_nbSeq_max) ) {
125962 +                DEBUGLOG(5, "Selected set_repeat");
125963 +                return set_repeat;
125964 +            }
125965 +            if ( (nbSeq < dynamicFse_nbSeq_min)
125966 +              || (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) {
125967 +                DEBUGLOG(5, "Selected set_basic");
125968 +                /* The format allows default tables to be repeated, but it isn't useful.
125969 +                 * When using simple heuristics to select encoding type, we don't want
125970 +                 * to confuse these tables with dictionaries. When running more careful
125971 +                 * analysis, we don't need to waste time checking both repeating tables
125972 +                 * and default tables.
125973 +                 */
125974 +                *repeatMode = FSE_repeat_none;
125975 +                return set_basic;
125976 +            }
125977 +        }
125978 +    } else {
125979 +        size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC);
125980 +        size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC);
125981 +        size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog);
125982 +        size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq);
125984 +        if (isDefaultAllowed) {
125985 +            assert(!ZSTD_isError(basicCost));
125986 +            assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost)));
125987 +        }
125988 +        assert(!ZSTD_isError(NCountCost));
125989 +        assert(compressedCost < ERROR(maxCode));
125990 +        DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u",
125991 +                    (unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost);
125992 +        if (basicCost <= repeatCost && basicCost <= compressedCost) {
125993 +            DEBUGLOG(5, "Selected set_basic");
125994 +            assert(isDefaultAllowed);
125995 +            *repeatMode = FSE_repeat_none;
125996 +            return set_basic;
125997 +        }
125998 +        if (repeatCost <= compressedCost) {
125999 +            DEBUGLOG(5, "Selected set_repeat");
126000 +            assert(!ZSTD_isError(repeatCost));
126001 +            return set_repeat;
126002 +        }
126003 +        assert(compressedCost < basicCost && compressedCost < repeatCost);
126004 +    }
126005 +    DEBUGLOG(5, "Selected set_compressed");
126006 +    *repeatMode = FSE_repeat_check;
126007 +    return set_compressed;
126008 +}
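/* Editor's summary of the selection above (descriptive only, no new logic):
 *   - one symbol covers every sequence -> set_rle, or set_basic when
 *     nbSeq <= 2 and defaults are allowed (RLE's 1-byte table costs more
 *     than 5-6 bits/symbol of default coding);
 *   - fast strategies (< ZSTD_lazy)    -> cheap thresholds on nbSeq and
 *     mostFrequent, never measuring actual costs;
 *   - lazy and stronger                -> compare estimated bit costs of
 *     basic vs repeat vs compressed, charging the NCount header at
 *     8 bits per byte (the << 3 above). */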
126010 +typedef struct {
126011 +    S16 norm[MaxSeq + 1];
126012 +    U32 wksp[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(MaxSeq, MaxFSELog)];
126013 +} ZSTD_BuildCTableWksp;
126015 +size_t
126016 +ZSTD_buildCTable(void* dst, size_t dstCapacity,
126017 +                FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
126018 +                unsigned* count, U32 max,
126019 +                const BYTE* codeTable, size_t nbSeq,
126020 +                const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
126021 +                const FSE_CTable* prevCTable, size_t prevCTableSize,
126022 +                void* entropyWorkspace, size_t entropyWorkspaceSize)
126024 +    BYTE* op = (BYTE*)dst;
126025 +    const BYTE* const oend = op + dstCapacity;
126026 +    DEBUGLOG(6, "ZSTD_buildCTable (dstCapacity=%u)", (unsigned)dstCapacity);
126028 +    switch (type) {
126029 +    case set_rle:
126030 +        FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max), "");
126031 +        RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall, "not enough space");
126032 +        *op = codeTable[0];
126033 +        return 1;
126034 +    case set_repeat:
126035 +        ZSTD_memcpy(nextCTable, prevCTable, prevCTableSize);
126036 +        return 0;
126037 +    case set_basic:
126038 +        FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize), "");  /* note : could be pre-calculated */
126039 +        return 0;
126040 +    case set_compressed: {
126041 +        ZSTD_BuildCTableWksp* wksp = (ZSTD_BuildCTableWksp*)entropyWorkspace;
126042 +        size_t nbSeq_1 = nbSeq;
126043 +        const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
126044 +        if (count[codeTable[nbSeq-1]] > 1) {
126045 +            count[codeTable[nbSeq-1]]--;
126046 +            nbSeq_1--;
126047 +        }
126048 +        assert(nbSeq_1 > 1);
126049 +        assert(entropyWorkspaceSize >= sizeof(ZSTD_BuildCTableWksp));
126050 +        (void)entropyWorkspaceSize;
126051 +        FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "");
126052 +        {   size_t const NCountSize = FSE_writeNCount(op, oend - op, wksp->norm, max, tableLog);   /* overflow protected */
126053 +            FORWARD_IF_ERROR(NCountSize, "FSE_writeNCount failed");
126054 +            FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(wksp->wksp)), "");
126055 +            return NCountSize;
126056 +        }
126057 +    }
126058 +    default: assert(0); RETURN_ERROR(GENERIC, "impossible to reach");
126059 +    }
126062 +FORCE_INLINE_TEMPLATE size_t
126063 +ZSTD_encodeSequences_body(
126064 +            void* dst, size_t dstCapacity,
126065 +            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
126066 +            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
126067 +            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
126068 +            seqDef const* sequences, size_t nbSeq, int longOffsets)
126070 +    BIT_CStream_t blockStream;
126071 +    FSE_CState_t  stateMatchLength;
126072 +    FSE_CState_t  stateOffsetBits;
126073 +    FSE_CState_t  stateLitLength;
126075 +    RETURN_ERROR_IF(
126076 +        ERR_isError(BIT_initCStream(&blockStream, dst, dstCapacity)),
126077 +        dstSize_tooSmall, "not enough space remaining");
126078 +    DEBUGLOG(6, "available space for bitstream : %i  (dstCapacity=%u)",
126079 +                (int)(blockStream.endPtr - blockStream.startPtr),
126080 +                (unsigned)dstCapacity);
126082 +    /* first symbols */
126083 +    FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
126084 +    FSE_initCState2(&stateOffsetBits,  CTable_OffsetBits,  ofCodeTable[nbSeq-1]);
126085 +    FSE_initCState2(&stateLitLength,   CTable_LitLength,   llCodeTable[nbSeq-1]);
126086 +    BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
126087 +    if (MEM_32bits()) BIT_flushBits(&blockStream);
126088 +    BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
126089 +    if (MEM_32bits()) BIT_flushBits(&blockStream);
126090 +    if (longOffsets) {
126091 +        U32 const ofBits = ofCodeTable[nbSeq-1];
126092 +        unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
126093 +        if (extraBits) {
126094 +            BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
126095 +            BIT_flushBits(&blockStream);
126096 +        }
126097 +        BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,
126098 +                    ofBits - extraBits);
126099 +    } else {
126100 +        BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
126101 +    }
126102 +    BIT_flushBits(&blockStream);
126104 +    {   size_t n;
126105 +        for (n=nbSeq-2 ; n<nbSeq ; n--) {      /* intentional underflow */
126106 +            BYTE const llCode = llCodeTable[n];
126107 +            BYTE const ofCode = ofCodeTable[n];
126108 +            BYTE const mlCode = mlCodeTable[n];
126109 +            U32  const llBits = LL_bits[llCode];
126110 +            U32  const ofBits = ofCode;
126111 +            U32  const mlBits = ML_bits[mlCode];
126112 +            DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
126113 +                        (unsigned)sequences[n].litLength,
126114 +                        (unsigned)sequences[n].matchLength + MINMATCH,
126115 +                        (unsigned)sequences[n].offset);
126116 +                                                                            /* 32b*/  /* 64b*/
126117 +                                                                            /* (7)*/  /* (7)*/
126118 +            FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);       /* 15 */  /* 15 */
126119 +            FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode);      /* 24 */  /* 24 */
126120 +            if (MEM_32bits()) BIT_flushBits(&blockStream);                  /* (7)*/
126121 +            FSE_encodeSymbol(&blockStream, &stateLitLength, llCode);        /* 16 */  /* 33 */
126122 +            if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
126123 +                BIT_flushBits(&blockStream);                                /* (7)*/
126124 +            BIT_addBits(&blockStream, sequences[n].litLength, llBits);
126125 +            if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
126126 +            BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
126127 +            if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
126128 +            if (longOffsets) {
126129 +                unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
126130 +                if (extraBits) {
126131 +                    BIT_addBits(&blockStream, sequences[n].offset, extraBits);
126132 +                    BIT_flushBits(&blockStream);                            /* (7)*/
126133 +                }
126134 +                BIT_addBits(&blockStream, sequences[n].offset >> extraBits,
126135 +                            ofBits - extraBits);                            /* 31 */
126136 +            } else {
126137 +                BIT_addBits(&blockStream, sequences[n].offset, ofBits);     /* 31 */
126138 +            }
126139 +            BIT_flushBits(&blockStream);                                    /* (7)*/
126140 +            DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr));
126141 +    }   }
126143 +    DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog);
126144 +    FSE_flushCState(&blockStream, &stateMatchLength);
126145 +    DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog);
126146 +    FSE_flushCState(&blockStream, &stateOffsetBits);
126147 +    DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog);
126148 +    FSE_flushCState(&blockStream, &stateLitLength);
126150 +    {   size_t const streamSize = BIT_closeCStream(&blockStream);
126151 +        RETURN_ERROR_IF(streamSize==0, dstSize_tooSmall, "not enough space");
126152 +        return streamSize;
126153 +    }
126154 +}
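/* Editor's note on the accumulator budget in the loop above (log sizes taken
 * from upstream zstd_internal.h; worth double-checking): with LLFSELog == 9,
 * MLFSELog == 9 and OffFSELog == 8, the 64-bit guard flushes once
 * ofBits + mlBits + llBits >= 64 - 7 - (9 + 9 + 8) = 31.  The bracketed
 * running totals ("15", "24", "33", ...) are the worst-case bits pending in
 * the accumulator between flushes; 32-bit targets flush after almost every
 * add instead. */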
126156 +static size_t
126157 +ZSTD_encodeSequences_default(
126158 +            void* dst, size_t dstCapacity,
126159 +            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
126160 +            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
126161 +            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
126162 +            seqDef const* sequences, size_t nbSeq, int longOffsets)
126164 +    return ZSTD_encodeSequences_body(dst, dstCapacity,
126165 +                                    CTable_MatchLength, mlCodeTable,
126166 +                                    CTable_OffsetBits, ofCodeTable,
126167 +                                    CTable_LitLength, llCodeTable,
126168 +                                    sequences, nbSeq, longOffsets);
126172 +#if DYNAMIC_BMI2
126174 +static TARGET_ATTRIBUTE("bmi2") size_t
126175 +ZSTD_encodeSequences_bmi2(
126176 +            void* dst, size_t dstCapacity,
126177 +            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
126178 +            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
126179 +            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
126180 +            seqDef const* sequences, size_t nbSeq, int longOffsets)
126182 +    return ZSTD_encodeSequences_body(dst, dstCapacity,
126183 +                                    CTable_MatchLength, mlCodeTable,
126184 +                                    CTable_OffsetBits, ofCodeTable,
126185 +                                    CTable_LitLength, llCodeTable,
126186 +                                    sequences, nbSeq, longOffsets);
126189 +#endif
126191 +size_t ZSTD_encodeSequences(
126192 +            void* dst, size_t dstCapacity,
126193 +            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
126194 +            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
126195 +            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
126196 +            seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
126198 +    DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity);
126199 +#if DYNAMIC_BMI2
126200 +    if (bmi2) {
126201 +        return ZSTD_encodeSequences_bmi2(dst, dstCapacity,
126202 +                                         CTable_MatchLength, mlCodeTable,
126203 +                                         CTable_OffsetBits, ofCodeTable,
126204 +                                         CTable_LitLength, llCodeTable,
126205 +                                         sequences, nbSeq, longOffsets);
126206 +    }
126207 +#endif
126208 +    (void)bmi2;
126209 +    return ZSTD_encodeSequences_default(dst, dstCapacity,
126210 +                                        CTable_MatchLength, mlCodeTable,
126211 +                                        CTable_OffsetBits, ofCodeTable,
126212 +                                        CTable_LitLength, llCodeTable,
126213 +                                        sequences, nbSeq, longOffsets);
126215 diff --git a/lib/zstd/compress/zstd_compress_sequences.h b/lib/zstd/compress/zstd_compress_sequences.h
126216 new file mode 100644
126217 index 000000000000..7991364c2f71
126218 --- /dev/null
126219 +++ b/lib/zstd/compress/zstd_compress_sequences.h
126220 @@ -0,0 +1,54 @@
126222 + * Copyright (c) Yann Collet, Facebook, Inc.
126223 + * All rights reserved.
126225 + * This source code is licensed under both the BSD-style license (found in the
126226 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
126227 + * in the COPYING file in the root directory of this source tree).
126228 + * You may select, at your option, one of the above-listed licenses.
126229 + */
126231 +#ifndef ZSTD_COMPRESS_SEQUENCES_H
126232 +#define ZSTD_COMPRESS_SEQUENCES_H
126234 +#include "../common/fse.h" /* FSE_repeat, FSE_CTable */
126235 +#include "../common/zstd_internal.h" /* symbolEncodingType_e, ZSTD_strategy */
126237 +typedef enum {
126238 +    ZSTD_defaultDisallowed = 0,
126239 +    ZSTD_defaultAllowed = 1
126240 +} ZSTD_defaultPolicy_e;
126242 +symbolEncodingType_e
126243 +ZSTD_selectEncodingType(
126244 +        FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
126245 +        size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
126246 +        FSE_CTable const* prevCTable,
126247 +        short const* defaultNorm, U32 defaultNormLog,
126248 +        ZSTD_defaultPolicy_e const isDefaultAllowed,
126249 +        ZSTD_strategy const strategy);
126251 +size_t
126252 +ZSTD_buildCTable(void* dst, size_t dstCapacity,
126253 +                FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
126254 +                unsigned* count, U32 max,
126255 +                const BYTE* codeTable, size_t nbSeq,
126256 +                const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
126257 +                const FSE_CTable* prevCTable, size_t prevCTableSize,
126258 +                void* entropyWorkspace, size_t entropyWorkspaceSize);
126260 +size_t ZSTD_encodeSequences(
126261 +            void* dst, size_t dstCapacity,
126262 +            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
126263 +            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
126264 +            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
126265 +            seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2);
126267 +size_t ZSTD_fseBitCost(
126268 +    FSE_CTable const* ctable,
126269 +    unsigned const* count,
126270 +    unsigned const max);
126272 +size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
126273 +                             unsigned const* count, unsigned const max);
126274 +#endif /* ZSTD_COMPRESS_SEQUENCES_H */
126275 diff --git a/lib/zstd/compress/zstd_compress_superblock.c b/lib/zstd/compress/zstd_compress_superblock.c
126276 new file mode 100644
126277 index 000000000000..767f73f5bf3d
126278 --- /dev/null
126279 +++ b/lib/zstd/compress/zstd_compress_superblock.c
126280 @@ -0,0 +1,850 @@
126282 + * Copyright (c) Yann Collet, Facebook, Inc.
126283 + * All rights reserved.
126285 + * This source code is licensed under both the BSD-style license (found in the
126286 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
126287 + * in the COPYING file in the root directory of this source tree).
126288 + * You may select, at your option, one of the above-listed licenses.
126289 + */
126291 + /*-*************************************
126292 + *  Dependencies
126293 + ***************************************/
126294 +#include "zstd_compress_superblock.h"
126296 +#include "../common/zstd_internal.h"  /* ZSTD_getSequenceLength */
126297 +#include "hist.h"                     /* HIST_countFast_wksp */
126298 +#include "zstd_compress_internal.h"
126299 +#include "zstd_compress_sequences.h"
126300 +#include "zstd_compress_literals.h"
126302 +/*-*************************************
126303 +*  Superblock entropy buffer structs
126304 +***************************************/
126305 +/** ZSTD_hufCTablesMetadata_t :
126306 + *  Stores Literals Block Type for a super-block in hType, and
126307 + *  huffman tree description in hufDesBuffer.
126308 + *  hufDesSize refers to the size of huffman tree description in bytes.
126309 + *  This metadata is populated in ZSTD_buildSuperBlockEntropy_literal() */
126310 +typedef struct {
126311 +    symbolEncodingType_e hType;
126312 +    BYTE hufDesBuffer[ZSTD_MAX_HUF_HEADER_SIZE];
126313 +    size_t hufDesSize;
126314 +} ZSTD_hufCTablesMetadata_t;
126316 +/** ZSTD_fseCTablesMetadata_t :
126317 + *  Stores symbol compression modes for a super-block in {ll, ol, ml}Type, and
126318 + *  fse tables in fseTablesBuffer.
126319 + *  fseTablesSize refers to the size of fse tables in bytes.
126320 + *  This metadata is populated in ZSTD_buildSuperBlockEntropy_sequences() */
126321 +typedef struct {
126322 +    symbolEncodingType_e llType;
126323 +    symbolEncodingType_e ofType;
126324 +    symbolEncodingType_e mlType;
126325 +    BYTE fseTablesBuffer[ZSTD_MAX_FSE_HEADERS_SIZE];
126326 +    size_t fseTablesSize;
126327 +    size_t lastCountSize; /* This accounts for a bug in zstd 1.3.4. More detail in ZSTD_compressSubBlock_sequences() */
126328 +} ZSTD_fseCTablesMetadata_t;
126330 +typedef struct {
126331 +    ZSTD_hufCTablesMetadata_t hufMetadata;
126332 +    ZSTD_fseCTablesMetadata_t fseMetadata;
126333 +} ZSTD_entropyCTablesMetadata_t;
126336 +/** ZSTD_buildSuperBlockEntropy_literal() :
126337 + *  Builds entropy for the super-block literals.
126338 + *  Stores literals block type (raw, rle, compressed, repeat) and
126339 + *  huffman description table to hufMetadata.
126340 + *  @return : size of huffman description table or error code */
126341 +static size_t ZSTD_buildSuperBlockEntropy_literal(void* const src, size_t srcSize,
126342 +                                            const ZSTD_hufCTables_t* prevHuf,
126343 +                                                  ZSTD_hufCTables_t* nextHuf,
126344 +                                                  ZSTD_hufCTablesMetadata_t* hufMetadata,
126345 +                                                  const int disableLiteralsCompression,
126346 +                                                  void* workspace, size_t wkspSize)
126348 +    BYTE* const wkspStart = (BYTE*)workspace;
126349 +    BYTE* const wkspEnd = wkspStart + wkspSize;
126350 +    BYTE* const countWkspStart = wkspStart;
126351 +    unsigned* const countWksp = (unsigned*)workspace;
126352 +    const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned);
126353 +    BYTE* const nodeWksp = countWkspStart + countWkspSize;
126354 +    const size_t nodeWkspSize = wkspEnd-nodeWksp;
126355 +    unsigned maxSymbolValue = 255;
126356 +    unsigned huffLog = HUF_TABLELOG_DEFAULT;
126357 +    HUF_repeat repeat = prevHuf->repeatMode;
126359 +    DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy_literal (srcSize=%zu)", srcSize);
126361 +    /* Prepare nextEntropy, assuming reuse of the existing table */
126362 +    ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
126364 +    if (disableLiteralsCompression) {
126365 +        DEBUGLOG(5, "set_basic - disabled");
126366 +        hufMetadata->hType = set_basic;
126367 +        return 0;
126368 +    }
126370 +    /* small ? don't even attempt compression (speed opt) */
126371 +#   define COMPRESS_LITERALS_SIZE_MIN 63
126372 +    {   size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
126373 +        if (srcSize <= minLitSize) {
126374 +            DEBUGLOG(5, "set_basic - too small");
126375 +            hufMetadata->hType = set_basic;
126376 +            return 0;
126377 +        }
126378 +    }
126380 +    /* Scan input and build symbol stats */
126381 +    {   size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)src, srcSize, workspace, wkspSize);
126382 +        FORWARD_IF_ERROR(largest, "HIST_count_wksp failed");
126383 +        if (largest == srcSize) {
126384 +            DEBUGLOG(5, "set_rle");
126385 +            hufMetadata->hType = set_rle;
126386 +            return 0;
126387 +        }
126388 +        if (largest <= (srcSize >> 7)+4) {
126389 +            DEBUGLOG(5, "set_basic - no gain");
126390 +            hufMetadata->hType = set_basic;
126391 +            return 0;
126392 +        }
126393 +    }
126395 +    /* Validate the previous Huffman table */
126396 +    if (repeat == HUF_repeat_check && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) {
126397 +        repeat = HUF_repeat_none;
126398 +    }
126400 +    /* Build Huffman Tree */
126401 +    ZSTD_memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable));
126402 +    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
126403 +    {   size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp,
126404 +                                                    maxSymbolValue, huffLog,
126405 +                                                    nodeWksp, nodeWkspSize);
126406 +        FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp");
126407 +        huffLog = (U32)maxBits;
126408 +        {   /* Build and write the CTable */
126409 +            size_t const newCSize = HUF_estimateCompressedSize(
126410 +                    (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue);
126411 +            size_t const hSize = HUF_writeCTable_wksp(
126412 +                    hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer),
126413 +                    (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog,
126414 +                    nodeWksp, nodeWkspSize);
126415 +            /* Check against repeating the previous CTable */
126416 +            if (repeat != HUF_repeat_none) {
126417 +                size_t const oldCSize = HUF_estimateCompressedSize(
126418 +                        (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue);
126419 +                if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) {
126420 +                    DEBUGLOG(5, "set_repeat - smaller");
126421 +                    ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
126422 +                    hufMetadata->hType = set_repeat;
126423 +                    return 0;
126424 +                }
126425 +            }
126426 +            if (newCSize + hSize >= srcSize) {
126427 +                DEBUGLOG(5, "set_basic - no gains");
126428 +                ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
126429 +                hufMetadata->hType = set_basic;
126430 +                return 0;
126431 +            }
126432 +            DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize);
126433 +            hufMetadata->hType = set_compressed;
126434 +            nextHuf->repeatMode = HUF_repeat_check;
126435 +            return hSize;
126436 +        }
126437 +    }
126440 +/** ZSTD_buildSuperBlockEntropy_sequences() :
126441 + *  Builds entropy for the super-block sequences.
126442 + *  Stores symbol compression modes and fse table to fseMetadata.
126443 + *  @return : size of fse tables or error code */
126444 +static size_t ZSTD_buildSuperBlockEntropy_sequences(seqStore_t* seqStorePtr,
126445 +                                              const ZSTD_fseCTables_t* prevEntropy,
126446 +                                                    ZSTD_fseCTables_t* nextEntropy,
126447 +                                              const ZSTD_CCtx_params* cctxParams,
126448 +                                                    ZSTD_fseCTablesMetadata_t* fseMetadata,
126449 +                                                    void* workspace, size_t wkspSize)
126451 +    BYTE* const wkspStart = (BYTE*)workspace;
126452 +    BYTE* const wkspEnd = wkspStart + wkspSize;
126453 +    BYTE* const countWkspStart = wkspStart;
126454 +    unsigned* const countWksp = (unsigned*)workspace;
126455 +    const size_t countWkspSize = (MaxSeq + 1) * sizeof(unsigned);
126456 +    BYTE* const cTableWksp = countWkspStart + countWkspSize;
126457 +    const size_t cTableWkspSize = wkspEnd-cTableWksp;
126458 +    ZSTD_strategy const strategy = cctxParams->cParams.strategy;
126459 +    FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable;
126460 +    FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable;
126461 +    FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable;
126462 +    const BYTE* const ofCodeTable = seqStorePtr->ofCode;
126463 +    const BYTE* const llCodeTable = seqStorePtr->llCode;
126464 +    const BYTE* const mlCodeTable = seqStorePtr->mlCode;
126465 +    size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
126466 +    BYTE* const ostart = fseMetadata->fseTablesBuffer;
126467 +    BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer);
126468 +    BYTE* op = ostart;
126470 +    assert(cTableWkspSize >= (1 << MaxFSELog) * sizeof(FSE_FUNCTION_TYPE));
126471 +    DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy_sequences (nbSeq=%zu)", nbSeq);
126472 +    ZSTD_memset(workspace, 0, wkspSize);
126474 +    fseMetadata->lastCountSize = 0;
126475 +    /* convert length/distances into codes */
126476 +    ZSTD_seqToCodes(seqStorePtr);
126477 +    /* build CTable for Literal Lengths */
126478 +    {   U32 LLtype;
126479 +        unsigned max = MaxLL;
126480 +        size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, llCodeTable, nbSeq, workspace, wkspSize);  /* can't fail */
126481 +        DEBUGLOG(5, "Building LL table");
126482 +        nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
126483 +        LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode,
126484 +                                        countWksp, max, mostFrequent, nbSeq,
126485 +                                        LLFSELog, prevEntropy->litlengthCTable,
126486 +                                        LL_defaultNorm, LL_defaultNormLog,
126487 +                                        ZSTD_defaultAllowed, strategy);
126488 +        assert(set_basic < set_compressed && set_rle < set_compressed);
126489 +        assert(!(LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
126490 +        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
126491 +                                                    countWksp, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
126492 +                                                    prevEntropy->litlengthCTable, sizeof(prevEntropy->litlengthCTable),
126493 +                                                    cTableWksp, cTableWkspSize);
126494 +            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed");
126495 +            if (LLtype == set_compressed)
126496 +                fseMetadata->lastCountSize = countSize;
126497 +            op += countSize;
126498 +            fseMetadata->llType = (symbolEncodingType_e) LLtype;
126499 +    }   }
126500 +    /* build CTable for Offsets */
126501 +    {   U32 Offtype;
126502 +        unsigned max = MaxOff;
126503 +        size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, ofCodeTable, nbSeq, workspace, wkspSize);  /* can't fail */
126504 +        /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
126505 +        ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
126506 +        DEBUGLOG(5, "Building OF table");
126507 +        nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
126508 +        Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode,
126509 +                                        countWksp, max, mostFrequent, nbSeq,
126510 +                                        OffFSELog, prevEntropy->offcodeCTable,
126511 +                                        OF_defaultNorm, OF_defaultNormLog,
126512 +                                        defaultPolicy, strategy);
126513 +        assert(!(Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
126514 +        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
126515 +                                                    countWksp, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
126516 +                                                    prevEntropy->offcodeCTable, sizeof(prevEntropy->offcodeCTable),
126517 +                                                    cTableWksp, cTableWkspSize);
126518 +            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed");
126519 +            if (Offtype == set_compressed)
126520 +                fseMetadata->lastCountSize = countSize;
126521 +            op += countSize;
126522 +            fseMetadata->ofType = (symbolEncodingType_e) Offtype;
126523 +    }   }
126524 +    /* build CTable for MatchLengths */
126525 +    {   U32 MLtype;
126526 +        unsigned max = MaxML;
126527 +        size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, mlCodeTable, nbSeq, workspace, wkspSize);   /* can't fail */
126528 +        DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
126529 +        nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
126530 +        MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode,
126531 +                                        countWksp, max, mostFrequent, nbSeq,
126532 +                                        MLFSELog, prevEntropy->matchlengthCTable,
126533 +                                        ML_defaultNorm, ML_defaultNormLog,
126534 +                                        ZSTD_defaultAllowed, strategy);
126535 +        assert(!(MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
126536 +        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
126537 +                                                    countWksp, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
126538 +                                                    prevEntropy->matchlengthCTable, sizeof(prevEntropy->matchlengthCTable),
126539 +                                                    cTableWksp, cTableWkspSize);
126540 +            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed");
126541 +            if (MLtype == set_compressed)
126542 +                fseMetadata->lastCountSize = countSize;
126543 +            op += countSize;
126544 +            fseMetadata->mlType = (symbolEncodingType_e) MLtype;
126545 +    }   }
126546 +    assert((size_t) (op-ostart) <= sizeof(fseMetadata->fseTablesBuffer));
126547 +    return op-ostart;
126548 +}
126549 +
126550 +
126551 +/** ZSTD_buildSuperBlockEntropy() :
126552 + *  Builds entropy for the super-block.
126553 + *  @return : 0 on success or error code */
126554 +static size_t
126555 +ZSTD_buildSuperBlockEntropy(seqStore_t* seqStorePtr,
126556 +                      const ZSTD_entropyCTables_t* prevEntropy,
126557 +                            ZSTD_entropyCTables_t* nextEntropy,
126558 +                      const ZSTD_CCtx_params* cctxParams,
126559 +                            ZSTD_entropyCTablesMetadata_t* entropyMetadata,
126560 +                            void* workspace, size_t wkspSize)
126561 +{
126562 +    size_t const litSize = seqStorePtr->lit - seqStorePtr->litStart;
126563 +    DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy");
126564 +    entropyMetadata->hufMetadata.hufDesSize =
126565 +        ZSTD_buildSuperBlockEntropy_literal(seqStorePtr->litStart, litSize,
126566 +                                            &prevEntropy->huf, &nextEntropy->huf,
126567 +                                            &entropyMetadata->hufMetadata,
126568 +                                            ZSTD_disableLiteralsCompression(cctxParams),
126569 +                                            workspace, wkspSize);
126570 +    FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildSuperBlockEntropy_literal failed");
126571 +    entropyMetadata->fseMetadata.fseTablesSize =
126572 +        ZSTD_buildSuperBlockEntropy_sequences(seqStorePtr,
126573 +                                              &prevEntropy->fse, &nextEntropy->fse,
126574 +                                              cctxParams,
126575 +                                              &entropyMetadata->fseMetadata,
126576 +                                              workspace, wkspSize);
126577 +    FORWARD_IF_ERROR(entropyMetadata->fseMetadata.fseTablesSize, "ZSTD_buildSuperBlockEntropy_sequences failed");
126578 +    return 0;
126579 +}
126580 +
126581 +/** ZSTD_compressSubBlock_literal() :
126582 + *  Compresses literals section for a sub-block.
126583 + *  When we have to write the Huffman table we will sometimes choose a header
126584 + *  size larger than necessary. This is because we have to pick the header size
126585 + *  before we know the table size + compressed size, so we have a bound on the
126586 + *  table size. If we guessed incorrectly, we fall back to uncompressed literals.
126588 + *  We write the header when writeEntropy=1 and set entropyWritten=1 when we succeeded
126589 + *  in writing the header, otherwise it is set to 0.
126591 + *  hufMetadata->hType has literals block type info.
126592 + *      If it is set_basic, all sub-blocks literals section will be Raw_Literals_Block.
126593 + *      If it is set_rle, all sub-blocks literals section will be RLE_Literals_Block.
126594 + *      If it is set_compressed, first sub-block's literals section will be Compressed_Literals_Block
126595 + *      and the following sub-blocks' literals sections will be Treeless_Literals_Block.
126596 + *      If it is set_repeat, all sub-blocks' literals sections will be Treeless_Literals_Block.
126597 + * @return : compressed size of literals section of a sub-block
126598 + *            Or 0 if it is unable to compress.
126599 + *            Or error code */
126600 +static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
126601 +                                    const ZSTD_hufCTablesMetadata_t* hufMetadata,
126602 +                                    const BYTE* literals, size_t litSize,
126603 +                                    void* dst, size_t dstSize,
126604 +                                    const int bmi2, int writeEntropy, int* entropyWritten)
126605 +{
126606 +    size_t const header = writeEntropy ? 200 : 0;
126607 +    size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header));
126608 +    BYTE* const ostart = (BYTE*)dst;
126609 +    BYTE* const oend = ostart + dstSize;
126610 +    BYTE* op = ostart + lhSize;
126611 +    U32 const singleStream = lhSize == 3;
126612 +    symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
126613 +    size_t cLitSize = 0;
126615 +    (void)bmi2; /* TODO bmi2... */
126617 +    DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy);
126619 +    *entropyWritten = 0;
126620 +    if (litSize == 0 || hufMetadata->hType == set_basic) {
126621 +      DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal");
126622 +      return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
126623 +    } else if (hufMetadata->hType == set_rle) {
126624 +      DEBUGLOG(5, "ZSTD_compressSubBlock_literal using rle literal");
126625 +      return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize);
126626 +    }
126628 +    assert(litSize > 0);
126629 +    assert(hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat);
126631 +    if (writeEntropy && hufMetadata->hType == set_compressed) {
126632 +        ZSTD_memcpy(op, hufMetadata->hufDesBuffer, hufMetadata->hufDesSize);
126633 +        op += hufMetadata->hufDesSize;
126634 +        cLitSize += hufMetadata->hufDesSize;
126635 +        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize);
126636 +    }
126638 +    /* TODO bmi2 */
126639 +    {   const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable)
126640 +                                          : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable);
126641 +        op += cSize;
126642 +        cLitSize += cSize;
126643 +        if (cSize == 0 || ERR_isError(cSize)) {
126644 +            DEBUGLOG(5, "Failed to write entropy tables %s", ZSTD_getErrorName(cSize));
126645 +            return 0;
126646 +        }
126647 +        /* If we expand and we aren't writing a header then emit uncompressed */
126648 +        if (!writeEntropy && cLitSize >= litSize) {
126649 +            DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal because uncompressible");
126650 +            return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
126651 +        }
126652 +        /* If we are writing headers then allow expansion that doesn't change our header size. */
126653 +        if (lhSize < (size_t)(3 + (cLitSize >= 1 KB) + (cLitSize >= 16 KB))) {
126654 +            assert(cLitSize > litSize);
126655 +            DEBUGLOG(5, "Literals expanded beyond allowed header size");
126656 +            return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
126657 +        }
126658 +        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (cSize=%zu)", cSize);
126659 +    }
126661 +    /* Build header */
126662 +    switch(lhSize)
126663 +    {
126664 +    case 3: /* 2 - 2 - 10 - 10 */
126665 +        {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14);
126666 +            MEM_writeLE24(ostart, lhc);
126667 +            break;
126668 +        }
126669 +    case 4: /* 2 - 2 - 14 - 14 */
126670 +        {   U32 const lhc = hType + (2 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<18);
126671 +            MEM_writeLE32(ostart, lhc);
126672 +            break;
126673 +        }
126674 +    case 5: /* 2 - 2 - 18 - 18 */
126675 +        {   U32 const lhc = hType + (3 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<22);
126676 +            MEM_writeLE32(ostart, lhc);
126677 +            ostart[4] = (BYTE)(cLitSize >> 10);
126678 +            break;
126679 +        }
126680 +    default:  /* not possible : lhSize is {3,4,5} */
126681 +        assert(0);
126682 +    }
126683 +    *entropyWritten = 1;
126684 +    DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart));
126685 +    return op-ostart;
126686 +}
126687 +
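+/* Worked example of the 3-byte literals header above (illustrative numbers
+ * only) : hType=set_compressed(2), singleStream=1, litSize=500, cLitSize=300 :
+ *   lhc = 2 + (0<<2) + (500<<4) + (300<<14) = 0x4B1F42
+ * MEM_writeLE24() emits bytes 42 1F 4B; a decoder recovers
+ * blockType = lhc & 3 = 2, sizeFormat = (lhc>>2) & 3 = 0 (single stream),
+ * litSize = (lhc>>4) & 0x3FF = 500, cLitSize = lhc>>14 = 300.
+ */
+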
126688 +static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* sequences, size_t nbSeq, size_t litSize, int lastSequence) {
126689 +    const seqDef* const sstart = sequences;
126690 +    const seqDef* const send = sequences + nbSeq;
126691 +    const seqDef* sp = sstart;
126692 +    size_t matchLengthSum = 0;
126693 +    size_t litLengthSum = 0;
126694 +    while (send-sp > 0) {
126695 +        ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp);
126696 +        litLengthSum += seqLen.litLength;
126697 +        matchLengthSum += seqLen.matchLength;
126698 +        sp++;
126699 +    }
126700 +    assert(litLengthSum <= litSize);
126701 +    if (!lastSequence) {
126702 +        assert(litLengthSum == litSize);
126703 +    }
126704 +    return matchLengthSum + litSize;
126705 +}
126706 +
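+/* e.g. two sequences {litLength=5, matchLength=10} and {litLength=3,
+ * matchLength=7} with litSize=12 give matchLengthSum=17, litLengthSum=8,
+ * so the regenerated size is 17+12=29; the 4 literals beyond litLengthSum
+ * are trailing literals, which only the last sub-block may carry. */
+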
126707 +/** ZSTD_compressSubBlock_sequences() :
126708 + *  Compresses sequences section for a sub-block.
126709 + *  fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have
126710 + *  symbol compression modes for the super-block.
126711 + *  The first successfully compressed block will have these in its header.
126712 + *  We set entropyWritten=1 when we succeed in compressing the sequences.
126713 + *  The following sub-blocks will always have repeat mode.
126714 + *  @return : compressed size of sequences section of a sub-block
126715 + *            Or 0 if it is unable to compress
126716 + *            Or error code. */
126717 +static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,
126718 +                                              const ZSTD_fseCTablesMetadata_t* fseMetadata,
126719 +                                              const seqDef* sequences, size_t nbSeq,
126720 +                                              const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
126721 +                                              const ZSTD_CCtx_params* cctxParams,
126722 +                                              void* dst, size_t dstCapacity,
126723 +                                              const int bmi2, int writeEntropy, int* entropyWritten)
126724 +{
126725 +    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
126726 +    BYTE* const ostart = (BYTE*)dst;
126727 +    BYTE* const oend = ostart + dstCapacity;
126728 +    BYTE* op = ostart;
126729 +    BYTE* seqHead;
126731 +    DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (nbSeq=%zu, writeEntropy=%d, longOffsets=%d)", nbSeq, writeEntropy, longOffsets);
126733 +    *entropyWritten = 0;
126734 +    /* Sequences Header */
126735 +    RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
126736 +                    dstSize_tooSmall, "");
126737 +    if (nbSeq < 0x7F)
126738 +        *op++ = (BYTE)nbSeq;
126739 +    else if (nbSeq < LONGNBSEQ)
126740 +        op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
126741 +    else
126742 +        op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
126743 +    if (nbSeq==0) {
126744 +        return op - ostart;
126745 +    }
126747 +    /* seqHead : flags for FSE encoding type */
126748 +    seqHead = op++;
126750 +    DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (seqHeadSize=%u)", (unsigned)(op-ostart));
126752 +    if (writeEntropy) {
126753 +        const U32 LLtype = fseMetadata->llType;
126754 +        const U32 Offtype = fseMetadata->ofType;
126755 +        const U32 MLtype = fseMetadata->mlType;
126756 +        DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (fseTablesSize=%zu)", fseMetadata->fseTablesSize);
126757 +        *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
126758 +        ZSTD_memcpy(op, fseMetadata->fseTablesBuffer, fseMetadata->fseTablesSize);
126759 +        op += fseMetadata->fseTablesSize;
126760 +    } else {
126761 +        const U32 repeat = set_repeat;
126762 +        *seqHead = (BYTE)((repeat<<6) + (repeat<<4) + (repeat<<2));
126763 +    }
126765 +    {   size_t const bitstreamSize = ZSTD_encodeSequences(
126766 +                                        op, oend - op,
126767 +                                        fseTables->matchlengthCTable, mlCode,
126768 +                                        fseTables->offcodeCTable, ofCode,
126769 +                                        fseTables->litlengthCTable, llCode,
126770 +                                        sequences, nbSeq,
126771 +                                        longOffsets, bmi2);
126772 +        FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
126773 +        op += bitstreamSize;
126774 +        /* zstd versions <= 1.3.4 mistakenly report corruption when
126775 +         * FSE_readNCount() receives a buffer < 4 bytes.
126776 +         * Fixed by https://github.com/facebook/zstd/pull/1146.
126777 +         * This can happen when the last set_compressed table present is 2
126778 +         * bytes and the bitstream is only one byte.
126779 +         * In this exceedingly rare case, we will simply emit an uncompressed
126780 +         * block, since it isn't worth optimizing.
126781 +         */
126782 +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
126783 +        if (writeEntropy && fseMetadata->lastCountSize && fseMetadata->lastCountSize + bitstreamSize < 4) {
126784 +            /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
126785 +            assert(fseMetadata->lastCountSize + bitstreamSize == 3);
126786 +            DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
126787 +                        "emitting an uncompressed block.");
126788 +            return 0;
126789 +        }
126790 +#endif
126791 +        DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (bitstreamSize=%zu)", bitstreamSize);
126792 +    }
126794 +    /* zstd versions <= 1.4.0 mistakenly report error when
126795 +     * sequences section body size is less than 3 bytes.
126796 +     * Fixed by https://github.com/facebook/zstd/pull/1664.
126797 +     * This can happen when the previous sequences section block is compressed
126798 +     * with rle mode and the current block's sequences section is compressed
126799 +     * with repeat mode where sequences section body size can be 1 byte.
126800 +     */
126801 +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
126802 +    if (op-seqHead < 4) {
126803 +        DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.4.0 by emitting "
126804 +                    "an uncompressed block when sequences are < 4 bytes");
126805 +        return 0;
126806 +    }
126807 +#endif
126809 +    *entropyWritten = 1;
126810 +    return op - ostart;
126811 +}
126812 +
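+/* Worked example of the two headers above (illustrative numbers only) :
+ * nbSeq=1000 : since 0x7F <= 1000 < LONGNBSEQ, two bytes are emitted,
+ *   op[0] = (1000>>8)+0x80 = 0x83, op[1] = 1000&0xFF = 0xE8.
+ * seqHead with LLtype=set_compressed(2), Offtype=set_compressed(2),
+ * MLtype=set_basic(0) : (2<<6)+(2<<4)+(0<<2) = 0xA0.
+ */
+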
126813 +/** ZSTD_compressSubBlock() :
126814 + *  Compresses a single sub-block.
126815 + *  @return : compressed size of the sub-block
126816 + *            Or 0 if it failed to compress. */
126817 +static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy,
126818 +                                    const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
126819 +                                    const seqDef* sequences, size_t nbSeq,
126820 +                                    const BYTE* literals, size_t litSize,
126821 +                                    const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
126822 +                                    const ZSTD_CCtx_params* cctxParams,
126823 +                                    void* dst, size_t dstCapacity,
126824 +                                    const int bmi2,
126825 +                                    int writeLitEntropy, int writeSeqEntropy,
126826 +                                    int* litEntropyWritten, int* seqEntropyWritten,
126827 +                                    U32 lastBlock)
126828 +{
126829 +    BYTE* const ostart = (BYTE*)dst;
126830 +    BYTE* const oend = ostart + dstCapacity;
126831 +    BYTE* op = ostart + ZSTD_blockHeaderSize;
126832 +    DEBUGLOG(5, "ZSTD_compressSubBlock (litSize=%zu, nbSeq=%zu, writeLitEntropy=%d, writeSeqEntropy=%d, lastBlock=%d)",
126833 +                litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock);
126834 +    {   size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable,
126835 +                                                        &entropyMetadata->hufMetadata, literals, litSize,
126836 +                                                        op, oend-op, bmi2, writeLitEntropy, litEntropyWritten);
126837 +        FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed");
126838 +        if (cLitSize == 0) return 0;
126839 +        op += cLitSize;
126840 +    }
126841 +    {   size_t cSeqSize = ZSTD_compressSubBlock_sequences(&entropy->fse,
126842 +                                                  &entropyMetadata->fseMetadata,
126843 +                                                  sequences, nbSeq,
126844 +                                                  llCode, mlCode, ofCode,
126845 +                                                  cctxParams,
126846 +                                                  op, oend-op,
126847 +                                                  bmi2, writeSeqEntropy, seqEntropyWritten);
126848 +        FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed");
126849 +        if (cSeqSize == 0) return 0;
126850 +        op += cSeqSize;
126851 +    }
126852 +    /* Write block header */
126853 +    {   size_t cSize = (op-ostart)-ZSTD_blockHeaderSize;
126854 +        U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
126855 +        MEM_writeLE24(ostart, cBlockHeader24);
126856 +    }
126857 +    return op-ostart;
126858 +}
126859 +
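+/* Worked example of the block header above (illustrative numbers only) :
+ * cSize=1000, lastBlock=0 : 0 + (bt_compressed(2)<<1) + (1000<<3)
+ * = 4 + 8000 = 0x1F44, written little-endian as bytes 44 1F 00. */
+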
126860 +static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize,
126861 +                                                const ZSTD_hufCTables_t* huf,
126862 +                                                const ZSTD_hufCTablesMetadata_t* hufMetadata,
126863 +                                                void* workspace, size_t wkspSize,
126864 +                                                int writeEntropy)
126865 +{
126866 +    unsigned* const countWksp = (unsigned*)workspace;
126867 +    unsigned maxSymbolValue = 255;
126868 +    size_t literalSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
126870 +    if (hufMetadata->hType == set_basic) return litSize;
126871 +    else if (hufMetadata->hType == set_rle) return 1;
126872 +    else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) {
126873 +        size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize);
126874 +        if (ZSTD_isError(largest)) return litSize;
126875 +        {   size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);
126876 +            if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;
126877 +            return cLitSizeEstimate + literalSectionHeaderSize;
126878 +    }   }
126879 +    assert(0); /* impossible */
126880 +    return 0;
126881 +}
126882 +
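+/* e.g. set_rle literals are estimated at 1 byte regardless of litSize,
+ * while the set_compressed estimate adds hufDesSize only when the table
+ * still has to be written (writeEntropy), plus the 3-byte section header. */
+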
126883 +static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type,
126884 +                        const BYTE* codeTable, unsigned maxCode,
126885 +                        size_t nbSeq, const FSE_CTable* fseCTable,
126886 +                        const U32* additionalBits,
126887 +                        short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
126888 +                        void* workspace, size_t wkspSize)
126889 +{
126890 +    unsigned* const countWksp = (unsigned*)workspace;
126891 +    const BYTE* ctp = codeTable;
126892 +    const BYTE* const ctStart = ctp;
126893 +    const BYTE* const ctEnd = ctStart + nbSeq;
126894 +    size_t cSymbolTypeSizeEstimateInBits = 0;
126895 +    unsigned max = maxCode;
126897 +    HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize);  /* can't fail */
126898 +    if (type == set_basic) {
126899 +        /* We selected this encoding type, so it must be valid. */
126900 +        assert(max <= defaultMax);
126901 +        cSymbolTypeSizeEstimateInBits = max <= defaultMax
126902 +                ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max)
126903 +                : ERROR(GENERIC);
126904 +    } else if (type == set_rle) {
126905 +        cSymbolTypeSizeEstimateInBits = 0;
126906 +    } else if (type == set_compressed || type == set_repeat) {
126907 +        cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
126908 +    }
126909 +    if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) return nbSeq * 10;
126910 +    while (ctp < ctEnd) {
126911 +        if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
126912 +        else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
126913 +        ctp++;
126914 +    }
126915 +    return cSymbolTypeSizeEstimateInBits / 8;
126916 +}
126917 +
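+/* e.g. for offsets the code value doubles as its extra-bit count, so a
+ * sequence with ofCode=10 contributes its FSE symbol cost plus 10 raw bits;
+ * the bit total over all codes, divided by 8, is the byte estimate. */
+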
126918 +static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable,
126919 +                                                  const BYTE* llCodeTable,
126920 +                                                  const BYTE* mlCodeTable,
126921 +                                                  size_t nbSeq,
126922 +                                                  const ZSTD_fseCTables_t* fseTables,
126923 +                                                  const ZSTD_fseCTablesMetadata_t* fseMetadata,
126924 +                                                  void* workspace, size_t wkspSize,
126925 +                                                  int writeEntropy)
126926 +{
126927 +    size_t sequencesSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
126928 +    size_t cSeqSizeEstimate = 0;
126929 +    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, MaxOff,
126930 +                                         nbSeq, fseTables->offcodeCTable, NULL,
126931 +                                         OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
126932 +                                         workspace, wkspSize);
126933 +    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, MaxLL,
126934 +                                         nbSeq, fseTables->litlengthCTable, LL_bits,
126935 +                                         LL_defaultNorm, LL_defaultNormLog, MaxLL,
126936 +                                         workspace, wkspSize);
126937 +    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, MaxML,
126938 +                                         nbSeq, fseTables->matchlengthCTable, ML_bits,
126939 +                                         ML_defaultNorm, ML_defaultNormLog, MaxML,
126940 +                                         workspace, wkspSize);
126941 +    if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
126942 +    return cSeqSizeEstimate + sequencesSectionHeaderSize;
126943 +}
126944 +
126945 +static size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,
126946 +                                        const BYTE* ofCodeTable,
126947 +                                        const BYTE* llCodeTable,
126948 +                                        const BYTE* mlCodeTable,
126949 +                                        size_t nbSeq,
126950 +                                        const ZSTD_entropyCTables_t* entropy,
126951 +                                        const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
126952 +                                        void* workspace, size_t wkspSize,
126953 +                                        int writeLitEntropy, int writeSeqEntropy) {
126954 +    size_t cSizeEstimate = 0;
126955 +    cSizeEstimate += ZSTD_estimateSubBlockSize_literal(literals, litSize,
126956 +                                                         &entropy->huf, &entropyMetadata->hufMetadata,
126957 +                                                         workspace, wkspSize, writeLitEntropy);
126958 +    cSizeEstimate += ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
126959 +                                                         nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
126960 +                                                         workspace, wkspSize, writeSeqEntropy);
126961 +    return cSizeEstimate + ZSTD_blockHeaderSize;
126962 +}
126963 +
126964 +static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata)
126965 +{
126966 +    if (fseMetadata->llType == set_compressed || fseMetadata->llType == set_rle)
126967 +        return 1;
126968 +    if (fseMetadata->mlType == set_compressed || fseMetadata->mlType == set_rle)
126969 +        return 1;
126970 +    if (fseMetadata->ofType == set_compressed || fseMetadata->ofType == set_rle)
126971 +        return 1;
126972 +    return 0;
126973 +}
126974 +
126975 +/** ZSTD_compressSubBlock_multi() :
126976 + *  Breaks super-block into multiple sub-blocks and compresses them.
126977 + *  Entropy will be written to the first block.
126978 + *  The following blocks will use repeat mode to compress.
126979 + *  All sub-blocks are compressed blocks (no raw or rle blocks).
126980 + *  @return : compressed size of the super block (which is multiple ZSTD blocks)
126981 + *            Or 0 if it failed to compress. */
126982 +static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
126983 +                            const ZSTD_compressedBlockState_t* prevCBlock,
126984 +                            ZSTD_compressedBlockState_t* nextCBlock,
126985 +                            const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
126986 +                            const ZSTD_CCtx_params* cctxParams,
126987 +                                  void* dst, size_t dstCapacity,
126988 +                            const void* src, size_t srcSize,
126989 +                            const int bmi2, U32 lastBlock,
126990 +                            void* workspace, size_t wkspSize)
126991 +{
126992 +    const seqDef* const sstart = seqStorePtr->sequencesStart;
126993 +    const seqDef* const send = seqStorePtr->sequences;
126994 +    const seqDef* sp = sstart;
126995 +    const BYTE* const lstart = seqStorePtr->litStart;
126996 +    const BYTE* const lend = seqStorePtr->lit;
126997 +    const BYTE* lp = lstart;
126998 +    BYTE const* ip = (BYTE const*)src;
126999 +    BYTE const* const iend = ip + srcSize;
127000 +    BYTE* const ostart = (BYTE*)dst;
127001 +    BYTE* const oend = ostart + dstCapacity;
127002 +    BYTE* op = ostart;
127003 +    const BYTE* llCodePtr = seqStorePtr->llCode;
127004 +    const BYTE* mlCodePtr = seqStorePtr->mlCode;
127005 +    const BYTE* ofCodePtr = seqStorePtr->ofCode;
127006 +    size_t targetCBlockSize = cctxParams->targetCBlockSize;
127007 +    size_t litSize, seqCount;
127008 +    int writeLitEntropy = entropyMetadata->hufMetadata.hType == set_compressed;
127009 +    int writeSeqEntropy = 1;
127010 +    int lastSequence = 0;
127012 +    DEBUGLOG(5, "ZSTD_compressSubBlock_multi (litSize=%u, nbSeq=%u)",
127013 +                (unsigned)(lend-lp), (unsigned)(send-sstart));
127015 +    litSize = 0;
127016 +    seqCount = 0;
127017 +    do {
127018 +        size_t cBlockSizeEstimate = 0;
127019 +        if (sstart == send) {
127020 +            lastSequence = 1;
127021 +        } else {
127022 +            const seqDef* const sequence = sp + seqCount;
127023 +            lastSequence = sequence == send - 1;
127024 +            litSize += ZSTD_getSequenceLength(seqStorePtr, sequence).litLength;
127025 +            seqCount++;
127026 +        }
127027 +        if (lastSequence) {
127028 +            assert(lp <= lend);
127029 +            assert(litSize <= (size_t)(lend - lp));
127030 +            litSize = (size_t)(lend - lp);
127031 +        }
127032 +        /* I think there is an optimization opportunity here.
127033 +         * Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful
127034 +         * since it recalculates the estimate from scratch.
127035 +         * For example, it would recount the literal distribution and symbol codes every time.
127036 +         */
127037 +        cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount,
127038 +                                                       &nextCBlock->entropy, entropyMetadata,
127039 +                                                       workspace, wkspSize, writeLitEntropy, writeSeqEntropy);
127040 +        if (cBlockSizeEstimate > targetCBlockSize || lastSequence) {
127041 +            int litEntropyWritten = 0;
127042 +            int seqEntropyWritten = 0;
127043 +            const size_t decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, lastSequence);
127044 +            const size_t cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
127045 +                                                       sp, seqCount,
127046 +                                                       lp, litSize,
127047 +                                                       llCodePtr, mlCodePtr, ofCodePtr,
127048 +                                                       cctxParams,
127049 +                                                       op, oend-op,
127050 +                                                       bmi2, writeLitEntropy, writeSeqEntropy,
127051 +                                                       &litEntropyWritten, &seqEntropyWritten,
127052 +                                                       lastBlock && lastSequence);
127053 +            FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");
127054 +            if (cSize > 0 && cSize < decompressedSize) {
127055 +                DEBUGLOG(5, "Committed the sub-block");
127056 +                assert(ip + decompressedSize <= iend);
127057 +                ip += decompressedSize;
127058 +                sp += seqCount;
127059 +                lp += litSize;
127060 +                op += cSize;
127061 +                llCodePtr += seqCount;
127062 +                mlCodePtr += seqCount;
127063 +                ofCodePtr += seqCount;
127064 +                litSize = 0;
127065 +                seqCount = 0;
127066 +                /* Entropy only needs to be written once */
127067 +                if (litEntropyWritten) {
127068 +                    writeLitEntropy = 0;
127069 +                }
127070 +                if (seqEntropyWritten) {
127071 +                    writeSeqEntropy = 0;
127072 +                }
127073 +            }
127074 +        }
127075 +    } while (!lastSequence);
127076 +    if (writeLitEntropy) {
127077 +        DEBUGLOG(5, "ZSTD_compressSubBlock_multi has literal entropy tables unwritten");
127078 +        ZSTD_memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf));
127079 +    }
127080 +    if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) {
127081 +        /* If we haven't written our entropy tables, then we've violated our contract and
127082 +         * must emit an uncompressed block.
127083 +         */
127084 +        DEBUGLOG(5, "ZSTD_compressSubBlock_multi has sequence entropy tables unwritten");
127085 +        return 0;
127086 +    }
127087 +    if (ip < iend) {
127088 +        size_t const cSize = ZSTD_noCompressBlock(op, oend - op, ip, iend - ip, lastBlock);
127089 +        DEBUGLOG(5, "ZSTD_compressSubBlock_multi last sub-block uncompressed, %zu bytes", (size_t)(iend - ip));
127090 +        FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
127091 +        assert(cSize != 0);
127092 +        op += cSize;
127093 +        /* We have to regenerate the repcodes because we've skipped some sequences */
127094 +        if (sp < send) {
127095 +            seqDef const* seq;
127096 +            repcodes_t rep;
127097 +            ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep));
127098 +            for (seq = sstart; seq < sp; ++seq) {
127099 +                rep = ZSTD_updateRep(rep.rep, seq->offset - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
127100 +            }
127101 +            ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep));
127102 +        }
127103 +    }
127104 +    DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed");
127105 +    return op-ostart;
127106 +}
127107 +
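+/* Illustrative walk-through (assumed numbers) : with targetCBlockSize=1300,
+ * suppose the running estimate grows 400 -> 900 -> 1450 as sequences are
+ * appended. At 1450 the estimate exceeds the target, so those sequences are
+ * flushed as one sub-block (entropy tables only in the first sub-block that
+ * commits), the counters reset, and accumulation resumes from the next
+ * sequence until lastSequence. */
+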
127108 +size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
127109 +                               void* dst, size_t dstCapacity,
127110 +                               void const* src, size_t srcSize,
127111 +                               unsigned lastBlock) {
127112 +    ZSTD_entropyCTablesMetadata_t entropyMetadata;
127114 +    FORWARD_IF_ERROR(ZSTD_buildSuperBlockEntropy(&zc->seqStore,
127115 +          &zc->blockState.prevCBlock->entropy,
127116 +          &zc->blockState.nextCBlock->entropy,
127117 +          &zc->appliedParams,
127118 +          &entropyMetadata,
127119 +          zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");
127121 +    return ZSTD_compressSubBlock_multi(&zc->seqStore,
127122 +            zc->blockState.prevCBlock,
127123 +            zc->blockState.nextCBlock,
127124 +            &entropyMetadata,
127125 +            &zc->appliedParams,
127126 +            dst, dstCapacity,
127127 +            src, srcSize,
127128 +            zc->bmi2, lastBlock,
127129 +            zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */);
127130 +}
127131 diff --git a/lib/zstd/compress/zstd_compress_superblock.h b/lib/zstd/compress/zstd_compress_superblock.h
127132 new file mode 100644
127133 index 000000000000..224ece79546e
127134 --- /dev/null
127135 +++ b/lib/zstd/compress/zstd_compress_superblock.h
127136 @@ -0,0 +1,32 @@
127137 +/*
127138 + * Copyright (c) Yann Collet, Facebook, Inc.
127139 + * All rights reserved.
127141 + * This source code is licensed under both the BSD-style license (found in the
127142 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
127143 + * in the COPYING file in the root directory of this source tree).
127144 + * You may select, at your option, one of the above-listed licenses.
127145 + */
127147 +#ifndef ZSTD_COMPRESS_ADVANCED_H
127148 +#define ZSTD_COMPRESS_ADVANCED_H
127150 +/*-*************************************
127151 +*  Dependencies
127152 +***************************************/
127154 +#include <linux/zstd.h> /* ZSTD_CCtx */
127156 +/*-*************************************
127157 +*  Target Compressed Block Size
127158 +***************************************/
127160 +/* ZSTD_compressSuperBlock() :
127161 + * Used to compress a super block when targetCBlockSize is being used.
127162 + * The given block will be compressed into multiple sub blocks that are around targetCBlockSize. */
127163 +size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
127164 +                               void* dst, size_t dstCapacity,
127165 +                               void const* src, size_t srcSize,
127166 +                               unsigned lastBlock);
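+/* A hedged usage sketch : this function is not called directly by users; the
+ * block compressor dispatches to it when a target compressed block size is
+ * configured. In upstream zstd's public API (not part of this kernel patch)
+ * that knob is set as, e.g. :
+ *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetCBlockSize, 1024);
+ * after which each block is emitted as sub blocks of roughly 1 KB. */
+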
127168 +#endif /* ZSTD_COMPRESS_ADVANCED_H */
127169 diff --git a/lib/zstd/compress/zstd_cwksp.h b/lib/zstd/compress/zstd_cwksp.h
127170 new file mode 100644
127171 index 000000000000..c231cc500ef5
127172 --- /dev/null
127173 +++ b/lib/zstd/compress/zstd_cwksp.h
127174 @@ -0,0 +1,482 @@
127175 +/*
127176 + * Copyright (c) Yann Collet, Facebook, Inc.
127177 + * All rights reserved.
127179 + * This source code is licensed under both the BSD-style license (found in the
127180 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
127181 + * in the COPYING file in the root directory of this source tree).
127182 + * You may select, at your option, one of the above-listed licenses.
127183 + */
127185 +#ifndef ZSTD_CWKSP_H
127186 +#define ZSTD_CWKSP_H
127188 +/*-*************************************
127189 +*  Dependencies
127190 +***************************************/
127191 +#include "../common/zstd_internal.h"
127194 +/*-*************************************
127195 +*  Constants
127196 +***************************************/
127198 +/* Since the workspace is effectively its own little malloc implementation /
127199 + * arena, when we run under ASAN, we should similarly insert redzones between
127200 + * each internal element of the workspace, so ASAN will catch overruns that
127201 + * reach outside an object but that stay inside the workspace.
127203 + * This defines the size of that redzone.
127204 + */
127205 +#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
127206 +#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
127207 +#endif
127209 +/*-*************************************
127210 +*  Structures
127211 +***************************************/
127212 +typedef enum {
127213 +    ZSTD_cwksp_alloc_objects,
127214 +    ZSTD_cwksp_alloc_buffers,
127215 +    ZSTD_cwksp_alloc_aligned
127216 +} ZSTD_cwksp_alloc_phase_e;
127217 +
127218 +/**
127219 + * Used to describe whether the workspace is statically allocated (and will not
127220 + * necessarily ever be freed), or if it's dynamically allocated and we can
127221 + * expect a well-formed caller to free this.
127222 + */
127223 +typedef enum {
127224 +    ZSTD_cwksp_dynamic_alloc,
127225 +    ZSTD_cwksp_static_alloc
127226 +} ZSTD_cwksp_static_alloc_e;
127227 +
127228 +/**
127229 + * Zstd fits all its internal datastructures into a single contiguous buffer,
127230 + * so that it only needs to perform a single OS allocation (or so that a buffer
127231 + * can be provided to it and it can perform no allocations at all). This buffer
127232 + * is called the workspace.
127234 + * Several optimizations complicate that process of allocating memory ranges
127235 + * from this workspace for each internal datastructure:
127237 + * - These different internal datastructures have different setup requirements:
127239 + *   - The static objects need to be cleared once and can then be trivially
127240 + *     reused for each compression.
127242 + *   - Various buffers don't need to be initialized at all--they are always
127243 + *     written into before they're read.
127245 + *   - The matchstate tables have a unique requirement that they don't need
127246 + *     their memory to be totally cleared, but they do need the memory to have
127247 + *     some bound, i.e., a guarantee that all values in the memory they've been
127248 + *     allocated are less than some maximum value (which is the starting value
127249 + *     for the indices that they will then use for compression). When this
127250 + *     guarantee is provided to them, they can use the memory without any setup
127251 + *     work. When it can't, they have to clear the area.
127253 + * - These buffers also have different alignment requirements.
127255 + * - We would like to reuse the objects in the workspace for multiple
127256 + *   compressions without having to perform any expensive reallocation or
127257 + *   reinitialization work.
127259 + * - We would like to be able to efficiently reuse the workspace across
127260 + *   multiple compressions **even when the compression parameters change** and
127261 + *   we need to resize some of the objects (where possible).
127263 + * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
127264 + * abstraction was created. It works as follows:
127266 + * Workspace Layout:
127268 + * [                        ... workspace ...                         ]
127269 + * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
127271 + * The various objects that live in the workspace are divided into the
127272 + * following categories, and are allocated separately:
127274 + * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
127275 + *   so that literally everything fits in a single buffer. Note: if present,
127276 + *   this must be the first object in the workspace, since ZSTD_customFree{CCtx,
127277 + *   CDict}() rely on a pointer comparison to see whether one or two frees are
127278 + *   required.
127280 + * - Fixed size objects: these are fixed-size, fixed-count objects that are
127281 + *   nonetheless "dynamically" allocated in the workspace so that we can
127282 + *   control how they're initialized separately from the broader ZSTD_CCtx.
127283 + *   Examples:
127284 + *   - Entropy Workspace
127285 + *   - 2 x ZSTD_compressedBlockState_t
127286 + *   - CDict dictionary contents
127288 + * - Tables: these are any of several different datastructures (hash tables,
127289 + *   chain tables, binary trees) that all respect a common format: they are
127290 + *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
127291 + *   Their sizes depend on the cparams.
127293 + * - Aligned: these buffers are used for various purposes that require 4 byte
127294 + *   alignment, but don't require any initialization before they're used.
127296 + * - Buffers: these buffers are used for various purposes that don't require
127297 + *   any alignment or initialization before they're used. This means they can
127298 + *   be moved around at no cost for a new compression.
127300 + * Allocating Memory:
127302 + * The various types of objects must be allocated in order, so they can be
127303 + * correctly packed into the workspace buffer. That order is:
127305 + * 1. Objects
127306 + * 2. Buffers
127307 + * 3. Aligned
127308 + * 4. Tables
127310 + * Attempts to reserve objects of different types out of order will fail.
127311 + */
127312 +typedef struct {
127313 +    void* workspace;
127314 +    void* workspaceEnd;
127316 +    void* objectEnd;
127317 +    void* tableEnd;
127318 +    void* tableValidEnd;
127319 +    void* allocStart;
127321 +    BYTE allocFailed;
127322 +    int workspaceOversizedDuration;
127323 +    ZSTD_cwksp_alloc_phase_e phase;
127324 +    ZSTD_cwksp_static_alloc_e isStatic;
127325 +} ZSTD_cwksp;
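+/* A minimal reservation sketch following the phase order documented above
+ * (sizes and contents are hypothetical; the reserve_* helpers are defined
+ * below) :
+ *   ZSTD_cwksp ws;
+ *   ZSTD_cwksp_init(&ws, mem, memSize, ZSTD_cwksp_static_alloc);
+ *   void* obj = ZSTD_cwksp_reserve_object(&ws, 64);                // objects
+ *   BYTE* buf = ZSTD_cwksp_reserve_buffer(&ws, 1 << 17);           // buffers
+ *   void* aln = ZSTD_cwksp_reserve_aligned(&ws, 64 * sizeof(U32)); // aligned
+ *   void* tbl = ZSTD_cwksp_reserve_table(&ws, (1 << 12) * sizeof(U32)); // tables
+ * Reserving out of this order fails (ws.allocFailed is set). */
+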
127327 +/*-*************************************
127328 +*  Functions
127329 +***************************************/
127331 +MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
127333 +MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
127334 +    (void)ws;
127335 +    assert(ws->workspace <= ws->objectEnd);
127336 +    assert(ws->objectEnd <= ws->tableEnd);
127337 +    assert(ws->objectEnd <= ws->tableValidEnd);
127338 +    assert(ws->tableEnd <= ws->allocStart);
127339 +    assert(ws->tableValidEnd <= ws->allocStart);
127340 +    assert(ws->allocStart <= ws->workspaceEnd);
127341 +}
127342 +
127343 +/**
127344 + * Align must be a power of 2.
127345 + */
127346 +MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
127347 +    size_t const mask = align - 1;
127348 +    assert((align & mask) == 0);
127349 +    return (size + mask) & ~mask;
127350 +}
127351 +
127352 +/**
127353 + * Use this to determine how much space in the workspace we will consume to
127354 + * allocate this object. (Normally it should be exactly the size of the object,
127355 + * but under special conditions, like ASAN, where we pad each object, it might
127356 + * be larger.)
127358 + * Since tables aren't currently redzoned, you don't need to call through this
127359 + * to figure out how much space you need for the matchState tables. Everything
127360 + * else is though.
127361 + */
127362 +MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
127363 +    if (size == 0)
127364 +        return 0;
127365 +    return size;
127366 +}
127367 +
127368 +MEM_STATIC void ZSTD_cwksp_internal_advance_phase(
127369 +        ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
127370 +    assert(phase >= ws->phase);
127371 +    if (phase > ws->phase) {
127372 +        if (ws->phase < ZSTD_cwksp_alloc_buffers &&
127373 +                phase >= ZSTD_cwksp_alloc_buffers) {
127374 +            ws->tableValidEnd = ws->objectEnd;
127375 +        }
127376 +        if (ws->phase < ZSTD_cwksp_alloc_aligned &&
127377 +                phase >= ZSTD_cwksp_alloc_aligned) {
127378 +            /* If unaligned allocations down from a too-large top have left us
127379 +             * unaligned, we need to realign our alloc ptr. Technically, this
127380 +             * can consume space that is unaccounted for in the neededSpace
127381 +             * calculation. However, I believe this can only happen when the
127382 +             * workspace is too large, and specifically when it is too large
127383 +             * by a larger margin than the space that will be consumed. */
127384 +            /* TODO: cleaner, compiler warning friendly way to do this??? */
127385 +            ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
127386 +            if (ws->allocStart < ws->tableValidEnd) {
127387 +                ws->tableValidEnd = ws->allocStart;
127388 +            }
127389 +        }
127390 +        ws->phase = phase;
127391 +    }
127392 +}
127393 +
127394 +/**
127395 + * Returns whether this object/buffer/etc was allocated in this workspace.
127396 + */
127397 +MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
127398 +    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
127399 +}
127400 +
127401 +/**
127402 + * Internal function. Do not use directly.
127403 + */
127404 +MEM_STATIC void* ZSTD_cwksp_reserve_internal(
127405 +        ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
127406 +    void* alloc;
127407 +    void* bottom = ws->tableEnd;
127408 +    ZSTD_cwksp_internal_advance_phase(ws, phase);
127409 +    alloc = (BYTE *)ws->allocStart - bytes;
127411 +    if (bytes == 0)
127412 +        return NULL;
127415 +    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
127416 +        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
127417 +    ZSTD_cwksp_assert_internal_consistency(ws);
127418 +    assert(alloc >= bottom);
127419 +    if (alloc < bottom) {
127420 +        DEBUGLOG(4, "cwksp: alloc failed!");
127421 +        ws->allocFailed = 1;
127422 +        return NULL;
127423 +    }
127424 +    if (alloc < ws->tableValidEnd) {
127425 +        ws->tableValidEnd = alloc;
127426 +    }
127427 +    ws->allocStart = alloc;
127430 +    return alloc;
127431 +}
127432 +
127433 +/**
127434 + * Reserves and returns unaligned memory.
127435 + */
127436 +MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
127437 +    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
127438 +}
127439 +
127440 +/**
127441 + * Reserves and returns memory sized on and aligned on sizeof(unsigned).
127442 + */
127443 +MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
127444 +    assert((bytes & (sizeof(U32)-1)) == 0);
127445 +    return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
127446 +}
127447 +
127448 +/**
127449 + * Aligned on sizeof(unsigned). These buffers have the special property that
127450 + * their values remain constrained, allowing us to re-use them without
127451 + * memset()-ing them.
127452 + */
127453 +MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
127454 +    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
127455 +    void* alloc = ws->tableEnd;
127456 +    void* end = (BYTE *)alloc + bytes;
127457 +    void* top = ws->allocStart;
127459 +    DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
127460 +        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
127461 +    assert((bytes & (sizeof(U32)-1)) == 0);
127462 +    ZSTD_cwksp_internal_advance_phase(ws, phase);
127463 +    ZSTD_cwksp_assert_internal_consistency(ws);
127464 +    assert(end <= top);
127465 +    if (end > top) {
127466 +        DEBUGLOG(4, "cwksp: table alloc failed!");
127467 +        ws->allocFailed = 1;
127468 +        return NULL;
127469 +    }
127470 +    ws->tableEnd = end;
127473 +    return alloc;
127474 +}
127475 +
127476 +/**
127477 + * Aligned on sizeof(void*).
127478 + */
127479 +MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
127480 +    size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
127481 +    void* alloc = ws->objectEnd;
127482 +    void* end = (BYTE*)alloc + roundedBytes;
127485 +    DEBUGLOG(5,
127486 +        "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
127487 +        alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
127488 +    assert(((size_t)alloc & (sizeof(void*)-1)) == 0);
127489 +    assert((bytes & (sizeof(void*)-1)) == 0);
127490 +    ZSTD_cwksp_assert_internal_consistency(ws);
127491 +    /* we must be in the first phase, no advance is possible */
127492 +    if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
127493 +        DEBUGLOG(4, "cwksp: object alloc failed!");
127494 +        ws->allocFailed = 1;
127495 +        return NULL;
127496 +    }
127497 +    ws->objectEnd = end;
127498 +    ws->tableEnd = end;
127499 +    ws->tableValidEnd = end;
127502 +    return alloc;
127503 +}
127504 +
127505 +MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
127506 +    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");
127509 +    assert(ws->tableValidEnd >= ws->objectEnd);
127510 +    assert(ws->tableValidEnd <= ws->allocStart);
127511 +    ws->tableValidEnd = ws->objectEnd;
127512 +    ZSTD_cwksp_assert_internal_consistency(ws);
127513 +}
127514 +
127515 +MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
127516 +    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
127517 +    assert(ws->tableValidEnd >= ws->objectEnd);
127518 +    assert(ws->tableValidEnd <= ws->allocStart);
127519 +    if (ws->tableValidEnd < ws->tableEnd) {
127520 +        ws->tableValidEnd = ws->tableEnd;
127521 +    }
127522 +    ZSTD_cwksp_assert_internal_consistency(ws);
127523 +}
127524 +
127525 +/**
127526 + * Zero the part of the allocated tables not already marked clean.
127527 + */
127528 +MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
127529 +    DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
127530 +    assert(ws->tableValidEnd >= ws->objectEnd);
127531 +    assert(ws->tableValidEnd <= ws->allocStart);
127532 +    if (ws->tableValidEnd < ws->tableEnd) {
127533 +        ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
127534 +    }
127535 +    ZSTD_cwksp_mark_tables_clean(ws);
127536 +}
127537 +
127538 +/**
127539 + * Invalidates table allocations.
127540 + * All other allocations remain valid.
127541 + */
127542 +MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
127543 +    DEBUGLOG(4, "cwksp: clearing tables!");
127546 +    ws->tableEnd = ws->objectEnd;
127547 +    ZSTD_cwksp_assert_internal_consistency(ws);
127548 +}
127549 +
127550 +/**
127551 + * Invalidates all buffer, aligned, and table allocations.
127552 + * Object allocations remain valid.
127553 + */
127554 +MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
127555 +    DEBUGLOG(4, "cwksp: clearing!");
127559 +    ws->tableEnd = ws->objectEnd;
127560 +    ws->allocStart = ws->workspaceEnd;
127561 +    ws->allocFailed = 0;
127562 +    if (ws->phase > ZSTD_cwksp_alloc_buffers) {
127563 +        ws->phase = ZSTD_cwksp_alloc_buffers;
127564 +    }
127565 +    ZSTD_cwksp_assert_internal_consistency(ws);
127566 +}
127567 +
127568 +/**
127569 + * The provided workspace takes ownership of the buffer [start, start+size).
127570 + * Any existing values in the workspace are ignored (the previously managed
127571 + * buffer, if present, must be separately freed).
127572 + */
127573 +MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
127574 +    DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
127575 +    assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
127576 +    ws->workspace = start;
127577 +    ws->workspaceEnd = (BYTE*)start + size;
127578 +    ws->objectEnd = ws->workspace;
127579 +    ws->tableValidEnd = ws->objectEnd;
127580 +    ws->phase = ZSTD_cwksp_alloc_objects;
127581 +    ws->isStatic = isStatic;
127582 +    ZSTD_cwksp_clear(ws);
127583 +    ws->workspaceOversizedDuration = 0;
127584 +    ZSTD_cwksp_assert_internal_consistency(ws);
127585 +}
127586 +
127587 +MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
127588 +    void* workspace = ZSTD_customMalloc(size, customMem);
127589 +    DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
127590 +    RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
127591 +    ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
127592 +    return 0;
127593 +}
127594 +
127595 +MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
127596 +    void *ptr = ws->workspace;
127597 +    DEBUGLOG(4, "cwksp: freeing workspace");
127598 +    ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
127599 +    ZSTD_customFree(ptr, customMem);
127600 +}
127601 +
127602 +/**
127603 + * Moves the management of a workspace from one cwksp to another. The src cwksp
127604 + * is left in an invalid state (src must be re-init()'ed before it's used again).
127605 + */
127606 +MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
127607 +    *dst = *src;
127608 +    ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
127609 +}
127610 +
127611 +MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
127612 +    return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
127613 +}
127614 +
127615 +MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
127616 +    return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
127617 +         + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
127618 +}
127619 +
127620 +MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
127621 +    return ws->allocFailed;
127622 +}
127623 +
127624 +/*-*************************************
127625 +*  Functions Checking Free Space
127626 +***************************************/
127628 +MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
127629 +    return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
127630 +}
127631 +
127632 +MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
127633 +    return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
127634 +}
127635 +
127636 +MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
127637 +    return ZSTD_cwksp_check_available(
127638 +        ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
127639 +}
127640 +
127641 +MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
127642 +    return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
127643 +        && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
127644 +}
127645 +
127646 +MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
127647 +        ZSTD_cwksp* ws, size_t additionalNeededSpace) {
127648 +    if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
127649 +        ws->workspaceOversizedDuration++;
127650 +    } else {
127651 +        ws->workspaceOversizedDuration = 0;
127652 +    }
127653 +}
127654 +
127655 +
127656 +#endif /* ZSTD_CWKSP_H */
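Taken together, the functions above define the workspace lifecycle: create (or init over a caller-owned buffer), reserve memory in phase order (objects first, then buffers, then aligned allocations and tables), clear between compressions, and free. A minimal sketch of that flow, assuming the ZSTD_cwksp_reserve_buffer() helper declared earlier in this header and the common ERR_isError() predicate; names and sizes are illustrative only:

    /* Illustrative driver, not part of the patch: exercises the phase
     * discipline enforced by ZSTD_cwksp_internal_advance_phase(). */
    static size_t cwksp_lifecycle_sketch(ZSTD_customMem customMem)
    {
        ZSTD_cwksp ws;
        size_t const initErr = ZSTD_cwksp_create(&ws, (size_t)1 << 20, customMem);
        if (ERR_isError(initErr)) return initErr;

        {   /* objects: allocated once, survive ZSTD_cwksp_clear() */
            void* const obj = ZSTD_cwksp_reserve_object(&ws, 64);
            /* buffers before tables: phases only ever advance */
            BYTE* const buf   = ZSTD_cwksp_reserve_buffer(&ws, (size_t)1 << 10);
            U32*  const table = (U32*)ZSTD_cwksp_reserve_table(&ws, sizeof(U32) << 10);
            (void)obj; (void)buf; (void)table;
            if (ZSTD_cwksp_reserve_failed(&ws)) { /* workspace too small */ }
        }

        ZSTD_cwksp_clear(&ws);           /* invalidates buffers and tables, keeps objects */
        ZSTD_cwksp_free(&ws, customMem); /* releases the underlying allocation */
        return 0;
    }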
127657 diff --git a/lib/zstd/compress/zstd_double_fast.c b/lib/zstd/compress/zstd_double_fast.c
127658 new file mode 100644
127659 index 000000000000..b99172e9d2e4
127660 --- /dev/null
127661 +++ b/lib/zstd/compress/zstd_double_fast.c
127662 @@ -0,0 +1,521 @@
127663 +/*
127664 + * Copyright (c) Yann Collet, Facebook, Inc.
127665 + * All rights reserved.
127666 + *
127667 + * This source code is licensed under both the BSD-style license (found in the
127668 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
127669 + * in the COPYING file in the root directory of this source tree).
127670 + * You may select, at your option, one of the above-listed licenses.
127671 + */
127673 +#include "zstd_compress_internal.h"
127674 +#include "zstd_double_fast.h"
127677 +void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
127678 +                              void const* end, ZSTD_dictTableLoadMethod_e dtlm)
127679 +{
127680 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
127681 +    U32* const hashLarge = ms->hashTable;
127682 +    U32  const hBitsL = cParams->hashLog;
127683 +    U32  const mls = cParams->minMatch;
127684 +    U32* const hashSmall = ms->chainTable;
127685 +    U32  const hBitsS = cParams->chainLog;
127686 +    const BYTE* const base = ms->window.base;
127687 +    const BYTE* ip = base + ms->nextToUpdate;
127688 +    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
127689 +    const U32 fastHashFillStep = 3;
127691 +    /* Insert every fastHashFillStep-th position into both hash tables.
127692 +     * For ZSTD_dtlm_full, also insert the intermediate positions into
127693 +     * the large hash table whenever their entry is still empty.
127694 +     */
127695 +    for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
127696 +        U32 const curr = (U32)(ip - base);
127697 +        U32 i;
127698 +        for (i = 0; i < fastHashFillStep; ++i) {
127699 +            size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls);
127700 +            size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8);
127701 +            if (i == 0)
127702 +                hashSmall[smHash] = curr + i;
127703 +            if (i == 0 || hashLarge[lgHash] == 0)
127704 +                hashLarge[lgHash] = curr + i;
127705 +            /* Only load extra positions for ZSTD_dtlm_full */
127706 +            if (dtlm == ZSTD_dtlm_fast)
127707 +                break;
127708 +    }   }
127709 +}
127710 +
127711 +
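With fastHashFillStep fixed at 3, the loop above yields the following insertion pattern (a sketch; positions are offsets from ms->window.base, and the 'o' entries are written only under ZSTD_dtlm_full and only while the slot is still empty):

    /*
     *   position:    0  1  2  3  4  5  6  7  8  ...
     *   hashSmall:   x        x        x           (every 3rd position)
     *   hashLarge:   x  o  o  x  o  o  x  o  o     (x = always written)
     */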
127712 +FORCE_INLINE_TEMPLATE
127713 +size_t ZSTD_compressBlock_doubleFast_generic(
127714 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
127715 +        void const* src, size_t srcSize,
127716 +        U32 const mls /* template */, ZSTD_dictMode_e const dictMode)
127717 +{
127718 +    ZSTD_compressionParameters const* cParams = &ms->cParams;
127719 +    U32* const hashLong = ms->hashTable;
127720 +    const U32 hBitsL = cParams->hashLog;
127721 +    U32* const hashSmall = ms->chainTable;
127722 +    const U32 hBitsS = cParams->chainLog;
127723 +    const BYTE* const base = ms->window.base;
127724 +    const BYTE* const istart = (const BYTE*)src;
127725 +    const BYTE* ip = istart;
127726 +    const BYTE* anchor = istart;
127727 +    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
127728 +    /* presumes that, if there is a dictionary, it must be using Attach mode */
127729 +    const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
127730 +    const BYTE* const prefixLowest = base + prefixLowestIndex;
127731 +    const BYTE* const iend = istart + srcSize;
127732 +    const BYTE* const ilimit = iend - HASH_READ_SIZE;
127733 +    U32 offset_1=rep[0], offset_2=rep[1];
127734 +    U32 offsetSaved = 0;
127736 +    const ZSTD_matchState_t* const dms = ms->dictMatchState;
127737 +    const ZSTD_compressionParameters* const dictCParams =
127738 +                                     dictMode == ZSTD_dictMatchState ?
127739 +                                     &dms->cParams : NULL;
127740 +    const U32* const dictHashLong  = dictMode == ZSTD_dictMatchState ?
127741 +                                     dms->hashTable : NULL;
127742 +    const U32* const dictHashSmall = dictMode == ZSTD_dictMatchState ?
127743 +                                     dms->chainTable : NULL;
127744 +    const U32 dictStartIndex       = dictMode == ZSTD_dictMatchState ?
127745 +                                     dms->window.dictLimit : 0;
127746 +    const BYTE* const dictBase     = dictMode == ZSTD_dictMatchState ?
127747 +                                     dms->window.base : NULL;
127748 +    const BYTE* const dictStart    = dictMode == ZSTD_dictMatchState ?
127749 +                                     dictBase + dictStartIndex : NULL;
127750 +    const BYTE* const dictEnd      = dictMode == ZSTD_dictMatchState ?
127751 +                                     dms->window.nextSrc : NULL;
127752 +    const U32 dictIndexDelta       = dictMode == ZSTD_dictMatchState ?
127753 +                                     prefixLowestIndex - (U32)(dictEnd - dictBase) :
127754 +                                     0;
127755 +    const U32 dictHBitsL           = dictMode == ZSTD_dictMatchState ?
127756 +                                     dictCParams->hashLog : hBitsL;
127757 +    const U32 dictHBitsS           = dictMode == ZSTD_dictMatchState ?
127758 +                                     dictCParams->chainLog : hBitsS;
127759 +    const U32 dictAndPrefixLength  = (U32)((ip - prefixLowest) + (dictEnd - dictStart));
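dictIndexDelta splices the dictionary's index space onto the front of the prefix's, so a dictionary index d behaves like local index d + dictIndexDelta. A worked example with illustrative numbers:

    /* Example: a 100-byte dictionary (dictEnd - dictBase == 100) attached to
     * a window with prefixLowestIndex == 1000 gives
     *     dictIndexDelta = 1000 - 100 = 900,
     * so dictionary index 40 acts as local index 940 (60 bytes before the
     * prefix), and offsets below come out as curr - (dictIndex + dictIndexDelta). */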
127761 +    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_generic");
127763 +    assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState);
127765 +    /* if a dictionary is attached, it must be within window range */
127766 +    if (dictMode == ZSTD_dictMatchState) {
127767 +        assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex);
127768 +    }
127770 +    /* init */
127771 +    ip += (dictAndPrefixLength == 0);
127772 +    if (dictMode == ZSTD_noDict) {
127773 +        U32 const curr = (U32)(ip - base);
127774 +        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
127775 +        U32 const maxRep = curr - windowLow;
127776 +        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
127777 +        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
127778 +    }
127779 +    if (dictMode == ZSTD_dictMatchState) {
127780 +        /* The dictMatchState repCode checks don't currently handle
127781 +         * disabled repcodes (repCode == 0). */
127782 +        assert(offset_1 <= dictAndPrefixLength);
127783 +        assert(offset_2 <= dictAndPrefixLength);
127784 +    }
127786 +    /* Main Search Loop */
127787 +    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
127788 +        size_t mLength;
127789 +        U32 offset;
127790 +        size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
127791 +        size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
127792 +        size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8);
127793 +        size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls);
127794 +        U32 const curr = (U32)(ip-base);
127795 +        U32 const matchIndexL = hashLong[h2];
127796 +        U32 matchIndexS = hashSmall[h];
127797 +        const BYTE* matchLong = base + matchIndexL;
127798 +        const BYTE* match = base + matchIndexS;
127799 +        const U32 repIndex = curr + 1 - offset_1;
127800 +        const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
127801 +                            && repIndex < prefixLowestIndex) ?
127802 +                               dictBase + (repIndex - dictIndexDelta) :
127803 +                               base + repIndex;
127804 +        hashLong[h2] = hashSmall[h] = curr;   /* update hash tables */
127806 +        /* check dictMatchState repcode */
127807 +        if (dictMode == ZSTD_dictMatchState
127808 +            && ((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
127809 +            && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
127810 +            const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
127811 +            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
127812 +            ip++;
127813 +            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
127814 +            goto _match_stored;
127815 +        }
127817 +        /* check noDict repcode */
127818 +        if ( dictMode == ZSTD_noDict
127819 +          && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
127820 +            mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
127821 +            ip++;
127822 +            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
127823 +            goto _match_stored;
127824 +        }
127826 +        if (matchIndexL > prefixLowestIndex) {
127827 +            /* check prefix long match */
127828 +            if (MEM_read64(matchLong) == MEM_read64(ip)) {
127829 +                mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
127830 +                offset = (U32)(ip-matchLong);
127831 +                while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
127832 +                goto _match_found;
127833 +            }
127834 +        } else if (dictMode == ZSTD_dictMatchState) {
127835 +            /* check dictMatchState long match */
127836 +            U32 const dictMatchIndexL = dictHashLong[dictHL];
127837 +            const BYTE* dictMatchL = dictBase + dictMatchIndexL;
127838 +            assert(dictMatchL < dictEnd);
127840 +            if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) {
127841 +                mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8;
127842 +                offset = (U32)(curr - dictMatchIndexL - dictIndexDelta);
127843 +                while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */
127844 +                goto _match_found;
127845 +        }   }
127847 +        if (matchIndexS > prefixLowestIndex) {
127848 +            /* check prefix short match */
127849 +            if (MEM_read32(match) == MEM_read32(ip)) {
127850 +                goto _search_next_long;
127851 +            }
127852 +        } else if (dictMode == ZSTD_dictMatchState) {
127853 +            /* check dictMatchState short match */
127854 +            U32 const dictMatchIndexS = dictHashSmall[dictHS];
127855 +            match = dictBase + dictMatchIndexS;
127856 +            matchIndexS = dictMatchIndexS + dictIndexDelta;
127858 +            if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) {
127859 +                goto _search_next_long;
127860 +        }   }
127862 +        ip += ((ip-anchor) >> kSearchStrength) + 1;
127863 +#if defined(__aarch64__)
127864 +        PREFETCH_L1(ip+256);
127865 +#endif
127866 +        continue;
127868 +_search_next_long:
127870 +        {   size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
127871 +            size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8);
127872 +            U32 const matchIndexL3 = hashLong[hl3];
127873 +            const BYTE* matchL3 = base + matchIndexL3;
127874 +            hashLong[hl3] = curr + 1;
127876 +            /* check prefix long +1 match */
127877 +            if (matchIndexL3 > prefixLowestIndex) {
127878 +                if (MEM_read64(matchL3) == MEM_read64(ip+1)) {
127879 +                    mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
127880 +                    ip++;
127881 +                    offset = (U32)(ip-matchL3);
127882 +                    while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
127883 +                    goto _match_found;
127884 +                }
127885 +            } else if (dictMode == ZSTD_dictMatchState) {
127886 +                /* check dict long +1 match */
127887 +                U32 const dictMatchIndexL3 = dictHashLong[dictHLNext];
127888 +                const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3;
127889 +                assert(dictMatchL3 < dictEnd);
127890 +                if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) {
127891 +                    mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8;
127892 +                    ip++;
127893 +                    offset = (U32)(curr + 1 - dictMatchIndexL3 - dictIndexDelta);
127894 +                    while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */
127895 +                    goto _match_found;
127896 +        }   }   }
127898 +        /* if no long +1 match, explore the short match we found */
127899 +        if (dictMode == ZSTD_dictMatchState && matchIndexS < prefixLowestIndex) {
127900 +            mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4;
127901 +            offset = (U32)(curr - matchIndexS);
127902 +            while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
127903 +        } else {
127904 +            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
127905 +            offset = (U32)(ip - match);
127906 +            while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
127907 +        }
127909 +        /* fall-through */
127911 +_match_found:
127912 +        offset_2 = offset_1;
127913 +        offset_1 = offset;
127915 +        ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
127917 +_match_stored:
127918 +        /* match found */
127919 +        ip += mLength;
127920 +        anchor = ip;
127922 +        if (ip <= ilimit) {
127923 +            /* Complementary insertion */
127924 +            /* done after iLimit test, as candidates could be > iend-8 */
127925 +            {   U32 const indexToInsert = curr+2;
127926 +                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
127927 +                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
127928 +                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
127929 +                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
127930 +            }
127932 +            /* check immediate repcode */
127933 +            if (dictMode == ZSTD_dictMatchState) {
127934 +                while (ip <= ilimit) {
127935 +                    U32 const current2 = (U32)(ip-base);
127936 +                    U32 const repIndex2 = current2 - offset_2;
127937 +                    const BYTE* repMatch2 = dictMode == ZSTD_dictMatchState
127938 +                        && repIndex2 < prefixLowestIndex ?
127939 +                            dictBase + repIndex2 - dictIndexDelta :
127940 +                            base + repIndex2;
127941 +                    if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
127942 +                       && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
127943 +                        const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
127944 +                        size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
127945 +                        U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
127946 +                        ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
127947 +                        hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
127948 +                        hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
127949 +                        ip += repLength2;
127950 +                        anchor = ip;
127951 +                        continue;
127952 +                    }
127953 +                    break;
127954 +            }   }
127956 +            if (dictMode == ZSTD_noDict) {
127957 +                while ( (ip <= ilimit)
127958 +                     && ( (offset_2>0)
127959 +                        & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
127960 +                    /* store sequence */
127961 +                    size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
127962 +                    U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;  /* swap offset_2 <=> offset_1 */
127963 +                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
127964 +                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
127965 +                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, rLength-MINMATCH);
127966 +                    ip += rLength;
127967 +                    anchor = ip;
127968 +                    continue;   /* faster when present ... (?) */
127969 +        }   }   }
127970 +    }   /* while (ip < ilimit) */
127972 +    /* save reps for next block */
127973 +    rep[0] = offset_1 ? offset_1 : offsetSaved;
127974 +    rep[1] = offset_2 ? offset_2 : offsetSaved;
127976 +    /* Return the last literals size */
127977 +    return (size_t)(iend - anchor);
127978 +}
127979 +
127980 +
127981 +size_t ZSTD_compressBlock_doubleFast(
127982 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
127983 +        void const* src, size_t srcSize)
127984 +{
127985 +    const U32 mls = ms->cParams.minMatch;
127986 +    switch(mls)
127987 +    {
127988 +    default: /* includes case 3 */
127989 +    case 4 :
127990 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict);
127991 +    case 5 :
127992 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict);
127993 +    case 6 :
127994 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict);
127995 +    case 7 :
127996 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict);
127997 +    }
127998 +}
127999 +
128000 +
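The switch above, repeated for every dispatcher in this file, pins the mls template parameter to a compile-time constant so that each force-inlined instantiation of the _generic body is specialized for one minMatch value and its dead branches are eliminated. A reduced sketch of the idiom, with hypothetical names:

    /* Hypothetical reduction of the dispatch idiom used throughout this file. */
    FORCE_INLINE_TEMPLATE size_t body_generic(size_t srcSize, U32 const mls)
    {
        return srcSize / mls;   /* mls is a constant inside each instantiation */
    }

    static size_t body(size_t srcSize, U32 minMatch)
    {
        switch (minMatch) {
        default: /* includes case 3 */
        case 4 : return body_generic(srcSize, 4);
        case 5 : return body_generic(srcSize, 5);
        case 6 : return body_generic(srcSize, 6);
        case 7 : return body_generic(srcSize, 7);
        }
    }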
128001 +size_t ZSTD_compressBlock_doubleFast_dictMatchState(
128002 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
128003 +        void const* src, size_t srcSize)
128004 +{
128005 +    const U32 mls = ms->cParams.minMatch;
128006 +    switch(mls)
128007 +    {
128008 +    default: /* includes case 3 */
128009 +    case 4 :
128010 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_dictMatchState);
128011 +    case 5 :
128012 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_dictMatchState);
128013 +    case 6 :
128014 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_dictMatchState);
128015 +    case 7 :
128016 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_dictMatchState);
128017 +    }
128018 +}
128019 +
128020 +
128021 +static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
128022 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
128023 +        void const* src, size_t srcSize,
128024 +        U32 const mls /* template */)
128025 +{
128026 +    ZSTD_compressionParameters const* cParams = &ms->cParams;
128027 +    U32* const hashLong = ms->hashTable;
128028 +    U32  const hBitsL = cParams->hashLog;
128029 +    U32* const hashSmall = ms->chainTable;
128030 +    U32  const hBitsS = cParams->chainLog;
128031 +    const BYTE* const istart = (const BYTE*)src;
128032 +    const BYTE* ip = istart;
128033 +    const BYTE* anchor = istart;
128034 +    const BYTE* const iend = istart + srcSize;
128035 +    const BYTE* const ilimit = iend - 8;
128036 +    const BYTE* const base = ms->window.base;
128037 +    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
128038 +    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
128039 +    const U32   dictStartIndex = lowLimit;
128040 +    const U32   dictLimit = ms->window.dictLimit;
128041 +    const U32   prefixStartIndex = (dictLimit > lowLimit) ? dictLimit : lowLimit;
128042 +    const BYTE* const prefixStart = base + prefixStartIndex;
128043 +    const BYTE* const dictBase = ms->window.dictBase;
128044 +    const BYTE* const dictStart = dictBase + dictStartIndex;
128045 +    const BYTE* const dictEnd = dictBase + prefixStartIndex;
128046 +    U32 offset_1=rep[0], offset_2=rep[1];
128048 +    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)", srcSize);
128050 +    /* if extDict is invalidated due to maxDistance, switch to "regular" variant */
128051 +    if (prefixStartIndex == dictStartIndex)
128052 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, mls, ZSTD_noDict);
128054 +    /* Search Loop */
128055 +    while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
128056 +        const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
128057 +        const U32 matchIndex = hashSmall[hSmall];
128058 +        const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
128059 +        const BYTE* match = matchBase + matchIndex;
128061 +        const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
128062 +        const U32 matchLongIndex = hashLong[hLong];
128063 +        const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : base;
128064 +        const BYTE* matchLong = matchLongBase + matchLongIndex;
128066 +        const U32 curr = (U32)(ip-base);
128067 +        const U32 repIndex = curr + 1 - offset_1;   /* offset_1 expected <= curr +1 */
128068 +        const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
128069 +        const BYTE* const repMatch = repBase + repIndex;
128070 +        size_t mLength;
128071 +        hashSmall[hSmall] = hashLong[hLong] = curr;   /* update hash table */
128073 +        if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */
128074 +            & (repIndex > dictStartIndex))
128075 +          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
128076 +            const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
128077 +            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
128078 +            ip++;
128079 +            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
128080 +        } else {
128081 +            if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
128082 +                const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
128083 +                const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart;
128084 +                U32 offset;
128085 +                mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8;
128086 +                offset = curr - matchLongIndex;
128087 +                while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; }   /* catch up */
128088 +                offset_2 = offset_1;
128089 +                offset_1 = offset;
128090 +                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
128092 +            } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
128093 +                size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
128094 +                U32 const matchIndex3 = hashLong[h3];
128095 +                const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base;
128096 +                const BYTE* match3 = match3Base + matchIndex3;
128097 +                U32 offset;
128098 +                hashLong[h3] = curr + 1;
128099 +                if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
128100 +                    const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend;
128101 +                    const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart;
128102 +                    mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8;
128103 +                    ip++;
128104 +                    offset = curr+1 - matchIndex3;
128105 +                    while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
128106 +                } else {
128107 +                    const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
128108 +                    const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
128109 +                    mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
128110 +                    offset = curr - matchIndex;
128111 +                    while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
128112 +                }
128113 +                offset_2 = offset_1;
128114 +                offset_1 = offset;
128115 +                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
128117 +            } else {
128118 +                ip += ((ip-anchor) >> kSearchStrength) + 1;
128119 +                continue;
128120 +        }   }
128122 +        /* move to next sequence start */
128123 +        ip += mLength;
128124 +        anchor = ip;
128126 +        if (ip <= ilimit) {
128127 +            /* Complementary insertion */
128128 +            /* done after iLimit test, as candidates could be > iend-8 */
128129 +            {   U32 const indexToInsert = curr+2;
128130 +                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
128131 +                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
128132 +                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
128133 +                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
128134 +            }
128136 +            /* check immediate repcode */
128137 +            while (ip <= ilimit) {
128138 +                U32 const current2 = (U32)(ip-base);
128139 +                U32 const repIndex2 = current2 - offset_2;
128140 +                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
128141 +                if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3)   /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */
128142 +                    & (repIndex2 > dictStartIndex))
128143 +                  && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
128144 +                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
128145 +                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
128146 +                    U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
128147 +                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
128148 +                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
128149 +                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
128150 +                    ip += repLength2;
128151 +                    anchor = ip;
128152 +                    continue;
128153 +                }
128154 +                break;
128155 +    }   }   }
128157 +    /* save reps for next block */
128158 +    rep[0] = offset_1;
128159 +    rep[1] = offset_2;
128161 +    /* Return the last literals size */
128162 +    return (size_t)(iend - anchor);
128163 +}
128164 +
128165 +
128166 +size_t ZSTD_compressBlock_doubleFast_extDict(
128167 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
128168 +        void const* src, size_t srcSize)
128169 +{
128170 +    U32 const mls = ms->cParams.minMatch;
128171 +    switch(mls)
128172 +    {
128173 +    default: /* includes case 3 */
128174 +    case 4 :
128175 +        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
128176 +    case 5 :
128177 +        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
128178 +    case 6 :
128179 +        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
128180 +    case 7 :
128181 +        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
128182 +    }
128183 +}
128184 diff --git a/lib/zstd/compress/zstd_double_fast.h b/lib/zstd/compress/zstd_double_fast.h
128185 new file mode 100644
128186 index 000000000000..6822bde65a1d
128187 --- /dev/null
128188 +++ b/lib/zstd/compress/zstd_double_fast.h
128189 @@ -0,0 +1,32 @@
128190 +/*
128191 + * Copyright (c) Yann Collet, Facebook, Inc.
128192 + * All rights reserved.
128193 + *
128194 + * This source code is licensed under both the BSD-style license (found in the
128195 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
128196 + * in the COPYING file in the root directory of this source tree).
128197 + * You may select, at your option, one of the above-listed licenses.
128198 + */
128200 +#ifndef ZSTD_DOUBLE_FAST_H
128201 +#define ZSTD_DOUBLE_FAST_H
128204 +#include "../common/mem.h"      /* U32 */
128205 +#include "zstd_compress_internal.h"     /* ZSTD_CCtx, size_t */
128207 +void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
128208 +                              void const* end, ZSTD_dictTableLoadMethod_e dtlm);
128209 +size_t ZSTD_compressBlock_doubleFast(
128210 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
128211 +        void const* src, size_t srcSize);
128212 +size_t ZSTD_compressBlock_doubleFast_dictMatchState(
128213 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
128214 +        void const* src, size_t srcSize);
128215 +size_t ZSTD_compressBlock_doubleFast_extDict(
128216 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
128217 +        void const* src, size_t srcSize);
128221 +#endif /* ZSTD_DOUBLE_FAST_H */
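The three entry points declared here correspond to the three dictionary modes. The real selection happens in zstd_compress.c's block-compressor table; the following hypothetical selector only illustrates the pairing, assuming a ZSTD_dictMode_e value is already at hand:

    static size_t doubleFast_for_mode(ZSTD_dictMode_e mode,
            ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
            const void* src, size_t srcSize)
    {
        switch (mode) {
        case ZSTD_extDict:
            return ZSTD_compressBlock_doubleFast_extDict(ms, seqStore, rep, src, srcSize);
        case ZSTD_dictMatchState:
            return ZSTD_compressBlock_doubleFast_dictMatchState(ms, seqStore, rep, src, srcSize);
        case ZSTD_noDict:
        default:
            return ZSTD_compressBlock_doubleFast(ms, seqStore, rep, src, srcSize);
        }
    }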
128222 diff --git a/lib/zstd/compress/zstd_fast.c b/lib/zstd/compress/zstd_fast.c
128223 new file mode 100644
128224 index 000000000000..96b7d48e2868
128225 --- /dev/null
128226 +++ b/lib/zstd/compress/zstd_fast.c
128227 @@ -0,0 +1,496 @@
128228 +/*
128229 + * Copyright (c) Yann Collet, Facebook, Inc.
128230 + * All rights reserved.
128231 + *
128232 + * This source code is licensed under both the BSD-style license (found in the
128233 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
128234 + * in the COPYING file in the root directory of this source tree).
128235 + * You may select, at your option, one of the above-listed licenses.
128236 + */
128238 +#include "zstd_compress_internal.h"  /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
128239 +#include "zstd_fast.h"
128242 +void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
128243 +                        const void* const end,
128244 +                        ZSTD_dictTableLoadMethod_e dtlm)
128245 +{
128246 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
128247 +    U32* const hashTable = ms->hashTable;
128248 +    U32  const hBits = cParams->hashLog;
128249 +    U32  const mls = cParams->minMatch;
128250 +    const BYTE* const base = ms->window.base;
128251 +    const BYTE* ip = base + ms->nextToUpdate;
128252 +    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
128253 +    const U32 fastHashFillStep = 3;
128255 +    /* Always insert every fastHashFillStep position into the hash table.
128256 +     * Insert the other positions if their hash entry is empty.
128257 +     */
128258 +    for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
128259 +        U32 const curr = (U32)(ip - base);
128260 +        size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
128261 +        hashTable[hash0] = curr;
128262 +        if (dtlm == ZSTD_dtlm_fast) continue;
128263 +        /* Only load extra positions for ZSTD_dtlm_full */
128264 +        {   U32 p;
128265 +            for (p = 1; p < fastHashFillStep; ++p) {
128266 +                size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
128267 +                if (hashTable[hash] == 0) {  /* not yet filled */
128268 +                    hashTable[hash] = curr + p;
128269 +    }   }   }   }
128270 +}
128271 +
128272 +
128273 +FORCE_INLINE_TEMPLATE size_t
128274 +ZSTD_compressBlock_fast_generic(
128275 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
128276 +        void const* src, size_t srcSize,
128277 +        U32 const mls)
128278 +{
128279 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
128280 +    U32* const hashTable = ms->hashTable;
128281 +    U32 const hlog = cParams->hashLog;
128282 +    /* support stepSize of 0 */
128283 +    size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
128284 +    const BYTE* const base = ms->window.base;
128285 +    const BYTE* const istart = (const BYTE*)src;
128286 +    /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */
128287 +    const BYTE* ip0 = istart;
128288 +    const BYTE* ip1;
128289 +    const BYTE* anchor = istart;
128290 +    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
128291 +    const U32   prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
128292 +    const BYTE* const prefixStart = base + prefixStartIndex;
128293 +    const BYTE* const iend = istart + srcSize;
128294 +    const BYTE* const ilimit = iend - HASH_READ_SIZE;
128295 +    U32 offset_1=rep[0], offset_2=rep[1];
128296 +    U32 offsetSaved = 0;
128298 +    /* init */
128299 +    DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
128300 +    ip0 += (ip0 == prefixStart);
128301 +    ip1 = ip0 + 1;
128302 +    {   U32 const curr = (U32)(ip0 - base);
128303 +        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
128304 +        U32 const maxRep = curr - windowLow;
128305 +        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
128306 +        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
128307 +    }
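A worked example of the rep-code gate above, with illustrative values:

    /* curr == 100, windowLow == 40  =>  maxRep == 60.
     * offset_1 == 50 stays usable; offset_2 == 70 would point below the
     * window, so it is parked in offsetSaved and zeroed for this block.
     * At block end, rep[i] = offset_i ? offset_i : offsetSaved restores it. */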
128309 +    /* Main Search Loop */
128310 +#ifdef __INTEL_COMPILER
128311 +    /* From Intel: 'The vector pragma indicates that the loop should be
128312 +     * vectorized if it is legal to do so'. It can be combined with
128313 +     * #pragma ivdep, but we exclude that here because Intel itself
128314 +     * warns against using it. */
128315 +    #pragma vector always
128316 +#endif
128317 +    while (ip1 < ilimit) {   /* < instead of <=, because check at ip0+2 */
128318 +        size_t mLength;
128319 +        BYTE const* ip2 = ip0 + 2;
128320 +        size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls);
128321 +        U32 const val0 = MEM_read32(ip0);
128322 +        size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls);
128323 +        U32 const val1 = MEM_read32(ip1);
128324 +        U32 const current0 = (U32)(ip0-base);
128325 +        U32 const current1 = (U32)(ip1-base);
128326 +        U32 const matchIndex0 = hashTable[h0];
128327 +        U32 const matchIndex1 = hashTable[h1];
128328 +        BYTE const* repMatch = ip2 - offset_1;
128329 +        const BYTE* match0 = base + matchIndex0;
128330 +        const BYTE* match1 = base + matchIndex1;
128331 +        U32 offcode;
128333 +#if defined(__aarch64__)
128334 +        PREFETCH_L1(ip0+256);
128335 +#endif
128337 +        hashTable[h0] = current0;   /* update hash table */
128338 +        hashTable[h1] = current1;   /* update hash table */
128340 +        assert(ip0 + 1 == ip1);
128342 +        if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) {
128343 +            mLength = (ip2[-1] == repMatch[-1]) ? 1 : 0;
128344 +            ip0 = ip2 - mLength;
128345 +            match0 = repMatch - mLength;
128346 +            mLength += 4;
128347 +            offcode = 0;
128348 +            goto _match;
128349 +        }
128350 +        if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) {
128351 +            /* found a regular match */
128352 +            goto _offset;
128353 +        }
128354 +        if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) {
128355 +            /* found a regular match after one literal */
128356 +            ip0 = ip1;
128357 +            match0 = match1;
128358 +            goto _offset;
128359 +        }
128360 +        {   size_t const step = ((size_t)(ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
128361 +            assert(step >= 2);
128362 +            ip0 += step;
128363 +            ip1 += step;
128364 +            continue;
128365 +        }
128366 +_offset: /* Requires: ip0, match0 */
128367 +        /* Compute the offset code */
128368 +        offset_2 = offset_1;
128369 +        offset_1 = (U32)(ip0-match0);
128370 +        offcode = offset_1 + ZSTD_REP_MOVE;
128371 +        mLength = 4;
128372 +        /* Count the backwards match length */
128373 +        while (((ip0>anchor) & (match0>prefixStart))
128374 +             && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */
128376 +_match: /* Requires: ip0, match0, offcode */
128377 +        /* Count the forward length */
128378 +        mLength += ZSTD_count(ip0+mLength, match0+mLength, iend);
128379 +        ZSTD_storeSeq(seqStore, (size_t)(ip0-anchor), anchor, iend, offcode, mLength-MINMATCH);
128380 +        /* match found */
128381 +        ip0 += mLength;
128382 +        anchor = ip0;
128384 +        if (ip0 <= ilimit) {
128385 +            /* Fill Table */
128386 +            assert(base+current0+2 > istart);  /* check base overflow */
128387 +            hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
128388 +            hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
128390 +            if (offset_2 > 0) { /* offset_2==0 means offset_2 is invalidated */
128391 +                while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) ) {
128392 +                    /* store sequence */
128393 +                    size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;
128394 +                    { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
128395 +                    hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
128396 +                    ip0 += rLength;
128397 +                    ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH);
128398 +                    anchor = ip0;
128399 +                    continue;   /* faster when present (confirmed on gcc-8) ... (?) */
128400 +        }   }   }
128401 +        ip1 = ip0 + 1;
128402 +    }
128404 +    /* save reps for next block */
128405 +    rep[0] = offset_1 ? offset_1 : offsetSaved;
128406 +    rep[1] = offset_2 ? offset_2 : offsetSaved;
128408 +    /* Return the last literals size */
128409 +    return (size_t)(iend - anchor);
128410 +}
128411 +
128412 +
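The offcode passed to ZSTD_storeSeq() in this function follows the convention from zstd_compress_internal.h (stated here as an assumption for the reader): values below ZSTD_REP_NUM select a repeat offset, 0 meaning offset_1, while a literal offset X is stored as X + ZSTD_REP_MOVE so the two ranges cannot collide:

    /* offcode == 0                   -> reuse offset_1 (repcode match)
     * offcode == 17 + ZSTD_REP_MOVE  -> literal offset 17 */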
128413 +size_t ZSTD_compressBlock_fast(
128414 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
128415 +        void const* src, size_t srcSize)
128416 +{
128417 +    U32 const mls = ms->cParams.minMatch;
128418 +    assert(ms->dictMatchState == NULL);
128419 +    switch(mls)
128420 +    {
128421 +    default: /* includes case 3 */
128422 +    case 4 :
128423 +        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4);
128424 +    case 5 :
128425 +        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5);
128426 +    case 6 :
128427 +        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6);
128428 +    case 7 :
128429 +        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7);
128430 +    }
128431 +}
128432 +
128433 +FORCE_INLINE_TEMPLATE
128434 +size_t ZSTD_compressBlock_fast_dictMatchState_generic(
128435 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
128436 +        void const* src, size_t srcSize, U32 const mls)
128437 +{
128438 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
128439 +    U32* const hashTable = ms->hashTable;
128440 +    U32 const hlog = cParams->hashLog;
128441 +    /* support stepSize of 0 */
128442 +    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
128443 +    const BYTE* const base = ms->window.base;
128444 +    const BYTE* const istart = (const BYTE*)src;
128445 +    const BYTE* ip = istart;
128446 +    const BYTE* anchor = istart;
128447 +    const U32   prefixStartIndex = ms->window.dictLimit;
128448 +    const BYTE* const prefixStart = base + prefixStartIndex;
128449 +    const BYTE* const iend = istart + srcSize;
128450 +    const BYTE* const ilimit = iend - HASH_READ_SIZE;
128451 +    U32 offset_1=rep[0], offset_2=rep[1];
128452 +    U32 offsetSaved = 0;
128454 +    const ZSTD_matchState_t* const dms = ms->dictMatchState;
128455 +    const ZSTD_compressionParameters* const dictCParams = &dms->cParams ;
128456 +    const U32* const dictHashTable = dms->hashTable;
128457 +    const U32 dictStartIndex       = dms->window.dictLimit;
128458 +    const BYTE* const dictBase     = dms->window.base;
128459 +    const BYTE* const dictStart    = dictBase + dictStartIndex;
128460 +    const BYTE* const dictEnd      = dms->window.nextSrc;
128461 +    const U32 dictIndexDelta       = prefixStartIndex - (U32)(dictEnd - dictBase);
128462 +    const U32 dictAndPrefixLength  = (U32)(ip - prefixStart + dictEnd - dictStart);
128463 +    const U32 dictHLog             = dictCParams->hashLog;
128465 +    /* if a dictionary is still attached, it necessarily means that
128466 +     * it is within window size. So we just check it. */
128467 +    const U32 maxDistance = 1U << cParams->windowLog;
128468 +    const U32 endIndex = (U32)((size_t)(ip - base) + srcSize);
128469 +    assert(endIndex - prefixStartIndex <= maxDistance);
128470 +    (void)maxDistance; (void)endIndex;   /* these variables are not used when assert() is disabled */
128472 +    /* ensure there will be no underflow
128473 +     * when translating a dict index into a local index */
128474 +    assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
128476 +    /* init */
128477 +    DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
128478 +    ip += (dictAndPrefixLength == 0);
128479 +    /* The dictMatchState repCode checks don't currently handle
128480 +     * disabled repcodes (repCode == 0). */
128481 +    assert(offset_1 <= dictAndPrefixLength);
128482 +    assert(offset_2 <= dictAndPrefixLength);
128484 +    /* Main Search Loop */
128485 +    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
128486 +        size_t mLength;
128487 +        size_t const h = ZSTD_hashPtr(ip, hlog, mls);
128488 +        U32 const curr = (U32)(ip-base);
128489 +        U32 const matchIndex = hashTable[h];
128490 +        const BYTE* match = base + matchIndex;
128491 +        const U32 repIndex = curr + 1 - offset_1;
128492 +        const BYTE* repMatch = (repIndex < prefixStartIndex) ?
128493 +                               dictBase + (repIndex - dictIndexDelta) :
128494 +                               base + repIndex;
128495 +        hashTable[h] = curr;   /* update hash table */
128497 +        if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
128498 +          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
128499 +            const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
128500 +            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
128501 +            ip++;
128502 +            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
128503 +        } else if ( (matchIndex <= prefixStartIndex) ) {
128504 +            size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
128505 +            U32 const dictMatchIndex = dictHashTable[dictHash];
128506 +            const BYTE* dictMatch = dictBase + dictMatchIndex;
128507 +            if (dictMatchIndex <= dictStartIndex ||
128508 +                MEM_read32(dictMatch) != MEM_read32(ip)) {
128509 +                assert(stepSize >= 1);
128510 +                ip += ((ip-anchor) >> kSearchStrength) + stepSize;
128511 +                continue;
128512 +            } else {
128513 +                /* found a dict match */
128514 +                U32 const offset = (U32)(curr-dictMatchIndex-dictIndexDelta);
128515 +                mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
128516 +                while (((ip>anchor) & (dictMatch>dictStart))
128517 +                     && (ip[-1] == dictMatch[-1])) {
128518 +                    ip--; dictMatch--; mLength++;
128519 +                } /* catch up */
128520 +                offset_2 = offset_1;
128521 +                offset_1 = offset;
128522 +                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
128523 +            }
128524 +        } else if (MEM_read32(match) != MEM_read32(ip)) {
128525 +            /* it's not a match, and we're not going to check the dictionary */
128526 +            assert(stepSize >= 1);
128527 +            ip += ((ip-anchor) >> kSearchStrength) + stepSize;
128528 +            continue;
128529 +        } else {
128530 +            /* found a regular match */
128531 +            U32 const offset = (U32)(ip-match);
128532 +            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
128533 +            while (((ip>anchor) & (match>prefixStart))
128534 +                 && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
128535 +            offset_2 = offset_1;
128536 +            offset_1 = offset;
128537 +            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
128538 +        }
128540 +        /* match found */
128541 +        ip += mLength;
128542 +        anchor = ip;
128544 +        if (ip <= ilimit) {
128545 +            /* Fill Table */
128546 +            assert(base+curr+2 > istart);  /* check base overflow */
128547 +            hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;  /* here because curr+2 could be > iend-8 */
128548 +            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
128550 +            /* check immediate repcode */
128551 +            while (ip <= ilimit) {
128552 +                U32 const current2 = (U32)(ip-base);
128553 +                U32 const repIndex2 = current2 - offset_2;
128554 +                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
128555 +                        dictBase - dictIndexDelta + repIndex2 :
128556 +                        base + repIndex2;
128557 +                if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
128558 +                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
128559 +                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
128560 +                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
128561 +                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
128562 +                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
128563 +                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
128564 +                    ip += repLength2;
128565 +                    anchor = ip;
128566 +                    continue;
128567 +                }
128568 +                break;
128569 +            }
128570 +        }
128571 +    }
128573 +    /* save reps for next block */
128574 +    rep[0] = offset_1 ? offset_1 : offsetSaved;
128575 +    rep[1] = offset_2 ? offset_2 : offsetSaved;
128577 +    /* Return the last literals size */
128578 +    return (size_t)(iend - anchor);
128579 +}
128580 +
128581 +size_t ZSTD_compressBlock_fast_dictMatchState(
128582 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
128583 +        void const* src, size_t srcSize)
128584 +{
128585 +    U32 const mls = ms->cParams.minMatch;
128586 +    assert(ms->dictMatchState != NULL);
128587 +    switch(mls)
128588 +    {
128589 +    default: /* includes case 3 */
128590 +    case 4 :
128591 +        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4);
128592 +    case 5 :
128593 +        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5);
128594 +    case 6 :
128595 +        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6);
128596 +    case 7 :
128597 +        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7);
128598 +    }
128599 +}
128600 +
128601 +
128602 +static size_t ZSTD_compressBlock_fast_extDict_generic(
128603 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
128604 +        void const* src, size_t srcSize, U32 const mls)
128605 +{
128606 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
128607 +    U32* const hashTable = ms->hashTable;
128608 +    U32 const hlog = cParams->hashLog;
128609 +    /* support stepSize of 0 */
128610 +    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
128611 +    const BYTE* const base = ms->window.base;
128612 +    const BYTE* const dictBase = ms->window.dictBase;
128613 +    const BYTE* const istart = (const BYTE*)src;
128614 +    const BYTE* ip = istart;
128615 +    const BYTE* anchor = istart;
128616 +    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
128617 +    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
128618 +    const U32   dictStartIndex = lowLimit;
128619 +    const BYTE* const dictStart = dictBase + dictStartIndex;
128620 +    const U32   dictLimit = ms->window.dictLimit;
128621 +    const U32   prefixStartIndex = dictLimit < lowLimit ? lowLimit : dictLimit;
128622 +    const BYTE* const prefixStart = base + prefixStartIndex;
128623 +    const BYTE* const dictEnd = dictBase + prefixStartIndex;
128624 +    const BYTE* const iend = istart + srcSize;
128625 +    const BYTE* const ilimit = iend - 8;
128626 +    U32 offset_1=rep[0], offset_2=rep[1];
128628 +    DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1);
128630 +    /* switch to "regular" variant if extDict is invalidated due to maxDistance */
128631 +    if (prefixStartIndex == dictStartIndex)
128632 +        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, mls);
128634 +    /* Search Loop */
128635 +    while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
128636 +        const size_t h = ZSTD_hashPtr(ip, hlog, mls);
128637 +        const U32    matchIndex = hashTable[h];
128638 +        const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
128639 +        const BYTE*  match = matchBase + matchIndex;
128640 +        const U32    curr = (U32)(ip-base);
128641 +        const U32    repIndex = curr + 1 - offset_1;
128642 +        const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
128643 +        const BYTE* const repMatch = repBase + repIndex;
128644 +        hashTable[h] = curr;   /* update hash table */
128645 +        DEBUGLOG(7, "offset_1 = %u , curr = %u", offset_1, curr);
128646 +        assert(offset_1 <= curr +1);   /* check repIndex */
128648 +        if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex))
128649 +           && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
128650 +            const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
128651 +            size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
128652 +            ip++;
128653 +            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, rLength-MINMATCH);
128654 +            ip += rLength;
128655 +            anchor = ip;
128656 +        } else {
128657 +            if ( (matchIndex < dictStartIndex) ||
128658 +                 (MEM_read32(match) != MEM_read32(ip)) ) {
128659 +                assert(stepSize >= 1);
128660 +                ip += ((ip-anchor) >> kSearchStrength) + stepSize;
128661 +                continue;
128662 +            }
128663 +            {   const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
128664 +                const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
128665 +                U32 const offset = curr - matchIndex;
128666 +                size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
128667 +                while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
128668 +                offset_2 = offset_1; offset_1 = offset;  /* update offset history */
128669 +                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
128670 +                ip += mLength;
128671 +                anchor = ip;
128672 +        }   }
128674 +        if (ip <= ilimit) {
128675 +            /* Fill Table */
128676 +            hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;
128677 +            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
128678 +            /* check immediate repcode */
128679 +            while (ip <= ilimit) {
128680 +                U32 const current2 = (U32)(ip-base);
128681 +                U32 const repIndex2 = current2 - offset_2;
128682 +                const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
128683 +                if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (repIndex2 > dictStartIndex))  /* intentional underflow */
128684 +                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
128685 +                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
128686 +                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
128687 +                    { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; }  /* swap offset_2 <=> offset_1 */
128688 +                    ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, 0 /*offcode*/, repLength2-MINMATCH);
128689 +                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
128690 +                    ip += repLength2;
128691 +                    anchor = ip;
128692 +                    continue;
128693 +                }
128694 +                break;
128695 +    }   }   }
128697 +    /* save reps for next block */
128698 +    rep[0] = offset_1;
128699 +    rep[1] = offset_2;
128701 +    /* Return the last literals size */
128702 +    return (size_t)(iend - anchor);
128703 +}
128706 +size_t ZSTD_compressBlock_fast_extDict(
128707 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
128708 +        void const* src, size_t srcSize)
128709 +{
128710 +    U32 const mls = ms->cParams.minMatch;
128711 +    switch(mls)
128712 +    {
128713 +    default: /* includes case 3 */
128714 +    case 4 :
128715 +        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
128716 +    case 5 :
128717 +        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
128718 +    case 6 :
128719 +        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
128720 +    case 7 :
128721 +        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
128722 +    }
128723 +}
128724 diff --git a/lib/zstd/compress/zstd_fast.h b/lib/zstd/compress/zstd_fast.h
128725 new file mode 100644
128726 index 000000000000..fddc2f532d21
128727 --- /dev/null
128728 +++ b/lib/zstd/compress/zstd_fast.h
128729 @@ -0,0 +1,31 @@
128730 +/*
128731 + * Copyright (c) Yann Collet, Facebook, Inc.
128732 + * All rights reserved.
128734 + * This source code is licensed under both the BSD-style license (found in the
128735 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
128736 + * in the COPYING file in the root directory of this source tree).
128737 + * You may select, at your option, one of the above-listed licenses.
128738 + */
128740 +#ifndef ZSTD_FAST_H
128741 +#define ZSTD_FAST_H
128744 +#include "../common/mem.h"      /* U32 */
128745 +#include "zstd_compress_internal.h"
128747 +void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
128748 +                        void const* end, ZSTD_dictTableLoadMethod_e dtlm);
128749 +size_t ZSTD_compressBlock_fast(
128750 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
128751 +        void const* src, size_t srcSize);
128752 +size_t ZSTD_compressBlock_fast_dictMatchState(
128753 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
128754 +        void const* src, size_t srcSize);
128755 +size_t ZSTD_compressBlock_fast_extDict(
128756 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
128757 +        void const* src, size_t srcSize);
128760 +#endif /* ZSTD_FAST_H */
128761 diff --git a/lib/zstd/compress/zstd_lazy.c b/lib/zstd/compress/zstd_lazy.c
128762 new file mode 100644
128763 index 000000000000..39aa2569aabc
128764 --- /dev/null
128765 +++ b/lib/zstd/compress/zstd_lazy.c
128766 @@ -0,0 +1,1412 @@
128767 +/*
128768 + * Copyright (c) Yann Collet, Facebook, Inc.
128769 + * All rights reserved.
128771 + * This source code is licensed under both the BSD-style license (found in the
128772 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
128773 + * in the COPYING file in the root directory of this source tree).
128774 + * You may select, at your option, one of the above-listed licenses.
128775 + */
128777 +#include "zstd_compress_internal.h"
128778 +#include "zstd_lazy.h"
128781 +/*-*************************************
128782 +*  Binary Tree search
128783 +***************************************/
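+/* Each position idx owns two U32 slots at bt[2*(idx&btMask)]. Once sorted, they act as the
+ * "smaller" and "larger" child links of a binary tree; freshly inserted positions are instead
+ * kept on an unsorted chain (tagged ZSTD_DUBT_UNSORTED_MARK) and sorted lazily at search time. */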
128785 +static void
128786 +ZSTD_updateDUBT(ZSTD_matchState_t* ms,
128787 +                const BYTE* ip, const BYTE* iend,
128788 +                U32 mls)
128789 +{
128790 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
128791 +    U32* const hashTable = ms->hashTable;
128792 +    U32  const hashLog = cParams->hashLog;
128794 +    U32* const bt = ms->chainTable;
128795 +    U32  const btLog  = cParams->chainLog - 1;
128796 +    U32  const btMask = (1 << btLog) - 1;
128798 +    const BYTE* const base = ms->window.base;
128799 +    U32 const target = (U32)(ip - base);
128800 +    U32 idx = ms->nextToUpdate;
128802 +    if (idx != target)
128803 +        DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)",
128804 +                    idx, target, ms->window.dictLimit);
128805 +    assert(ip + 8 <= iend);   /* condition for ZSTD_hashPtr */
128806 +    (void)iend;
128808 +    assert(idx >= ms->window.dictLimit);   /* condition for valid base+idx */
128809 +    for ( ; idx < target ; idx++) {
128810 +        size_t const h  = ZSTD_hashPtr(base + idx, hashLog, mls);   /* assumption : ip + 8 <= iend */
128811 +        U32    const matchIndex = hashTable[h];
128813 +        U32*   const nextCandidatePtr = bt + 2*(idx&btMask);
128814 +        U32*   const sortMarkPtr  = nextCandidatePtr + 1;
128816 +        DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx);
128817 +        hashTable[h] = idx;   /* Update Hash Table */
128818 +        *nextCandidatePtr = matchIndex;   /* update BT like a chain */
128819 +        *sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK;
128820 +    }
128821 +    ms->nextToUpdate = target;
128822 +}
128825 +/** ZSTD_insertDUBT1() :
128826 + *  sort one already inserted but unsorted position
128827 + *  assumption : curr >= btLow == (curr - btMask)
128828 + *  doesn't fail */
128829 +static void
128830 +ZSTD_insertDUBT1(ZSTD_matchState_t* ms,
128831 +                 U32 curr, const BYTE* inputEnd,
128832 +                 U32 nbCompares, U32 btLow,
128833 +                 const ZSTD_dictMode_e dictMode)
128834 +{
128835 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
128836 +    U32* const bt = ms->chainTable;
128837 +    U32  const btLog  = cParams->chainLog - 1;
128838 +    U32  const btMask = (1 << btLog) - 1;
128839 +    size_t commonLengthSmaller=0, commonLengthLarger=0;
128840 +    const BYTE* const base = ms->window.base;
128841 +    const BYTE* const dictBase = ms->window.dictBase;
128842 +    const U32 dictLimit = ms->window.dictLimit;
128843 +    const BYTE* const ip = (curr>=dictLimit) ? base + curr : dictBase + curr;
128844 +    const BYTE* const iend = (curr>=dictLimit) ? inputEnd : dictBase + dictLimit;
128845 +    const BYTE* const dictEnd = dictBase + dictLimit;
128846 +    const BYTE* const prefixStart = base + dictLimit;
128847 +    const BYTE* match;
128848 +    U32* smallerPtr = bt + 2*(curr&btMask);
128849 +    U32* largerPtr  = smallerPtr + 1;
128850 +    U32 matchIndex = *smallerPtr;   /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */
128851 +    U32 dummy32;   /* to be nullified at the end */
128852 +    U32 const windowValid = ms->window.lowLimit;
128853 +    U32 const maxDistance = 1U << cParams->windowLog;
128854 +    U32 const windowLow = (curr - windowValid > maxDistance) ? curr - maxDistance : windowValid;
128857 +    DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)",
128858 +                curr, dictLimit, windowLow);
128859 +    assert(curr >= btLow);
128860 +    assert(ip < iend);   /* condition for ZSTD_count */
128862 +    while (nbCompares-- && (matchIndex > windowLow)) {
128863 +        U32* const nextPtr = bt + 2*(matchIndex & btMask);
128864 +        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
128865 +        assert(matchIndex < curr);
128866 +        /* note : all candidates are now assumed to be sorted,
128867 +         * but it's still possible to have nextPtr[1] == ZSTD_DUBT_UNSORTED_MARK
128868 +         * when a real index has the same value as ZSTD_DUBT_UNSORTED_MARK */
128870 +        if ( (dictMode != ZSTD_extDict)
128871 +          || (matchIndex+matchLength >= dictLimit)  /* both in current segment */
128872 +          || (curr < dictLimit) /* both in extDict */) {
128873 +            const BYTE* const mBase = ( (dictMode != ZSTD_extDict)
128874 +                                     || (matchIndex+matchLength >= dictLimit)) ?
128875 +                                        base : dictBase;
128876 +            assert( (matchIndex+matchLength >= dictLimit)   /* might be wrong if extDict is incorrectly set to 0 */
128877 +                 || (curr < dictLimit) );
128878 +            match = mBase + matchIndex;
128879 +            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
128880 +        } else {
128881 +            match = dictBase + matchIndex;
128882 +            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
128883 +            if (matchIndex+matchLength >= dictLimit)
128884 +                match = base + matchIndex;   /* preparation for next read of match[matchLength] */
128885 +        }
128887 +        DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ",
128888 +                    curr, matchIndex, (U32)matchLength);
128890 +        if (ip+matchLength == iend) {   /* equal : no way to know if it is smaller or larger */
128891 +            break;   /* drop, to guarantee consistency; miss a bit of compression, but other solutions can corrupt the tree */
128892 +        }
128894 +        if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */
128895 +            /* match is smaller than current */
128896 +            *smallerPtr = matchIndex;             /* update smaller idx */
128897 +            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
128898 +            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
128899 +            DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u",
128900 +                        matchIndex, btLow, nextPtr[1]);
128901 +            smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
128902 +            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
128903 +        } else {
128904 +            /* match is larger than current */
128905 +            *largerPtr = matchIndex;
128906 +            commonLengthLarger = matchLength;
128907 +            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
128908 +            DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u",
128909 +                        matchIndex, btLow, nextPtr[0]);
128910 +            largerPtr = nextPtr;
128911 +            matchIndex = nextPtr[0];
128912 +    }   }
128914 +    *smallerPtr = *largerPtr = 0;
128915 +}
128918 +static size_t
128919 +ZSTD_DUBT_findBetterDictMatch (
128920 +        ZSTD_matchState_t* ms,
128921 +        const BYTE* const ip, const BYTE* const iend,
128922 +        size_t* offsetPtr,
128923 +        size_t bestLength,
128924 +        U32 nbCompares,
128925 +        U32 const mls,
128926 +        const ZSTD_dictMode_e dictMode)
128927 +{
128928 +    const ZSTD_matchState_t * const dms = ms->dictMatchState;
128929 +    const ZSTD_compressionParameters* const dmsCParams = &dms->cParams;
128930 +    const U32 * const dictHashTable = dms->hashTable;
128931 +    U32         const hashLog = dmsCParams->hashLog;
128932 +    size_t      const h  = ZSTD_hashPtr(ip, hashLog, mls);
128933 +    U32               dictMatchIndex = dictHashTable[h];
128935 +    const BYTE* const base = ms->window.base;
128936 +    const BYTE* const prefixStart = base + ms->window.dictLimit;
128937 +    U32         const curr = (U32)(ip-base);
128938 +    const BYTE* const dictBase = dms->window.base;
128939 +    const BYTE* const dictEnd = dms->window.nextSrc;
128940 +    U32         const dictHighLimit = (U32)(dms->window.nextSrc - dms->window.base);
128941 +    U32         const dictLowLimit = dms->window.lowLimit;
128942 +    U32         const dictIndexDelta = ms->window.lowLimit - dictHighLimit;
128944 +    U32*        const dictBt = dms->chainTable;
128945 +    U32         const btLog  = dmsCParams->chainLog - 1;
128946 +    U32         const btMask = (1 << btLog) - 1;
128947 +    U32         const btLow = (btMask >= dictHighLimit - dictLowLimit) ? dictLowLimit : dictHighLimit - btMask;
128949 +    size_t commonLengthSmaller=0, commonLengthLarger=0;
128951 +    (void)dictMode;
128952 +    assert(dictMode == ZSTD_dictMatchState);
128954 +    while (nbCompares-- && (dictMatchIndex > dictLowLimit)) {
128955 +        U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask);
128956 +        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
128957 +        const BYTE* match = dictBase + dictMatchIndex;
128958 +        matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
128959 +        if (dictMatchIndex+matchLength >= dictHighLimit)
128960 +            match = base + dictMatchIndex + dictIndexDelta;   /* to prepare for next usage of match[matchLength] */
128962 +        if (matchLength > bestLength) {
128963 +            U32 matchIndex = dictMatchIndex + dictIndexDelta;
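+            /* cost heuristic: prefer the longer match only when the extra length (scaled by 4) outweighs the extra bits, ~log2(offset), needed to encode the larger offset */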
128964 +            if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {
128965 +                DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)",
128966 +                    curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, ZSTD_REP_MOVE + curr - matchIndex, dictMatchIndex, matchIndex);
128967 +                bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
128968 +            }
128969 +            if (ip+matchLength == iend) {   /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */
128970 +                break;   /* drop, to guarantee consistency (miss a little bit of compression) */
128971 +            }
128972 +        }
128974 +        if (match[matchLength] < ip[matchLength]) {
128975 +            if (dictMatchIndex <= btLow) { break; }   /* beyond tree size, stop the search */
128976 +            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
128977 +            dictMatchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
128978 +        } else {
128979 +            /* match is larger than current */
128980 +            if (dictMatchIndex <= btLow) { break; }   /* beyond tree size, stop the search */
128981 +            commonLengthLarger = matchLength;
128982 +            dictMatchIndex = nextPtr[0];
128983 +        }
128984 +    }
128986 +    if (bestLength >= MINMATCH) {
128987 +        U32 const mIndex = curr - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
128988 +        DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
128989 +                    curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
128990 +    }
128991 +    return bestLength;
128992 +}
128996 +static size_t
128997 +ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
128998 +                        const BYTE* const ip, const BYTE* const iend,
128999 +                        size_t* offsetPtr,
129000 +                        U32 const mls,
129001 +                        const ZSTD_dictMode_e dictMode)
129002 +{
129003 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
129004 +    U32*   const hashTable = ms->hashTable;
129005 +    U32    const hashLog = cParams->hashLog;
129006 +    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
129007 +    U32          matchIndex  = hashTable[h];
129009 +    const BYTE* const base = ms->window.base;
129010 +    U32    const curr = (U32)(ip-base);
129011 +    U32    const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);
129013 +    U32*   const bt = ms->chainTable;
129014 +    U32    const btLog  = cParams->chainLog - 1;
129015 +    U32    const btMask = (1 << btLog) - 1;
129016 +    U32    const btLow = (btMask >= curr) ? 0 : curr - btMask;
129017 +    U32    const unsortLimit = MAX(btLow, windowLow);
129019 +    U32*         nextCandidate = bt + 2*(matchIndex&btMask);
129020 +    U32*         unsortedMark = bt + 2*(matchIndex&btMask) + 1;
129021 +    U32          nbCompares = 1U << cParams->searchLog;
129022 +    U32          nbCandidates = nbCompares;
129023 +    U32          previousCandidate = 0;
129025 +    DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", curr);
129026 +    assert(ip <= iend-8);   /* required for h calculation */
129027 +    assert(dictMode != ZSTD_dedicatedDictSearch);
129029 +    /* reach end of unsorted candidates list */
129030 +    while ( (matchIndex > unsortLimit)
129031 +         && (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK)
129032 +         && (nbCandidates > 1) ) {
129033 +        DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted",
129034 +                    matchIndex);
129035 +        *unsortedMark = previousCandidate;  /* the unsortedMark becomes a reversed chain, to move up back to original position */
129036 +        previousCandidate = matchIndex;
129037 +        matchIndex = *nextCandidate;
129038 +        nextCandidate = bt + 2*(matchIndex&btMask);
129039 +        unsortedMark = bt + 2*(matchIndex&btMask) + 1;
129040 +        nbCandidates --;
129041 +    }
129043 +    /* nullify last candidate if it's still unsorted
129044 +     * simplification, detrimental to compression ratio, beneficial for speed */
129045 +    if ( (matchIndex > unsortLimit)
129046 +      && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) {
129047 +        DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u",
129048 +                    matchIndex);
129049 +        *nextCandidate = *unsortedMark = 0;
129050 +    }
129052 +    /* batch sort stacked candidates */
129053 +    matchIndex = previousCandidate;
129054 +    while (matchIndex) {  /* will end on matchIndex == 0 */
129055 +        U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1;
129056 +        U32 const nextCandidateIdx = *nextCandidateIdxPtr;
129057 +        ZSTD_insertDUBT1(ms, matchIndex, iend,
129058 +                         nbCandidates, unsortLimit, dictMode);
129059 +        matchIndex = nextCandidateIdx;
129060 +        nbCandidates++;
129061 +    }
129063 +    /* find longest match */
129064 +    {   size_t commonLengthSmaller = 0, commonLengthLarger = 0;
129065 +        const BYTE* const dictBase = ms->window.dictBase;
129066 +        const U32 dictLimit = ms->window.dictLimit;
129067 +        const BYTE* const dictEnd = dictBase + dictLimit;
129068 +        const BYTE* const prefixStart = base + dictLimit;
129069 +        U32* smallerPtr = bt + 2*(curr&btMask);
129070 +        U32* largerPtr  = bt + 2*(curr&btMask) + 1;
129071 +        U32 matchEndIdx = curr + 8 + 1;
129072 +        U32 dummy32;   /* to be nullified at the end */
129073 +        size_t bestLength = 0;
129075 +        matchIndex  = hashTable[h];
129076 +        hashTable[h] = curr;   /* Update Hash Table */
129078 +        while (nbCompares-- && (matchIndex > windowLow)) {
129079 +            U32* const nextPtr = bt + 2*(matchIndex & btMask);
129080 +            size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
129081 +            const BYTE* match;
129083 +            if ((dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) {
129084 +                match = base + matchIndex;
129085 +                matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
129086 +            } else {
129087 +                match = dictBase + matchIndex;
129088 +                matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
129089 +                if (matchIndex+matchLength >= dictLimit)
129090 +                    match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
129091 +            }
129093 +            if (matchLength > bestLength) {
129094 +                if (matchLength > matchEndIdx - matchIndex)
129095 +                    matchEndIdx = matchIndex + (U32)matchLength;
129096 +                if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
129097 +                    bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
129098 +                if (ip+matchLength == iend) {   /* equal : no way to know if it is smaller or larger */
129099 +                    if (dictMode == ZSTD_dictMatchState) {
129100 +                        nbCompares = 0; /* in addition to avoiding checking any
129101 +                                         * further in this loop, make sure we
129102 +                                         * skip checking in the dictionary. */
129103 +                    }
129104 +                    break;   /* drop, to guarantee consistency (miss a little bit of compression) */
129105 +                }
129106 +            }
129108 +            if (match[matchLength] < ip[matchLength]) {
129109 +                /* match is smaller than current */
129110 +                *smallerPtr = matchIndex;             /* update smaller idx */
129111 +                commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
129112 +                if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
129113 +                smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
129114 +                matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
129115 +            } else {
129116 +                /* match is larger than current */
129117 +                *largerPtr = matchIndex;
129118 +                commonLengthLarger = matchLength;
129119 +                if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
129120 +                largerPtr = nextPtr;
129121 +                matchIndex = nextPtr[0];
129122 +        }   }
129124 +        *smallerPtr = *largerPtr = 0;
129126 +        if (dictMode == ZSTD_dictMatchState && nbCompares) {
129127 +            bestLength = ZSTD_DUBT_findBetterDictMatch(
129128 +                    ms, ip, iend,
129129 +                    offsetPtr, bestLength, nbCompares,
129130 +                    mls, dictMode);
129131 +        }
129133 +        assert(matchEndIdx > curr+8); /* ensure nextToUpdate is increased */
129134 +        ms->nextToUpdate = matchEndIdx - 8;   /* skip repetitive patterns */
129135 +        if (bestLength >= MINMATCH) {
129136 +            U32 const mIndex = curr - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
129137 +            DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
129138 +                        curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
129139 +        }
129140 +        return bestLength;
129141 +    }
129142 +}
129145 +/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
129146 +FORCE_INLINE_TEMPLATE size_t
129147 +ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
129148 +                const BYTE* const ip, const BYTE* const iLimit,
129149 +                      size_t* offsetPtr,
129150 +                const U32 mls /* template */,
129151 +                const ZSTD_dictMode_e dictMode)
129152 +{
129153 +    DEBUGLOG(7, "ZSTD_BtFindBestMatch");
129154 +    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
129155 +    ZSTD_updateDUBT(ms, ip, iLimit, mls);
129156 +    return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode);
129157 +}
129160 +static size_t
129161 +ZSTD_BtFindBestMatch_selectMLS (  ZSTD_matchState_t* ms,
129162 +                            const BYTE* ip, const BYTE* const iLimit,
129163 +                                  size_t* offsetPtr)
129164 +{
129165 +    switch(ms->cParams.minMatch)
129166 +    {
129167 +    default : /* includes case 3 */
129168 +    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
129169 +    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
129170 +    case 7 :
129171 +    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
129172 +    }
129173 +}
129176 +static size_t ZSTD_BtFindBestMatch_dictMatchState_selectMLS (
129177 +                        ZSTD_matchState_t* ms,
129178 +                        const BYTE* ip, const BYTE* const iLimit,
129179 +                        size_t* offsetPtr)
129180 +{
129181 +    switch(ms->cParams.minMatch)
129182 +    {
129183 +    default : /* includes case 3 */
129184 +    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
129185 +    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
129186 +    case 7 :
129187 +    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
129188 +    }
129189 +}
129192 +static size_t ZSTD_BtFindBestMatch_extDict_selectMLS (
129193 +                        ZSTD_matchState_t* ms,
129194 +                        const BYTE* ip, const BYTE* const iLimit,
129195 +                        size_t* offsetPtr)
129196 +{
129197 +    switch(ms->cParams.minMatch)
129198 +    {
129199 +    default : /* includes case 3 */
129200 +    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
129201 +    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
129202 +    case 7 :
129203 +    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
129204 +    }
129205 +}
129209 +/* *********************************
129210 +*  Hash Chain
129211 +***********************************/
129212 +#define NEXT_IN_CHAIN(d, mask)   chainTable[(d) & (mask)]
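+/* chainTable acts as a rolling buffer indexed by (position & mask): each entry stores the previous position hashing to the same bucket, forming one singly-linked chain per hash value */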
129214 +/* Update chains up to ip (excluded)
129215 +   Assumption : always within prefix (i.e. not within extDict) */
129216 +FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal(
129217 +                        ZSTD_matchState_t* ms,
129218 +                        const ZSTD_compressionParameters* const cParams,
129219 +                        const BYTE* ip, U32 const mls)
129220 +{
129221 +    U32* const hashTable  = ms->hashTable;
129222 +    const U32 hashLog = cParams->hashLog;
129223 +    U32* const chainTable = ms->chainTable;
129224 +    const U32 chainMask = (1 << cParams->chainLog) - 1;
129225 +    const BYTE* const base = ms->window.base;
129226 +    const U32 target = (U32)(ip - base);
129227 +    U32 idx = ms->nextToUpdate;
129229 +    while(idx < target) { /* catch up */
129230 +        size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
129231 +        NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
129232 +        hashTable[h] = idx;
129233 +        idx++;
129234 +    }
129236 +    ms->nextToUpdate = target;
129237 +    return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
129238 +}
129240 +U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
129241 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
129242 +    return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch);
129243 +}
129245 +void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip)
129246 +{
129247 +    const BYTE* const base = ms->window.base;
129248 +    U32 const target = (U32)(ip - base);
129249 +    U32* const hashTable = ms->hashTable;
129250 +    U32* const chainTable = ms->chainTable;
129251 +    U32 const chainSize = 1 << ms->cParams.chainLog;
129252 +    U32 idx = ms->nextToUpdate;
129253 +    U32 const minChain = chainSize < target ? target - chainSize : idx;
129254 +    U32 const bucketSize = 1 << ZSTD_LAZY_DDSS_BUCKET_LOG;
129255 +    U32 const cacheSize = bucketSize - 1;
129256 +    U32 const chainAttempts = (1 << ms->cParams.searchLog) - cacheSize;
129257 +    U32 const chainLimit = chainAttempts > 255 ? 255 : chainAttempts;
129259 +    /* We know the hashtable is oversized by a factor of `bucketSize`.
129260 +     * We are going to temporarily pretend `bucketSize == 1`, keeping only a
129261 +     * single entry. We will use the rest of the space to construct a temporary
129262 +     * chaintable.
129263 +     */
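+    /* e.g. with cParams.hashLog == 20 and ZSTD_LAZY_DDSS_BUCKET_LOG == 2, hashLog below is 18:
+     * slots [0, 1<<18) form the temporary single-entry hash table and the remaining 3<<18 slots
+     * the temporary chain table, exactly filling the 1<<20 slots allocated */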
129264 +    U32 const hashLog = ms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG;
129265 +    U32* const tmpHashTable = hashTable;
129266 +    U32* const tmpChainTable = hashTable + ((size_t)1 << hashLog);
129267 +    U32 const tmpChainSize = ((1 << ZSTD_LAZY_DDSS_BUCKET_LOG) - 1) << hashLog;
129268 +    U32 const tmpMinChain = tmpChainSize < target ? target - tmpChainSize : idx;
129270 +    U32 hashIdx;
129272 +    assert(ms->cParams.chainLog <= 24);
129273 +    assert(ms->cParams.hashLog >= ms->cParams.chainLog);
129274 +    assert(idx != 0);
129275 +    assert(tmpMinChain <= minChain);
129277 +    /* fill conventional hash table and conventional chain table */
129278 +    for ( ; idx < target; idx++) {
129279 +        U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch);
129280 +        if (idx >= tmpMinChain) {
129281 +            tmpChainTable[idx - tmpMinChain] = hashTable[h];
129282 +        }
129283 +        tmpHashTable[h] = idx;
129284 +    }
129286 +    /* sort chains into ddss chain table */
129287 +    {
129288 +        U32 chainPos = 0;
129289 +        for (hashIdx = 0; hashIdx < (1U << hashLog); hashIdx++) {
129290 +            U32 count;
129291 +            U32 countBeyondMinChain = 0;
129292 +            U32 i = tmpHashTable[hashIdx];
129293 +            for (count = 0; i >= tmpMinChain && count < cacheSize; count++) {
129294 +                /* skip through the chain to the first position that won't be
129295 +                 * in the hash cache bucket */
129296 +                if (i < minChain) {
129297 +                    countBeyondMinChain++;
129298 +                }
129299 +                i = tmpChainTable[i - tmpMinChain];
129300 +            }
129301 +            if (count == cacheSize) {
129302 +                for (count = 0; count < chainLimit;) {
129303 +                    if (i < minChain) {
129304 +                        if (!i || countBeyondMinChain++ > cacheSize) {
129305 +                            /* only allow pulling `cacheSize` number of entries
129306 +                             * into the cache or chainTable beyond `minChain`,
129307 +                             * to replace the entries pulled out of the
129308 +                             * chainTable into the cache. This lets us reach
129309 +                             * back further without increasing the total number
129310 +                             * of entries in the chainTable, guaranteeing the
129311 +                             * DDSS chain table will fit into the space
129312 +                             * allocated for the regular one. */
129313 +                            break;
129314 +                        }
129315 +                    }
129316 +                    chainTable[chainPos++] = i;
129317 +                    count++;
129318 +                    if (i < tmpMinChain) {
129319 +                        break;
129320 +                    }
129321 +                    i = tmpChainTable[i - tmpMinChain];
129322 +                }
129323 +            } else {
129324 +                count = 0;
129325 +            }
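+            /* pack the bucket's chain descriptor: high 24 bits = start offset of the chain within chainTable, low 8 bits = chain length (hence the 255 cap on chainLimit above) */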
129326 +            if (count) {
129327 +                tmpHashTable[hashIdx] = ((chainPos - count) << 8) + count;
129328 +            } else {
129329 +                tmpHashTable[hashIdx] = 0;
129330 +            }
129331 +        }
129332 +        assert(chainPos <= chainSize); /* I believe this is guaranteed... */
129333 +    }
129335 +    /* move chain pointers into the last entry of each hash bucket */
129336 +    for (hashIdx = (1 << hashLog); hashIdx; ) {
129337 +        U32 const bucketIdx = --hashIdx << ZSTD_LAZY_DDSS_BUCKET_LOG;
129338 +        U32 const chainPackedPointer = tmpHashTable[hashIdx];
129339 +        U32 i;
129340 +        for (i = 0; i < cacheSize; i++) {
129341 +            hashTable[bucketIdx + i] = 0;
129342 +        }
129343 +        hashTable[bucketIdx + bucketSize - 1] = chainPackedPointer;
129344 +    }
129346 +    /* fill the buckets of the hash table */
129347 +    for (idx = ms->nextToUpdate; idx < target; idx++) {
129348 +        U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch)
129349 +                   << ZSTD_LAZY_DDSS_BUCKET_LOG;
129350 +        U32 i;
129351 +        /* Shift hash cache down 1. */
129352 +        for (i = cacheSize - 1; i; i--)
129353 +            hashTable[h + i] = hashTable[h + i - 1];
129354 +        hashTable[h] = idx;
129355 +    }
129357 +    ms->nextToUpdate = target;
129358 +}
129361 +/* inlining is important to hardwire a hot branch (template emulation) */
129362 +FORCE_INLINE_TEMPLATE
129363 +size_t ZSTD_HcFindBestMatch_generic (
129364 +                        ZSTD_matchState_t* ms,
129365 +                        const BYTE* const ip, const BYTE* const iLimit,
129366 +                        size_t* offsetPtr,
129367 +                        const U32 mls, const ZSTD_dictMode_e dictMode)
129368 +{
129369 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
129370 +    U32* const chainTable = ms->chainTable;
129371 +    const U32 chainSize = (1 << cParams->chainLog);
129372 +    const U32 chainMask = chainSize-1;
129373 +    const BYTE* const base = ms->window.base;
129374 +    const BYTE* const dictBase = ms->window.dictBase;
129375 +    const U32 dictLimit = ms->window.dictLimit;
129376 +    const BYTE* const prefixStart = base + dictLimit;
129377 +    const BYTE* const dictEnd = dictBase + dictLimit;
129378 +    const U32 curr = (U32)(ip-base);
129379 +    const U32 maxDistance = 1U << cParams->windowLog;
129380 +    const U32 lowestValid = ms->window.lowLimit;
129381 +    const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
129382 +    const U32 isDictionary = (ms->loadedDictEnd != 0);
129383 +    const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
129384 +    const U32 minChain = curr > chainSize ? curr - chainSize : 0;
129385 +    U32 nbAttempts = 1U << cParams->searchLog;
129386 +    size_t ml=4-1;
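+    /* this match finder only reports matches of length >= 4, so the running best starts at 4-1 = 3 */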
129388 +    const ZSTD_matchState_t* const dms = ms->dictMatchState;
129389 +    const U32 ddsHashLog = dictMode == ZSTD_dedicatedDictSearch
129390 +                         ? dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
129391 +    const size_t ddsIdx = dictMode == ZSTD_dedicatedDictSearch
129392 +                        ? ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
129394 +    U32 matchIndex;
129396 +    if (dictMode == ZSTD_dedicatedDictSearch) {
129397 +        const U32* entry = &dms->hashTable[ddsIdx];
129398 +        PREFETCH_L1(entry);
129399 +    }
129401 +    /* HC4 match finder */
129402 +    matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);
129404 +    for ( ; (matchIndex>=lowLimit) & (nbAttempts>0) ; nbAttempts--) {
129405 +        size_t currentMl=0;
129406 +        if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
129407 +            const BYTE* const match = base + matchIndex;
129408 +            assert(matchIndex >= dictLimit);   /* ensures this is true if dictMode != ZSTD_extDict */
129409 +            if (match[ml] == ip[ml])   /* potentially better */
129410 +                currentMl = ZSTD_count(ip, match, iLimit);
129411 +        } else {
129412 +            const BYTE* const match = dictBase + matchIndex;
129413 +            assert(match+4 <= dictEnd);
129414 +            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
129415 +                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
129416 +        }
129418 +        /* save best solution */
129419 +        if (currentMl > ml) {
129420 +            ml = currentMl;
129421 +            *offsetPtr = curr - matchIndex + ZSTD_REP_MOVE;
129422 +            if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
129423 +        }
129425 +        if (matchIndex <= minChain) break;
129426 +        matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
129427 +    }
129429 +    if (dictMode == ZSTD_dedicatedDictSearch) {
129430 +        const U32 ddsLowestIndex  = dms->window.dictLimit;
129431 +        const BYTE* const ddsBase = dms->window.base;
129432 +        const BYTE* const ddsEnd  = dms->window.nextSrc;
129433 +        const U32 ddsSize         = (U32)(ddsEnd - ddsBase);
129434 +        const U32 ddsIndexDelta   = dictLimit - ddsSize;
129435 +        const U32 bucketSize      = (1 << ZSTD_LAZY_DDSS_BUCKET_LOG);
129436 +        const U32 bucketLimit     = nbAttempts < bucketSize - 1 ? nbAttempts : bucketSize - 1;
129437 +        U32 ddsAttempt;
129439 +        for (ddsAttempt = 0; ddsAttempt < bucketSize - 1; ddsAttempt++) {
129440 +            PREFETCH_L1(ddsBase + dms->hashTable[ddsIdx + ddsAttempt]);
129441 +        }
129443 +        {
129444 +            U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
129445 +            U32 const chainIndex = chainPackedPointer >> 8;
129447 +            PREFETCH_L1(&dms->chainTable[chainIndex]);
129448 +        }
129450 +        for (ddsAttempt = 0; ddsAttempt < bucketLimit; ddsAttempt++) {
129451 +            size_t currentMl=0;
129452 +            const BYTE* match;
129453 +            matchIndex = dms->hashTable[ddsIdx + ddsAttempt];
129454 +            match = ddsBase + matchIndex;
129456 +            if (!matchIndex) {
129457 +                return ml;
129458 +            }
129460 +            /* guaranteed by table construction */
129461 +            (void)ddsLowestIndex;
129462 +            assert(matchIndex >= ddsLowestIndex);
129463 +            assert(match+4 <= ddsEnd);
129464 +            if (MEM_read32(match) == MEM_read32(ip)) {
129465 +                /* assumption : matchIndex <= dictLimit-4 (by table construction) */
129466 +                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
129467 +            }
129469 +            /* save best solution */
129470 +            if (currentMl > ml) {
129471 +                ml = currentMl;
129472 +                *offsetPtr = curr - (matchIndex + ddsIndexDelta) + ZSTD_REP_MOVE;
129473 +                if (ip+currentMl == iLimit) {
129474 +                    /* best possible, avoids read overflow on next attempt */
129475 +                    return ml;
129476 +                }
129477 +            }
129478 +        }
129480 +        {
129481 +            U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
129482 +            U32 chainIndex = chainPackedPointer >> 8;
129483 +            U32 const chainLength = chainPackedPointer & 0xFF;
129484 +            U32 const chainAttempts = nbAttempts - ddsAttempt;
129485 +            U32 const chainLimit = chainAttempts > chainLength ? chainLength : chainAttempts;
129486 +            U32 chainAttempt;
129488 +            for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++) {
129489 +                PREFETCH_L1(ddsBase + dms->chainTable[chainIndex + chainAttempt]);
129490 +            }
129492 +            for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++, chainIndex++) {
129493 +                size_t currentMl=0;
129494 +                const BYTE* match;
129495 +                matchIndex = dms->chainTable[chainIndex];
129496 +                match = ddsBase + matchIndex;
129498 +                /* guaranteed by table construction */
129499 +                assert(matchIndex >= ddsLowestIndex);
129500 +                assert(match+4 <= ddsEnd);
129501 +                if (MEM_read32(match) == MEM_read32(ip)) {
129502 +                    /* assumption : matchIndex <= dictLimit-4 (by table construction) */
129503 +                    currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
129504 +                }
129506 +                /* save best solution */
129507 +                if (currentMl > ml) {
129508 +                    ml = currentMl;
129509 +                    *offsetPtr = curr - (matchIndex + ddsIndexDelta) + ZSTD_REP_MOVE;
129510 +                    if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
129511 +                }
129512 +            }
129513 +        }
129514 +    } else if (dictMode == ZSTD_dictMatchState) {
129515 +        const U32* const dmsChainTable = dms->chainTable;
129516 +        const U32 dmsChainSize         = (1 << dms->cParams.chainLog);
129517 +        const U32 dmsChainMask         = dmsChainSize - 1;
129518 +        const U32 dmsLowestIndex       = dms->window.dictLimit;
129519 +        const BYTE* const dmsBase      = dms->window.base;
129520 +        const BYTE* const dmsEnd       = dms->window.nextSrc;
129521 +        const U32 dmsSize              = (U32)(dmsEnd - dmsBase);
129522 +        const U32 dmsIndexDelta        = dictLimit - dmsSize;
129523 +        const U32 dmsMinChain = dmsSize > dmsChainSize ? dmsSize - dmsChainSize : 0;
129525 +        matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)];
129527 +        for ( ; (matchIndex>=dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) {
129528 +            size_t currentMl=0;
129529 +            const BYTE* const match = dmsBase + matchIndex;
129530 +            assert(match+4 <= dmsEnd);
129531 +            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
129532 +                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;
129534 +            /* save best solution */
129535 +            if (currentMl > ml) {
129536 +                ml = currentMl;
129537 +                *offsetPtr = curr - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE;
129538 +                if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
129539 +            }
129541 +            if (matchIndex <= dmsMinChain) break;
129543 +            matchIndex = dmsChainTable[matchIndex & dmsChainMask];
129544 +        }
129545 +    }
129547 +    return ml;
129548 +}
129551 +FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS (
129552 +                        ZSTD_matchState_t* ms,
129553 +                        const BYTE* ip, const BYTE* const iLimit,
129554 +                        size_t* offsetPtr)
129555 +{
129556 +    switch(ms->cParams.minMatch)
129557 +    {
129558 +    default : /* includes case 3 */
129559 +    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
129560 +    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
129561 +    case 7 :
129562 +    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
129563 +    }
129564 +}
129567 +static size_t ZSTD_HcFindBestMatch_dictMatchState_selectMLS (
129568 +                        ZSTD_matchState_t* ms,
129569 +                        const BYTE* ip, const BYTE* const iLimit,
129570 +                        size_t* offsetPtr)
129571 +{
129572 +    switch(ms->cParams.minMatch)
129573 +    {
129574 +    default : /* includes case 3 */
129575 +    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
129576 +    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
129577 +    case 7 :
129578 +    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
129579 +    }
129580 +}
129583 +static size_t ZSTD_HcFindBestMatch_dedicatedDictSearch_selectMLS (
129584 +                        ZSTD_matchState_t* ms,
129585 +                        const BYTE* ip, const BYTE* const iLimit,
129586 +                        size_t* offsetPtr)
129587 +{
129588 +    switch(ms->cParams.minMatch)
129589 +    {
129590 +    default : /* includes case 3 */
129591 +    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dedicatedDictSearch);
129592 +    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dedicatedDictSearch);
129593 +    case 7 :
129594 +    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dedicatedDictSearch);
129595 +    }
129596 +}
129599 +FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
129600 +                        ZSTD_matchState_t* ms,
129601 +                        const BYTE* ip, const BYTE* const iLimit,
129602 +                        size_t* offsetPtr)
129603 +{
129604 +    switch(ms->cParams.minMatch)
129605 +    {
129606 +    default : /* includes case 3 */
129607 +    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
129608 +    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
129609 +    case 7 :
129610 +    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
129611 +    }
129612 +}
129615 +/* *******************************
129616 +*  Common parser - lazy strategy
129617 +*********************************/
129618 +typedef enum { search_hashChain, search_binaryTree } searchMethod_e;
129620 +FORCE_INLINE_TEMPLATE size_t
129621 +ZSTD_compressBlock_lazy_generic(
129622 +                        ZSTD_matchState_t* ms, seqStore_t* seqStore,
129623 +                        U32 rep[ZSTD_REP_NUM],
129624 +                        const void* src, size_t srcSize,
129625 +                        const searchMethod_e searchMethod, const U32 depth,
129626 +                        ZSTD_dictMode_e const dictMode)
129627 +{
129628 +    const BYTE* const istart = (const BYTE*)src;
129629 +    const BYTE* ip = istart;
129630 +    const BYTE* anchor = istart;
129631 +    const BYTE* const iend = istart + srcSize;
129632 +    const BYTE* const ilimit = iend - 8;
129633 +    const BYTE* const base = ms->window.base;
129634 +    const U32 prefixLowestIndex = ms->window.dictLimit;
129635 +    const BYTE* const prefixLowest = base + prefixLowestIndex;
129637 +    typedef size_t (*searchMax_f)(
129638 +                        ZSTD_matchState_t* ms,
129639 +                        const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
129641 +    /**
129642 +     * This table is indexed first by the four ZSTD_dictMode_e values, and then
129643 +     * by the two searchMethod_e values. NULLs are placed for configurations
129644 +     * that should never occur (extDict modes go to the other implementation
129645 +     * below and there is no DDSS for binary tree search yet).
129646 +     */
129647 +    const searchMax_f searchFuncs[4][2] = {
129648 +        {
129649 +            ZSTD_HcFindBestMatch_selectMLS,
129650 +            ZSTD_BtFindBestMatch_selectMLS
129651 +        },
129652 +        {
129653 +            NULL,
129654 +            NULL
129655 +        },
129656 +        {
129657 +            ZSTD_HcFindBestMatch_dictMatchState_selectMLS,
129658 +            ZSTD_BtFindBestMatch_dictMatchState_selectMLS
129659 +        },
129660 +        {
129661 +            ZSTD_HcFindBestMatch_dedicatedDictSearch_selectMLS,
129662 +            NULL
129663 +        }
129664 +    };
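+    /* rows follow ZSTD_dictMode_e order: ZSTD_noDict, ZSTD_extDict, ZSTD_dictMatchState, ZSTD_dedicatedDictSearch; the extDict row is NULL because extDict blocks are handled by the separate implementation below */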
129666 +    searchMax_f const searchMax = searchFuncs[dictMode][searchMethod == search_binaryTree];
129667 +    U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;
129669 +    const int isDMS = dictMode == ZSTD_dictMatchState;
129670 +    const int isDDS = dictMode == ZSTD_dedicatedDictSearch;
129671 +    const int isDxS = isDMS || isDDS;
129672 +    const ZSTD_matchState_t* const dms = ms->dictMatchState;
129673 +    const U32 dictLowestIndex      = isDxS ? dms->window.dictLimit : 0;
129674 +    const BYTE* const dictBase     = isDxS ? dms->window.base : NULL;
129675 +    const BYTE* const dictLowest   = isDxS ? dictBase + dictLowestIndex : NULL;
129676 +    const BYTE* const dictEnd      = isDxS ? dms->window.nextSrc : NULL;
129677 +    const U32 dictIndexDelta       = isDxS ?
129678 +                                     prefixLowestIndex - (U32)(dictEnd - dictBase) :
129679 +                                     0;
129680 +    const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictLowest));
129682 +    assert(searchMax != NULL);
129684 +    DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u)", (U32)dictMode);
129686 +    /* init */
129687 +    ip += (dictAndPrefixLength == 0);
129688 +    if (dictMode == ZSTD_noDict) {
129689 +        U32 const curr = (U32)(ip - base);
129690 +        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog);
129691 +        U32 const maxRep = curr - windowLow;
129692 +        if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
129693 +        if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
129694 +    }
129695 +    if (isDxS) {
129696 +        /* dictMatchState repCode checks don't currently handle repCode == 0
129697 +         * disabling. */
129698 +        assert(offset_1 <= dictAndPrefixLength);
129699 +        assert(offset_2 <= dictAndPrefixLength);
129700 +    }
129702 +    /* Match Loop */
129703 +#if defined(__x86_64__)
129704 +    /* I've measured a random 5% speed loss on levels 5 & 6 (greedy) when the
129705 +     * code alignment is perturbed. To fix the instability, align the loop on 32 bytes.
129706 +     */
129707 +    __asm__(".p2align 5");
129708 +#endif
129709 +    while (ip < ilimit) {
129710 +        size_t matchLength=0;
129711 +        size_t offset=0;
129712 +        const BYTE* start=ip+1;
129714 +        /* check repCode */
129715 +        if (isDxS) {
129716 +            const U32 repIndex = (U32)(ip - base) + 1 - offset_1;
129717 +            const BYTE* repMatch = ((dictMode == ZSTD_dictMatchState || dictMode == ZSTD_dedicatedDictSearch)
129718 +                                && repIndex < prefixLowestIndex) ?
129719 +                                   dictBase + (repIndex - dictIndexDelta) :
129720 +                                   base + repIndex;
129721 +            if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
129722 +                && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
129723 +                const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
129724 +                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
129725 +                if (depth==0) goto _storeSequence;
129726 +            }
129727 +        }
129728 +        if ( dictMode == ZSTD_noDict
129729 +          && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
129730 +            matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
129731 +            if (depth==0) goto _storeSequence;
129732 +        }
129734 +        /* first search (depth 0) */
129735 +        {   size_t offsetFound = 999999999;
129736 +            size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
129737 +            if (ml2 > matchLength)
129738 +                matchLength = ml2, start = ip, offset=offsetFound;
129739 +        }
129741 +        if (matchLength < 4) {
129742 +            ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */
129743 +            continue;
129744 +        }
129746 +        /* let's try to find a better solution */
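+        /* lazy step: advance ip by one and search again; a new candidate wins only if its estimated gain (match length scaled by 3 or 4, minus the log2 cost of encoding its offset) beats the match already found */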
129747 +        if (depth>=1)
129748 +        while (ip<ilimit) {
129749 +            ip ++;
129750 +            if ( (dictMode == ZSTD_noDict)
129751 +              && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
129752 +                size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
129753 +                int const gain2 = (int)(mlRep * 3);
129754 +                int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
129755 +                if ((mlRep >= 4) && (gain2 > gain1))
129756 +                    matchLength = mlRep, offset = 0, start = ip;
129757 +            }
129758 +            if (isDxS) {
129759 +                const U32 repIndex = (U32)(ip - base) - offset_1;
129760 +                const BYTE* repMatch = repIndex < prefixLowestIndex ?
129761 +                               dictBase + (repIndex - dictIndexDelta) :
129762 +                               base + repIndex;
129763 +                if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
129764 +                    && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
129765 +                    const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
129766 +                    size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
129767 +                    int const gain2 = (int)(mlRep * 3);
129768 +                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
129769 +                    if ((mlRep >= 4) && (gain2 > gain1))
129770 +                        matchLength = mlRep, offset = 0, start = ip;
129771 +                }
129772 +            }
129773 +            {   size_t offset2=999999999;
129774 +                size_t const ml2 = searchMax(ms, ip, iend, &offset2);
129775 +                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
129776 +                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
129777 +                if ((ml2 >= 4) && (gain2 > gain1)) {
129778 +                    matchLength = ml2, offset = offset2, start = ip;
129779 +                    continue;   /* search a better one */
129780 +            }   }
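+            /* Reading the gain arithmetic above: a match is valued at
+             * roughly 4 bits saved per matched byte, minus the cost of
+             * coding its offset (~ZSTD_highbit32(offset+1) bits); the +4
+             * on gain1 is a hysteresis bonus for the match already held.
+             * Illustrative numbers: matchLength=8 at offset=1024 gives
+             * gain1 = 32-10+4 = 26, so ml2=9 at the same offset scores
+             * gain2 = 36-10 = 26 and does not displace it. */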
129782 +            /* let's find an even better one */
129783 +            if ((depth==2) && (ip<ilimit)) {
129784 +                ip ++;
129785 +                if ( (dictMode == ZSTD_noDict)
129786 +                  && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
129787 +                    size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
129788 +                    int const gain2 = (int)(mlRep * 4);
129789 +                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
129790 +                    if ((mlRep >= 4) && (gain2 > gain1))
129791 +                        matchLength = mlRep, offset = 0, start = ip;
129792 +                }
129793 +                if (isDxS) {
129794 +                    const U32 repIndex = (U32)(ip - base) - offset_1;
129795 +                    const BYTE* repMatch = repIndex < prefixLowestIndex ?
129796 +                                   dictBase + (repIndex - dictIndexDelta) :
129797 +                                   base + repIndex;
129798 +                    if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
129799 +                        && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
129800 +                        const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
129801 +                        size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
129802 +                        int const gain2 = (int)(mlRep * 4);
129803 +                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
129804 +                        if ((mlRep >= 4) && (gain2 > gain1))
129805 +                            matchLength = mlRep, offset = 0, start = ip;
129806 +                    }
129807 +                }
129808 +                {   size_t offset2=999999999;
129809 +                    size_t const ml2 = searchMax(ms, ip, iend, &offset2);
129810 +                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
129811 +                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
129812 +                    if ((ml2 >= 4) && (gain2 > gain1)) {
129813 +                        matchLength = ml2, offset = offset2, start = ip;
129814 +                        continue;
129815 +            }   }   }
129816 +            break;  /* nothing found : store previous solution */
129817 +        }
129819 +        /* NOTE:
129820 +         * start[-offset+ZSTD_REP_MOVE-1] would be undefined behavior:
129821 +         * (-offset+ZSTD_REP_MOVE-1) is unsigned, and adding it to start
129822 +         * overflows the pointer.
129823 +         */
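+        /* The loop below therefore computes start-(offset-ZSTD_REP_MOVE)
+         * first, which stays inside the buffer, and only then applies
+         * the [-1] index. */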
129824 +        /* catch up */
129825 +        if (offset) {
129826 +            if (dictMode == ZSTD_noDict) {
129827 +                while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > prefixLowest))
129828 +                     && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) )  /* only search for offset within prefix */
129829 +                    { start--; matchLength++; }
129830 +            }
129831 +            if (isDxS) {
129832 +                U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
129833 +                const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex;
129834 +                const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest;
129835 +                while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }  /* catch up */
129836 +            }
129837 +            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
129838 +        }
129839 +        /* store sequence */
129840 +_storeSequence:
129841 +        {   size_t const litLength = start - anchor;
129842 +            ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);
129843 +            anchor = ip = start + matchLength;
129844 +        }
129846 +        /* check immediate repcode */
129847 +        if (isDxS) {
129848 +            while (ip <= ilimit) {
129849 +                U32 const current2 = (U32)(ip-base);
129850 +                U32 const repIndex = current2 - offset_2;
129851 +                const BYTE* repMatch = repIndex < prefixLowestIndex ?
129852 +                        dictBase - dictIndexDelta + repIndex :
129853 +                        base + repIndex;
129854 +                if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */)
129855 +                   && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
129856 +                    const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
129857 +                    matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
129858 +                    offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset_2 <=> offset_1 */
129859 +                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
129860 +                    ip += matchLength;
129861 +                    anchor = ip;
129862 +                    continue;
129863 +                }
129864 +                break;
129865 +            }
129866 +        }
129868 +        if (dictMode == ZSTD_noDict) {
129869 +            while ( ((ip <= ilimit) & (offset_2>0))
129870 +                 && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
129871 +                /* store sequence */
129872 +                matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
129873 +                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
129874 +                ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
129875 +                ip += matchLength;
129876 +                anchor = ip;
129877 +                continue;   /* faster when present ... (?) */
129878 +    }   }   }
129880 +    /* Save reps for next block */
129881 +    rep[0] = offset_1 ? offset_1 : savedOffset;
129882 +    rep[1] = offset_2 ? offset_2 : savedOffset;
129884 +    /* Return the last literals size */
129885 +    return (size_t)(iend - anchor);
129886 +}
129889 +size_t ZSTD_compressBlock_btlazy2(
129890 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129891 +        void const* src, size_t srcSize)
129892 +{
129893 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
129894 +}
129896 +size_t ZSTD_compressBlock_lazy2(
129897 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129898 +        void const* src, size_t srcSize)
129899 +{
129900 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
129901 +}
129903 +size_t ZSTD_compressBlock_lazy(
129904 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129905 +        void const* src, size_t srcSize)
129906 +{
129907 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);
129908 +}
129910 +size_t ZSTD_compressBlock_greedy(
129911 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129912 +        void const* src, size_t srcSize)
129913 +{
129914 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
129915 +}
129917 +size_t ZSTD_compressBlock_btlazy2_dictMatchState(
129918 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129919 +        void const* src, size_t srcSize)
129920 +{
129921 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
129922 +}
129924 +size_t ZSTD_compressBlock_lazy2_dictMatchState(
129925 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129926 +        void const* src, size_t srcSize)
129927 +{
129928 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState);
129929 +}
129931 +size_t ZSTD_compressBlock_lazy_dictMatchState(
129932 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129933 +        void const* src, size_t srcSize)
129934 +{
129935 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);
129936 +}
129938 +size_t ZSTD_compressBlock_greedy_dictMatchState(
129939 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129940 +        void const* src, size_t srcSize)
129941 +{
129942 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);
129943 +}
129946 +size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
129947 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129948 +        void const* src, size_t srcSize)
129949 +{
129950 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch);
129951 +}
129953 +size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
129954 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129955 +        void const* src, size_t srcSize)
129956 +{
129957 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch);
129958 +}
129960 +size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
129961 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129962 +        void const* src, size_t srcSize)
129963 +{
129964 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch);
129965 +}
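+/* Summary of the instantiations above, taken from the calls themselves:
+ * the "greedy" entry points use depth 0 (accept the first match of length
+ * >= 4), "lazy" uses depth 1, "lazy2" uses depth 2, and the "btlazy2"
+ * variants swap the hash-chain searcher for the binary-tree one at
+ * depth 2. The same template serves the noDict, dictMatchState and
+ * dedicatedDictSearch modes. */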
129968 +FORCE_INLINE_TEMPLATE
129969 +size_t ZSTD_compressBlock_lazy_extDict_generic(
129970 +                        ZSTD_matchState_t* ms, seqStore_t* seqStore,
129971 +                        U32 rep[ZSTD_REP_NUM],
129972 +                        const void* src, size_t srcSize,
129973 +                        const searchMethod_e searchMethod, const U32 depth)
129974 +{
129975 +    const BYTE* const istart = (const BYTE*)src;
129976 +    const BYTE* ip = istart;
129977 +    const BYTE* anchor = istart;
129978 +    const BYTE* const iend = istart + srcSize;
129979 +    const BYTE* const ilimit = iend - 8;
129980 +    const BYTE* const base = ms->window.base;
129981 +    const U32 dictLimit = ms->window.dictLimit;
129982 +    const BYTE* const prefixStart = base + dictLimit;
129983 +    const BYTE* const dictBase = ms->window.dictBase;
129984 +    const BYTE* const dictEnd  = dictBase + dictLimit;
129985 +    const BYTE* const dictStart  = dictBase + ms->window.lowLimit;
129986 +    const U32 windowLog = ms->cParams.windowLog;
129988 +    typedef size_t (*searchMax_f)(
129989 +                        ZSTD_matchState_t* ms,
129990 +                        const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
129991 +    searchMax_f searchMax = searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS;
129993 +    U32 offset_1 = rep[0], offset_2 = rep[1];
129995 +    DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic");
129997 +    /* init */
129998 +    ip += (ip == prefixStart);
130000 +    /* Match Loop */
130001 +#if defined(__x86_64__)
130002 +    /* I've measured a random 5% speed loss on levels 5 & 6 (greedy) when the
130003 +     * code alignment is perturbed. To fix the instability, align the loop on a 32-byte boundary.
130004 +     */
130005 +    __asm__(".p2align 5");
130006 +#endif
130007 +    while (ip < ilimit) {
130008 +        size_t matchLength=0;
130009 +        size_t offset=0;
130010 +        const BYTE* start=ip+1;
130011 +        U32 curr = (U32)(ip-base);
130013 +        /* check repCode */
130014 +        {   const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr+1, windowLog);
130015 +            const U32 repIndex = (U32)(curr+1 - offset_1);
130016 +            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
130017 +            const BYTE* const repMatch = repBase + repIndex;
130018 +            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow))   /* intentional overflow */
130019 +            if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
130020 +                /* repcode detected, we should take it */
130021 +                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
130022 +                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;
130023 +                if (depth==0) goto _storeSequence;
130024 +        }   }
130026 +        /* first search (depth 0) */
130027 +        {   size_t offsetFound = 999999999;
130028 +            size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
130029 +            if (ml2 > matchLength)
130030 +                matchLength = ml2, start = ip, offset=offsetFound;
130031 +        }
130033 +        if (matchLength < 4) {
130034 +            ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */
130035 +            continue;
130036 +        }
130038 +        /* let's try to find a better solution */
130039 +        if (depth>=1)
130040 +        while (ip<ilimit) {
130041 +            ip ++;
130042 +            curr++;
130043 +            /* check repCode */
130044 +            if (offset) {
130045 +                const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
130046 +                const U32 repIndex = (U32)(curr - offset_1);
130047 +                const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
130048 +                const BYTE* const repMatch = repBase + repIndex;
130049 +                if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow))  /* intentional overflow */
130050 +                if (MEM_read32(ip) == MEM_read32(repMatch)) {
130051 +                    /* repcode detected */
130052 +                    const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
130053 +                    size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
130054 +                    int const gain2 = (int)(repLength * 3);
130055 +                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
130056 +                    if ((repLength >= 4) && (gain2 > gain1))
130057 +                        matchLength = repLength, offset = 0, start = ip;
130058 +            }   }
130060 +            /* search match, depth 1 */
130061 +            {   size_t offset2=999999999;
130062 +                size_t const ml2 = searchMax(ms, ip, iend, &offset2);
130063 +                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
130064 +                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
130065 +                if ((ml2 >= 4) && (gain2 > gain1)) {
130066 +                    matchLength = ml2, offset = offset2, start = ip;
130067 +                    continue;   /* search a better one */
130068 +            }   }
130070 +            /* let's find an even better one */
130071 +            if ((depth==2) && (ip<ilimit)) {
130072 +                ip ++;
130073 +                curr++;
130074 +                /* check repCode */
130075 +                if (offset) {
130076 +                    const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
130077 +                    const U32 repIndex = (U32)(curr - offset_1);
130078 +                    const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
130079 +                    const BYTE* const repMatch = repBase + repIndex;
130080 +                    if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow))  /* intentional overflow */
130081 +                    if (MEM_read32(ip) == MEM_read32(repMatch)) {
130082 +                        /* repcode detected */
130083 +                        const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
130084 +                        size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
130085 +                        int const gain2 = (int)(repLength * 4);
130086 +                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
130087 +                        if ((repLength >= 4) && (gain2 > gain1))
130088 +                            matchLength = repLength, offset = 0, start = ip;
130089 +                }   }
130091 +                /* search match, depth 2 */
130092 +                {   size_t offset2=999999999;
130093 +                    size_t const ml2 = searchMax(ms, ip, iend, &offset2);
130094 +                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
130095 +                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
130096 +                    if ((ml2 >= 4) && (gain2 > gain1)) {
130097 +                        matchLength = ml2, offset = offset2, start = ip;
130098 +                        continue;
130099 +            }   }   }
130100 +            break;  /* nothing found : store previous solution */
130101 +        }
130103 +        /* catch up */
130104 +        if (offset) {
130105 +            U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
130106 +            const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
130107 +            const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
130108 +            while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }  /* catch up */
130109 +            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
130110 +        }
130112 +        /* store sequence */
130113 +_storeSequence:
130114 +        {   size_t const litLength = start - anchor;
130115 +            ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);
130116 +            anchor = ip = start + matchLength;
130117 +        }
130119 +        /* check immediate repcode */
130120 +        while (ip <= ilimit) {
130121 +            const U32 repCurrent = (U32)(ip-base);
130122 +            const U32 windowLow = ZSTD_getLowestMatchIndex(ms, repCurrent, windowLog);
130123 +            const U32 repIndex = repCurrent - offset_2;
130124 +            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
130125 +            const BYTE* const repMatch = repBase + repIndex;
130126 +            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow))  /* intentional overflow */
130127 +            if (MEM_read32(ip) == MEM_read32(repMatch)) {
130128 +                /* repcode detected, we should take it */
130129 +                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
130130 +                matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
130131 +                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset history */
130132 +                ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
130133 +                ip += matchLength;
130134 +                anchor = ip;
130135 +                continue;   /* faster when present ... (?) */
130136 +            }
130137 +            break;
130138 +    }   }
130140 +    /* Save reps for next block */
130141 +    rep[0] = offset_1;
130142 +    rep[1] = offset_2;
130144 +    /* Return the last literals size */
130145 +    return (size_t)(iend - anchor);
130146 +}
130149 +size_t ZSTD_compressBlock_greedy_extDict(
130150 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130151 +        void const* src, size_t srcSize)
130152 +{
130153 +    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0);
130154 +}
130156 +size_t ZSTD_compressBlock_lazy_extDict(
130157 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130158 +        void const* src, size_t srcSize)
130160 +{
130161 +    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1);
130162 +}
130164 +size_t ZSTD_compressBlock_lazy2_extDict(
130165 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130166 +        void const* src, size_t srcSize)
130168 +{
130169 +    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2);
130170 +}
130172 +size_t ZSTD_compressBlock_btlazy2_extDict(
130173 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130174 +        void const* src, size_t srcSize)
130176 +{
130177 +    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
130178 +}
130179 diff --git a/lib/zstd/compress/zstd_lazy.h b/lib/zstd/compress/zstd_lazy.h
130180 new file mode 100644
130181 index 000000000000..1fb7621e6a88
130182 --- /dev/null
130183 +++ b/lib/zstd/compress/zstd_lazy.h
130184 @@ -0,0 +1,81 @@
130185 +/*
130186 + * Copyright (c) Yann Collet, Facebook, Inc.
130187 + * All rights reserved.
130189 + * This source code is licensed under both the BSD-style license (found in the
130190 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
130191 + * in the COPYING file in the root directory of this source tree).
130192 + * You may select, at your option, one of the above-listed licenses.
130193 + */
130195 +#ifndef ZSTD_LAZY_H
130196 +#define ZSTD_LAZY_H
130199 +#include "zstd_compress_internal.h"
130201 +/*
130202 + * Dedicated Dictionary Search Structure bucket log. In the
130203 + * ZSTD_dedicatedDictSearch mode, the hashTable has
130204 + * 2 ** ZSTD_LAZY_DDSS_BUCKET_LOG entries in each bucket, rather than just
130205 + * one.
130206 + */
130207 +#define ZSTD_LAZY_DDSS_BUCKET_LOG 2
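+/* For instance, with ZSTD_LAZY_DDSS_BUCKET_LOG == 2 each bucket holds
+ * 2**2 == 4 entries, so one hash probe can examine four candidate
+ * positions stored at adjacent table slots. */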
130209 +U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);
130211 +void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip);
130213 +void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue);  /*! used in ZSTD_reduceIndex(); pre-emptively increases the value of ZSTD_DUBT_UNSORTED_MARK */
130215 +size_t ZSTD_compressBlock_btlazy2(
130216 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130217 +        void const* src, size_t srcSize);
130218 +size_t ZSTD_compressBlock_lazy2(
130219 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130220 +        void const* src, size_t srcSize);
130221 +size_t ZSTD_compressBlock_lazy(
130222 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130223 +        void const* src, size_t srcSize);
130224 +size_t ZSTD_compressBlock_greedy(
130225 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130226 +        void const* src, size_t srcSize);
130228 +size_t ZSTD_compressBlock_btlazy2_dictMatchState(
130229 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130230 +        void const* src, size_t srcSize);
130231 +size_t ZSTD_compressBlock_lazy2_dictMatchState(
130232 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130233 +        void const* src, size_t srcSize);
130234 +size_t ZSTD_compressBlock_lazy_dictMatchState(
130235 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130236 +        void const* src, size_t srcSize);
130237 +size_t ZSTD_compressBlock_greedy_dictMatchState(
130238 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130239 +        void const* src, size_t srcSize);
130241 +size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
130242 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130243 +        void const* src, size_t srcSize);
130244 +size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
130245 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130246 +        void const* src, size_t srcSize);
130247 +size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
130248 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130249 +        void const* src, size_t srcSize);
130251 +size_t ZSTD_compressBlock_greedy_extDict(
130252 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130253 +        void const* src, size_t srcSize);
130254 +size_t ZSTD_compressBlock_lazy_extDict(
130255 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130256 +        void const* src, size_t srcSize);
130257 +size_t ZSTD_compressBlock_lazy2_extDict(
130258 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130259 +        void const* src, size_t srcSize);
130260 +size_t ZSTD_compressBlock_btlazy2_extDict(
130261 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130262 +        void const* src, size_t srcSize);
130265 +#endif /* ZSTD_LAZY_H */
130266 diff --git a/lib/zstd/compress/zstd_ldm.c b/lib/zstd/compress/zstd_ldm.c
130267 new file mode 100644
130268 index 000000000000..084fd24fdca8
130269 --- /dev/null
130270 +++ b/lib/zstd/compress/zstd_ldm.c
130271 @@ -0,0 +1,686 @@
130272 +/*
130273 + * Copyright (c) Yann Collet, Facebook, Inc.
130274 + * All rights reserved.
130276 + * This source code is licensed under both the BSD-style license (found in the
130277 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
130278 + * in the COPYING file in the root directory of this source tree).
130279 + * You may select, at your option, one of the above-listed licenses.
130280 + */
130282 +#include "zstd_ldm.h"
130284 +#include "../common/debug.h"
130285 +#include <linux/xxhash.h>
130286 +#include "zstd_fast.h"          /* ZSTD_fillHashTable() */
130287 +#include "zstd_double_fast.h"   /* ZSTD_fillDoubleHashTable() */
130288 +#include "zstd_ldm_geartab.h"
130290 +#define LDM_BUCKET_SIZE_LOG 3
130291 +#define LDM_MIN_MATCH_LENGTH 64
130292 +#define LDM_HASH_RLOG 7
130294 +typedef struct {
130295 +    U64 rolling;
130296 +    U64 stopMask;
130297 +} ldmRollingHashState_t;
130299 +/** ZSTD_ldm_gear_init():
130301 + * Initializes the rolling hash state such that it will honor the
130302 + * settings in params. */
130303 +static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t const* params)
130304 +{
130305 +    unsigned maxBitsInMask = MIN(params->minMatchLength, 64);
130306 +    unsigned hashRateLog = params->hashRateLog;
130308 +    state->rolling = ~(U32)0;
130310 +    /* The choice of the splitting criterion is subject to two conditions:
130311 +     *   1. it has to trigger on average every 2^(hashRateLog) bytes;
130312 +     *   2. ideally, it has to depend on a window of minMatchLength bytes.
130313 +     *
130314 +     * In the gear hash algorithm, bit n depends on the last n bytes;
130315 +     * so in order to obtain a good quality splitting criterion it is
130316 +     * preferable to use bits with high weight.
130317 +     *
130318 +     * To match condition 1 we use a mask with hashRateLog bits set
130319 +     * and, because of the previous remark, we make sure these bits
130320 +     * have the highest possible weight while still respecting
130321 +     * condition 2.
130322 +     */
130323 +    if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) {
130324 +        state->stopMask = (((U64)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);
130325 +    } else {
130326 +        /* In this degenerate case we simply honor the hash rate. */
130327 +        state->stopMask = ((U64)1 << hashRateLog) - 1;
130328 +    }
130329 +}
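+/* A worked example of the mask construction above, with illustrative
+ * values: minMatchLength == 64 and hashRateLog == 7 give
+ * maxBitsInMask == 64 and stopMask == ((1<<7)-1) << 57
+ * == 0xFE00000000000000, i.e. the 7 highest-weight bits. A split then
+ * triggers whenever those 7 bits of the rolling hash are all zero, which
+ * happens on average once every 2^7 == 128 bytes. */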
130331 +/** ZSTD_ldm_gear_feed():
130333 + * Registers in the splits array all the split points found in the first
130334 + * size bytes following the data pointer. This function terminates when
130335 + * either all the data has been processed or LDM_BATCH_SIZE splits are
130336 + * present in the splits array.
130338 + * Precondition: The splits array must not be full.
130339 + * Returns: The number of bytes processed. */
130340 +static size_t ZSTD_ldm_gear_feed(ldmRollingHashState_t* state,
130341 +                                 BYTE const* data, size_t size,
130342 +                                 size_t* splits, unsigned* numSplits)
130343 +{
130344 +    size_t n;
130345 +    U64 hash, mask;
130347 +    hash = state->rolling;
130348 +    mask = state->stopMask;
130349 +    n = 0;
130351 +#define GEAR_ITER_ONCE() do { \
130352 +        hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
130353 +        n += 1; \
130354 +        if (UNLIKELY((hash & mask) == 0)) { \
130355 +            splits[*numSplits] = n; \
130356 +            *numSplits += 1; \
130357 +            if (*numSplits == LDM_BATCH_SIZE) \
130358 +                goto done; \
130359 +        } \
130360 +    } while (0)
130362 +    while (n + 3 < size) {
130363 +        GEAR_ITER_ONCE();
130364 +        GEAR_ITER_ONCE();
130365 +        GEAR_ITER_ONCE();
130366 +        GEAR_ITER_ONCE();
130367 +    }
130368 +    while (n < size) {
130369 +        GEAR_ITER_ONCE();
130370 +    }
130372 +#undef GEAR_ITER_ONCE
130374 +done:
130375 +    state->rolling = hash;
130376 +    return n;
130377 +}
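+/* A minimal standalone sketch of the gear-hash splitting technique used
+ * above, with a hypothetical xorshift-filled table standing in for
+ * ZSTD_ldm_gearTab (illustrative user-space code, not part of the patch):
+ *
+ *   #include <stdio.h>
+ *   #include <stdint.h>
+ *
+ *   static uint64_t tab[256];
+ *
+ *   int main(void) {
+ *       uint64_t x = 0x9E3779B97F4A7C15ULL, h = 0;
+ *       uint64_t const mask = ((1ULL << 7) - 1) << 57;  // 7 high bits
+ *       unsigned char data[4096];
+ *       size_t n;
+ *       for (n = 0; n < 256; n++) {                     // fill the table
+ *           x ^= x >> 12; x ^= x << 25; x ^= x >> 27;
+ *           tab[n] = x * 0x2545F4914F6CDD1DULL;
+ *       }
+ *       for (n = 0; n < sizeof(data); n++)              // sample input
+ *           data[n] = (unsigned char)(n * 131 + 7);
+ *       for (n = 0; n < sizeof(data); n++) {
+ *           h = (h << 1) + tab[data[n]];                // gear update
+ *           if ((h & mask) == 0)                        // split point
+ *               printf("split at %zu\n", n);
+ *       }
+ *       return 0;
+ *   }
+ */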
130379 +void ZSTD_ldm_adjustParameters(ldmParams_t* params,
130380 +                               ZSTD_compressionParameters const* cParams)
130381 +{
130382 +    params->windowLog = cParams->windowLog;
130383 +    ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
130384 +    DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
130385 +    if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
130386 +    if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
130387 +    if (params->hashLog == 0) {
130388 +        params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);
130389 +        assert(params->hashLog <= ZSTD_HASHLOG_MAX);
130390 +    }
130391 +    if (params->hashRateLog == 0) {
130392 +        params->hashRateLog = params->windowLog < params->hashLog
130393 +                                   ? 0
130394 +                                   : params->windowLog - params->hashLog;
130395 +    }
130396 +    params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
130397 +}
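+/* A worked pass through the defaulting above, assuming windowLog == 27
+ * and the usual ZSTD_HASHLOG_MIN == 6: hashLog = MAX(6, 27-7) == 20,
+ * hashRateLog = 27-20 == 7, bucketSizeLog = MIN(3, 20) == 3. */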
130399 +size_t ZSTD_ldm_getTableSize(ldmParams_t params)
130400 +{
130401 +    size_t const ldmHSize = ((size_t)1) << params.hashLog;
130402 +    size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);
130403 +    size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
130404 +    size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize)
130405 +                           + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t));
130406 +    return params.enableLdm ? totalSize : 0;
130407 +}
130409 +size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
130410 +{
130411 +    return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0;
130412 +}
130414 +/** ZSTD_ldm_getBucket() :
130415 + *  Returns a pointer to the start of the bucket associated with hash. */
130416 +static ldmEntry_t* ZSTD_ldm_getBucket(
130417 +        ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
130418 +{
130419 +    return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
130420 +}
130422 +/** ZSTD_ldm_insertEntry() :
130423 + *  Insert the entry with corresponding hash into the hash table */
130424 +static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
130425 +                                 size_t const hash, const ldmEntry_t entry,
130426 +                                 ldmParams_t const ldmParams)
130427 +{
130428 +    BYTE* const pOffset = ldmState->bucketOffsets + hash;
130429 +    unsigned const offset = *pOffset;
130431 +    *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry;
130432 +    *pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1));
130433 +}
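+/* Each bucket thus behaves as a small ring buffer: with
+ * bucketSizeLog == 3 the offset cycles through 0..7, and the ninth
+ * insertion overwrites the oldest of the eight entries. */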
130436 +/** ZSTD_ldm_countBackwardsMatch() :
130437 + *  Returns the number of bytes that match backwards before pIn and pMatch.
130439 + *  We count only bytes where pMatch >= pBase and pIn >= pAnchor. */
130440 +static size_t ZSTD_ldm_countBackwardsMatch(
130441 +            const BYTE* pIn, const BYTE* pAnchor,
130442 +            const BYTE* pMatch, const BYTE* pMatchBase)
130443 +{
130444 +    size_t matchLength = 0;
130445 +    while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) {
130446 +        pIn--;
130447 +        pMatch--;
130448 +        matchLength++;
130449 +    }
130450 +    return matchLength;
130451 +}
130453 +/** ZSTD_ldm_countBackwardsMatch_2segments() :
130454 + *  Returns the number of bytes that match backwards from pMatch,
130455 + *  even with the backwards match spanning 2 different segments.
130457 + *  On reaching `pMatchBase` (the prefix start), counting continues backwards from `pExtDictEnd` */
130458 +static size_t ZSTD_ldm_countBackwardsMatch_2segments(
130459 +                    const BYTE* pIn, const BYTE* pAnchor,
130460 +                    const BYTE* pMatch, const BYTE* pMatchBase,
130461 +                    const BYTE* pExtDictStart, const BYTE* pExtDictEnd)
130462 +{
130463 +    size_t matchLength = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase);
130464 +    if (pMatch - matchLength != pMatchBase || pMatchBase == pExtDictStart) {
130465 +        /* If backwards match is entirely in the extDict or prefix, immediately return */
130466 +        return matchLength;
130467 +    }
130468 +    DEBUGLOG(7, "ZSTD_ldm_countBackwardsMatch_2segments: found 2-parts backwards match (length in prefix==%zu)", matchLength);
130469 +    matchLength += ZSTD_ldm_countBackwardsMatch(pIn - matchLength, pAnchor, pExtDictEnd, pExtDictStart);
130470 +    DEBUGLOG(7, "final backwards match length = %zu", matchLength);
130471 +    return matchLength;
130472 +}
130474 +/** ZSTD_ldm_fillFastTables() :
130476 + *  Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
130477 + *  This is similar to ZSTD_loadDictionaryContent.
130479 + *  The tables for the other strategies are filled within their
130480 + *  block compressors. */
130481 +static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
130482 +                                      void const* end)
130483 +{
130484 +    const BYTE* const iend = (const BYTE*)end;
130486 +    switch(ms->cParams.strategy)
130487 +    {
130488 +    case ZSTD_fast:
130489 +        ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast);
130490 +        break;
130492 +    case ZSTD_dfast:
130493 +        ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast);
130494 +        break;
130496 +    case ZSTD_greedy:
130497 +    case ZSTD_lazy:
130498 +    case ZSTD_lazy2:
130499 +    case ZSTD_btlazy2:
130500 +    case ZSTD_btopt:
130501 +    case ZSTD_btultra:
130502 +    case ZSTD_btultra2:
130503 +        break;
130504 +    default:
130505 +        assert(0);  /* not possible : not a valid strategy id */
130506 +    }
130508 +    return 0;
130509 +}
130511 +void ZSTD_ldm_fillHashTable(
130512 +            ldmState_t* ldmState, const BYTE* ip,
130513 +            const BYTE* iend, ldmParams_t const* params)
130514 +{
130515 +    U32 const minMatchLength = params->minMatchLength;
130516 +    U32 const hBits = params->hashLog - params->bucketSizeLog;
130517 +    BYTE const* const base = ldmState->window.base;
130518 +    BYTE const* const istart = ip;
130519 +    ldmRollingHashState_t hashState;
130520 +    size_t* const splits = ldmState->splitIndices;
130521 +    unsigned numSplits;
130523 +    DEBUGLOG(5, "ZSTD_ldm_fillHashTable");
130525 +    ZSTD_ldm_gear_init(&hashState, params);
130526 +    while (ip < iend) {
130527 +        size_t hashed;
130528 +        unsigned n;
130530 +        numSplits = 0;
130531 +        hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits);
130533 +        for (n = 0; n < numSplits; n++) {
130534 +            if (ip + splits[n] >= istart + minMatchLength) {
130535 +                BYTE const* const split = ip + splits[n] - minMatchLength;
130536 +                U64 const xxhash = xxh64(split, minMatchLength, 0);
130537 +                U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
130538 +                ldmEntry_t entry;
130540 +                entry.offset = (U32)(split - base);
130541 +                entry.checksum = (U32)(xxhash >> 32);
130542 +                ZSTD_ldm_insertEntry(ldmState, hash, entry, *params);
130543 +            }
130544 +        }
130546 +        ip += hashed;
130547 +    }
130548 +}
130551 +/** ZSTD_ldm_limitTableUpdate() :
130553 + *  Sets cctx->nextToUpdate to a position closer to anchor
130554 + *  if it is far away
130555 + *  (after a long match, only update tables a limited amount). */
130556 +static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
130557 +{
130558 +    U32 const curr = (U32)(anchor - ms->window.base);
130559 +    if (curr > ms->nextToUpdate + 1024) {
130560 +        ms->nextToUpdate =
130561 +            curr - MIN(512, curr - ms->nextToUpdate - 1024);
130562 +    }
130563 +}
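+/* E.g. if curr - ms->nextToUpdate == 10000, nextToUpdate becomes
+ * curr - MIN(512, 10000-1024) == curr-512, so at most 512 positions get
+ * re-indexed after a long match instead of all 10000. */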
130565 +static size_t ZSTD_ldm_generateSequences_internal(
130566 +        ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
130567 +        ldmParams_t const* params, void const* src, size_t srcSize)
130568 +{
130569 +    /* LDM parameters */
130570 +    int const extDict = ZSTD_window_hasExtDict(ldmState->window);
130571 +    U32 const minMatchLength = params->minMatchLength;
130572 +    U32 const entsPerBucket = 1U << params->bucketSizeLog;
130573 +    U32 const hBits = params->hashLog - params->bucketSizeLog;
130574 +    /* Prefix and extDict parameters */
130575 +    U32 const dictLimit = ldmState->window.dictLimit;
130576 +    U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
130577 +    BYTE const* const base = ldmState->window.base;
130578 +    BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL;
130579 +    BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL;
130580 +    BYTE const* const dictEnd = extDict ? dictBase + dictLimit : NULL;
130581 +    BYTE const* const lowPrefixPtr = base + dictLimit;
130582 +    /* Input bounds */
130583 +    BYTE const* const istart = (BYTE const*)src;
130584 +    BYTE const* const iend = istart + srcSize;
130585 +    BYTE const* const ilimit = iend - HASH_READ_SIZE;
130586 +    /* Input positions */
130587 +    BYTE const* anchor = istart;
130588 +    BYTE const* ip = istart;
130589 +    /* Rolling hash state */
130590 +    ldmRollingHashState_t hashState;
130591 +    /* Arrays for staged-processing */
130592 +    size_t* const splits = ldmState->splitIndices;
130593 +    ldmMatchCandidate_t* const candidates = ldmState->matchCandidates;
130594 +    unsigned numSplits;
130596 +    if (srcSize < minMatchLength)
130597 +        return iend - anchor;
130599 +    /* Initialize the rolling hash state with the first minMatchLength bytes */
130600 +    ZSTD_ldm_gear_init(&hashState, params);
130601 +    {
130602 +        size_t n = 0;
130604 +        while (n < minMatchLength) {
130605 +            numSplits = 0;
130606 +            n += ZSTD_ldm_gear_feed(&hashState, ip + n, minMatchLength - n,
130607 +                                    splits, &numSplits);
130608 +        }
130609 +        ip += minMatchLength;
130610 +    }
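+    /* The warm-up above matters because bit n of the gear hash depends
+     * on the last n bytes: only after minMatchLength bytes have been fed
+     * do the high-weight bits tested by stopMask carry a full window of
+     * context. Any split reported during warm-up is discarded, since
+     * numSplits is reset on every call. */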
130612 +    while (ip < ilimit) {
130613 +        size_t hashed;
130614 +        unsigned n;
130616 +        numSplits = 0;
130617 +        hashed = ZSTD_ldm_gear_feed(&hashState, ip, ilimit - ip,
130618 +                                    splits, &numSplits);
130620 +        for (n = 0; n < numSplits; n++) {
130621 +            BYTE const* const split = ip + splits[n] - minMatchLength;
130622 +            U64 const xxhash = xxh64(split, minMatchLength, 0);
130623 +            U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
130625 +            candidates[n].split = split;
130626 +            candidates[n].hash = hash;
130627 +            candidates[n].checksum = (U32)(xxhash >> 32);
130628 +            candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, *params);
130629 +            PREFETCH_L1(candidates[n].bucket);
130630 +        }
130632 +        for (n = 0; n < numSplits; n++) {
130633 +            size_t forwardMatchLength = 0, backwardMatchLength = 0,
130634 +                   bestMatchLength = 0, mLength;
130635 +            BYTE const* const split = candidates[n].split;
130636 +            U32 const checksum = candidates[n].checksum;
130637 +            U32 const hash = candidates[n].hash;
130638 +            ldmEntry_t* const bucket = candidates[n].bucket;
130639 +            ldmEntry_t const* cur;
130640 +            ldmEntry_t const* bestEntry = NULL;
130641 +            ldmEntry_t newEntry;
130643 +            newEntry.offset = (U32)(split - base);
130644 +            newEntry.checksum = checksum;
130646 +            /* If a split point would generate a sequence overlapping with
130647 +             * the previous one, we merely register it in the hash table and
130648 +             * move on */
130649 +            if (split < anchor) {
130650 +                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
130651 +                continue;
130652 +            }
130654 +            for (cur = bucket; cur < bucket + entsPerBucket; cur++) {
130655 +                size_t curForwardMatchLength, curBackwardMatchLength,
130656 +                       curTotalMatchLength;
130657 +                if (cur->checksum != checksum || cur->offset <= lowestIndex) {
130658 +                    continue;
130659 +                }
130660 +                if (extDict) {
130661 +                    BYTE const* const curMatchBase =
130662 +                        cur->offset < dictLimit ? dictBase : base;
130663 +                    BYTE const* const pMatch = curMatchBase + cur->offset;
130664 +                    BYTE const* const matchEnd =
130665 +                        cur->offset < dictLimit ? dictEnd : iend;
130666 +                    BYTE const* const lowMatchPtr =
130667 +                        cur->offset < dictLimit ? dictStart : lowPrefixPtr;
130668 +                    curForwardMatchLength =
130669 +                        ZSTD_count_2segments(split, pMatch, iend, matchEnd, lowPrefixPtr);
130670 +                    if (curForwardMatchLength < minMatchLength) {
130671 +                        continue;
130672 +                    }
130673 +                    curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments(
130674 +                            split, anchor, pMatch, lowMatchPtr, dictStart, dictEnd);
130675 +                } else { /* !extDict */
130676 +                    BYTE const* const pMatch = base + cur->offset;
130677 +                    curForwardMatchLength = ZSTD_count(split, pMatch, iend);
130678 +                    if (curForwardMatchLength < minMatchLength) {
130679 +                        continue;
130680 +                    }
130681 +                    curBackwardMatchLength =
130682 +                        ZSTD_ldm_countBackwardsMatch(split, anchor, pMatch, lowPrefixPtr);
130683 +                }
130684 +                curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength;
130686 +                if (curTotalMatchLength > bestMatchLength) {
130687 +                    bestMatchLength = curTotalMatchLength;
130688 +                    forwardMatchLength = curForwardMatchLength;
130689 +                    backwardMatchLength = curBackwardMatchLength;
130690 +                    bestEntry = cur;
130691 +                }
130692 +            }
130694 +            /* No match found -- insert an entry into the hash table
130695 +             * and process the next candidate match */
130696 +            if (bestEntry == NULL) {
130697 +                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
130698 +                continue;
130699 +            }
130701 +            /* Match found */
130702 +            mLength = forwardMatchLength + backwardMatchLength;
130703 +            {
130704 +                U32 const offset = (U32)(split - base) - bestEntry->offset;
130705 +                rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;
130707 +                /* Out of sequence storage */
130708 +                if (rawSeqStore->size == rawSeqStore->capacity)
130709 +                    return ERROR(dstSize_tooSmall);
130710 +                seq->litLength = (U32)(split - backwardMatchLength - anchor);
130711 +                seq->matchLength = (U32)mLength;
130712 +                seq->offset = offset;
130713 +                rawSeqStore->size++;
130714 +            }
130716 +            /* Insert the current entry into the hash table --- it must be
130717 +             * done after the previous block to avoid clobbering bestEntry */
130718 +            ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
130720 +            anchor = split + forwardMatchLength;
130721 +        }
130723 +        ip += hashed;
130724 +    }
130726 +    return iend - anchor;
130727 +}
130729 +/*! ZSTD_ldm_reduceTable() :
130730 + *  reduce table indexes by `reducerValue` */
130731 +static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
130732 +                                 U32 const reducerValue)
130733 +{
130734 +    U32 u;
130735 +    for (u = 0; u < size; u++) {
130736 +        if (table[u].offset < reducerValue) table[u].offset = 0;
130737 +        else table[u].offset -= reducerValue;
130738 +    }
130739 +}
130741 +size_t ZSTD_ldm_generateSequences(
130742 +        ldmState_t* ldmState, rawSeqStore_t* sequences,
130743 +        ldmParams_t const* params, void const* src, size_t srcSize)
130744 +{
130745 +    U32 const maxDist = 1U << params->windowLog;
130746 +    BYTE const* const istart = (BYTE const*)src;
130747 +    BYTE const* const iend = istart + srcSize;
130748 +    size_t const kMaxChunkSize = 1 << 20;
130749 +    size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);
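+    /* nbChunks is a ceiling division: e.g. srcSize == 2.5 MiB with
+     * kMaxChunkSize == 1 MiB yields 2 + 1 == 3 chunks. */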
130750 +    size_t chunk;
130751 +    size_t leftoverSize = 0;
130753 +    assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize);
130754 +    /* Check that ZSTD_window_update() has been called for this chunk prior
130755 +     * to passing it to this function.
130756 +     */
130757 +    assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
130758 +    /* The input could be very large (in zstdmt), so it must be broken up into
130759 +     * chunks to enforce the maximum distance and handle overflow correction.
130760 +     */
130761 +    assert(sequences->pos <= sequences->size);
130762 +    assert(sequences->size <= sequences->capacity);
130763 +    for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) {
130764 +        BYTE const* const chunkStart = istart + chunk * kMaxChunkSize;
130765 +        size_t const remaining = (size_t)(iend - chunkStart);
130766 +        BYTE const *const chunkEnd =
130767 +            (remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize;
130768 +        size_t const chunkSize = chunkEnd - chunkStart;
130769 +        size_t newLeftoverSize;
130770 +        size_t const prevSize = sequences->size;
130772 +        assert(chunkStart < iend);
130773 +        /* 1. Perform overflow correction if necessary. */
130774 +        if (ZSTD_window_needOverflowCorrection(ldmState->window, chunkEnd)) {
130775 +            U32 const ldmHSize = 1U << params->hashLog;
130776 +            U32 const correction = ZSTD_window_correctOverflow(
130777 +                &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart);
130778 +            ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
130779 +            /* invalidate dictionaries on overflow correction */
130780 +            ldmState->loadedDictEnd = 0;
130781 +        }
130782 +        /* 2. We enforce the maximum offset allowed.
130783 +         *
130784 +         * kMaxChunkSize should be small enough that we don't lose too much of
130785 +         * the window through early invalidation.
130786 +         * TODO: * Test the chunk size.
130787 +         *       * Try invalidation after the sequence generation and test
130788 +         *         the offset against maxDist directly.
130789 +         *
130790 +         * NOTE: Because of dictionaries + sequence splitting we MUST make sure
130791 +         * that any offset used is valid at the END of the sequence, since it may
130792 +         * be split into two sequences. This condition holds when using
130793 +         * ZSTD_window_enforceMaxDist(), but if we move to checking offsets
130794 +         * against maxDist directly, we'll have to carefully handle that case.
130795 +         */
130796 +        ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, NULL);
130797 +        /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
130798 +        newLeftoverSize = ZSTD_ldm_generateSequences_internal(
130799 +            ldmState, sequences, params, chunkStart, chunkSize);
130800 +        if (ZSTD_isError(newLeftoverSize))
130801 +            return newLeftoverSize;
130802 +        /* 4. We add the leftover literals from previous iterations to the first
130803 +         *    newly generated sequence, or add the `newLeftoverSize` if none are
130804 +         *    generated.
130805 +         */
130806 +        /* Prepend the leftover literals from the last call */
130807 +        if (prevSize < sequences->size) {
130808 +            sequences->seq[prevSize].litLength += (U32)leftoverSize;
130809 +            leftoverSize = newLeftoverSize;
130810 +        } else {
130811 +            assert(newLeftoverSize == chunkSize);
130812 +            leftoverSize += chunkSize;
130813 +        }
130814 +    }
130815 +    return 0;
130816 +}
130818 +void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) {
130819 +    while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
130820 +        rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
130821 +        if (srcSize <= seq->litLength) {
130822 +            /* Skip past srcSize literals */
130823 +            seq->litLength -= (U32)srcSize;
130824 +            return;
130825 +        }
130826 +        srcSize -= seq->litLength;
130827 +        seq->litLength = 0;
130828 +        if (srcSize < seq->matchLength) {
130829 +            /* Skip past the first srcSize of the match */
130830 +            seq->matchLength -= (U32)srcSize;
130831 +            if (seq->matchLength < minMatch) {
130832 +                /* The match is too short, omit it */
130833 +                if (rawSeqStore->pos + 1 < rawSeqStore->size) {
130834 +                    seq[1].litLength += seq[0].matchLength;
130835 +                }
130836 +                rawSeqStore->pos++;
130837 +            }
130838 +            return;
130839 +        }
130840 +        srcSize -= seq->matchLength;
130841 +        seq->matchLength = 0;
130842 +        rawSeqStore->pos++;
130843 +    }
130844 +}
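+/* A worked example of the skip logic above, with hypothetical numbers:
+ * one stored sequence with litLength == 5 and matchLength == 10, and
+ * srcSize == 8. The 5 literals are consumed first (litLength -> 0,
+ * srcSize -> 3), then 3 match bytes (matchLength -> 7); if minMatch <= 7
+ * the shortened match is kept, otherwise it is folded into the next
+ * sequence's literals and dropped. */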
130846 +/*
130847 + * If the sequence length is longer than remaining, then the sequence is
130848 + * split between this block and the next.
130850 + * Returns the current sequence to handle, or if the rest of the block should
130851 + * be literals, it returns a sequence with offset == 0.
130852 + */
130853 +static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
130854 +                                 U32 const remaining, U32 const minMatch)
130855 +{
130856 +    rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
130857 +    assert(sequence.offset > 0);
130858 +    /* Likely: No partial sequence */
130859 +    if (remaining >= sequence.litLength + sequence.matchLength) {
130860 +        rawSeqStore->pos++;
130861 +        return sequence;
130862 +    }
130863 +    /* Cut the sequence short (offset == 0 ==> rest is literals). */
130864 +    if (remaining <= sequence.litLength) {
130865 +        sequence.offset = 0;
130866 +    } else if (remaining < sequence.litLength + sequence.matchLength) {
130867 +        sequence.matchLength = remaining - sequence.litLength;
130868 +        if (sequence.matchLength < minMatch) {
130869 +            sequence.offset = 0;
130870 +        }
130871 +    }
130872 +    /* Skip past `remaining` bytes for the future sequences. */
130873 +    ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch);
130874 +    return sequence;
130875 +}
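+/* Hypothetical numbers for the split above: remaining == 100 against a
+ * sequence with litLength == 40 and matchLength == 80. The match is
+ * truncated to 100-40 == 60 bytes (kept if 60 >= minMatch), and
+ * ZSTD_ldm_skipSequences() charges all 100 bytes against the stored
+ * copy, so the next block resumes mid-sequence. */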
130877 +void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
130878 +    U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
130879 +    while (currPos && rawSeqStore->pos < rawSeqStore->size) {
130880 +        rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
130881 +        if (currPos >= currSeq.litLength + currSeq.matchLength) {
130882 +            currPos -= currSeq.litLength + currSeq.matchLength;
130883 +            rawSeqStore->pos++;
130884 +        } else {
130885 +            rawSeqStore->posInSequence = currPos;
130886 +            break;
130887 +        }
130888 +    }
130889 +    if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
130890 +        rawSeqStore->posInSequence = 0;
130891 +    }
130892 +}
130894 +size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
130895 +    ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130896 +    void const* src, size_t srcSize)
130897 +{
130898 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
130899 +    unsigned const minMatch = cParams->minMatch;
130900 +    ZSTD_blockCompressor const blockCompressor =
130901 +        ZSTD_selectBlockCompressor(cParams->strategy, ZSTD_matchState_dictMode(ms));
130902 +    /* Input bounds */
130903 +    BYTE const* const istart = (BYTE const*)src;
130904 +    BYTE const* const iend = istart + srcSize;
130905 +    /* Input positions */
130906 +    BYTE const* ip = istart;
130908 +    DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize);
130909 +    /* If using opt parser, use LDMs only as candidates rather than always accepting them */
130910 +    if (cParams->strategy >= ZSTD_btopt) {
130911 +        size_t lastLLSize;
130912 +        ms->ldmSeqStore = rawSeqStore;
130913 +        lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize);
130914 +        ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize);
130915 +        return lastLLSize;
130916 +    }
130918 +    assert(rawSeqStore->pos <= rawSeqStore->size);
130919 +    assert(rawSeqStore->size <= rawSeqStore->capacity);
130920 +    /* Loop through each sequence and apply the block compressor to the literals */
130921 +    while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
130922 +        /* maybeSplitSequence updates rawSeqStore->pos */
130923 +        rawSeq const sequence = maybeSplitSequence(rawSeqStore,
130924 +                                                   (U32)(iend - ip), minMatch);
130925 +        int i;
130926 +        /* End signal */
130927 +        if (sequence.offset == 0)
130928 +            break;
130930 +        assert(ip + sequence.litLength + sequence.matchLength <= iend);
130932 +        /* Fill tables for block compressor */
130933 +        ZSTD_ldm_limitTableUpdate(ms, ip);
130934 +        ZSTD_ldm_fillFastTables(ms, ip);
130935 +        /* Run the block compressor */
130936 +        DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength);
130937 +        {
130938 +            size_t const newLitLength =
130939 +                blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
130940 +            ip += sequence.litLength;
130941 +            /* Update the repcodes */
130942 +            for (i = ZSTD_REP_NUM - 1; i > 0; i--)
130943 +                rep[i] = rep[i-1];
130944 +            rep[0] = sequence.offset;
130945 +            /* Store the sequence */
130946 +            ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
130947 +                          sequence.offset + ZSTD_REP_MOVE,
130948 +                          sequence.matchLength - MINMATCH);
130949 +            ip += sequence.matchLength;
130950 +        }
130951 +    }
130952 +    /* Fill the tables for the block compressor */
130953 +    ZSTD_ldm_limitTableUpdate(ms, ip);
130954 +    ZSTD_ldm_fillFastTables(ms, ip);
130955 +    /* Compress the last literals */
130956 +    return blockCompressor(ms, seqStore, rep, ip, iend - ip);
130957 +}
130958 diff --git a/lib/zstd/compress/zstd_ldm.h b/lib/zstd/compress/zstd_ldm.h
130959 new file mode 100644
130960 index 000000000000..5ee467eaca2e
130961 --- /dev/null
130962 +++ b/lib/zstd/compress/zstd_ldm.h
130963 @@ -0,0 +1,110 @@
130965 + * Copyright (c) Yann Collet, Facebook, Inc.
130966 + * All rights reserved.
130968 + * This source code is licensed under both the BSD-style license (found in the
130969 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
130970 + * in the COPYING file in the root directory of this source tree).
130971 + * You may select, at your option, one of the above-listed licenses.
130972 + */
130974 +#ifndef ZSTD_LDM_H
130975 +#define ZSTD_LDM_H
130978 +#include "zstd_compress_internal.h"   /* ldmParams_t, U32 */
130979 +#include <linux/zstd.h>   /* ZSTD_CCtx, size_t */
130981 +/*-*************************************
130982 +*  Long distance matching
130983 +***************************************/
130985 +#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_LIMIT_DEFAULT
130987 +void ZSTD_ldm_fillHashTable(
130988 +            ldmState_t* state, const BYTE* ip,
130989 +            const BYTE* iend, ldmParams_t const* params);
130992 + * ZSTD_ldm_generateSequences():
130994 + * Generates long range matching sequences using the long distance match
130995 + * finder, storing them in `sequences`, which parse a prefix
130996 + * of the source. `sequences` must be large enough to store every sequence,
130997 + * which can be checked with `ZSTD_ldm_getMaxNbSeq()`.
130998 + * @returns 0 or an error code.
131000 + * NOTE: The user must have called ZSTD_window_update() for all of the input
131001 + * they have, even if they pass it to ZSTD_ldm_generateSequences() in chunks.
131002 + * NOTE: This function returns an error if it runs out of space to store
131003 + *       sequences.
131004 + */
131005 +size_t ZSTD_ldm_generateSequences(
131006 +            ldmState_t* ldms, rawSeqStore_t* sequences,
131007 +            ldmParams_t const* params, void const* src, size_t srcSize);
131010 + * ZSTD_ldm_blockCompress():
131012 + * Compresses a block using the predefined sequences, along with a secondary
131013 + * block compressor. The literals section of every sequence is passed to the
131014 + * secondary block compressor, and those sequences are interspersed with the
131015 + * predefined sequences. Returns the length of the last literals.
131016 + * Updates `rawSeqStore.pos` to indicate how many sequences have been consumed.
131017 + * `rawSeqStore.seq` may also be updated to split the last sequence between two
131018 + * blocks.
131019 + * @return The length of the last literals.
131021 + * NOTE: The source must be at most the maximum block size, but the predefined
131022 + * sequences can be any size, and may be longer than the block. In the case that
131023 + * they are longer than the block, the last sequences may need to be split into
131024 + * two. We handle that case correctly, and update `rawSeqStore` appropriately.
131025 + * NOTE: This function does not return any errors.
131026 + */
131027 +size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
131028 +            ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
131029 +            void const* src, size_t srcSize);
131032 + * ZSTD_ldm_skipSequences():
131034 + * Skip past `srcSize` bytes worth of sequences in `rawSeqStore`.
131035 + * Avoids emitting matches shorter than `minMatch` bytes.
131036 + * Must be called for data that is not passed to ZSTD_ldm_blockCompress().
131037 + */
131038 +void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize,
131039 +    U32 const minMatch);
131041 +/* ZSTD_ldm_skipRawSeqStoreBytes():
131042 + * Moves forward in rawSeqStore by nbBytes, updating fields 'pos' and 'posInSequence'.
131043 + * Not to be used in conjunction with ZSTD_ldm_skipSequences().
131044 + * Must be called for data that is not passed to ZSTD_ldm_blockCompress().
131045 + */
131046 +void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes);
131048 +/** ZSTD_ldm_getTableSize() :
131049 + *  Estimate the space needed for long distance matching tables or 0 if LDM is
131050 + *  disabled.
131051 + */
131052 +size_t ZSTD_ldm_getTableSize(ldmParams_t params);
131054 +/** ZSTD_ldm_getSeqSpace() :
131055 + *  Return an upper bound on the number of sequences that can be produced by
131056 + *  the long distance matcher, or 0 if LDM is disabled.
131057 + */
131058 +size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize);
131060 +/** ZSTD_ldm_adjustParameters() :
131061 + *  If the params->hashRateLog is not set, set it to its default value based on
131062 + *  windowLog and params->hashLog.
131064 + *  Ensures that params->bucketSizeLog is <= params->hashLog (setting it to
131065 + *  params->hashLog if it is not).
131067 + *  Ensures that the minMatchLength >= targetLength during optimal parsing.
131068 + */
131069 +void ZSTD_ldm_adjustParameters(ldmParams_t* params,
131070 +                               ZSTD_compressionParameters const* cParams);
131073 +#endif /* ZSTD_LDM_H */
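Taken together, the declarations above describe a two-phase flow: generate raw long-distance sequences, then compress the block while interleaving them. A hedged sketch of that calling order, assuming the caller has already set up the ldm state, match state, sequence stores and window exactly as zstd_compress.c does (the wrapper name is illustrative, and the ZSTD_isError check assumes the internal error helpers available to this code):

/* Illustrative composition of the two entry points above for one block;
 * not the actual driver code. */
static size_t ldm_compress_one_block(
        ldmState_t* ldms, rawSeqStore_t* rawStore, ldmParams_t const* params,
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    /* Phase 1: scan the input for long-distance matches. */
    size_t const ret = ZSTD_ldm_generateSequences(ldms, rawStore, params, src, srcSize);
    if (ZSTD_isError(ret)) return ret;   /* e.g. ran out of sequence space */
    /* Phase 2: emit those matches, handing each literals run to the
     * secondary block compressor. */
    return ZSTD_ldm_blockCompress(rawStore, ms, seqStore, rep, src, srcSize);
}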
131074 diff --git a/lib/zstd/compress/zstd_ldm_geartab.h b/lib/zstd/compress/zstd_ldm_geartab.h
131075 new file mode 100644
131076 index 000000000000..e5c24d856b0a
131077 --- /dev/null
131078 +++ b/lib/zstd/compress/zstd_ldm_geartab.h
131079 @@ -0,0 +1,103 @@
131081 + * Copyright (c) Yann Collet, Facebook, Inc.
131082 + * All rights reserved.
131084 + * This source code is licensed under both the BSD-style license (found in the
131085 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
131086 + * in the COPYING file in the root directory of this source tree).
131087 + * You may select, at your option, one of the above-listed licenses.
131088 + */
131090 +#ifndef ZSTD_LDM_GEARTAB_H
131091 +#define ZSTD_LDM_GEARTAB_H
131093 +static U64 ZSTD_ldm_gearTab[256] = {
131094 +    0xf5b8f72c5f77775c, 0x84935f266b7ac412, 0xb647ada9ca730ccc,
131095 +    0xb065bb4b114fb1de, 0x34584e7e8c3a9fd0, 0x4e97e17c6ae26b05,
131096 +    0x3a03d743bc99a604, 0xcecd042422c4044f, 0x76de76c58524259e,
131097 +    0x9c8528f65badeaca, 0x86563706e2097529, 0x2902475fa375d889,
131098 +    0xafb32a9739a5ebe6, 0xce2714da3883e639, 0x21eaf821722e69e,
131099 +    0x37b628620b628,    0x49a8d455d88caf5,  0x8556d711e6958140,
131100 +    0x4f7ae74fc605c1f,  0x829f0c3468bd3a20, 0x4ffdc885c625179e,
131101 +    0x8473de048a3daf1b, 0x51008822b05646b2, 0x69d75d12b2d1cc5f,
131102 +    0x8c9d4a19159154bc, 0xc3cc10f4abbd4003, 0xd06ddc1cecb97391,
131103 +    0xbe48e6e7ed80302e, 0x3481db31cee03547, 0xacc3f67cdaa1d210,
131104 +    0x65cb771d8c7f96cc, 0x8eb27177055723dd, 0xc789950d44cd94be,
131105 +    0x934feadc3700b12b, 0x5e485f11edbdf182, 0x1e2e2a46fd64767a,
131106 +    0x2969ca71d82efa7c, 0x9d46e9935ebbba2e, 0xe056b67e05e6822b,
131107 +    0x94d73f55739d03a0, 0xcd7010bdb69b5a03, 0x455ef9fcd79b82f4,
131108 +    0x869cb54a8749c161, 0x38d1a4fa6185d225, 0xb475166f94bbe9bb,
131109 +    0xa4143548720959f1, 0x7aed4780ba6b26ba, 0xd0ce264439e02312,
131110 +    0x84366d746078d508, 0xa8ce973c72ed17be, 0x21c323a29a430b01,
131111 +    0x9962d617e3af80ee, 0xab0ce91d9c8cf75b, 0x530e8ee6d19a4dbc,
131112 +    0x2ef68c0cf53f5d72, 0xc03a681640a85506, 0x496e4e9f9c310967,
131113 +    0x78580472b59b14a0, 0x273824c23b388577, 0x66bf923ad45cb553,
131114 +    0x47ae1a5a2492ba86, 0x35e304569e229659, 0x4765182a46870b6f,
131115 +    0x6cbab625e9099412, 0xddac9a2e598522c1, 0x7172086e666624f2,
131116 +    0xdf5003ca503b7837, 0x88c0c1db78563d09, 0x58d51865acfc289d,
131117 +    0x177671aec65224f1, 0xfb79d8a241e967d7, 0x2be1e101cad9a49a,
131118 +    0x6625682f6e29186b, 0x399553457ac06e50, 0x35dffb4c23abb74,
131119 +    0x429db2591f54aade, 0xc52802a8037d1009, 0x6acb27381f0b25f3,
131120 +    0xf45e2551ee4f823b, 0x8b0ea2d99580c2f7, 0x3bed519cbcb4e1e1,
131121 +    0xff452823dbb010a,  0x9d42ed614f3dd267, 0x5b9313c06257c57b,
131122 +    0xa114b8008b5e1442, 0xc1fe311c11c13d4b, 0x66e8763ea34c5568,
131123 +    0x8b982af1c262f05d, 0xee8876faaa75fbb7, 0x8a62a4d0d172bb2a,
131124 +    0xc13d94a3b7449a97, 0x6dbbba9dc15d037c, 0xc786101f1d92e0f1,
131125 +    0xd78681a907a0b79b, 0xf61aaf2962c9abb9, 0x2cfd16fcd3cb7ad9,
131126 +    0x868c5b6744624d21, 0x25e650899c74ddd7, 0xba042af4a7c37463,
131127 +    0x4eb1a539465a3eca, 0xbe09dbf03b05d5ca, 0x774e5a362b5472ba,
131128 +    0x47a1221229d183cd, 0x504b0ca18ef5a2df, 0xdffbdfbde2456eb9,
131129 +    0x46cd2b2fbee34634, 0xf2aef8fe819d98c3, 0x357f5276d4599d61,
131130 +    0x24a5483879c453e3, 0x88026889192b4b9,  0x28da96671782dbec,
131131 +    0x4ef37c40588e9aaa, 0x8837b90651bc9fb3, 0xc164f741d3f0e5d6,
131132 +    0xbc135a0a704b70ba, 0x69cd868f7622ada,  0xbc37ba89e0b9c0ab,
131133 +    0x47c14a01323552f6, 0x4f00794bacee98bb, 0x7107de7d637a69d5,
131134 +    0x88af793bb6f2255e, 0xf3c6466b8799b598, 0xc288c616aa7f3b59,
131135 +    0x81ca63cf42fca3fd, 0x88d85ace36a2674b, 0xd056bd3792389e7,
131136 +    0xe55c396c4e9dd32d, 0xbefb504571e6c0a6, 0x96ab32115e91e8cc,
131137 +    0xbf8acb18de8f38d1, 0x66dae58801672606, 0x833b6017872317fb,
131138 +    0xb87c16f2d1c92864, 0xdb766a74e58b669c, 0x89659f85c61417be,
131139 +    0xc8daad856011ea0c, 0x76a4b565b6fe7eae, 0xa469d085f6237312,
131140 +    0xaaf0365683a3e96c, 0x4dbb746f8424f7b8, 0x638755af4e4acc1,
131141 +    0x3d7807f5bde64486, 0x17be6d8f5bbb7639, 0x903f0cd44dc35dc,
131142 +    0x67b672eafdf1196c, 0xa676ff93ed4c82f1, 0x521d1004c5053d9d,
131143 +    0x37ba9ad09ccc9202, 0x84e54d297aacfb51, 0xa0b4b776a143445,
131144 +    0x820d471e20b348e,  0x1874383cb83d46dc, 0x97edeec7a1efe11c,
131145 +    0xb330e50b1bdc42aa, 0x1dd91955ce70e032, 0xa514cdb88f2939d5,
131146 +    0x2791233fd90db9d3, 0x7b670a4cc50f7a9b, 0x77c07d2a05c6dfa5,
131147 +    0xe3778b6646d0a6fa, 0xb39c8eda47b56749, 0x933ed448addbef28,
131148 +    0xaf846af6ab7d0bf4, 0xe5af208eb666e49,  0x5e6622f73534cd6a,
131149 +    0x297daeca42ef5b6e, 0x862daef3d35539a6, 0xe68722498f8e1ea9,
131150 +    0x981c53093dc0d572, 0xfa09b0bfbf86fbf5, 0x30b1e96166219f15,
131151 +    0x70e7d466bdc4fb83, 0x5a66736e35f2a8e9, 0xcddb59d2b7c1baef,
131152 +    0xd6c7d247d26d8996, 0xea4e39eac8de1ba3, 0x539c8bb19fa3aff2,
131153 +    0x9f90e4c5fd508d8,  0xa34e5956fbaf3385, 0x2e2f8e151d3ef375,
131154 +    0x173691e9b83faec1, 0xb85a8d56bf016379, 0x8382381267408ae3,
131155 +    0xb90f901bbdc0096d, 0x7c6ad32933bcec65, 0x76bb5e2f2c8ad595,
131156 +    0x390f851a6cf46d28, 0xc3e6064da1c2da72, 0xc52a0c101cfa5389,
131157 +    0xd78eaf84a3fbc530, 0x3781b9e2288b997e, 0x73c2f6dea83d05c4,
131158 +    0x4228e364c5b5ed7,  0x9d7a3edf0da43911, 0x8edcfeda24686756,
131159 +    0x5e7667a7b7a9b3a1, 0x4c4f389fa143791d, 0xb08bc1023da7cddc,
131160 +    0x7ab4be3ae529b1cc, 0x754e6132dbe74ff9, 0x71635442a839df45,
131161 +    0x2f6fb1643fbe52de, 0x961e0a42cf7a8177, 0xf3b45d83d89ef2ea,
131162 +    0xee3de4cf4a6e3e9b, 0xcd6848542c3295e7, 0xe4cee1664c78662f,
131163 +    0x9947548b474c68c4, 0x25d73777a5ed8b0b, 0xc915b1d636b7fc,
131164 +    0x21c2ba75d9b0d2da, 0x5f6b5dcf608a64a1, 0xdcf333255ff9570c,
131165 +    0x633b922418ced4ee, 0xc136dde0b004b34a, 0x58cc83b05d4b2f5a,
131166 +    0x5eb424dda28e42d2, 0x62df47369739cd98, 0xb4e0b42485e4ce17,
131167 +    0x16e1f0c1f9a8d1e7, 0x8ec3916707560ebf, 0x62ba6e2df2cc9db3,
131168 +    0xcbf9f4ff77d83a16, 0x78d9d7d07d2bbcc4, 0xef554ce1e02c41f4,
131169 +    0x8d7581127eccf94d, 0xa9b53336cb3c8a05, 0x38c42c0bf45c4f91,
131170 +    0x640893cdf4488863, 0x80ec34bc575ea568, 0x39f324f5b48eaa40,
131171 +    0xe9d9ed1f8eff527f, 0x9224fc058cc5a214, 0xbaba00b04cfe7741,
131172 +    0x309a9f120fcf52af, 0xa558f3ec65626212, 0x424bec8b7adabe2f,
131173 +    0x41622513a6aea433, 0xb88da2d5324ca798, 0xd287733b245528a4,
131174 +    0x9a44697e6d68aec3, 0x7b1093be2f49bb28, 0x50bbec632e3d8aad,
131175 +    0x6cd90723e1ea8283, 0x897b9e7431b02bf3, 0x219efdcb338a7047,
131176 +    0x3b0311f0a27c0656, 0xdb17bf91c0db96e7, 0x8cd4fd6b4e85a5b2,
131177 +    0xfab071054ba6409d, 0x40d6fe831fa9dfd9, 0xaf358debad7d791e,
131178 +    0xeb8d0e25a65e3e58, 0xbbcbd3df14e08580, 0xcf751f27ecdab2b,
131179 +    0x2b4da14f2613d8f4
131180 +};
131182 +#endif /* ZSTD_LDM_GEARTAB_H */
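ZSTD_ldm_gearTab drives a "gear" rolling hash: advancing by one byte costs a shift plus one table lookup, which is what lets the LDM scanner hash every input position cheaply. A minimal sketch of the rolling update, assuming the classic gear form this table is built for (the real scanner in zstd_ldm.c is unrolled and tracks more state):

#include <stdint.h>
#include <stddef.h>

/* One gear step per byte: shift the running hash and add the byte's
 * 64-bit table constant. `table` stands in for ZSTD_ldm_gearTab. */
static uint64_t gear_roll(uint64_t hash, const uint8_t* data, size_t len,
                          const uint64_t table[256])
{
    size_t n;
    for (n = 0; n < len; n++)
        hash = (hash << 1) + table[data[n]];
    return hash;
}

Because the shift discards one bit per step, a byte's contribution is fully shifted out after 64 steps, giving a bounded rolling window without an explicit subtraction.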
131183 diff --git a/lib/zstd/compress/zstd_opt.c b/lib/zstd/compress/zstd_opt.c
131184 new file mode 100644
131185 index 000000000000..9ab92d4ef499
131186 --- /dev/null
131187 +++ b/lib/zstd/compress/zstd_opt.c
131188 @@ -0,0 +1,1345 @@
131190 + * Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
131191 + * All rights reserved.
131193 + * This source code is licensed under both the BSD-style license (found in the
131194 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
131195 + * in the COPYING file in the root directory of this source tree).
131196 + * You may select, at your option, one of the above-listed licenses.
131197 + */
131199 +#include "zstd_compress_internal.h"
131200 +#include "hist.h"
131201 +#include "zstd_opt.h"
131204 +#define ZSTD_LITFREQ_ADD    2   /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
131205 +#define ZSTD_FREQ_DIV       4   /* log factor when using previous stats to init next stats */
131206 +#define ZSTD_MAX_PRICE     (1<<30)
131208 +#define ZSTD_PREDEF_THRESHOLD 1024   /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */
131211 +/*-*************************************
131212 +*  Price functions for optimal parser
131213 +***************************************/
131215 +#if 0    /* approximation at bit level */
131216 +#  define BITCOST_ACCURACY 0
131217 +#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
131218 +#  define WEIGHT(stat)  ((void)opt, ZSTD_bitWeight(stat))
131219 +#elif 0  /* fractional bit accuracy */
131220 +#  define BITCOST_ACCURACY 8
131221 +#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
131222 +#  define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat))
131223 +#else    /* opt==approx, ultra==accurate */
131224 +#  define BITCOST_ACCURACY 8
131225 +#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
131226 +#  define WEIGHT(stat,opt) (opt ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat))
131227 +#endif
131229 +MEM_STATIC U32 ZSTD_bitWeight(U32 stat)
131231 +    return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER);
131234 +MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)
131236 +    U32 const stat = rawStat + 1;
131237 +    U32 const hb = ZSTD_highbit32(stat);
131238 +    U32 const BWeight = hb * BITCOST_MULTIPLIER;
131239 +    U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb;
131240 +    U32 const weight = BWeight + FWeight;
131241 +    assert(hb + BITCOST_ACCURACY < 31);
131242 +    return weight;
131243 +}
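/* Worked example (annotation, not part of the upstream file): for
 * rawStat = 4, stat = 5 -> hb = ZSTD_highbit32(5) = 2, so
 *   BWeight = 2 * BITCOST_MULTIPLIER = 512,
 *   FWeight = (5 << 8) >> 2          = 320,
 *   weight  = 832, i.e. 832/256 = 3.25 "bits",
 * a fractional refinement of log2(5)+1 ~= 3.32. Prices are then formed as
 * WEIGHT(sum) - WEIGHT(freq) ~= log2(sum/freq) bits, scaled by 256. */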
131245 +#if (DEBUGLEVEL>=2)
131246 +/* debugging function,
131247 + * @return price in bytes as fractional value
131248 + * for debug messages only */
131249 +MEM_STATIC double ZSTD_fCost(U32 price)
131251 +    return (double)price / (BITCOST_MULTIPLIER*8);
131253 +#endif
131255 +static int ZSTD_compressedLiterals(optState_t const* const optPtr)
131257 +    return optPtr->literalCompressionMode != ZSTD_lcm_uncompressed;
131260 +static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
131262 +    if (ZSTD_compressedLiterals(optPtr))
131263 +        optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);
131264 +    optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel);
131265 +    optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel);
131266 +    optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel);
131270 +/* ZSTD_downscaleStat() :
131271 + * reduce all elements in table by a factor 2^(ZSTD_FREQ_DIV+malus)
131272 + * return the resulting sum of elements */
131273 +static U32 ZSTD_downscaleStat(unsigned* table, U32 lastEltIndex, int malus)
131275 +    U32 s, sum=0;
131276 +    DEBUGLOG(5, "ZSTD_downscaleStat (nbElts=%u)", (unsigned)lastEltIndex+1);
131277 +    assert(ZSTD_FREQ_DIV+malus > 0 && ZSTD_FREQ_DIV+malus < 31);
131278 +    for (s=0; s<lastEltIndex+1; s++) {
131279 +        table[s] = 1 + (table[s] >> (ZSTD_FREQ_DIV+malus));
131280 +        sum += table[s];
131281 +    }
131282 +    return sum;
131283 +}
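/* Example (annotation, not part of the upstream file): with
 * ZSTD_FREQ_DIV = 4 and malus = 0, a count of 100 becomes
 * 1 + (100 >> 4) = 7, and a count of 3 becomes 1. Previous-block
 * statistics thus survive only as a weak prior, and the "+1" keeps
 * every symbol representable (no zero frequencies). */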
131285 +/* ZSTD_rescaleFreqs() :
131286 + * if first block (detected by optPtr->litLengthSum == 0) : init statistics
131287 + *    take hints from dictionary if there is one
131288 + *    or init from zero, using src for literals stats, or flat 1 for match symbols
131289 + * otherwise downscale existing stats, to be used as seed for next block.
131290 + */
131291 +static void
131292 +ZSTD_rescaleFreqs(optState_t* const optPtr,
131293 +            const BYTE* const src, size_t const srcSize,
131294 +                  int const optLevel)
131296 +    int const compressedLiterals = ZSTD_compressedLiterals(optPtr);
131297 +    DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize);
131298 +    optPtr->priceType = zop_dynamic;
131300 +    if (optPtr->litLengthSum == 0) {  /* first block : init */
131301 +        if (srcSize <= ZSTD_PREDEF_THRESHOLD) {  /* heuristic */
131302 +            DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef");
131303 +            optPtr->priceType = zop_predef;
131304 +        }
131306 +        assert(optPtr->symbolCosts != NULL);
131307 +        if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) {
131308 +            /* huffman table presumed generated by dictionary */
131309 +            optPtr->priceType = zop_dynamic;
131311 +            if (compressedLiterals) {
131312 +                unsigned lit;
131313 +                assert(optPtr->litFreq != NULL);
131314 +                optPtr->litSum = 0;
131315 +                for (lit=0; lit<=MaxLit; lit++) {
131316 +                    U32 const scaleLog = 11;   /* scale to 2K */
131317 +                    U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit);
131318 +                    assert(bitCost <= scaleLog);
131319 +                    optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
131320 +                    optPtr->litSum += optPtr->litFreq[lit];
131321 +            }   }
131323 +            {   unsigned ll;
131324 +                FSE_CState_t llstate;
131325 +                FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable);
131326 +                optPtr->litLengthSum = 0;
131327 +                for (ll=0; ll<=MaxLL; ll++) {
131328 +                    U32 const scaleLog = 10;   /* scale to 1K */
131329 +                    U32 const bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll);
131330 +                    assert(bitCost < scaleLog);
131331 +                    optPtr->litLengthFreq[ll] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
131332 +                    optPtr->litLengthSum += optPtr->litLengthFreq[ll];
131333 +            }   }
131335 +            {   unsigned ml;
131336 +                FSE_CState_t mlstate;
131337 +                FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable);
131338 +                optPtr->matchLengthSum = 0;
131339 +                for (ml=0; ml<=MaxML; ml++) {
131340 +                    U32 const scaleLog = 10;
131341 +                    U32 const bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml);
131342 +                    assert(bitCost < scaleLog);
131343 +                    optPtr->matchLengthFreq[ml] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
131344 +                    optPtr->matchLengthSum += optPtr->matchLengthFreq[ml];
131345 +            }   }
131347 +            {   unsigned of;
131348 +                FSE_CState_t ofstate;
131349 +                FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable);
131350 +                optPtr->offCodeSum = 0;
131351 +                for (of=0; of<=MaxOff; of++) {
131352 +                    U32 const scaleLog = 10;
131353 +                    U32 const bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of);
131354 +                    assert(bitCost < scaleLog);
131355 +                    optPtr->offCodeFreq[of] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
131356 +                    optPtr->offCodeSum += optPtr->offCodeFreq[of];
131357 +            }   }
131359 +        } else {  /* not a dictionary */
131361 +            assert(optPtr->litFreq != NULL);
131362 +            if (compressedLiterals) {
131363 +                unsigned lit = MaxLit;
131364 +                HIST_count_simple(optPtr->litFreq, &lit, src, srcSize);   /* use raw first block to init statistics */
131365 +                optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
131366 +            }
131368 +            {   unsigned ll;
131369 +                for (ll=0; ll<=MaxLL; ll++)
131370 +                    optPtr->litLengthFreq[ll] = 1;
131371 +            }
131372 +            optPtr->litLengthSum = MaxLL+1;
131374 +            {   unsigned ml;
131375 +                for (ml=0; ml<=MaxML; ml++)
131376 +                    optPtr->matchLengthFreq[ml] = 1;
131377 +            }
131378 +            optPtr->matchLengthSum = MaxML+1;
131380 +            {   unsigned of;
131381 +                for (of=0; of<=MaxOff; of++)
131382 +                    optPtr->offCodeFreq[of] = 1;
131383 +            }
131384 +            optPtr->offCodeSum = MaxOff+1;
131386 +        }
131388 +    } else {   /* new block : re-use previous statistics, scaled down */
131390 +        if (compressedLiterals)
131391 +            optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
131392 +        optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0);
131393 +        optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0);
131394 +        optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0);
131395 +    }
131397 +    ZSTD_setBasePrices(optPtr, optLevel);
131400 +/* ZSTD_rawLiteralsCost() :
131401 + * price of literals (only) in specified segment (which length can be 0).
131402 + * does not include price of literalLength symbol */
131403 +static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
131404 +                                const optState_t* const optPtr,
131405 +                                int optLevel)
131407 +    if (litLength == 0) return 0;
131409 +    if (!ZSTD_compressedLiterals(optPtr))
131410 +        return (litLength << 3) * BITCOST_MULTIPLIER;  /* Uncompressed - 8 bits per literal. */
131412 +    if (optPtr->priceType == zop_predef)
131413 +        return (litLength*6) * BITCOST_MULTIPLIER;  /* 6 bits per literal - no statistics used */
131415 +    /* dynamic statistics */
131416 +    {   U32 price = litLength * optPtr->litSumBasePrice;
131417 +        U32 u;
131418 +        for (u=0; u < litLength; u++) {
131419 +            assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice);   /* literal cost should never be negative */
131420 +            price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel);
131421 +        }
131422 +        return price;
131423 +    }
131426 +/* ZSTD_litLengthPrice() :
131427 + * cost of literalLength symbol */
131428 +static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel)
131430 +    if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel);
131432 +    /* dynamic statistics */
131433 +    {   U32 const llCode = ZSTD_LLcode(litLength);
131434 +        return (LL_bits[llCode] * BITCOST_MULTIPLIER)
131435 +             + optPtr->litLengthSumBasePrice
131436 +             - WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
131437 +    }
131440 +/* ZSTD_getMatchPrice() :
131441 + * Provides the cost of the match part (offset + matchLength) of a sequence
131442 + * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence.
131443 + * optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */
131444 +FORCE_INLINE_TEMPLATE U32
131445 +ZSTD_getMatchPrice(U32 const offset,
131446 +                   U32 const matchLength,
131447 +             const optState_t* const optPtr,
131448 +                   int const optLevel)
131450 +    U32 price;
131451 +    U32 const offCode = ZSTD_highbit32(offset+1);
131452 +    U32 const mlBase = matchLength - MINMATCH;
131453 +    assert(matchLength >= MINMATCH);
131455 +    if (optPtr->priceType == zop_predef)  /* fixed scheme, do not use statistics */
131456 +        return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER);
131458 +    /* dynamic statistics */
131459 +    price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel));
131460 +    if ((optLevel<2) /*static*/ && offCode >= 20)
131461 +        price += (offCode-19)*2 * BITCOST_MULTIPLIER; /* handicap for long distance offsets, favor decompression speed */
131463 +    /* match Length */
131464 +    {   U32 const mlCode = ZSTD_MLcode(mlBase);
131465 +        price += (ML_bits[mlCode] * BITCOST_MULTIPLIER) + (optPtr->matchLengthSumBasePrice - WEIGHT(optPtr->matchLengthFreq[mlCode], optLevel));
131466 +    }
131468 +    price += BITCOST_MULTIPLIER / 5;   /* heuristic : make matches a bit more costly to favor fewer sequences -> faster decompression speed */
131470 +    DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price);
131471 +    return price;
131472 +}
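/* Example (annotation, not part of the upstream file): offCode is
 * ZSTD_highbit32(offset+1), so an offset near 16 MB (offset+1 = 1<<24)
 * has offCode 24; at optLevel < 2 it picks up an extra
 * (24-19)*2 = 10-bit handicap, steering the parser toward shorter
 * offsets that decompress with better cache locality. */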
131474 +/* ZSTD_updateStats() :
131475 + * assumption : literals + litLength <= iend */
131476 +static void ZSTD_updateStats(optState_t* const optPtr,
131477 +                             U32 litLength, const BYTE* literals,
131478 +                             U32 offsetCode, U32 matchLength)
131480 +    /* literals */
131481 +    if (ZSTD_compressedLiterals(optPtr)) {
131482 +        U32 u;
131483 +        for (u=0; u < litLength; u++)
131484 +            optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
131485 +        optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
131486 +    }
131488 +    /* literal Length */
131489 +    {   U32 const llCode = ZSTD_LLcode(litLength);
131490 +        optPtr->litLengthFreq[llCode]++;
131491 +        optPtr->litLengthSum++;
131492 +    }
131494 +    /* match offset code (0-2=>repCode; 3+=>offset+2) */
131495 +    {   U32 const offCode = ZSTD_highbit32(offsetCode+1);
131496 +        assert(offCode <= MaxOff);
131497 +        optPtr->offCodeFreq[offCode]++;
131498 +        optPtr->offCodeSum++;
131499 +    }
131501 +    /* match Length */
131502 +    {   U32 const mlBase = matchLength - MINMATCH;
131503 +        U32 const mlCode = ZSTD_MLcode(mlBase);
131504 +        optPtr->matchLengthFreq[mlCode]++;
131505 +        optPtr->matchLengthSum++;
131506 +    }
131510 +/* ZSTD_readMINMATCH() :
131511 + * function safe only for comparisons
131512 + * assumption : memPtr must be at least 4 bytes before end of buffer */
131513 +MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
131515 +    switch (length)
131516 +    {
131517 +    default :
131518 +    case 4 : return MEM_read32(memPtr);
131519 +    case 3 : if (MEM_isLittleEndian())
131520 +                return MEM_read32(memPtr)<<8;
131521 +             else
131522 +                return MEM_read32(memPtr)>>8;
131523 +    }
131524 +}
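/* Example (annotation, not part of the upstream file): for length==3 on a
 * little-endian machine, bytes b0 b1 b2 b3 read as 0xb3b2b1b0; shifting
 * left by 8 drops b3, so only the first 3 bytes take part in the
 * comparison. On big-endian, b3 lands in the low byte, so >>8 drops it
 * for the same effect. */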
131527 +/* Update hashTable3 up to ip (excluded)
131528 +   Assumption : always within prefix (i.e. not within extDict) */
131529 +static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms,
131530 +                                              U32* nextToUpdate3,
131531 +                                              const BYTE* const ip)
131533 +    U32* const hashTable3 = ms->hashTable3;
131534 +    U32 const hashLog3 = ms->hashLog3;
131535 +    const BYTE* const base = ms->window.base;
131536 +    U32 idx = *nextToUpdate3;
131537 +    U32 const target = (U32)(ip - base);
131538 +    size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3);
131539 +    assert(hashLog3 > 0);
131541 +    while(idx < target) {
131542 +        hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;
131543 +        idx++;
131544 +    }
131546 +    *nextToUpdate3 = target;
131547 +    return hashTable3[hash3];
131551 +/*-*************************************
131552 +*  Binary Tree search
131553 +***************************************/
131554 +/** ZSTD_insertBt1() : add one or multiple positions to tree.
131555 + *  ip : assumed <= iend-8 .
131556 + * @return : nb of positions added */
131557 +static U32 ZSTD_insertBt1(
131558 +                ZSTD_matchState_t* ms,
131559 +                const BYTE* const ip, const BYTE* const iend,
131560 +                U32 const mls, const int extDict)
131562 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
131563 +    U32*   const hashTable = ms->hashTable;
131564 +    U32    const hashLog = cParams->hashLog;
131565 +    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
131566 +    U32*   const bt = ms->chainTable;
131567 +    U32    const btLog  = cParams->chainLog - 1;
131568 +    U32    const btMask = (1 << btLog) - 1;
131569 +    U32 matchIndex = hashTable[h];
131570 +    size_t commonLengthSmaller=0, commonLengthLarger=0;
131571 +    const BYTE* const base = ms->window.base;
131572 +    const BYTE* const dictBase = ms->window.dictBase;
131573 +    const U32 dictLimit = ms->window.dictLimit;
131574 +    const BYTE* const dictEnd = dictBase + dictLimit;
131575 +    const BYTE* const prefixStart = base + dictLimit;
131576 +    const BYTE* match;
131577 +    const U32 curr = (U32)(ip-base);
131578 +    const U32 btLow = btMask >= curr ? 0 : curr - btMask;
131579 +    U32* smallerPtr = bt + 2*(curr&btMask);
131580 +    U32* largerPtr  = smallerPtr + 1;
131581 +    U32 dummy32;   /* to be nullified at the end */
131582 +    U32 const windowLow = ms->window.lowLimit;
131583 +    U32 matchEndIdx = curr+8+1;
131584 +    size_t bestLength = 8;
131585 +    U32 nbCompares = 1U << cParams->searchLog;
131586 +#ifdef ZSTD_C_PREDICT
131587 +    U32 predictedSmall = *(bt + 2*((curr-1)&btMask) + 0);
131588 +    U32 predictedLarge = *(bt + 2*((curr-1)&btMask) + 1);
131589 +    predictedSmall += (predictedSmall>0);
131590 +    predictedLarge += (predictedLarge>0);
131591 +#endif /* ZSTD_C_PREDICT */
131593 +    DEBUGLOG(8, "ZSTD_insertBt1 (%u)", curr);
131595 +    assert(ip <= iend-8);   /* required for h calculation */
131596 +    hashTable[h] = curr;   /* Update Hash Table */
131598 +    assert(windowLow > 0);
131599 +    while (nbCompares-- && (matchIndex >= windowLow)) {
131600 +        U32* const nextPtr = bt + 2*(matchIndex & btMask);
131601 +        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
131602 +        assert(matchIndex < curr);
131604 +#ifdef ZSTD_C_PREDICT   /* note : can create issues when hlog small <= 11 */
131605 +        const U32* predictPtr = bt + 2*((matchIndex-1) & btMask);   /* written this way, as bt is a roll buffer */
131606 +        if (matchIndex == predictedSmall) {
131607 +            /* no need to check length, result known */
131608 +            *smallerPtr = matchIndex;
131609 +            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
131610 +            smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
131611 +            matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
131612 +            predictedSmall = predictPtr[1] + (predictPtr[1]>0);
131613 +            continue;
131614 +        }
131615 +        if (matchIndex == predictedLarge) {
131616 +            *largerPtr = matchIndex;
131617 +            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
131618 +            largerPtr = nextPtr;
131619 +            matchIndex = nextPtr[0];
131620 +            predictedLarge = predictPtr[0] + (predictPtr[0]>0);
131621 +            continue;
131622 +        }
131623 +#endif
131625 +        if (!extDict || (matchIndex+matchLength >= dictLimit)) {
131626 +            assert(matchIndex+matchLength >= dictLimit);   /* might be wrong if actually extDict */
131627 +            match = base + matchIndex;
131628 +            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
131629 +        } else {
131630 +            match = dictBase + matchIndex;
131631 +            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
131632 +            if (matchIndex+matchLength >= dictLimit)
131633 +                match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
131634 +        }
131636 +        if (matchLength > bestLength) {
131637 +            bestLength = matchLength;
131638 +            if (matchLength > matchEndIdx - matchIndex)
131639 +                matchEndIdx = matchIndex + (U32)matchLength;
131640 +        }
131642 +        if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
131643 +            break;   /* drop, to guarantee consistency; misses a bit of compression, but other solutions can corrupt the tree */
131644 +        }
131646 +        if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */
131647 +            /* match is smaller than current */
131648 +            *smallerPtr = matchIndex;             /* update smaller idx */
131649 +            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
131650 +            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
131651 +            smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
131652 +            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
131653 +        } else {
131654 +            /* match is larger than current */
131655 +            *largerPtr = matchIndex;
131656 +            commonLengthLarger = matchLength;
131657 +            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
131658 +            largerPtr = nextPtr;
131659 +            matchIndex = nextPtr[0];
131660 +    }   }
131662 +    *smallerPtr = *largerPtr = 0;
131663 +    {   U32 positions = 0;
131664 +        if (bestLength > 384) positions = MIN(192, (U32)(bestLength - 384));   /* speed optimization */
131665 +        assert(matchEndIdx > curr + 8);
131666 +        return MAX(positions, matchEndIdx - (curr + 8));
131667 +    }
131670 +FORCE_INLINE_TEMPLATE
131671 +void ZSTD_updateTree_internal(
131672 +                ZSTD_matchState_t* ms,
131673 +                const BYTE* const ip, const BYTE* const iend,
131674 +                const U32 mls, const ZSTD_dictMode_e dictMode)
131676 +    const BYTE* const base = ms->window.base;
131677 +    U32 const target = (U32)(ip - base);
131678 +    U32 idx = ms->nextToUpdate;
131679 +    DEBUGLOG(6, "ZSTD_updateTree_internal, from %u to %u  (dictMode:%u)",
131680 +                idx, target, dictMode);
131682 +    while(idx < target) {
131683 +        U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, mls, dictMode == ZSTD_extDict);
131684 +        assert(idx < (U32)(idx + forward));
131685 +        idx += forward;
131686 +    }
131687 +    assert((size_t)(ip - base) <= (size_t)(U32)(-1));
131688 +    assert((size_t)(iend - base) <= (size_t)(U32)(-1));
131689 +    ms->nextToUpdate = target;
131692 +void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {
131693 +    ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);
131696 +FORCE_INLINE_TEMPLATE
131697 +U32 ZSTD_insertBtAndGetAllMatches (
131698 +                    ZSTD_match_t* matches,   /* store result (found matches) in this table (presumed large enough) */
131699 +                    ZSTD_matchState_t* ms,
131700 +                    U32* nextToUpdate3,
131701 +                    const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode,
131702 +                    const U32 rep[ZSTD_REP_NUM],
131703 +                    U32 const ll0,   /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
131704 +                    const U32 lengthToBeat,
131705 +                    U32 const mls /* template */)
131707 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
131708 +    U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
131709 +    const BYTE* const base = ms->window.base;
131710 +    U32 const curr = (U32)(ip-base);
131711 +    U32 const hashLog = cParams->hashLog;
131712 +    U32 const minMatch = (mls==3) ? 3 : 4;
131713 +    U32* const hashTable = ms->hashTable;
131714 +    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
131715 +    U32 matchIndex  = hashTable[h];
131716 +    U32* const bt   = ms->chainTable;
131717 +    U32 const btLog = cParams->chainLog - 1;
131718 +    U32 const btMask= (1U << btLog) - 1;
131719 +    size_t commonLengthSmaller=0, commonLengthLarger=0;
131720 +    const BYTE* const dictBase = ms->window.dictBase;
131721 +    U32 const dictLimit = ms->window.dictLimit;
131722 +    const BYTE* const dictEnd = dictBase + dictLimit;
131723 +    const BYTE* const prefixStart = base + dictLimit;
131724 +    U32 const btLow = (btMask >= curr) ? 0 : curr - btMask;
131725 +    U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);
131726 +    U32 const matchLow = windowLow ? windowLow : 1;
131727 +    U32* smallerPtr = bt + 2*(curr&btMask);
131728 +    U32* largerPtr  = bt + 2*(curr&btMask) + 1;
131729 +    U32 matchEndIdx = curr+8+1;   /* farthest referenced position of any match => detects repetitive patterns */
131730 +    U32 dummy32;   /* to be nullified at the end */
131731 +    U32 mnum = 0;
131732 +    U32 nbCompares = 1U << cParams->searchLog;
131734 +    const ZSTD_matchState_t* dms    = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL;
131735 +    const ZSTD_compressionParameters* const dmsCParams =
131736 +                                      dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL;
131737 +    const BYTE* const dmsBase       = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL;
131738 +    const BYTE* const dmsEnd        = dictMode == ZSTD_dictMatchState ? dms->window.nextSrc : NULL;
131739 +    U32         const dmsHighLimit  = dictMode == ZSTD_dictMatchState ? (U32)(dmsEnd - dmsBase) : 0;
131740 +    U32         const dmsLowLimit   = dictMode == ZSTD_dictMatchState ? dms->window.lowLimit : 0;
131741 +    U32         const dmsIndexDelta = dictMode == ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0;
131742 +    U32         const dmsHashLog    = dictMode == ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog;
131743 +    U32         const dmsBtLog      = dictMode == ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog;
131744 +    U32         const dmsBtMask     = dictMode == ZSTD_dictMatchState ? (1U << dmsBtLog) - 1 : 0;
131745 +    U32         const dmsBtLow      = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit;
131747 +    size_t bestLength = lengthToBeat-1;
131748 +    DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", curr);
131750 +    /* check repCode */
131751 +    assert(ll0 <= 1);   /* necessarily 1 or 0 */
131752 +    {   U32 const lastR = ZSTD_REP_NUM + ll0;
131753 +        U32 repCode;
131754 +        for (repCode = ll0; repCode < lastR; repCode++) {
131755 +            U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
131756 +            U32 const repIndex = curr - repOffset;
131757 +            U32 repLen = 0;
131758 +            assert(curr >= dictLimit);
131759 +            if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < curr-dictLimit) {  /* equivalent to `curr > repIndex >= dictLimit` */
131760 +                /* We must validate the repcode offset because when we're using a dictionary the
131761 +                 * valid offset range shrinks when the dictionary goes out of bounds.
131762 +                 */
131763 +                if ((repIndex >= windowLow) & (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch))) {
131764 +                    repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch;
131765 +                }
131766 +            } else {  /* repIndex < dictLimit || repIndex >= curr */
131767 +                const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ?
131768 +                                             dmsBase + repIndex - dmsIndexDelta :
131769 +                                             dictBase + repIndex;
131770 +                assert(curr >= windowLow);
131771 +                if ( dictMode == ZSTD_extDict
131772 +                  && ( ((repOffset-1) /*intentional overflow*/ < curr - windowLow)  /* equivalent to `curr > repIndex >= windowLow` */
131773 +                     & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */)
131774 +                  && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
131775 +                    repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
131776 +                }
131777 +                if (dictMode == ZSTD_dictMatchState
131778 +                  && ( ((repOffset-1) /*intentional overflow*/ < curr - (dmsLowLimit + dmsIndexDelta))  /* equivalent to `curr > repIndex >= dmsLowLimit` */
131779 +                     & ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */
131780 +                  && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
131781 +                    repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch;
131782 +            }   }
131783 +            /* save longer solution */
131784 +            if (repLen > bestLength) {
131785 +                DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u",
131786 +                            repCode, ll0, repOffset, repLen);
131787 +                bestLength = repLen;
131788 +                matches[mnum].off = repCode - ll0;
131789 +                matches[mnum].len = (U32)repLen;
131790 +                mnum++;
131791 +                if ( (repLen > sufficient_len)
131792 +                   | (ip+repLen == iLimit) ) {  /* best possible */
131793 +                    return mnum;
131794 +    }   }   }   }
131796 +    /* HC3 match finder */
131797 +    if ((mls == 3) /*static*/ && (bestLength < mls)) {
131798 +        U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip);
131799 +        if ((matchIndex3 >= matchLow)
131800 +          & (curr - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
131801 +            size_t mlen;
131802 +            if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) {
131803 +                const BYTE* const match = base + matchIndex3;
131804 +                mlen = ZSTD_count(ip, match, iLimit);
131805 +            } else {
131806 +                const BYTE* const match = dictBase + matchIndex3;
131807 +                mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart);
131808 +            }
131810 +            /* save best solution */
131811 +            if (mlen >= mls /* == 3 > bestLength */) {
131812 +                DEBUGLOG(8, "found small match with hlog3, of length %u",
131813 +                            (U32)mlen);
131814 +                bestLength = mlen;
131815 +                assert(curr > matchIndex3);
131816 +                assert(mnum==0);  /* no prior solution */
131817 +                matches[0].off = (curr - matchIndex3) + ZSTD_REP_MOVE;
131818 +                matches[0].len = (U32)mlen;
131819 +                mnum = 1;
131820 +                if ( (mlen > sufficient_len) |
131821 +                     (ip+mlen == iLimit) ) {  /* best possible length */
131822 +                    ms->nextToUpdate = curr+1;  /* skip insertion */
131823 +                    return 1;
131824 +        }   }   }
131825 +        /* no dictMatchState lookup: dicts don't have a populated HC3 table */
131826 +    }
131828 +    hashTable[h] = curr;   /* Update Hash Table */
131830 +    while (nbCompares-- && (matchIndex >= matchLow)) {
131831 +        U32* const nextPtr = bt + 2*(matchIndex & btMask);
131832 +        const BYTE* match;
131833 +        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
131834 +        assert(curr > matchIndex);
131836 +        if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) {
131837 +            assert(matchIndex+matchLength >= dictLimit);  /* ensure the condition is correct when !extDict */
131838 +            match = base + matchIndex;
131839 +            if (matchIndex >= dictLimit) assert(memcmp(match, ip, matchLength) == 0);  /* ensure early section of match is equal as expected */
131840 +            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);
131841 +        } else {
131842 +            match = dictBase + matchIndex;
131843 +            assert(memcmp(match, ip, matchLength) == 0);  /* ensure early section of match is equal as expected */
131844 +            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
131845 +            if (matchIndex+matchLength >= dictLimit)
131846 +                match = base + matchIndex;   /* prepare for match[matchLength] read */
131847 +        }
131849 +        if (matchLength > bestLength) {
131850 +            DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)",
131851 +                    (U32)matchLength, curr - matchIndex, curr - matchIndex + ZSTD_REP_MOVE);
131852 +            assert(matchEndIdx > matchIndex);
131853 +            if (matchLength > matchEndIdx - matchIndex)
131854 +                matchEndIdx = matchIndex + (U32)matchLength;
131855 +            bestLength = matchLength;
131856 +            matches[mnum].off = (curr - matchIndex) + ZSTD_REP_MOVE;
131857 +            matches[mnum].len = (U32)matchLength;
131858 +            mnum++;
131859 +            if ( (matchLength > ZSTD_OPT_NUM)
131860 +               | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
131861 +                if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */
131862 +                break; /* drop, to preserve bt consistency (miss a little bit of compression) */
131863 +            }
131864 +        }
131866 +        if (match[matchLength] < ip[matchLength]) {
131867 +            /* match smaller than current */
131868 +            *smallerPtr = matchIndex;             /* update smaller idx */
131869 +            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
131870 +            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
131871 +            smallerPtr = nextPtr+1;               /* new candidate => larger than match, which was smaller than current */
131872 +            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous, closer to current */
131873 +        } else {
131874 +            *largerPtr = matchIndex;
131875 +            commonLengthLarger = matchLength;
131876 +            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
131877 +            largerPtr = nextPtr;
131878 +            matchIndex = nextPtr[0];
131879 +    }   }
131881 +    *smallerPtr = *largerPtr = 0;
131883 +    if (dictMode == ZSTD_dictMatchState && nbCompares) {
131884 +        size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls);
131885 +        U32 dictMatchIndex = dms->hashTable[dmsH];
131886 +        const U32* const dmsBt = dms->chainTable;
131887 +        commonLengthSmaller = commonLengthLarger = 0;
131888 +        while (nbCompares-- && (dictMatchIndex > dmsLowLimit)) {
131889 +            const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask);
131890 +            size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
131891 +            const BYTE* match = dmsBase + dictMatchIndex;
131892 +            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dmsEnd, prefixStart);
131893 +            if (dictMatchIndex+matchLength >= dmsHighLimit)
131894 +                match = base + dictMatchIndex + dmsIndexDelta;   /* to prepare for next usage of match[matchLength] */
131896 +            if (matchLength > bestLength) {
131897 +                matchIndex = dictMatchIndex + dmsIndexDelta;
131898 +                DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)",
131899 +                        (U32)matchLength, curr - matchIndex, curr - matchIndex + ZSTD_REP_MOVE);
131900 +                if (matchLength > matchEndIdx - matchIndex)
131901 +                    matchEndIdx = matchIndex + (U32)matchLength;
131902 +                bestLength = matchLength;
131903 +                matches[mnum].off = (curr - matchIndex) + ZSTD_REP_MOVE;
131904 +                matches[mnum].len = (U32)matchLength;
131905 +                mnum++;
131906 +                if ( (matchLength > ZSTD_OPT_NUM)
131907 +                   | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
131908 +                    break;   /* drop, to guarantee consistency (miss a little bit of compression) */
131909 +                }
131910 +            }
131912 +            if (dictMatchIndex <= dmsBtLow) { break; }   /* beyond tree size, stop the search */
131913 +            if (match[matchLength] < ip[matchLength]) {
131914 +                commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
131915 +                dictMatchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
131916 +            } else {
131917 +                /* match is larger than current */
131918 +                commonLengthLarger = matchLength;
131919 +                dictMatchIndex = nextPtr[0];
131920 +            }
131921 +        }
131922 +    }
131924 +    assert(matchEndIdx > curr+8);
131925 +    ms->nextToUpdate = matchEndIdx - 8;  /* skip repetitive patterns */
131926 +    return mnum;
131930 +FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches (
131931 +                        ZSTD_match_t* matches,   /* store result (match found, increasing size) in this table */
131932 +                        ZSTD_matchState_t* ms,
131933 +                        U32* nextToUpdate3,
131934 +                        const BYTE* ip, const BYTE* const iHighLimit, const ZSTD_dictMode_e dictMode,
131935 +                        const U32 rep[ZSTD_REP_NUM],
131936 +                        U32 const ll0,
131937 +                        U32 const lengthToBeat)
131939 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
131940 +    U32 const matchLengthSearch = cParams->minMatch;
131941 +    DEBUGLOG(8, "ZSTD_BtGetAllMatches");
131942 +    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
131943 +    ZSTD_updateTree_internal(ms, ip, iHighLimit, matchLengthSearch, dictMode);
131944 +    switch(matchLengthSearch)
131945 +    {
131946 +    case 3 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 3);
131947 +    default :
131948 +    case 4 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 4);
131949 +    case 5 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 5);
131950 +    case 7 :
131951 +    case 6 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 6);
131952 +    }
131953 +}
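/* Note (annotation, not part of the upstream file): the switch pins `mls`
 * to a compile-time constant for each call site, so FORCE_INLINE_TEMPLATE
 * lets the compiler specialize ZSTD_insertBtAndGetAllMatches() per
 * minMatch value; 7 deliberately falls through to the 6-byte variant, and
 * any unexpected value uses 4 via the default label. */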
131955 +/*************************
131956 +*  LDM helper functions  *
131957 +*************************/
131959 +/* Struct containing info needed to make decision about ldm inclusion */
131960 +typedef struct {
131961 +    rawSeqStore_t seqStore;         /* External match candidates store for this block */
131962 +    U32 startPosInBlock;            /* Start position of the current match candidate */
131963 +    U32 endPosInBlock;              /* End position of the current match candidate */
131964 +    U32 offset;                     /* Offset of the match candidate */
131965 +} ZSTD_optLdm_t;
131967 +/* ZSTD_optLdm_skipRawSeqStoreBytes():
131968 + * Moves forward in rawSeqStore by nbBytes, which will update the fields 'pos' and 'posInSequence'.
131969 + */
131970 +static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
131971 +    U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
131972 +    while (currPos && rawSeqStore->pos < rawSeqStore->size) {
131973 +        rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
131974 +        if (currPos >= currSeq.litLength + currSeq.matchLength) {
131975 +            currPos -= currSeq.litLength + currSeq.matchLength;
131976 +            rawSeqStore->pos++;
131977 +        } else {
131978 +            rawSeqStore->posInSequence = currPos;
131979 +            break;
131980 +        }
131981 +    }
131982 +    if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
131983 +        rawSeqStore->posInSequence = 0;
131984 +    }
131985 +}
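/* Worked example (annotation, not part of the upstream file): with a
 * single sequence {litLength=5, matchLength=10} and posInSequence=0,
 * skipping 7 bytes leaves pos unchanged and sets posInSequence=7 (two
 * bytes into the match); skipping 20 bytes consumes the whole sequence
 * (pos++), and since the store is then exhausted, posInSequence resets
 * to 0. */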
131987 +/* ZSTD_opt_getNextMatchAndUpdateSeqStore():
131988 + * Calculates the beginning and end of the next match in the current block.
131989 + * Updates 'pos' and 'posInSequence' of the ldmSeqStore.
131990 + */
131991 +static void ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock,
131992 +                                                   U32 blockBytesRemaining) {
131993 +    rawSeq currSeq;
131994 +    U32 currBlockEndPos;
131995 +    U32 literalsBytesRemaining;
131996 +    U32 matchBytesRemaining;
131998 +    /* Setting match end position to MAX to ensure we never use an LDM during this block */
131999 +    if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
132000 +        optLdm->startPosInBlock = UINT_MAX;
132001 +        optLdm->endPosInBlock = UINT_MAX;
132002 +        return;
132003 +    }
132004 +    /* Calculate appropriate bytes left in matchLength and litLength after adjusting
132005 +       based on ldmSeqStore->posInSequence */
132006 +    currSeq = optLdm->seqStore.seq[optLdm->seqStore.pos];
132007 +    assert(optLdm->seqStore.posInSequence <= currSeq.litLength + currSeq.matchLength);
132008 +    currBlockEndPos = currPosInBlock + blockBytesRemaining;
132009 +    literalsBytesRemaining = (optLdm->seqStore.posInSequence < currSeq.litLength) ?
132010 +            currSeq.litLength - (U32)optLdm->seqStore.posInSequence :
132011 +            0;
132012 +    matchBytesRemaining = (literalsBytesRemaining == 0) ?
132013 +            currSeq.matchLength - ((U32)optLdm->seqStore.posInSequence - currSeq.litLength) :
132014 +            currSeq.matchLength;
132016 +    /* If there are more literal bytes than bytes remaining in block, no ldm is possible */
132017 +    if (literalsBytesRemaining >= blockBytesRemaining) {
132018 +        optLdm->startPosInBlock = UINT_MAX;
132019 +        optLdm->endPosInBlock = UINT_MAX;
132020 +        ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, blockBytesRemaining);
132021 +        return;
132022 +    }
132024 +    /* Matches may be < MINMATCH by this process. In that case, we will reject them
132025 +       when we are deciding whether or not to add the ldm */
132026 +    optLdm->startPosInBlock = currPosInBlock + literalsBytesRemaining;
132027 +    optLdm->endPosInBlock = optLdm->startPosInBlock + matchBytesRemaining;
132028 +    optLdm->offset = currSeq.offset;
132030 +    if (optLdm->endPosInBlock > currBlockEndPos) {
132031 +        /* Match ends after the block ends, we can't use the whole match */
132032 +        optLdm->endPosInBlock = currBlockEndPos;
132033 +        ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, currBlockEndPos - currPosInBlock);
132034 +    } else {
132035 +        /* Consume as many bytes as remain in the current sequence */
132036 +        ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, literalsBytesRemaining + matchBytesRemaining);
132037 +    }
132038 +}
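For illustration, the literal/match split computed above can be isolated as a tiny helper; splitRemaining is a hypothetical name, and the logic mirrors the two ternary expressions in the function.

#include <assert.h>

/* Given posInSeq within (litLen + matchLen), report how many literal
 * and how many match bytes of the sequence remain unconsumed. */
static void splitRemaining(unsigned posInSeq, unsigned litLen, unsigned matchLen,
                           unsigned* litRem, unsigned* matchRem)
{
    assert(posInSeq <= litLen + matchLen);
    *litRem   = (posInSeq < litLen) ? litLen - posInSeq : 0;
    *matchRem = (*litRem == 0) ? matchLen - (posInSeq - litLen) : matchLen;
}

/* e.g. posInSeq=7, litLen=5, matchLen=20 -> litRem=0, matchRem=18 */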
132040 +/* ZSTD_optLdm_maybeAddMatch():
132041 + * Adds a match if it's long enough, based on its 'matchStartPosInBlock'
132042 + * and 'matchEndPosInBlock', into 'matches'. Maintains the correct ordering of 'matches'
132043 + */
132044 +static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
132045 +                                      ZSTD_optLdm_t* optLdm, U32 currPosInBlock) {
132046 +    U32 posDiff = currPosInBlock - optLdm->startPosInBlock;
132047 +    /* Note: ZSTD_match_t actually contains offCode and matchLength (before subtracting MINMATCH) */
132048 +    U32 candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff;
132049 +    U32 candidateOffCode = optLdm->offset + ZSTD_REP_MOVE;
132051 +    /* Ensure that current block position is not outside of the match */
132052 +    if (currPosInBlock < optLdm->startPosInBlock
132053 +      || currPosInBlock >= optLdm->endPosInBlock
132054 +      || candidateMatchLength < MINMATCH) {
132055 +        return;
132056 +    }
132058 +    if (*nbMatches == 0 || ((candidateMatchLength > matches[*nbMatches-1].len) && *nbMatches < ZSTD_OPT_NUM)) {
132059 +        DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offCode: %u matchLength %u) at block position=%u",
132060 +                 candidateOffCode, candidateMatchLength, currPosInBlock);
132061 +        matches[*nbMatches].len = candidateMatchLength;
132062 +        matches[*nbMatches].off = candidateOffCode;
132063 +        (*nbMatches)++;
132064 +    }
132065 +}
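The append rule above only needs to compare against the last entry because the match table is kept in increasing-length order; a standalone restatement (Match and maybeAppend are hypothetical names):

typedef struct { unsigned len; unsigned off; } Match;

/* Append the candidate only when the table is empty or the candidate is
 * strictly longer than the current longest entry, so ascending-length
 * order is preserved without a full insertion sort. */
static void maybeAppend(Match* m, unsigned* nb, unsigned maxNb,
                        unsigned len, unsigned off)
{
    if (*nb == 0 || (len > m[*nb - 1].len && *nb < maxNb)) {
        m[*nb].len = len;
        m[*nb].off = off;
        (*nb)++;
    }
}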
132067 +/* ZSTD_optLdm_processMatchCandidate():
132068 + * Wrapper function to update ldm seq store and call ldm functions as necessary.
132069 + */
132070 +static void ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, ZSTD_match_t* matches, U32* nbMatches,
132071 +                                              U32 currPosInBlock, U32 remainingBytes) {
132072 +    if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
132073 +        return;
132074 +    }
132076 +    if (currPosInBlock >= optLdm->endPosInBlock) {
132077 +        if (currPosInBlock > optLdm->endPosInBlock) {
132078 +            /* The position at which ZSTD_optLdm_processMatchCandidate() is called is not necessarily
132079 +             * at the end of a match from the ldm seq store, and will often be some bytes
132080 +             * beyond matchEndPosInBlock. As such, we need to correct for these "overshoots"
132081 +             */
132082 +            U32 posOvershoot = currPosInBlock - optLdm->endPosInBlock;
132083 +            ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, posOvershoot);
132084 +        }
132085 +        ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes);
132086 +    }
132087 +    ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock);
132088 +}
132090 +/*-*******************************
132091 +*  Optimal parser
132092 +*********************************/
132095 +static U32 ZSTD_totalLen(ZSTD_optimal_t sol)
132096 +{
132097 +    return sol.litlen + sol.mlen;
132098 +}
132100 +#if 0 /* debug */
132102 +static void
132103 +listStats(const U32* table, int lastEltID)
132104 +{
132105 +    int const nbElts = lastEltID + 1;
132106 +    int enb;
132107 +    for (enb=0; enb < nbElts; enb++) {
132108 +        (void)table;
132109 +        /* RAWLOG(2, "%3i:%3i,  ", enb, table[enb]); */
132110 +        RAWLOG(2, "%4i,", table[enb]);
132111 +    }
132112 +    RAWLOG(2, " \n");
132113 +}
132115 +#endif
132117 +FORCE_INLINE_TEMPLATE size_t
132118 +ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
132119 +                               seqStore_t* seqStore,
132120 +                               U32 rep[ZSTD_REP_NUM],
132121 +                         const void* src, size_t srcSize,
132122 +                         const int optLevel,
132123 +                         const ZSTD_dictMode_e dictMode)
132124 +{
132125 +    optState_t* const optStatePtr = &ms->opt;
132126 +    const BYTE* const istart = (const BYTE*)src;
132127 +    const BYTE* ip = istart;
132128 +    const BYTE* anchor = istart;
132129 +    const BYTE* const iend = istart + srcSize;
132130 +    const BYTE* const ilimit = iend - 8;
132131 +    const BYTE* const base = ms->window.base;
132132 +    const BYTE* const prefixStart = base + ms->window.dictLimit;
132133 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
132135 +    U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
132136 +    U32 const minMatch = (cParams->minMatch == 3) ? 3 : 4;
132137 +    U32 nextToUpdate3 = ms->nextToUpdate;
132139 +    ZSTD_optimal_t* const opt = optStatePtr->priceTable;
132140 +    ZSTD_match_t* const matches = optStatePtr->matchTable;
132141 +    ZSTD_optimal_t lastSequence;
132142 +    ZSTD_optLdm_t optLdm;
132144 +    optLdm.seqStore = ms->ldmSeqStore ? *ms->ldmSeqStore : kNullRawSeqStore;
132145 +    optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0;
132146 +    ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (U32)(ip-istart), (U32)(iend-ip));
132148 +    /* init */
132149 +    DEBUGLOG(5, "ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u",
132150 +                (U32)(ip - base), ms->window.dictLimit, ms->nextToUpdate);
132151 +    assert(optLevel <= 2);
132152 +    ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize, optLevel);
132153 +    ip += (ip==prefixStart);
132155 +    /* Match Loop */
132156 +    while (ip < ilimit) {
132157 +        U32 cur, last_pos = 0;
132159 +        /* find first match */
132160 +        {   U32 const litlen = (U32)(ip - anchor);
132161 +            U32 const ll0 = !litlen;
132162 +            U32 nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, ip, iend, dictMode, rep, ll0, minMatch);
132163 +            ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
132164 +                                              (U32)(ip-istart), (U32)(iend - ip));
132165 +            if (!nbMatches) { ip++; continue; }
132167 +            /* initialize opt[0] */
132168 +            { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
132169 +            opt[0].mlen = 0;  /* means is_a_literal */
132170 +            opt[0].litlen = litlen;
132171 +            /* We don't need to include the actual price of the literals because
132172 +             * it is static for the duration of the forward pass, and is included
132173 +             * in every price. We include the literal length to avoid negative
132174 +             * prices when we subtract the previous literal length.
132175 +             */
132176 +            opt[0].price = ZSTD_litLengthPrice(litlen, optStatePtr, optLevel);
132178 +            /* large match -> immediate encoding */
132179 +            {   U32 const maxML = matches[nbMatches-1].len;
132180 +                U32 const maxOffset = matches[nbMatches-1].off;
132181 +                DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series",
132182 +                            nbMatches, maxML, maxOffset, (U32)(ip-prefixStart));
132184 +                if (maxML > sufficient_len) {
132185 +                    lastSequence.litlen = litlen;
132186 +                    lastSequence.mlen = maxML;
132187 +                    lastSequence.off = maxOffset;
132188 +                    DEBUGLOG(6, "large match (%u>%u), immediate encoding",
132189 +                                maxML, sufficient_len);
132190 +                    cur = 0;
132191 +                    last_pos = ZSTD_totalLen(lastSequence);
132192 +                    goto _shortestPath;
132193 +            }   }
132195 +            /* set prices for first matches starting position == 0 */
132196 +            {   U32 const literalsPrice = opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
132197 +                U32 pos;
132198 +                U32 matchNb;
132199 +                for (pos = 1; pos < minMatch; pos++) {
132200 +                    opt[pos].price = ZSTD_MAX_PRICE;   /* mlen, litlen and price will be fixed during forward scanning */
132201 +                }
132202 +                for (matchNb = 0; matchNb < nbMatches; matchNb++) {
132203 +                    U32 const offset = matches[matchNb].off;
132204 +                    U32 const end = matches[matchNb].len;
132205 +                    for ( ; pos <= end ; pos++ ) {
132206 +                        U32 const matchPrice = ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel);
132207 +                        U32 const sequencePrice = literalsPrice + matchPrice;
132208 +                        DEBUGLOG(7, "rPos:%u => set initial price : %.2f",
132209 +                                    pos, ZSTD_fCost(sequencePrice));
132210 +                        opt[pos].mlen = pos;
132211 +                        opt[pos].off = offset;
132212 +                        opt[pos].litlen = litlen;
132213 +                        opt[pos].price = sequencePrice;
132214 +                }   }
132215 +                last_pos = pos-1;
132216 +            }
132217 +        }
132219 +        /* check further positions */
132220 +        for (cur = 1; cur <= last_pos; cur++) {
132221 +            const BYTE* const inr = ip + cur;
132222 +            assert(cur < ZSTD_OPT_NUM);
132223 +            DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur)
132225 +            /* Fix current position with one literal if cheaper */
132226 +            {   U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1;
132227 +                int const price = opt[cur-1].price
132228 +                                + ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
132229 +                                + ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
132230 +                                - ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
132231 +                assert(price < 1000000000); /* overflow check */
132232 +                if (price <= opt[cur].price) {
132233 +                    DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
132234 +                                inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
132235 +                                opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]);
132236 +                    opt[cur].mlen = 0;
132237 +                    opt[cur].off = 0;
132238 +                    opt[cur].litlen = litlen;
132239 +                    opt[cur].price = price;
132240 +                } else {
132241 +                    DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)",
132242 +                                inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price),
132243 +                                opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]);
132244 +                }
132245 +            }
132247 +            /* Set the repcodes of the current position. We must do it here
132248 +             * because we rely on the repcodes of the 2nd to last sequence being
132249 +             * correct to set the next chunk's repcodes during the backward
132250 +             * traversal.
132251 +             */
132252 +            ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(repcodes_t));
132253 +            assert(cur >= opt[cur].mlen);
132254 +            if (opt[cur].mlen != 0) {
132255 +                U32 const prev = cur - opt[cur].mlen;
132256 +                repcodes_t newReps = ZSTD_updateRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0);
132257 +                ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t));
132258 +            } else {
132259 +                ZSTD_memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t));
132260 +            }
132262 +            /* last match must start at a minimum distance of 8 from oend */
132263 +            if (inr > ilimit) continue;
132265 +            if (cur == last_pos) break;
132267 +            if ( (optLevel==0) /*static_test*/
132268 +              && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) {
132269 +                DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1);
132270 +                continue;  /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
132271 +            }
132273 +            {   U32 const ll0 = (opt[cur].mlen != 0);
132274 +                U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0;
132275 +                U32 const previousPrice = opt[cur].price;
132276 +                U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
132277 +                U32 nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, inr, iend, dictMode, opt[cur].rep, ll0, minMatch);
132278 +                U32 matchNb;
132280 +                ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
132281 +                                                  (U32)(inr-istart), (U32)(iend-inr));
132283 +                if (!nbMatches) {
132284 +                    DEBUGLOG(7, "rPos:%u : no match found", cur);
132285 +                    continue;
132286 +                }
132288 +                {   U32 const maxML = matches[nbMatches-1].len;
132289 +                    DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u",
132290 +                                inr-istart, cur, nbMatches, maxML);
132292 +                    if ( (maxML > sufficient_len)
132293 +                      || (cur + maxML >= ZSTD_OPT_NUM) ) {
132294 +                        lastSequence.mlen = maxML;
132295 +                        lastSequence.off = matches[nbMatches-1].off;
132296 +                        lastSequence.litlen = litlen;
132297 +                        cur -= (opt[cur].mlen==0) ? opt[cur].litlen : 0;  /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case it's the first sequence, and that's okay */
132298 +                        last_pos = cur + ZSTD_totalLen(lastSequence);
132299 +                        if (cur > ZSTD_OPT_NUM) cur = 0;   /* underflow => first match */
132300 +                        goto _shortestPath;
132301 +                }   }
132303 +                /* set prices using matches found at position == cur */
132304 +                for (matchNb = 0; matchNb < nbMatches; matchNb++) {
132305 +                    U32 const offset = matches[matchNb].off;
132306 +                    U32 const lastML = matches[matchNb].len;
132307 +                    U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
132308 +                    U32 mlen;
132310 +                    DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u",
132311 +                                matchNb, matches[matchNb].off, lastML, litlen);
132313 +                    for (mlen = lastML; mlen >= startML; mlen--) {  /* scan downward */
132314 +                        U32 const pos = cur + mlen;
132315 +                        int const price = basePrice + ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);
132317 +                        if ((pos > last_pos) || (price < opt[pos].price)) {
132318 +                            DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)",
132319 +                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
132320 +                            while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; }   /* fill empty positions */
132321 +                            opt[pos].mlen = mlen;
132322 +                            opt[pos].off = offset;
132323 +                            opt[pos].litlen = litlen;
132324 +                            opt[pos].price = price;
132325 +                        } else {
132326 +                            DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)",
132327 +                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
132328 +                            if (optLevel==0) break;  /* early update abort; gets ~+10% speed for about -0.01 ratio loss */
132329 +                        }
132330 +            }   }   }
132331 +        }  /* for (cur = 1; cur <= last_pos; cur++) */
132333 +        lastSequence = opt[last_pos];
132334 +        cur = last_pos > ZSTD_totalLen(lastSequence) ? last_pos - ZSTD_totalLen(lastSequence) : 0;  /* single sequence, and it starts before `ip` */
132335 +        assert(cur < ZSTD_OPT_NUM);  /* guard against overflow */
132337 +_shortestPath:   /* cur, last_pos, best_mlen, best_off have to be set */
132338 +        assert(opt[0].mlen == 0);
132340 +        /* Set the next chunk's repcodes based on the repcodes of the beginning
132341 +         * of the last match, and the last sequence. This avoids us having to
132342 +         * update them while traversing the sequences.
132343 +         */
132344 +        if (lastSequence.mlen != 0) {
132345 +            repcodes_t reps = ZSTD_updateRep(opt[cur].rep, lastSequence.off, lastSequence.litlen==0);
132346 +            ZSTD_memcpy(rep, &reps, sizeof(reps));
132347 +        } else {
132348 +            ZSTD_memcpy(rep, opt[cur].rep, sizeof(repcodes_t));
132349 +        }
132351 +        {   U32 const storeEnd = cur + 1;
132352 +            U32 storeStart = storeEnd;
132353 +            U32 seqPos = cur;
132355 +            DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)",
132356 +                        last_pos, cur); (void)last_pos;
132357 +            assert(storeEnd < ZSTD_OPT_NUM);
132358 +            DEBUGLOG(6, "last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
132359 +                        storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off);
132360 +            opt[storeEnd] = lastSequence;
132361 +            while (seqPos > 0) {
132362 +                U32 const backDist = ZSTD_totalLen(opt[seqPos]);
132363 +                storeStart--;
132364 +                DEBUGLOG(6, "sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
132365 +                            seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off);
132366 +                opt[storeStart] = opt[seqPos];
132367 +                seqPos = (seqPos > backDist) ? seqPos - backDist : 0;
132368 +            }
132370 +            /* save sequences */
132371 +            DEBUGLOG(6, "sending selected sequences into seqStore")
132372 +            {   U32 storePos;
132373 +                for (storePos=storeStart; storePos <= storeEnd; storePos++) {
132374 +                    U32 const llen = opt[storePos].litlen;
132375 +                    U32 const mlen = opt[storePos].mlen;
132376 +                    U32 const offCode = opt[storePos].off;
132377 +                    U32 const advance = llen + mlen;
132378 +                    DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u",
132379 +                                anchor - istart, (unsigned)llen, (unsigned)mlen);
132381 +                    if (mlen==0) {  /* only literals => must be last "sequence", actually starting a new stream of sequences */
132382 +                        assert(storePos == storeEnd);   /* must be last sequence */
132383 +                        ip = anchor + llen;     /* last "sequence" is a bunch of literals => don't progress anchor */
132384 +                        continue;   /* will finish */
132385 +                    }
132387 +                    assert(anchor + llen <= iend);
132388 +                    ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
132389 +                    ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen-MINMATCH);
132390 +                    anchor += advance;
132391 +                    ip = anchor;
132392 +            }   }
132393 +            ZSTD_setBasePrices(optStatePtr, optLevel);
132394 +        }
132395 +    }   /* while (ip < ilimit) */
132397 +    /* Return the last literals size */
132398 +    return (size_t)(iend - anchor);
132399 +}
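To see how the reverse traversal in the function above walks the price table, here is a toy standalone version: each table entry covers litlen+mlen positions, so the previous sequence ends that many positions earlier. Step is a hypothetical name.

#include <stdio.h>

typedef struct { unsigned litlen, mlen; } Step;

int main(void)
{
    Step opt[16] = {{0, 0}};
    opt[12] = (Step){2, 4};   /* a sequence ending at rPos 12 */
    opt[6]  = (Step){1, 5};   /* a sequence ending at rPos 6  */
    unsigned pos = 12;
    while (pos > 0) {
        unsigned const back = opt[pos].litlen + opt[pos].mlen;
        printf("seq ends at rPos %u (llen=%u, mlen=%u)\n",
               pos, opt[pos].litlen, opt[pos].mlen);
        pos = (pos > back) ? pos - back : 0;   /* step back one sequence */
    }
    return 0;
}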
132402 +size_t ZSTD_compressBlock_btopt(
132403 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
132404 +        const void* src, size_t srcSize)
132405 +{
132406 +    DEBUGLOG(5, "ZSTD_compressBlock_btopt");
132407 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_noDict);
132408 +}
132411 +/* used in 2-pass strategy */
132412 +static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus)
132413 +{
132414 +    U32 s, sum=0;
132415 +    assert(ZSTD_FREQ_DIV+bonus >= 0);
132416 +    for (s=0; s<lastEltIndex+1; s++) {
132417 +        table[s] <<= ZSTD_FREQ_DIV+bonus;
132418 +        table[s]--;
132419 +        sum += table[s];
132420 +    }
132421 +    return sum;
132422 +}
132424 +/* used in 2-pass strategy */
132425 +MEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr)
132426 +{
132427 +    if (ZSTD_compressedLiterals(optPtr))
132428 +        optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0);
132429 +    optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0);
132430 +    optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0);
132431 +    optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 0);
132432 +}
132434 +/* ZSTD_initStats_ultra():
132435 + * make a first compression pass, just to seed stats with more accurate starting values.
132436 + * only works on first block, with no dictionary and no ldm.
132437 + * this function cannot error, hence its contract must be respected.
132438 + */
132439 +static void
132440 +ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
132441 +                     seqStore_t* seqStore,
132442 +                     U32 rep[ZSTD_REP_NUM],
132443 +               const void* src, size_t srcSize)
132444 +{
132445 +    U32 tmpRep[ZSTD_REP_NUM];  /* updated rep codes will sink here */
132446 +    ZSTD_memcpy(tmpRep, rep, sizeof(tmpRep));
132448 +    DEBUGLOG(4, "ZSTD_initStats_ultra (srcSize=%zu)", srcSize);
132449 +    assert(ms->opt.litLengthSum == 0);    /* first block */
132450 +    assert(seqStore->sequences == seqStore->sequencesStart);   /* no ldm */
132451 +    assert(ms->window.dictLimit == ms->window.lowLimit);   /* no dictionary */
132452 +    assert(ms->window.dictLimit - ms->nextToUpdate <= 1);  /* no prefix (note: intentional overflow, defined as two's complement) */
132454 +    ZSTD_compressBlock_opt_generic(ms, seqStore, tmpRep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);   /* generate stats into ms->opt*/
132456 +    /* invalidate first scan from history */
132457 +    ZSTD_resetSeqStore(seqStore);
132458 +    ms->window.base -= srcSize;
132459 +    ms->window.dictLimit += (U32)srcSize;
132460 +    ms->window.lowLimit = ms->window.dictLimit;
132461 +    ms->nextToUpdate = ms->window.dictLimit;
132463 +    /* reinforce the weight of the collected statistics */
132464 +    ZSTD_upscaleStats(&ms->opt);
132465 +}
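The window rewind above deserves a worked example: shifting base down by srcSize while raising dictLimit by the same amount keeps every index (ptr - base) stable, yet places the first-pass bytes below the valid window. A standalone sketch under that assumption:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int main(void)
{
    char buf[1000];
    size_t const srcSize = sizeof(buf);
    uintptr_t base = (uintptr_t)buf;   /* window base as a plain address */
    unsigned dictLimit = 0;

    base      -= srcSize;              /* rewind base by one block... */
    dictLimit += (unsigned)srcSize;    /* ...and raise dictLimit to match */

    /* The block's first byte keeps a stable index, but that index now
     * equals dictLimit, so the first-pass data is out-of-window and the
     * second pass starts with clean history. */
    assert((uintptr_t)buf - base == dictLimit);
    return 0;
}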
132467 +size_t ZSTD_compressBlock_btultra(
132468 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
132469 +        const void* src, size_t srcSize)
132470 +{
132471 +    DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize);
132472 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
132473 +}
132475 +size_t ZSTD_compressBlock_btultra2(
132476 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
132477 +        const void* src, size_t srcSize)
132478 +{
132479 +    U32 const curr = (U32)((const BYTE*)src - ms->window.base);
132480 +    DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize);
132482 +    /* 2-pass strategy:
132483 +     * this strategy makes a first pass over first block to collect statistics
132484 +     * and seed next round's statistics with it.
132485 +     * After 1st pass, function forgets everything, and starts a new block.
132486 +     * Consequently, this can only work if no data has been previously loaded in tables,
132487 +     * aka, no dictionary, no prefix, no ldm preprocessing.
132488 +     * The compression ratio gain is generally small (~0.5% on first block),
132489 +     * the cost is 2x cpu time on first block. */
132490 +    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
132491 +    if ( (ms->opt.litLengthSum==0)   /* first block */
132492 +      && (seqStore->sequences == seqStore->sequencesStart)  /* no ldm */
132493 +      && (ms->window.dictLimit == ms->window.lowLimit)   /* no dictionary */
132494 +      && (curr == ms->window.dictLimit)   /* start of frame, nothing already loaded nor skipped */
132495 +      && (srcSize > ZSTD_PREDEF_THRESHOLD)
132496 +      ) {
132497 +        ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize);
132498 +    }
132500 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
132501 +}
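The guard in front of ZSTD_initStats_ultra can be restated as one predicate; canSeedStats is a hypothetical helper that mirrors the five conditions above:

#include <stddef.h>

static int canSeedStats(unsigned litLengthSum, int seqStoreEmpty,
                        unsigned dictLimit, unsigned lowLimit,
                        unsigned curr, size_t srcSize, size_t threshold)
{
    return (litLengthSum == 0)        /* first block             */
        && seqStoreEmpty              /* no ldm output yet       */
        && (dictLimit == lowLimit)    /* no dictionary           */
        && (curr == dictLimit)        /* start of frame          */
        && (srcSize > threshold);     /* big enough to pay off   */
}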
132503 +size_t ZSTD_compressBlock_btopt_dictMatchState(
132504 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
132505 +        const void* src, size_t srcSize)
132506 +{
132507 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_dictMatchState);
132508 +}
132510 +size_t ZSTD_compressBlock_btultra_dictMatchState(
132511 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
132512 +        const void* src, size_t srcSize)
132513 +{
132514 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_dictMatchState);
132515 +}
132517 +size_t ZSTD_compressBlock_btopt_extDict(
132518 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
132519 +        const void* src, size_t srcSize)
132520 +{
132521 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_extDict);
132522 +}
132524 +size_t ZSTD_compressBlock_btultra_extDict(
132525 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
132526 +        const void* src, size_t srcSize)
132527 +{
132528 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_extDict);
132529 +}
132531 +/* note : no btultra2 variant for extDict nor dictMatchState,
132532 + * because btultra2 is not meant to work with dictionaries
132533 + * and is specific to the first block (no prefix) */
132534 diff --git a/lib/zstd/compress/zstd_opt.h b/lib/zstd/compress/zstd_opt.h
132535 new file mode 100644
132536 index 000000000000..22b862858ba7
132537 --- /dev/null
132538 +++ b/lib/zstd/compress/zstd_opt.h
132539 @@ -0,0 +1,50 @@
132540 +/*
132541 + * Copyright (c) Yann Collet, Facebook, Inc.
132542 + * All rights reserved.
132544 + * This source code is licensed under both the BSD-style license (found in the
132545 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
132546 + * in the COPYING file in the root directory of this source tree).
132547 + * You may select, at your option, one of the above-listed licenses.
132548 + */
132550 +#ifndef ZSTD_OPT_H
132551 +#define ZSTD_OPT_H
132554 +#include "zstd_compress_internal.h"
132556 +/* used in ZSTD_loadDictionaryContent() */
132557 +void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend);
132559 +size_t ZSTD_compressBlock_btopt(
132560 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
132561 +        void const* src, size_t srcSize);
132562 +size_t ZSTD_compressBlock_btultra(
132563 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
132564 +        void const* src, size_t srcSize);
132565 +size_t ZSTD_compressBlock_btultra2(
132566 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
132567 +        void const* src, size_t srcSize);
132570 +size_t ZSTD_compressBlock_btopt_dictMatchState(
132571 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
132572 +        void const* src, size_t srcSize);
132573 +size_t ZSTD_compressBlock_btultra_dictMatchState(
132574 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
132575 +        void const* src, size_t srcSize);
132577 +size_t ZSTD_compressBlock_btopt_extDict(
132578 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
132579 +        void const* src, size_t srcSize);
132580 +size_t ZSTD_compressBlock_btultra_extDict(
132581 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
132582 +        void const* src, size_t srcSize);
132584 +        /* note : no btultra2 variant for extDict nor dictMatchState,
132585 +         * because btultra2 is not meant to work with dictionaries
132586 +         * and is specific to the first block (no prefix) */
132589 +#endif /* ZSTD_OPT_H */
132590 diff --git a/lib/zstd/decompress.c b/lib/zstd/decompress.c
132591 deleted file mode 100644
132592 index 66cd487a326a..000000000000
132593 --- a/lib/zstd/decompress.c
132594 +++ /dev/null
132595 @@ -1,2531 +0,0 @@
132596 -/*
132597 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
132598 - * All rights reserved.
132600 - * This source code is licensed under the BSD-style license found in the
132601 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
132602 - * An additional grant of patent rights can be found in the PATENTS file in the
132603 - * same directory.
132605 - * This program is free software; you can redistribute it and/or modify it under
132606 - * the terms of the GNU General Public License version 2 as published by the
132607 - * Free Software Foundation. This program is dual-licensed; you may select
132608 - * either version 2 of the GNU General Public License ("GPL") or BSD license
132609 - * ("BSD").
132610 - */
132612 -/* ***************************************************************
132613 -*  Tuning parameters
132614 -*****************************************************************/
132615 -/*
132616 -*  MAXWINDOWSIZE_DEFAULT :
132617 -*  maximum window size accepted by DStream, by default.
132618 -*  Frames requiring more memory will be rejected.
132619 -*/
132620 -#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
132621 -#define ZSTD_MAXWINDOWSIZE_DEFAULT ((1 << ZSTD_WINDOWLOG_MAX) + 1) /* defined within zstd.h */
132622 -#endif
132624 -/*-*******************************************************
132625 -*  Dependencies
132626 -*********************************************************/
132627 -#include "fse.h"
132628 -#include "huf.h"
132629 -#include "mem.h" /* low level memory routines */
132630 -#include "zstd_internal.h"
132631 -#include <linux/kernel.h>
132632 -#include <linux/module.h>
132633 -#include <linux/string.h> /* memcpy, memmove, memset */
132635 -#define ZSTD_PREFETCH(ptr) __builtin_prefetch(ptr, 0, 0)
132637 -/*-*************************************
132638 -*  Macros
132639 -***************************************/
132640 -#define ZSTD_isError ERR_isError /* for inlining */
132641 -#define FSE_isError ERR_isError
132642 -#define HUF_isError ERR_isError
132644 -/*_*******************************************************
132645 -*  Memory operations
132646 -**********************************************************/
132647 -static void ZSTD_copy4(void *dst, const void *src) { memcpy(dst, src, 4); }
132649 -/*-*************************************************************
132650 -*   Context management
132651 -***************************************************************/
132652 -typedef enum {
132653 -       ZSTDds_getFrameHeaderSize,
132654 -       ZSTDds_decodeFrameHeader,
132655 -       ZSTDds_decodeBlockHeader,
132656 -       ZSTDds_decompressBlock,
132657 -       ZSTDds_decompressLastBlock,
132658 -       ZSTDds_checkChecksum,
132659 -       ZSTDds_decodeSkippableHeader,
132660 -       ZSTDds_skipFrame
132661 -} ZSTD_dStage;
132663 -typedef struct {
132664 -       FSE_DTable LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
132665 -       FSE_DTable OFTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
132666 -       FSE_DTable MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
132667 -       HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */
132668 -       U64 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32 / 2];
132669 -       U32 rep[ZSTD_REP_NUM];
132670 -} ZSTD_entropyTables_t;
132672 -struct ZSTD_DCtx_s {
132673 -       const FSE_DTable *LLTptr;
132674 -       const FSE_DTable *MLTptr;
132675 -       const FSE_DTable *OFTptr;
132676 -       const HUF_DTable *HUFptr;
132677 -       ZSTD_entropyTables_t entropy;
132678 -       const void *previousDstEnd; /* detect continuity */
132679 -       const void *base;          /* start of curr segment */
132680 -       const void *vBase;        /* virtual start of previous segment if it was just before curr one */
132681 -       const void *dictEnd;    /* end of previous segment */
132682 -       size_t expected;
132683 -       ZSTD_frameParams fParams;
132684 -       blockType_e bType; /* used in ZSTD_decompressContinue(), to transfer blockType between header decoding and block decoding stages */
132685 -       ZSTD_dStage stage;
132686 -       U32 litEntropy;
132687 -       U32 fseEntropy;
132688 -       struct xxh64_state xxhState;
132689 -       size_t headerSize;
132690 -       U32 dictID;
132691 -       const BYTE *litPtr;
132692 -       ZSTD_customMem customMem;
132693 -       size_t litSize;
132694 -       size_t rleSize;
132695 -       BYTE litBuffer[ZSTD_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH];
132696 -       BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
132697 -}; /* typedef'd to ZSTD_DCtx within "zstd.h" */
132699 -size_t ZSTD_DCtxWorkspaceBound(void) { return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_DCtx)); }
132701 -size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx)
132702 -{
132703 -       dctx->expected = ZSTD_frameHeaderSize_prefix;
132704 -       dctx->stage = ZSTDds_getFrameHeaderSize;
132705 -       dctx->previousDstEnd = NULL;
132706 -       dctx->base = NULL;
132707 -       dctx->vBase = NULL;
132708 -       dctx->dictEnd = NULL;
132709 -       dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
132710 -       dctx->litEntropy = dctx->fseEntropy = 0;
132711 -       dctx->dictID = 0;
132712 -       ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
132713 -       memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */
132714 -       dctx->LLTptr = dctx->entropy.LLTable;
132715 -       dctx->MLTptr = dctx->entropy.MLTable;
132716 -       dctx->OFTptr = dctx->entropy.OFTable;
132717 -       dctx->HUFptr = dctx->entropy.hufTable;
132718 -       return 0;
132719 -}
132721 -ZSTD_DCtx *ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
132722 -{
132723 -       ZSTD_DCtx *dctx;
132725 -       if (!customMem.customAlloc || !customMem.customFree)
132726 -               return NULL;
132728 -       dctx = (ZSTD_DCtx *)ZSTD_malloc(sizeof(ZSTD_DCtx), customMem);
132729 -       if (!dctx)
132730 -               return NULL;
132731 -       memcpy(&dctx->customMem, &customMem, sizeof(customMem));
132732 -       ZSTD_decompressBegin(dctx);
132733 -       return dctx;
132734 -}
132736 -ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize)
132737 -{
132738 -       ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
132739 -       return ZSTD_createDCtx_advanced(stackMem);
132740 -}
132742 -size_t ZSTD_freeDCtx(ZSTD_DCtx *dctx)
132743 -{
132744 -       if (dctx == NULL)
132745 -               return 0; /* support free on NULL */
132746 -       ZSTD_free(dctx, dctx->customMem);
132747 -       return 0; /* reserved as a potential error code in the future */
132748 -}
132750 -void ZSTD_copyDCtx(ZSTD_DCtx *dstDCtx, const ZSTD_DCtx *srcDCtx)
132751 -{
132752 -       size_t const workSpaceSize = (ZSTD_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH) + ZSTD_frameHeaderSize_max;
132753 -       memcpy(dstDCtx, srcDCtx, sizeof(ZSTD_DCtx) - workSpaceSize); /* no need to copy workspace */
132754 -}
132756 -static void ZSTD_refDDict(ZSTD_DCtx *dstDCtx, const ZSTD_DDict *ddict);
132758 -/*-*************************************************************
132759 -*   Decompression section
132760 -***************************************************************/
132762 -/*! ZSTD_isFrame() :
132763 - *  Tells if the content of `buffer` starts with a valid Frame Identifier.
132764 - *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
132765 - *  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
132766 - *  Note 3 : Skippable Frame Identifiers are considered valid. */
132767 -unsigned ZSTD_isFrame(const void *buffer, size_t size)
132768 -{
132769 -       if (size < 4)
132770 -               return 0;
132771 -       {
132772 -               U32 const magic = ZSTD_readLE32(buffer);
132773 -               if (magic == ZSTD_MAGICNUMBER)
132774 -                       return 1;
132775 -               if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START)
132776 -                       return 1;
132777 -       }
132778 -       return 0;
132779 -}
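The check above reduces to two constants from the zstd frame format: the frame magic 0xFD2FB528 and the skippable-frame range 0x184D2A50..0x184D2A5F. A standalone restatement (isZstdFrame is a hypothetical name, and the memcpy read assumes a little-endian host):

#include <stdint.h>
#include <string.h>

static unsigned isZstdFrame(const void* buf, size_t size)
{
    uint32_t magic;
    if (size < 4) return 0;
    memcpy(&magic, buf, 4);                       /* LE read on an LE host */
    if (magic == 0xFD2FB528U) return 1;           /* regular frame */
    return (magic & 0xFFFFFFF0U) == 0x184D2A50U;  /* skippable frame */
}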
132781 -/** ZSTD_frameHeaderSize() :
132782 -*   srcSize must be >= ZSTD_frameHeaderSize_prefix.
132783 -*   @return : size of the Frame Header */
132784 -static size_t ZSTD_frameHeaderSize(const void *src, size_t srcSize)
132785 -{
132786 -       if (srcSize < ZSTD_frameHeaderSize_prefix)
132787 -               return ERROR(srcSize_wrong);
132788 -       {
132789 -               BYTE const fhd = ((const BYTE *)src)[4];
132790 -               U32 const dictID = fhd & 3;
132791 -               U32 const singleSegment = (fhd >> 5) & 1;
132792 -               U32 const fcsId = fhd >> 6;
132793 -               return ZSTD_frameHeaderSize_prefix + !singleSegment + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId] + (singleSegment && !fcsId);
132794 -       }
132795 -}
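A worked example of the size formula above, assuming the frame-format field-size tables (dictID field: 0/1/2/4 bytes; frame-content-size field: 0/2/4/8 bytes) and the 5-byte magic-plus-descriptor prefix; headerSize is a hypothetical name:

static const unsigned didSize[4] = {0, 1, 2, 4};
static const unsigned fcsSize[4] = {0, 2, 4, 8};

static unsigned headerSize(unsigned char fhd)
{
    unsigned const dictID        = fhd & 3;
    unsigned const singleSegment = (fhd >> 5) & 1;
    unsigned const fcsId         = fhd >> 6;
    return 5 + !singleSegment + didSize[dictID] + fcsSize[fcsId]
             + (singleSegment && !fcsId);
}

/* e.g. fhd=0x20 (single segment, no dictID, fcsId=0) -> 5+0+0+0+1 = 6 */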
132797 -/** ZSTD_getFrameParams() :
132798 -*   decode Frame Header, or require larger `srcSize`.
132799 -*   @return : 0, `fparamsPtr` is correctly filled,
132800 -*            >0, `srcSize` is too small, result is expected `srcSize`,
132801 -*             or an error code, which can be tested using ZSTD_isError() */
132802 -size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src, size_t srcSize)
132803 -{
132804 -       const BYTE *ip = (const BYTE *)src;
132806 -       if (srcSize < ZSTD_frameHeaderSize_prefix)
132807 -               return ZSTD_frameHeaderSize_prefix;
132808 -       if (ZSTD_readLE32(src) != ZSTD_MAGICNUMBER) {
132809 -               if ((ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
132810 -                       if (srcSize < ZSTD_skippableHeaderSize)
132811 -                               return ZSTD_skippableHeaderSize; /* magic number + skippable frame length */
132812 -                       memset(fparamsPtr, 0, sizeof(*fparamsPtr));
132813 -                       fparamsPtr->frameContentSize = ZSTD_readLE32((const char *)src + 4);
132814 -                       fparamsPtr->windowSize = 0; /* windowSize==0 means a frame is skippable */
132815 -                       return 0;
132816 -               }
132817 -               return ERROR(prefix_unknown);
132818 -       }
132820 -       /* ensure there is enough `srcSize` to fully read/decode frame header */
132821 -       {
132822 -               size_t const fhsize = ZSTD_frameHeaderSize(src, srcSize);
132823 -               if (srcSize < fhsize)
132824 -                       return fhsize;
132825 -       }
132827 -       {
132828 -               BYTE const fhdByte = ip[4];
132829 -               size_t pos = 5;
132830 -               U32 const dictIDSizeCode = fhdByte & 3;
132831 -               U32 const checksumFlag = (fhdByte >> 2) & 1;
132832 -               U32 const singleSegment = (fhdByte >> 5) & 1;
132833 -               U32 const fcsID = fhdByte >> 6;
132834 -               U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX;
132835 -               U32 windowSize = 0;
132836 -               U32 dictID = 0;
132837 -               U64 frameContentSize = 0;
132838 -               if ((fhdByte & 0x08) != 0)
132839 -                       return ERROR(frameParameter_unsupported); /* reserved bits, which must be zero */
132840 -               if (!singleSegment) {
132841 -                       BYTE const wlByte = ip[pos++];
132842 -                       U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
132843 -                       if (windowLog > ZSTD_WINDOWLOG_MAX)
132844 -                               return ERROR(frameParameter_windowTooLarge); /* avoids issue with 1 << windowLog */
132845 -                       windowSize = (1U << windowLog);
132846 -                       windowSize += (windowSize >> 3) * (wlByte & 7);
132847 -               }
132849 -               switch (dictIDSizeCode) {
132850 -               default: /* impossible */
132851 -               case 0: break;
132852 -               case 1:
132853 -                       dictID = ip[pos];
132854 -                       pos++;
132855 -                       break;
132856 -               case 2:
132857 -                       dictID = ZSTD_readLE16(ip + pos);
132858 -                       pos += 2;
132859 -                       break;
132860 -               case 3:
132861 -                       dictID = ZSTD_readLE32(ip + pos);
132862 -                       pos += 4;
132863 -                       break;
132864 -               }
132865 -               switch (fcsID) {
132866 -               default: /* impossible */
132867 -               case 0:
132868 -                       if (singleSegment)
132869 -                               frameContentSize = ip[pos];
132870 -                       break;
132871 -               case 1: frameContentSize = ZSTD_readLE16(ip + pos) + 256; break;
132872 -               case 2: frameContentSize = ZSTD_readLE32(ip + pos); break;
132873 -               case 3: frameContentSize = ZSTD_readLE64(ip + pos); break;
132874 -               }
132875 -               if (!windowSize)
132876 -                       windowSize = (U32)frameContentSize;
132877 -               if (windowSize > windowSizeMax)
132878 -                       return ERROR(frameParameter_windowTooLarge);
132879 -               fparamsPtr->frameContentSize = frameContentSize;
132880 -               fparamsPtr->windowSize = windowSize;
132881 -               fparamsPtr->dictID = dictID;
132882 -               fparamsPtr->checksumFlag = checksumFlag;
132883 -       }
132884 -       return 0;
132885 -}
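The window-descriptor decoding buried in the function above is worth isolating: the top 5 bits give windowLog, and each of the low 3 bits adds one eighth of the base size. windowSizeFromByte is a hypothetical name; 10 in the example stands in for ZSTD_WINDOWLOG_ABSOLUTEMIN.

static unsigned long long windowSizeFromByte(unsigned char wlByte,
                                             unsigned absoluteMin)
{
    unsigned const windowLog = (wlByte >> 3) + absoluteMin;
    unsigned long long windowSize = 1ULL << windowLog;
    windowSize += (windowSize >> 3) * (wlByte & 7);   /* add eighths */
    return windowSize;
}

/* e.g. windowSizeFromByte(0x09, 10): windowLog=11, size=2048+256 = 2304 */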
132887 -/** ZSTD_getFrameContentSize() :
132888 -*   compatible with legacy mode
132889 -*   @return : decompressed size of the single frame pointed to by `src` if known, otherwise
132890 -*             - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
132891 -*             - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
132892 -unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
132893 -{
132894 -       {
132895 -               ZSTD_frameParams fParams;
132896 -               if (ZSTD_getFrameParams(&fParams, src, srcSize) != 0)
132897 -                       return ZSTD_CONTENTSIZE_ERROR;
132898 -               if (fParams.windowSize == 0) {
132899 -                       /* Either skippable or empty frame, size == 0 either way */
132900 -                       return 0;
132901 -               } else if (fParams.frameContentSize != 0) {
132902 -                       return fParams.frameContentSize;
132903 -               } else {
132904 -                       return ZSTD_CONTENTSIZE_UNKNOWN;
132905 -               }
132906 -       }
132907 -}
132909 -/** ZSTD_findDecompressedSize() :
132910 - *  compatible with legacy mode
132911 - *  `srcSize` must be the exact length of some number of ZSTD compressed and/or
132912 - *      skippable frames
132913 - *  @return : decompressed size of the frames contained */
132914 -unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize)
132915 -{
132916 -       {
132917 -               unsigned long long totalDstSize = 0;
132918 -               while (srcSize >= ZSTD_frameHeaderSize_prefix) {
132919 -                       const U32 magicNumber = ZSTD_readLE32(src);
132921 -                       if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
132922 -                               size_t skippableSize;
132923 -                               if (srcSize < ZSTD_skippableHeaderSize)
132924 -                                       return ERROR(srcSize_wrong);
132925 -                               skippableSize = ZSTD_readLE32((const BYTE *)src + 4) + ZSTD_skippableHeaderSize;
132926 -                               if (srcSize < skippableSize) {
132927 -                                       return ZSTD_CONTENTSIZE_ERROR;
132928 -                               }
132930 -                               src = (const BYTE *)src + skippableSize;
132931 -                               srcSize -= skippableSize;
132932 -                               continue;
132933 -                       }
132935 -                       {
132936 -                               unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
132937 -                               if (ret >= ZSTD_CONTENTSIZE_ERROR)
132938 -                                       return ret;
132940 -                               /* check for overflow */
132941 -                               if (totalDstSize + ret < totalDstSize)
132942 -                                       return ZSTD_CONTENTSIZE_ERROR;
132943 -                               totalDstSize += ret;
132944 -                       }
132945 -                       {
132946 -                               size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
132947 -                               if (ZSTD_isError(frameSrcSize)) {
132948 -                                       return ZSTD_CONTENTSIZE_ERROR;
132949 -                               }
132951 -                               src = (const BYTE *)src + frameSrcSize;
132952 -                               srcSize -= frameSrcSize;
132953 -                       }
132954 -               }
132956 -               if (srcSize) {
132957 -                       return ZSTD_CONTENTSIZE_ERROR;
132958 -               }
132960 -               return totalDstSize;
132961 -       }
132962 -}
132964 -/** ZSTD_decodeFrameHeader() :
132965 -*   `headerSize` must be the size provided by ZSTD_frameHeaderSize().
132966 -*   @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
132967 -static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx *dctx, const void *src, size_t headerSize)
132968 -{
132969 -       size_t const result = ZSTD_getFrameParams(&(dctx->fParams), src, headerSize);
132970 -       if (ZSTD_isError(result))
132971 -               return result; /* invalid header */
132972 -       if (result > 0)
132973 -               return ERROR(srcSize_wrong); /* headerSize too small */
132974 -       if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID))
132975 -               return ERROR(dictionary_wrong);
132976 -       if (dctx->fParams.checksumFlag)
132977 -               xxh64_reset(&dctx->xxhState, 0);
132978 -       return 0;
132979 -}
132981 -typedef struct {
132982 -       blockType_e blockType;
132983 -       U32 lastBlock;
132984 -       U32 origSize;
132985 -} blockProperties_t;
132987 -/*! ZSTD_getcBlockSize() :
132988 -*   Provides the size of compressed block from block header `src` */
132989 -size_t ZSTD_getcBlockSize(const void *src, size_t srcSize, blockProperties_t *bpPtr)
132990 -{
132991 -       if (srcSize < ZSTD_blockHeaderSize)
132992 -               return ERROR(srcSize_wrong);
132993 -       {
132994 -               U32 const cBlockHeader = ZSTD_readLE24(src);
132995 -               U32 const cSize = cBlockHeader >> 3;
132996 -               bpPtr->lastBlock = cBlockHeader & 1;
132997 -               bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
132998 -               bpPtr->origSize = cSize; /* only useful for RLE */
132999 -               if (bpPtr->blockType == bt_rle)
133000 -                       return 1;
133001 -               if (bpPtr->blockType == bt_reserved)
133002 -                       return ERROR(corruption_detected);
133003 -               return cSize;
133004 -       }
133005 -}
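The 24-bit block header parsed above packs three fields; a standalone restatement (splitBlockHeader is a hypothetical name):

#include <stdint.h>

/* bit 0 = lastBlock, bits 1-2 = blockType, bits 3-23 = size */
static void splitBlockHeader(uint32_t cBlockHeader24,
                             unsigned* lastBlock, unsigned* blockType,
                             uint32_t* cSize)
{
    *lastBlock = cBlockHeader24 & 1;
    *blockType = (cBlockHeader24 >> 1) & 3;
    *cSize     = cBlockHeader24 >> 3;
}

/* e.g. header 0x000021 -> lastBlock=1, blockType=0, cSize=4 */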
133007 -static size_t ZSTD_copyRawBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
133008 -{
133009 -       if (srcSize > dstCapacity)
133010 -               return ERROR(dstSize_tooSmall);
133011 -       memcpy(dst, src, srcSize);
133012 -       return srcSize;
133013 -}
133015 -static size_t ZSTD_setRleBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize, size_t regenSize)
133016 -{
133017 -       if (srcSize != 1)
133018 -               return ERROR(srcSize_wrong);
133019 -       if (regenSize > dstCapacity)
133020 -               return ERROR(dstSize_tooSmall);
133021 -       memset(dst, *(const BYTE *)src, regenSize);
133022 -       return regenSize;
133023 -}
133025 -/*! ZSTD_decodeLiteralsBlock() :
133026 -       @return : nb of bytes read from src (< srcSize ) */
133027 -size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx *dctx, const void *src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
133028 -{
133029 -       if (srcSize < MIN_CBLOCK_SIZE)
133030 -               return ERROR(corruption_detected);
133032 -       {
133033 -               const BYTE *const istart = (const BYTE *)src;
133034 -               symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
133036 -               switch (litEncType) {
133037 -               case set_repeat:
133038 -                       if (dctx->litEntropy == 0)
133039 -                               return ERROR(dictionary_corrupted);
133040 -                       fallthrough;
133041 -               case set_compressed:
133042 -                       if (srcSize < 5)
133043 -                               return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
133044 -                       {
133045 -                               size_t lhSize, litSize, litCSize;
133046 -                               U32 singleStream = 0;
133047 -                               U32 const lhlCode = (istart[0] >> 2) & 3;
133048 -                               U32 const lhc = ZSTD_readLE32(istart);
133049 -                               switch (lhlCode) {
133050 -                               case 0:
133051 -                               case 1:
133052 -                               default: /* note : default is impossible, since lhlCode is in [0..3] */
133053 -                                       /* 2 - 2 - 10 - 10 */
133054 -                                       singleStream = !lhlCode;
133055 -                                       lhSize = 3;
133056 -                                       litSize = (lhc >> 4) & 0x3FF;
133057 -                                       litCSize = (lhc >> 14) & 0x3FF;
133058 -                                       break;
133059 -                               case 2:
133060 -                                       /* 2 - 2 - 14 - 14 */
133061 -                                       lhSize = 4;
133062 -                                       litSize = (lhc >> 4) & 0x3FFF;
133063 -                                       litCSize = lhc >> 18;
133064 -                                       break;
133065 -                               case 3:
133066 -                                       /* 2 - 2 - 18 - 18 */
133067 -                                       lhSize = 5;
133068 -                                       litSize = (lhc >> 4) & 0x3FFFF;
133069 -                                       litCSize = (lhc >> 22) + (istart[4] << 10);
133070 -                                       break;
133071 -                               }
133072 -                               if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX)
133073 -                                       return ERROR(corruption_detected);
133074 -                               if (litCSize + lhSize > srcSize)
133075 -                                       return ERROR(corruption_detected);
133077 -                               if (HUF_isError(
133078 -                                       (litEncType == set_repeat)
133079 -                                           ? (singleStream ? HUF_decompress1X_usingDTable(dctx->litBuffer, litSize, istart + lhSize, litCSize, dctx->HUFptr)
133080 -                                                           : HUF_decompress4X_usingDTable(dctx->litBuffer, litSize, istart + lhSize, litCSize, dctx->HUFptr))
133081 -                                           : (singleStream
133082 -                                                  ? HUF_decompress1X2_DCtx_wksp(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart + lhSize, litCSize,
133083 -                                                                                dctx->entropy.workspace, sizeof(dctx->entropy.workspace))
133084 -                                                  : HUF_decompress4X_hufOnly_wksp(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart + lhSize, litCSize,
133085 -                                                                                  dctx->entropy.workspace, sizeof(dctx->entropy.workspace)))))
133086 -                                       return ERROR(corruption_detected);
133088 -                               dctx->litPtr = dctx->litBuffer;
133089 -                               dctx->litSize = litSize;
133090 -                               dctx->litEntropy = 1;
133091 -                               if (litEncType == set_compressed)
133092 -                                       dctx->HUFptr = dctx->entropy.hufTable;
133093 -                               memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
133094 -                               return litCSize + lhSize;
133095 -                       }
133097 -               case set_basic: {
133098 -                       size_t litSize, lhSize;
133099 -                       U32 const lhlCode = ((istart[0]) >> 2) & 3;
133100 -                       switch (lhlCode) {
133101 -                       case 0:
133102 -                       case 2:
133103 -                       default: /* note : default is impossible, since lhlCode is in [0..3] */
133104 -                               lhSize = 1;
133105 -                               litSize = istart[0] >> 3;
133106 -                               break;
133107 -                       case 1:
133108 -                               lhSize = 2;
133109 -                               litSize = ZSTD_readLE16(istart) >> 4;
133110 -                               break;
133111 -                       case 3:
133112 -                               lhSize = 3;
133113 -                               litSize = ZSTD_readLE24(istart) >> 4;
133114 -                               break;
133115 -                       }
133117 -                       if (lhSize + litSize + WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
133118 -                               if (litSize + lhSize > srcSize)
133119 -                                       return ERROR(corruption_detected);
133120 -                               memcpy(dctx->litBuffer, istart + lhSize, litSize);
133121 -                               dctx->litPtr = dctx->litBuffer;
133122 -                               dctx->litSize = litSize;
133123 -                               memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
133124 -                               return lhSize + litSize;
133125 -                       }
133126 -                       /* direct reference into compressed stream */
133127 -                       dctx->litPtr = istart + lhSize;
133128 -                       dctx->litSize = litSize;
133129 -                       return lhSize + litSize;
133130 -               }
133132 -               case set_rle: {
133133 -                       U32 const lhlCode = ((istart[0]) >> 2) & 3;
133134 -                       size_t litSize, lhSize;
133135 -                       switch (lhlCode) {
133136 -                       case 0:
133137 -                       case 2:
133138 -                       default: /* note : default is impossible, since lhlCode is in [0..3] */
133139 -                               lhSize = 1;
133140 -                               litSize = istart[0] >> 3;
133141 -                               break;
133142 -                       case 1:
133143 -                               lhSize = 2;
133144 -                               litSize = ZSTD_readLE16(istart) >> 4;
133145 -                               break;
133146 -                       case 3:
133147 -                               lhSize = 3;
133148 -                               litSize = ZSTD_readLE24(istart) >> 4;
133149 -                               if (srcSize < 4)
133150 -                                       return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
133151 -                               break;
133152 -                       }
133153 -                       if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX)
133154 -                               return ERROR(corruption_detected);
133155 -                       memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
133156 -                       dctx->litPtr = dctx->litBuffer;
133157 -                       dctx->litSize = litSize;
133158 -                       return lhSize + 1;
133159 -               }
133160 -               default:
133161 -                       return ERROR(corruption_detected); /* impossible */
133162 -               }
133163 -       }
133164 -}
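/* [editor's note] Illustrative sketch, not part of the patch: how the
 * 1-3 byte literals-section header parsed by the set_basic/set_rle cases
 * above packs litSize.  parse_raw_literals_header and the readLE helpers
 * are hypothetical stand-ins for the ZSTD_readLE16/ZSTD_readLE24 calls
 * in the removed code. */
#include <stdint.h>
#include <stddef.h>

static uint32_t readLE16(const uint8_t *p) { return (uint32_t)p[0] | ((uint32_t)p[1] << 8); }
static uint32_t readLE24(const uint8_t *p) { return readLE16(p) | ((uint32_t)p[2] << 16); }

/* Returns the header size (1..3) and stores the decoded literals size. */
static size_t parse_raw_literals_header(const uint8_t *src, size_t *litSize)
{
	uint32_t const lhlCode = (src[0] >> 2) & 3;
	switch (lhlCode) {
	case 0:
	case 2:
	default: /* one byte: 5-bit size */
		*litSize = src[0] >> 3;
		return 1;
	case 1: /* two bytes: 12-bit size */
		*litSize = readLE16(src) >> 4;
		return 2;
	case 3: /* three bytes: 20-bit size */
		*litSize = readLE24(src) >> 4;
		return 3;
	}
}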
133166 -typedef union {
133167 -       FSE_decode_t realData;
133168 -       U32 alignedBy4;
133169 -} FSE_decode_t4;
133171 -static const FSE_decode_t4 LL_defaultDTable[(1 << LL_DEFAULTNORMLOG) + 1] = {
133172 -    {{LL_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */
133173 -    {{0, 0, 4}},                /* 0 : base, symbol, bits */
133174 -    {{16, 0, 4}},
133175 -    {{32, 1, 5}},
133176 -    {{0, 3, 5}},
133177 -    {{0, 4, 5}},
133178 -    {{0, 6, 5}},
133179 -    {{0, 7, 5}},
133180 -    {{0, 9, 5}},
133181 -    {{0, 10, 5}},
133182 -    {{0, 12, 5}},
133183 -    {{0, 14, 6}},
133184 -    {{0, 16, 5}},
133185 -    {{0, 18, 5}},
133186 -    {{0, 19, 5}},
133187 -    {{0, 21, 5}},
133188 -    {{0, 22, 5}},
133189 -    {{0, 24, 5}},
133190 -    {{32, 25, 5}},
133191 -    {{0, 26, 5}},
133192 -    {{0, 27, 6}},
133193 -    {{0, 29, 6}},
133194 -    {{0, 31, 6}},
133195 -    {{32, 0, 4}},
133196 -    {{0, 1, 4}},
133197 -    {{0, 2, 5}},
133198 -    {{32, 4, 5}},
133199 -    {{0, 5, 5}},
133200 -    {{32, 7, 5}},
133201 -    {{0, 8, 5}},
133202 -    {{32, 10, 5}},
133203 -    {{0, 11, 5}},
133204 -    {{0, 13, 6}},
133205 -    {{32, 16, 5}},
133206 -    {{0, 17, 5}},
133207 -    {{32, 19, 5}},
133208 -    {{0, 20, 5}},
133209 -    {{32, 22, 5}},
133210 -    {{0, 23, 5}},
133211 -    {{0, 25, 4}},
133212 -    {{16, 25, 4}},
133213 -    {{32, 26, 5}},
133214 -    {{0, 28, 6}},
133215 -    {{0, 30, 6}},
133216 -    {{48, 0, 4}},
133217 -    {{16, 1, 4}},
133218 -    {{32, 2, 5}},
133219 -    {{32, 3, 5}},
133220 -    {{32, 5, 5}},
133221 -    {{32, 6, 5}},
133222 -    {{32, 8, 5}},
133223 -    {{32, 9, 5}},
133224 -    {{32, 11, 5}},
133225 -    {{32, 12, 5}},
133226 -    {{0, 15, 6}},
133227 -    {{32, 17, 5}},
133228 -    {{32, 18, 5}},
133229 -    {{32, 20, 5}},
133230 -    {{32, 21, 5}},
133231 -    {{32, 23, 5}},
133232 -    {{32, 24, 5}},
133233 -    {{0, 35, 6}},
133234 -    {{0, 34, 6}},
133235 -    {{0, 33, 6}},
133236 -    {{0, 32, 6}},
133237 -}; /* LL_defaultDTable */
133239 -static const FSE_decode_t4 ML_defaultDTable[(1 << ML_DEFAULTNORMLOG) + 1] = {
133240 -    {{ML_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */
133241 -    {{0, 0, 6}},                /* 0 : base, symbol, bits */
133242 -    {{0, 1, 4}},
133243 -    {{32, 2, 5}},
133244 -    {{0, 3, 5}},
133245 -    {{0, 5, 5}},
133246 -    {{0, 6, 5}},
133247 -    {{0, 8, 5}},
133248 -    {{0, 10, 6}},
133249 -    {{0, 13, 6}},
133250 -    {{0, 16, 6}},
133251 -    {{0, 19, 6}},
133252 -    {{0, 22, 6}},
133253 -    {{0, 25, 6}},
133254 -    {{0, 28, 6}},
133255 -    {{0, 31, 6}},
133256 -    {{0, 33, 6}},
133257 -    {{0, 35, 6}},
133258 -    {{0, 37, 6}},
133259 -    {{0, 39, 6}},
133260 -    {{0, 41, 6}},
133261 -    {{0, 43, 6}},
133262 -    {{0, 45, 6}},
133263 -    {{16, 1, 4}},
133264 -    {{0, 2, 4}},
133265 -    {{32, 3, 5}},
133266 -    {{0, 4, 5}},
133267 -    {{32, 6, 5}},
133268 -    {{0, 7, 5}},
133269 -    {{0, 9, 6}},
133270 -    {{0, 12, 6}},
133271 -    {{0, 15, 6}},
133272 -    {{0, 18, 6}},
133273 -    {{0, 21, 6}},
133274 -    {{0, 24, 6}},
133275 -    {{0, 27, 6}},
133276 -    {{0, 30, 6}},
133277 -    {{0, 32, 6}},
133278 -    {{0, 34, 6}},
133279 -    {{0, 36, 6}},
133280 -    {{0, 38, 6}},
133281 -    {{0, 40, 6}},
133282 -    {{0, 42, 6}},
133283 -    {{0, 44, 6}},
133284 -    {{32, 1, 4}},
133285 -    {{48, 1, 4}},
133286 -    {{16, 2, 4}},
133287 -    {{32, 4, 5}},
133288 -    {{32, 5, 5}},
133289 -    {{32, 7, 5}},
133290 -    {{32, 8, 5}},
133291 -    {{0, 11, 6}},
133292 -    {{0, 14, 6}},
133293 -    {{0, 17, 6}},
133294 -    {{0, 20, 6}},
133295 -    {{0, 23, 6}},
133296 -    {{0, 26, 6}},
133297 -    {{0, 29, 6}},
133298 -    {{0, 52, 6}},
133299 -    {{0, 51, 6}},
133300 -    {{0, 50, 6}},
133301 -    {{0, 49, 6}},
133302 -    {{0, 48, 6}},
133303 -    {{0, 47, 6}},
133304 -    {{0, 46, 6}},
133305 -}; /* ML_defaultDTable */
133307 -static const FSE_decode_t4 OF_defaultDTable[(1 << OF_DEFAULTNORMLOG) + 1] = {
133308 -    {{OF_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */
133309 -    {{0, 0, 5}},                /* 0 : base, symbol, bits */
133310 -    {{0, 6, 4}},
133311 -    {{0, 9, 5}},
133312 -    {{0, 15, 5}},
133313 -    {{0, 21, 5}},
133314 -    {{0, 3, 5}},
133315 -    {{0, 7, 4}},
133316 -    {{0, 12, 5}},
133317 -    {{0, 18, 5}},
133318 -    {{0, 23, 5}},
133319 -    {{0, 5, 5}},
133320 -    {{0, 8, 4}},
133321 -    {{0, 14, 5}},
133322 -    {{0, 20, 5}},
133323 -    {{0, 2, 5}},
133324 -    {{16, 7, 4}},
133325 -    {{0, 11, 5}},
133326 -    {{0, 17, 5}},
133327 -    {{0, 22, 5}},
133328 -    {{0, 4, 5}},
133329 -    {{16, 8, 4}},
133330 -    {{0, 13, 5}},
133331 -    {{0, 19, 5}},
133332 -    {{0, 1, 5}},
133333 -    {{16, 6, 4}},
133334 -    {{0, 10, 5}},
133335 -    {{0, 16, 5}},
133336 -    {{0, 28, 5}},
133337 -    {{0, 27, 5}},
133338 -    {{0, 26, 5}},
133339 -    {{0, 25, 5}},
133340 -    {{0, 24, 5}},
133341 -}; /* OF_defaultDTable */
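/* [editor's note] Sketch of how the three default tables above are
 * consumed.  Each table's first element is a header ({tableLog, fastMode,
 * fastMode}); the remaining (1 << tableLog) entries are decode states of
 * the form {newStateBase, symbol, nbBits}.  fse_entry_t and
 * fse_decode_step are editorial stand-ins, with bit reading abstracted
 * behind a caller-supplied callback. */
#include <stdint.h>

typedef struct {
	uint16_t newStateBase; /* "base" column above */
	uint8_t  symbol;
	uint8_t  nbBits;       /* "bits" column above */
} fse_entry_t;

/* One table-driven decode step: emit the symbol for the current state,
 * then rebuild the state from newStateBase plus nbBits fresh bits. */
static uint8_t fse_decode_step(const fse_entry_t *states, uint32_t *state,
			       uint32_t (*readBits)(void *ctx, unsigned n), void *ctx)
{
	fse_entry_t const e = states[*state];
	*state = e.newStateBase + readBits(ctx, e.nbBits);
	return e.symbol;
}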
133343 -/*! ZSTD_buildSeqTable() :
133344 -       @return : nb bytes read from src,
133345 -                         or an error code if it fails, testable with ZSTD_isError()
133346 -*/
133347 -static size_t ZSTD_buildSeqTable(FSE_DTable *DTableSpace, const FSE_DTable **DTablePtr, symbolEncodingType_e type, U32 max, U32 maxLog, const void *src,
133348 -                                size_t srcSize, const FSE_decode_t4 *defaultTable, U32 flagRepeatTable, void *workspace, size_t workspaceSize)
133349 -{
133350 -       const void *const tmpPtr = defaultTable; /* bypass strict aliasing */
133351 -       switch (type) {
133352 -       case set_rle:
133353 -               if (!srcSize)
133354 -                       return ERROR(srcSize_wrong);
133355 -               if ((*(const BYTE *)src) > max)
133356 -                       return ERROR(corruption_detected);
133357 -               FSE_buildDTable_rle(DTableSpace, *(const BYTE *)src);
133358 -               *DTablePtr = DTableSpace;
133359 -               return 1;
133360 -       case set_basic: *DTablePtr = (const FSE_DTable *)tmpPtr; return 0;
133361 -       case set_repeat:
133362 -               if (!flagRepeatTable)
133363 -                       return ERROR(corruption_detected);
133364 -               return 0;
133365 -       default: /* impossible */
133366 -       case set_compressed: {
133367 -               U32 tableLog;
133368 -               S16 *norm = (S16 *)workspace;
133369 -               size_t const spaceUsed32 = ALIGN(sizeof(S16) * (MaxSeq + 1), sizeof(U32)) >> 2;
133371 -               if ((spaceUsed32 << 2) > workspaceSize)
133372 -                       return ERROR(GENERIC);
133373 -               workspace = (U32 *)workspace + spaceUsed32;
133374 -               workspaceSize -= (spaceUsed32 << 2);
133375 -               {
133376 -                       size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
133377 -                       if (FSE_isError(headerSize))
133378 -                               return ERROR(corruption_detected);
133379 -                       if (tableLog > maxLog)
133380 -                               return ERROR(corruption_detected);
133381 -                       FSE_buildDTable_wksp(DTableSpace, norm, max, tableLog, workspace, workspaceSize);
133382 -                       *DTablePtr = DTableSpace;
133383 -                       return headerSize;
133384 -               }
133385 -       }
133386 -       }
133387 -}
133389 -size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx *dctx, int *nbSeqPtr, const void *src, size_t srcSize)
133390 -{
133391 -       const BYTE *const istart = (const BYTE *const)src;
133392 -       const BYTE *const iend = istart + srcSize;
133393 -       const BYTE *ip = istart;
133395 -       /* check */
133396 -       if (srcSize < MIN_SEQUENCES_SIZE)
133397 -               return ERROR(srcSize_wrong);
133399 -       /* SeqHead */
133400 -       {
133401 -               int nbSeq = *ip++;
133402 -               if (!nbSeq) {
133403 -                       *nbSeqPtr = 0;
133404 -                       return 1;
133405 -               }
133406 -               if (nbSeq > 0x7F) {
133407 -                       if (nbSeq == 0xFF) {
133408 -                               if (ip + 2 > iend)
133409 -                                       return ERROR(srcSize_wrong);
133410 -                               nbSeq = ZSTD_readLE16(ip) + LONGNBSEQ, ip += 2;
133411 -                       } else {
133412 -                               if (ip >= iend)
133413 -                                       return ERROR(srcSize_wrong);
133414 -                               nbSeq = ((nbSeq - 0x80) << 8) + *ip++;
133415 -                       }
133416 -               }
133417 -               *nbSeqPtr = nbSeq;
133418 -       }
133420 -       /* FSE table descriptors */
133421 -       if (ip + 4 > iend)
133422 -               return ERROR(srcSize_wrong); /* minimum possible size */
133423 -       {
133424 -               symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
133425 -               symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
133426 -               symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
133427 -               ip++;
133429 -               /* Build DTables */
133430 -               {
133431 -                       size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr, LLtype, MaxLL, LLFSELog, ip, iend - ip,
133432 -                                                                 LL_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace));
133433 -                       if (ZSTD_isError(llhSize))
133434 -                               return ERROR(corruption_detected);
133435 -                       ip += llhSize;
133436 -               }
133437 -               {
133438 -                       size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr, OFtype, MaxOff, OffFSELog, ip, iend - ip,
133439 -                                                                 OF_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace));
133440 -                       if (ZSTD_isError(ofhSize))
133441 -                               return ERROR(corruption_detected);
133442 -                       ip += ofhSize;
133443 -               }
133444 -               {
133445 -                       size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr, MLtype, MaxML, MLFSELog, ip, iend - ip,
133446 -                                                                 ML_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace));
133447 -                       if (ZSTD_isError(mlhSize))
133448 -                               return ERROR(corruption_detected);
133449 -                       ip += mlhSize;
133450 -               }
133451 -       }
133453 -       return ip - istart;
133454 -}
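/* [editor's note] Editorial sketch of the sequence-count varint decoded
 * at the top of ZSTD_decodeSeqHeaders above: one byte for 0..127, two
 * bytes for 128..0x7EFF, and a 0xFF-marked three-byte form beyond that.
 * LONGNBSEQ mirrors the kernel constant (0x7F00). */
#include <stdint.h>
#include <stddef.h>

#define LONGNBSEQ 0x7F00

/* Returns the number of header bytes consumed (1..3), or 0 if srcSize
 * is too small to hold the encoding. */
static size_t decode_nb_sequences(const uint8_t *src, size_t srcSize, int *nbSeq)
{
	if (srcSize < 1)
		return 0;
	if (src[0] < 0x80) {
		*nbSeq = src[0];
		return 1;
	}
	if (src[0] == 0xFF) {
		if (srcSize < 3)
			return 0;
		*nbSeq = ((int)src[1] | ((int)src[2] << 8)) + LONGNBSEQ;
		return 3;
	}
	if (srcSize < 2)
		return 0;
	*nbSeq = ((src[0] - 0x80) << 8) + src[1];
	return 2;
}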
133456 -typedef struct {
133457 -       size_t litLength;
133458 -       size_t matchLength;
133459 -       size_t offset;
133460 -       const BYTE *match;
133461 -} seq_t;
133463 -typedef struct {
133464 -       BIT_DStream_t DStream;
133465 -       FSE_DState_t stateLL;
133466 -       FSE_DState_t stateOffb;
133467 -       FSE_DState_t stateML;
133468 -       size_t prevOffset[ZSTD_REP_NUM];
133469 -       const BYTE *base;
133470 -       size_t pos;
133471 -       uPtrDiff gotoDict;
133472 -} seqState_t;
133474 -FORCE_NOINLINE
133475 -size_t ZSTD_execSequenceLast7(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base,
133476 -                             const BYTE *const vBase, const BYTE *const dictEnd)
133477 -{
133478 -       BYTE *const oLitEnd = op + sequence.litLength;
133479 -       size_t const sequenceLength = sequence.litLength + sequence.matchLength;
133480 -       BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
133481 -       BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH;
133482 -       const BYTE *const iLitEnd = *litPtr + sequence.litLength;
133483 -       const BYTE *match = oLitEnd - sequence.offset;
133485 -       /* check */
133486 -       if (oMatchEnd > oend)
133487 -               return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
133488 -       if (iLitEnd > litLimit)
133489 -               return ERROR(corruption_detected); /* over-read beyond lit buffer */
133490 -       if (oLitEnd <= oend_w)
133491 -               return ERROR(GENERIC); /* Precondition */
133493 -       /* copy literals */
133494 -       if (op < oend_w) {
133495 -               ZSTD_wildcopy(op, *litPtr, oend_w - op);
133496 -               *litPtr += oend_w - op;
133497 -               op = oend_w;
133498 -       }
133499 -       while (op < oLitEnd)
133500 -               *op++ = *(*litPtr)++;
133502 -       /* copy Match */
133503 -       if (sequence.offset > (size_t)(oLitEnd - base)) {
133504 -               /* offset beyond prefix */
133505 -               if (sequence.offset > (size_t)(oLitEnd - vBase))
133506 -                       return ERROR(corruption_detected);
133507 -               match = dictEnd - (base - match);
133508 -               if (match + sequence.matchLength <= dictEnd) {
133509 -                       memmove(oLitEnd, match, sequence.matchLength);
133510 -                       return sequenceLength;
133511 -               }
133512 -               /* span extDict & currPrefixSegment */
133513 -               {
133514 -                       size_t const length1 = dictEnd - match;
133515 -                       memmove(oLitEnd, match, length1);
133516 -                       op = oLitEnd + length1;
133517 -                       sequence.matchLength -= length1;
133518 -                       match = base;
133519 -               }
133520 -       }
133521 -       while (op < oMatchEnd)
133522 -               *op++ = *match++;
133523 -       return sequenceLength;
133524 -}
133526 -static seq_t ZSTD_decodeSequence(seqState_t *seqState)
133527 -{
133528 -       seq_t seq;
133530 -       U32 const llCode = FSE_peekSymbol(&seqState->stateLL);
133531 -       U32 const mlCode = FSE_peekSymbol(&seqState->stateML);
133532 -       U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */
133534 -       U32 const llBits = LL_bits[llCode];
133535 -       U32 const mlBits = ML_bits[mlCode];
133536 -       U32 const ofBits = ofCode;
133537 -       U32 const totalBits = llBits + mlBits + ofBits;
133539 -       static const U32 LL_base[MaxLL + 1] = {0,  1,  2,  3,  4,  5,  6,  7,  8,    9,     10,    11,    12,    13,     14,     15,     16,     18,
133540 -                                              20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000};
133542 -       static const U32 ML_base[MaxML + 1] = {3,  4,  5,  6,  7,  8,  9,  10,   11,    12,    13,    14,    15,     16,     17,     18,     19,     20,
133543 -                                              21, 22, 23, 24, 25, 26, 27, 28,   29,    30,    31,    32,    33,     34,     35,     37,     39,     41,
133544 -                                              43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003};
133546 -       static const U32 OF_base[MaxOff + 1] = {0,       1,     1,      5,      0xD,      0x1D,      0x3D,      0x7D,      0xFD,     0x1FD,
133547 -                                               0x3FD,   0x7FD,    0xFFD,    0x1FFD,   0x3FFD,   0x7FFD,    0xFFFD,    0x1FFFD,   0x3FFFD,  0x7FFFD,
133548 -                                               0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD};
133550 -       /* sequence */
133551 -       {
133552 -               size_t offset;
133553 -               if (!ofCode)
133554 -                       offset = 0;
133555 -               else {
133556 -                       offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <=  (ZSTD_WINDOWLOG_MAX-1) bits */
133557 -                       if (ZSTD_32bits())
133558 -                               BIT_reloadDStream(&seqState->DStream);
133559 -               }
133561 -               if (ofCode <= 1) {
133562 -                       offset += (llCode == 0);
133563 -                       if (offset) {
133564 -                               size_t temp = (offset == 3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
133565 -                               temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
133566 -                               if (offset != 1)
133567 -                                       seqState->prevOffset[2] = seqState->prevOffset[1];
133568 -                               seqState->prevOffset[1] = seqState->prevOffset[0];
133569 -                               seqState->prevOffset[0] = offset = temp;
133570 -                       } else {
133571 -                               offset = seqState->prevOffset[0];
133572 -                       }
133573 -               } else {
133574 -                       seqState->prevOffset[2] = seqState->prevOffset[1];
133575 -                       seqState->prevOffset[1] = seqState->prevOffset[0];
133576 -                       seqState->prevOffset[0] = offset;
133577 -               }
133578 -               seq.offset = offset;
133579 -       }
133581 -       seq.matchLength = ML_base[mlCode] + ((mlCode > 31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <=  16 bits */
133582 -       if (ZSTD_32bits() && (mlBits + llBits > 24))
133583 -               BIT_reloadDStream(&seqState->DStream);
133585 -       seq.litLength = LL_base[llCode] + ((llCode > 15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <=  16 bits */
133586 -       if (ZSTD_32bits() || (totalBits > 64 - 7 - (LLFSELog + MLFSELog + OffFSELog)))
133587 -               BIT_reloadDStream(&seqState->DStream);
133589 -       /* ANS state update */
133590 -       FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <=  9 bits */
133591 -       FSE_updateState(&seqState->stateML, &seqState->DStream); /* <=  9 bits */
133592 -       if (ZSTD_32bits())
133593 -               BIT_reloadDStream(&seqState->DStream);             /* <= 18 bits */
133594 -       FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <=  8 bits */
133596 -       seq.match = NULL;
133598 -       return seq;
133599 -}
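/* [editor's note] The ofCode <= 1 branch above implements zstd's three
 * repeat offsets.  Worked sketch (hypothetical helper): `code` is the
 * decoded value after the (llCode == 0) adjustment, so 0 repeats rep[0]
 * without touching history, 1..2 select an older offset, and 3 means
 * rep[0] - 1. */
#include <stddef.h>

static size_t apply_repeat_offset(size_t rep[3], size_t code /* 0..3 */)
{
	size_t offset;

	if (code == 0)
		return rep[0]; /* repeat the last offset; history unchanged */
	offset = (code == 3) ? rep[0] - 1 : rep[code];
	offset += (offset == 0); /* 0 is invalid (corrupt input): force 1 */
	if (code != 1)
		rep[2] = rep[1];
	rep[1] = rep[0];
	rep[0] = offset;
	return offset;
}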
133601 -FORCE_INLINE
133602 -size_t ZSTD_execSequence(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base,
133603 -                        const BYTE *const vBase, const BYTE *const dictEnd)
133604 -{
133605 -       BYTE *const oLitEnd = op + sequence.litLength;
133606 -       size_t const sequenceLength = sequence.litLength + sequence.matchLength;
133607 -       BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
133608 -       BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH;
133609 -       const BYTE *const iLitEnd = *litPtr + sequence.litLength;
133610 -       const BYTE *match = oLitEnd - sequence.offset;
133612 -       /* check */
133613 -       if (oMatchEnd > oend)
133614 -               return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
133615 -       if (iLitEnd > litLimit)
133616 -               return ERROR(corruption_detected); /* over-read beyond lit buffer */
133617 -       if (oLitEnd > oend_w)
133618 -               return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd);
133620 -       /* copy Literals */
133621 -       ZSTD_copy8(op, *litPtr);
133622 -       if (sequence.litLength > 8)
133623 -               ZSTD_wildcopy(op + 8, (*litPtr) + 8,
133624 -                             sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
133625 -       op = oLitEnd;
133626 -       *litPtr = iLitEnd; /* update for next sequence */
133628 -       /* copy Match */
133629 -       if (sequence.offset > (size_t)(oLitEnd - base)) {
133630 -               /* offset beyond prefix */
133631 -               if (sequence.offset > (size_t)(oLitEnd - vBase))
133632 -                       return ERROR(corruption_detected);
133633 -               match = dictEnd + (match - base);
133634 -               if (match + sequence.matchLength <= dictEnd) {
133635 -                       memmove(oLitEnd, match, sequence.matchLength);
133636 -                       return sequenceLength;
133637 -               }
133638 -               /* span extDict & currPrefixSegment */
133639 -               {
133640 -                       size_t const length1 = dictEnd - match;
133641 -                       memmove(oLitEnd, match, length1);
133642 -                       op = oLitEnd + length1;
133643 -                       sequence.matchLength -= length1;
133644 -                       match = base;
133645 -                       if (op > oend_w || sequence.matchLength < MINMATCH) {
133646 -                               U32 i;
133647 -                               for (i = 0; i < sequence.matchLength; ++i)
133648 -                                       op[i] = match[i];
133649 -                               return sequenceLength;
133650 -                       }
133651 -               }
133652 -       }
133653 -       /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
133655 -       /* match within prefix */
133656 -       if (sequence.offset < 8) {
133657 -               /* close range match, overlap */
133658 -               static const U32 dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
133659 -               static const int dec64table[] = {8, 8, 8, 7, 8, 9, 10, 11}; /* subtracted */
133660 -               int const sub2 = dec64table[sequence.offset];
133661 -               op[0] = match[0];
133662 -               op[1] = match[1];
133663 -               op[2] = match[2];
133664 -               op[3] = match[3];
133665 -               match += dec32table[sequence.offset];
133666 -               ZSTD_copy4(op + 4, match);
133667 -               match -= sub2;
133668 -       } else {
133669 -               ZSTD_copy8(op, match);
133670 -       }
133671 -       op += 8;
133672 -       match += 8;
133674 -       if (oMatchEnd > oend - (16 - MINMATCH)) {
133675 -               if (op < oend_w) {
133676 -                       ZSTD_wildcopy(op, match, oend_w - op);
133677 -                       match += oend_w - op;
133678 -                       op = oend_w;
133679 -               }
133680 -               while (op < oMatchEnd)
133681 -                       *op++ = *match++;
133682 -       } else {
133683 -               ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8); /* works even if matchLength < 8 */
133684 -       }
133685 -       return sequenceLength;
133686 -}
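/* [editor's note] Sketch of the offset < 8 overlap trick above.  The
 * first eight output bytes are produced with four byte copies plus one
 * 4-byte copy whose source is nudged by dec32table/dec64table; afterwards
 * `match` trails `op + 8` by a multiple of `offset` that is at least 8,
 * so wider copies can continue safely.  overlap_copy8 is editorial. */
#include <stdint.h>
#include <string.h>

static const uint8_t *overlap_copy8(uint8_t *op, size_t offset /* 1..7 */)
{
	static const uint32_t dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
	static const int      dec64table[] = {8, 8, 8, 7, 8, 9, 10, 11}; /* subtracted */
	const uint8_t *match = op - offset;

	op[0] = match[0];
	op[1] = match[1];
	op[2] = match[2];
	op[3] = match[3];
	match += dec32table[offset];
	memcpy(op + 4, match, 4); /* ZSTD_copy4 equivalent; source already written */
	match -= dec64table[offset];
	return match + 8; /* e.g. offset == 1: returns op, 8 bytes behind op + 8 */
}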
133688 -static size_t ZSTD_decompressSequences(ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize)
133689 -{
133690 -       const BYTE *ip = (const BYTE *)seqStart;
133691 -       const BYTE *const iend = ip + seqSize;
133692 -       BYTE *const ostart = (BYTE * const)dst;
133693 -       BYTE *const oend = ostart + maxDstSize;
133694 -       BYTE *op = ostart;
133695 -       const BYTE *litPtr = dctx->litPtr;
133696 -       const BYTE *const litEnd = litPtr + dctx->litSize;
133697 -       const BYTE *const base = (const BYTE *)(dctx->base);
133698 -       const BYTE *const vBase = (const BYTE *)(dctx->vBase);
133699 -       const BYTE *const dictEnd = (const BYTE *)(dctx->dictEnd);
133700 -       int nbSeq;
133702 -       /* Build Decoding Tables */
133703 -       {
133704 -               size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize);
133705 -               if (ZSTD_isError(seqHSize))
133706 -                       return seqHSize;
133707 -               ip += seqHSize;
133708 -       }
133710 -       /* Regen sequences */
133711 -       if (nbSeq) {
133712 -               seqState_t seqState;
133713 -               dctx->fseEntropy = 1;
133714 -               {
133715 -                       U32 i;
133716 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
133717 -                               seqState.prevOffset[i] = dctx->entropy.rep[i];
133718 -               }
133719 -               CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend - ip), corruption_detected);
133720 -               FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
133721 -               FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
133722 -               FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
133724 -               for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq;) {
133725 -                       nbSeq--;
133726 -                       {
133727 -                               seq_t const sequence = ZSTD_decodeSequence(&seqState);
133728 -                               size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
133729 -                               if (ZSTD_isError(oneSeqSize))
133730 -                                       return oneSeqSize;
133731 -                               op += oneSeqSize;
133732 -                       }
133733 -               }
133735 -               /* check if reached exact end */
133736 -               if (nbSeq)
133737 -                       return ERROR(corruption_detected);
133738 -               /* save reps for next block */
133739 -               {
133740 -                       U32 i;
133741 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
133742 -                               dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]);
133743 -               }
133744 -       }
133746 -       /* last literal segment */
133747 -       {
133748 -               size_t const lastLLSize = litEnd - litPtr;
133749 -               if (lastLLSize > (size_t)(oend - op))
133750 -                       return ERROR(dstSize_tooSmall);
133751 -               memcpy(op, litPtr, lastLLSize);
133752 -               op += lastLLSize;
133753 -       }
133755 -       return op - ostart;
133756 -}
133758 -FORCE_INLINE seq_t ZSTD_decodeSequenceLong_generic(seqState_t *seqState, int const longOffsets)
133759 -{
133760 -       seq_t seq;
133762 -       U32 const llCode = FSE_peekSymbol(&seqState->stateLL);
133763 -       U32 const mlCode = FSE_peekSymbol(&seqState->stateML);
133764 -       U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */
133766 -       U32 const llBits = LL_bits[llCode];
133767 -       U32 const mlBits = ML_bits[mlCode];
133768 -       U32 const ofBits = ofCode;
133769 -       U32 const totalBits = llBits + mlBits + ofBits;
133771 -       static const U32 LL_base[MaxLL + 1] = {0,  1,  2,  3,  4,  5,  6,  7,  8,    9,     10,    11,    12,    13,     14,     15,     16,     18,
133772 -                                              20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000};
133774 -       static const U32 ML_base[MaxML + 1] = {3,  4,  5,  6,  7,  8,  9,  10,   11,    12,    13,    14,    15,     16,     17,     18,     19,     20,
133775 -                                              21, 22, 23, 24, 25, 26, 27, 28,   29,    30,    31,    32,    33,     34,     35,     37,     39,     41,
133776 -                                              43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003};
133778 -       static const U32 OF_base[MaxOff + 1] = {0,       1,     1,      5,      0xD,      0x1D,      0x3D,      0x7D,      0xFD,     0x1FD,
133779 -                                               0x3FD,   0x7FD,    0xFFD,    0x1FFD,   0x3FFD,   0x7FFD,    0xFFFD,    0x1FFFD,   0x3FFFD,  0x7FFFD,
133780 -                                               0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD};
133782 -       /* sequence */
133783 -       {
133784 -               size_t offset;
133785 -               if (!ofCode)
133786 -                       offset = 0;
133787 -               else {
133788 -                       if (longOffsets) {
133789 -                               int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN);
133790 -                               offset = OF_base[ofCode] + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
133791 -                               if (ZSTD_32bits() || extraBits)
133792 -                                       BIT_reloadDStream(&seqState->DStream);
133793 -                               if (extraBits)
133794 -                                       offset += BIT_readBitsFast(&seqState->DStream, extraBits);
133795 -                       } else {
133796 -                               offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <=  (ZSTD_WINDOWLOG_MAX-1) bits */
133797 -                               if (ZSTD_32bits())
133798 -                                       BIT_reloadDStream(&seqState->DStream);
133799 -                       }
133800 -               }
133802 -               if (ofCode <= 1) {
133803 -                       offset += (llCode == 0);
133804 -                       if (offset) {
133805 -                               size_t temp = (offset == 3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
133806 -                               temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
133807 -                               if (offset != 1)
133808 -                                       seqState->prevOffset[2] = seqState->prevOffset[1];
133809 -                               seqState->prevOffset[1] = seqState->prevOffset[0];
133810 -                               seqState->prevOffset[0] = offset = temp;
133811 -                       } else {
133812 -                               offset = seqState->prevOffset[0];
133813 -                       }
133814 -               } else {
133815 -                       seqState->prevOffset[2] = seqState->prevOffset[1];
133816 -                       seqState->prevOffset[1] = seqState->prevOffset[0];
133817 -                       seqState->prevOffset[0] = offset;
133818 -               }
133819 -               seq.offset = offset;
133820 -       }
133822 -       seq.matchLength = ML_base[mlCode] + ((mlCode > 31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <=  16 bits */
133823 -       if (ZSTD_32bits() && (mlBits + llBits > 24))
133824 -               BIT_reloadDStream(&seqState->DStream);
133826 -       seq.litLength = LL_base[llCode] + ((llCode > 15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <=  16 bits */
133827 -       if (ZSTD_32bits() || (totalBits > 64 - 7 - (LLFSELog + MLFSELog + OffFSELog)))
133828 -               BIT_reloadDStream(&seqState->DStream);
133830 -       {
133831 -               size_t const pos = seqState->pos + seq.litLength;
133832 -               seq.match = seqState->base + pos - seq.offset; /* single memory segment */
133833 -               if (seq.offset > pos)
133834 -                       seq.match += seqState->gotoDict; /* separate memory segment */
133835 -               seqState->pos = pos + seq.matchLength;
133836 -       }
133838 -       /* ANS state update */
133839 -       FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <=  9 bits */
133840 -       FSE_updateState(&seqState->stateML, &seqState->DStream); /* <=  9 bits */
133841 -       if (ZSTD_32bits())
133842 -               BIT_reloadDStream(&seqState->DStream);             /* <= 18 bits */
133843 -       FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <=  8 bits */
133845 -       return seq;
133846 -}
133848 -static seq_t ZSTD_decodeSequenceLong(seqState_t *seqState, unsigned const windowSize)
133849 -{
133850 -       if (ZSTD_highbit32(windowSize) > STREAM_ACCUMULATOR_MIN) {
133851 -               return ZSTD_decodeSequenceLong_generic(seqState, 1);
133852 -       } else {
133853 -               return ZSTD_decodeSequenceLong_generic(seqState, 0);
133854 -       }
133855 -}
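/* [editor's note] The longOffsets path above splits the offset read in
 * two because the bit accumulator only guarantees STREAM_ACCUMULATOR_MIN
 * usable bits between reloads (57 on 64-bit builds, per the kernel's
 * bitstream.h).  Editorial sketch with a toy LSB-first reader standing in
 * for BIT_DStream_t: */
#include <stdint.h>
#include <stddef.h>

#define STREAM_ACCUMULATOR_MIN 57 /* 64-bit value; 25 on 32-bit */

typedef struct { uint64_t acc; } mini_bits_t; /* stand-in for BIT_DStream_t */

static size_t mini_read(mini_bits_t *b, unsigned n /* < 64 */)
{
	size_t const v = (size_t)(b->acc & ((1ULL << n) - 1));
	b->acc >>= n;
	return v;
}

static void mini_reload(mini_bits_t *b) { (void)b; /* refill elided in this sketch */ }

static size_t read_long_offset(mini_bits_t *b, size_t ofBase, unsigned ofBits)
{
	unsigned const extra = ofBits > STREAM_ACCUMULATOR_MIN ? ofBits - STREAM_ACCUMULATOR_MIN : 0;
	size_t offset = ofBase + (mini_read(b, ofBits - extra) << extra);

	if (extra) {
		mini_reload(b); /* guarantee bits for the second chunk */
		offset += mini_read(b, extra);
	}
	return offset;
}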
133857 -FORCE_INLINE
133858 -size_t ZSTD_execSequenceLong(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base,
133859 -                            const BYTE *const vBase, const BYTE *const dictEnd)
133860 -{
133861 -       BYTE *const oLitEnd = op + sequence.litLength;
133862 -       size_t const sequenceLength = sequence.litLength + sequence.matchLength;
133863 -       BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
133864 -       BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH;
133865 -       const BYTE *const iLitEnd = *litPtr + sequence.litLength;
133866 -       const BYTE *match = sequence.match;
133868 -       /* check */
133869 -       if (oMatchEnd > oend)
133870 -               return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
133871 -       if (iLitEnd > litLimit)
133872 -               return ERROR(corruption_detected); /* over-read beyond lit buffer */
133873 -       if (oLitEnd > oend_w)
133874 -               return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd);
133876 -       /* copy Literals */
133877 -       ZSTD_copy8(op, *litPtr);
133878 -       if (sequence.litLength > 8)
133879 -               ZSTD_wildcopy(op + 8, (*litPtr) + 8,
133880 -                             sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
133881 -       op = oLitEnd;
133882 -       *litPtr = iLitEnd; /* update for next sequence */
133884 -       /* copy Match */
133885 -       if (sequence.offset > (size_t)(oLitEnd - base)) {
133886 -               /* offset beyond prefix */
133887 -               if (sequence.offset > (size_t)(oLitEnd - vBase))
133888 -                       return ERROR(corruption_detected);
133889 -               if (match + sequence.matchLength <= dictEnd) {
133890 -                       memmove(oLitEnd, match, sequence.matchLength);
133891 -                       return sequenceLength;
133892 -               }
133893 -               /* span extDict & currPrefixSegment */
133894 -               {
133895 -                       size_t const length1 = dictEnd - match;
133896 -                       memmove(oLitEnd, match, length1);
133897 -                       op = oLitEnd + length1;
133898 -                       sequence.matchLength -= length1;
133899 -                       match = base;
133900 -                       if (op > oend_w || sequence.matchLength < MINMATCH) {
133901 -                               U32 i;
133902 -                               for (i = 0; i < sequence.matchLength; ++i)
133903 -                                       op[i] = match[i];
133904 -                               return sequenceLength;
133905 -                       }
133906 -               }
133907 -       }
133908 -       /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
133910 -       /* match within prefix */
133911 -       if (sequence.offset < 8) {
133912 -               /* close range match, overlap */
133913 -               static const U32 dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
133914 -               static const int dec64table[] = {8, 8, 8, 7, 8, 9, 10, 11}; /* subtracted */
133915 -               int const sub2 = dec64table[sequence.offset];
133916 -               op[0] = match[0];
133917 -               op[1] = match[1];
133918 -               op[2] = match[2];
133919 -               op[3] = match[3];
133920 -               match += dec32table[sequence.offset];
133921 -               ZSTD_copy4(op + 4, match);
133922 -               match -= sub2;
133923 -       } else {
133924 -               ZSTD_copy8(op, match);
133925 -       }
133926 -       op += 8;
133927 -       match += 8;
133929 -       if (oMatchEnd > oend - (16 - MINMATCH)) {
133930 -               if (op < oend_w) {
133931 -                       ZSTD_wildcopy(op, match, oend_w - op);
133932 -                       match += oend_w - op;
133933 -                       op = oend_w;
133934 -               }
133935 -               while (op < oMatchEnd)
133936 -                       *op++ = *match++;
133937 -       } else {
133938 -               ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8); /* works even if matchLength < 8 */
133939 -       }
133940 -       return sequenceLength;
133941 -}
133943 -static size_t ZSTD_decompressSequencesLong(ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize)
133944 -{
133945 -       const BYTE *ip = (const BYTE *)seqStart;
133946 -       const BYTE *const iend = ip + seqSize;
133947 -       BYTE *const ostart = (BYTE * const)dst;
133948 -       BYTE *const oend = ostart + maxDstSize;
133949 -       BYTE *op = ostart;
133950 -       const BYTE *litPtr = dctx->litPtr;
133951 -       const BYTE *const litEnd = litPtr + dctx->litSize;
133952 -       const BYTE *const base = (const BYTE *)(dctx->base);
133953 -       const BYTE *const vBase = (const BYTE *)(dctx->vBase);
133954 -       const BYTE *const dictEnd = (const BYTE *)(dctx->dictEnd);
133955 -       unsigned const windowSize = dctx->fParams.windowSize;
133956 -       int nbSeq;
133958 -       /* Build Decoding Tables */
133959 -       {
133960 -               size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize);
133961 -               if (ZSTD_isError(seqHSize))
133962 -                       return seqHSize;
133963 -               ip += seqHSize;
133964 -       }
133966 -       /* Regen sequences */
133967 -       if (nbSeq) {
133968 -#define STORED_SEQS 4
133969 -#define STOSEQ_MASK (STORED_SEQS - 1)
133970 -#define ADVANCED_SEQS 4
133971 -               seq_t *sequences = (seq_t *)dctx->entropy.workspace;
133972 -               int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
133973 -               seqState_t seqState;
133974 -               int seqNb;
133975 -               ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.workspace) >= sizeof(seq_t) * STORED_SEQS);
133976 -               dctx->fseEntropy = 1;
133977 -               {
133978 -                       U32 i;
133979 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
133980 -                               seqState.prevOffset[i] = dctx->entropy.rep[i];
133981 -               }
133982 -               seqState.base = base;
133983 -               seqState.pos = (size_t)(op - base);
133984 -               seqState.gotoDict = (uPtrDiff)dictEnd - (uPtrDiff)base; /* cast to avoid undefined behaviour */
133985 -               CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend - ip), corruption_detected);
133986 -               FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
133987 -               FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
133988 -               FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
133990 -               /* prepare in advance */
133991 -               for (seqNb = 0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && seqNb < seqAdvance; seqNb++) {
133992 -                       sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, windowSize);
133993 -               }
133994 -               if (seqNb < seqAdvance)
133995 -                       return ERROR(corruption_detected);
133997 -               /* decode and decompress */
133998 -               for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && seqNb < nbSeq; seqNb++) {
133999 -                       seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, windowSize);
134000 -                       size_t const oneSeqSize =
134001 -                           ZSTD_execSequenceLong(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd);
134002 -                       if (ZSTD_isError(oneSeqSize))
134003 -                               return oneSeqSize;
134004 -                       ZSTD_PREFETCH(sequence.match);
134005 -                       sequences[seqNb & STOSEQ_MASK] = sequence;
134006 -                       op += oneSeqSize;
134007 -               }
134008 -               if (seqNb < nbSeq)
134009 -                       return ERROR(corruption_detected);
134011 -               /* finish queue */
134012 -               seqNb -= seqAdvance;
134013 -               for (; seqNb < nbSeq; seqNb++) {
134014 -                       size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd);
134015 -                       if (ZSTD_isError(oneSeqSize))
134016 -                               return oneSeqSize;
134017 -                       op += oneSeqSize;
134018 -               }
134020 -               /* save reps for next block */
134021 -               {
134022 -                       U32 i;
134023 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
134024 -                               dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]);
134025 -               }
134026 -       }
134028 -       /* last literal segment */
134029 -       {
134030 -               size_t const lastLLSize = litEnd - litPtr;
134031 -               if (lastLLSize > (size_t)(oend - op))
134032 -                       return ERROR(dstSize_tooSmall);
134033 -               memcpy(op, litPtr, lastLLSize);
134034 -               op += lastLLSize;
134035 -       }
134037 -       return op - ostart;
134038 -}
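/* [editor's note] Shape of the software pipeline above, abstracted
 * (editorial sketch; decode/execute/prefetch are caller-supplied here):
 * decoding runs ADVANCED_SEQS sequences ahead of execution so each match
 * can be prefetched before ZSTD_execSequenceLong touches it. */
#include <stddef.h>

#define RING_SEQS 4                 /* mirrors STORED_SEQS/ADVANCED_SEQS */
#define RING_MASK (RING_SEQS - 1)

typedef struct { const void *match; } item_t;

static void pipelined_decode(int nb,
			     item_t (*decode)(void *ctx),
			     void (*execute)(void *ctx, item_t it),
			     void (*prefetch)(const void *p),
			     void *ctx)
{
	item_t ring[RING_SEQS];
	int const advance = nb < RING_SEQS ? nb : RING_SEQS;
	int i;

	for (i = 0; i < advance; i++) /* prime the window */
		ring[i & RING_MASK] = decode(ctx);
	for (; i < nb; i++) {         /* steady state: decode ahead, run behind */
		item_t const next = decode(ctx);
		execute(ctx, ring[(i - advance) & RING_MASK]);
		prefetch(next.match);
		ring[i & RING_MASK] = next;
	}
	for (i -= advance; i < nb; i++) /* drain the queue */
		execute(ctx, ring[i & RING_MASK]);
}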
134040 -static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
134041 -{ /* blockType == blockCompressed */
134042 -       const BYTE *ip = (const BYTE *)src;
134044 -       if (srcSize >= ZSTD_BLOCKSIZE_ABSOLUTEMAX)
134045 -               return ERROR(srcSize_wrong);
134047 -       /* Decode literals section */
134048 -       {
134049 -               size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
134050 -               if (ZSTD_isError(litCSize))
134051 -                       return litCSize;
134052 -               ip += litCSize;
134053 -               srcSize -= litCSize;
134054 -       }
134055 -       if (sizeof(size_t) > 4) /* do not enable prefetching on 32-bit x86, as it is detrimental to performance */
134056 -                               /* likely because of register pressure */
134057 -                               /* if that is the correct cause, then 32-bit ARM should be affected differently */
134058 -                               /* it would be good to test this on real ARM hardware, to see if the prefetch version improves speed */
134059 -               if (dctx->fParams.windowSize > (1 << 23))
134060 -                       return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize);
134061 -       return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);
134062 -}
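/* [editor's note] Condensed form of the dispatch above (editorial
 * sketch): the prefetching decoder is only enabled on 64-bit builds and
 * only once the window exceeds 8 MiB, where match sources are likely to
 * miss cache. */
#include <stddef.h>

static int use_long_sequence_decoder(unsigned windowSize)
{
	return sizeof(size_t) > 4 && windowSize > (1U << 23); /* 8 MiB */
}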
134064 -static void ZSTD_checkContinuity(ZSTD_DCtx *dctx, const void *dst)
134065 -{
134066 -       if (dst != dctx->previousDstEnd) { /* not contiguous */
134067 -               dctx->dictEnd = dctx->previousDstEnd;
134068 -               dctx->vBase = (const char *)dst - ((const char *)(dctx->previousDstEnd) - (const char *)(dctx->base));
134069 -               dctx->base = dst;
134070 -               dctx->previousDstEnd = dst;
134071 -       }
134072 -}
134074 -size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
134075 -{
134076 -       size_t dSize;
134077 -       ZSTD_checkContinuity(dctx, dst);
134078 -       dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
134079 -       dctx->previousDstEnd = (char *)dst + dSize;
134080 -       return dSize;
134081 -}
134083 -/** ZSTD_insertBlock() :
134084 -       insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
134085 -size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart, size_t blockSize)
134086 -{
134087 -       ZSTD_checkContinuity(dctx, blockStart);
134088 -       dctx->previousDstEnd = (const char *)blockStart + blockSize;
134089 -       return blockSize;
134090 -}
134092 -size_t ZSTD_generateNxBytes(void *dst, size_t dstCapacity, BYTE byte, size_t length)
134093 -{
134094 -       if (length > dstCapacity)
134095 -               return ERROR(dstSize_tooSmall);
134096 -       memset(dst, byte, length);
134097 -       return length;
134098 -}
134100 -/** ZSTD_findFrameCompressedSize() :
134101 - *  compatible with legacy mode
134102 - *  `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
134103 - *  `srcSize` must be at least as large as the frame contained
134104 - *  @return : the compressed size of the frame starting at `src` */
134105 -size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
134106 -{
134107 -       if (srcSize >= ZSTD_skippableHeaderSize && (ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
134108 -               return ZSTD_skippableHeaderSize + ZSTD_readLE32((const BYTE *)src + 4);
134109 -       } else {
134110 -               const BYTE *ip = (const BYTE *)src;
134111 -               const BYTE *const ipstart = ip;
134112 -               size_t remainingSize = srcSize;
134113 -               ZSTD_frameParams fParams;
134115 -               size_t const headerSize = ZSTD_frameHeaderSize(ip, remainingSize);
134116 -               if (ZSTD_isError(headerSize))
134117 -                       return headerSize;
134119 -               /* Frame Header */
134120 -               {
134121 -                       size_t const ret = ZSTD_getFrameParams(&fParams, ip, remainingSize);
134122 -                       if (ZSTD_isError(ret))
134123 -                               return ret;
134124 -                       if (ret > 0)
134125 -                               return ERROR(srcSize_wrong);
134126 -               }
134128 -               ip += headerSize;
134129 -               remainingSize -= headerSize;
134131 -               /* Loop on each block */
134132 -               while (1) {
134133 -                       blockProperties_t blockProperties;
134134 -                       size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
134135 -                       if (ZSTD_isError(cBlockSize))
134136 -                               return cBlockSize;
134138 -                       if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
134139 -                               return ERROR(srcSize_wrong);
134141 -                       ip += ZSTD_blockHeaderSize + cBlockSize;
134142 -                       remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
134144 -                       if (blockProperties.lastBlock)
134145 -                               break;
134146 -               }
134148 -               if (fParams.checksumFlag) { /* Frame content checksum */
134149 -                       if (remainingSize < 4)
134150 -                               return ERROR(srcSize_wrong);
134151 -                       ip += 4;
134152 -                       remainingSize -= 4;
134153 -               }
134155 -               return ip - ipstart;
134156 -       }
134157 -}
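/* [editor's note] The skippable-frame arithmetic used above, in
 * isolation (editorial sketch): an 8-byte header (LE32 magic in
 * 0x184D2A50..0x184D2A5F, then an LE32 payload size) followed by the
 * payload itself. */
#include <stdint.h>
#include <stddef.h>

#define SKIPPABLE_MAGIC_START 0x184D2A50U /* ZSTD_MAGIC_SKIPPABLE_START */
#define SKIPPABLE_HEADER_SIZE 8           /* ZSTD_skippableHeaderSize */

static uint32_t le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Returns the whole frame size, or 0 if src does not start a skippable frame. */
static size_t skippable_frame_size(const uint8_t *src, size_t srcSize)
{
	if (srcSize < SKIPPABLE_HEADER_SIZE)
		return 0;
	if ((le32(src) & 0xFFFFFFF0U) != SKIPPABLE_MAGIC_START)
		return 0;
	return (size_t)SKIPPABLE_HEADER_SIZE + le32(src + 4);
}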
134159 -/*! ZSTD_decompressFrame() :
134160 -*   @dctx must be properly initialized */
134161 -static size_t ZSTD_decompressFrame(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void **srcPtr, size_t *srcSizePtr)
134162 -{
134163 -       const BYTE *ip = (const BYTE *)(*srcPtr);
134164 -       BYTE *const ostart = (BYTE * const)dst;
134165 -       BYTE *const oend = ostart + dstCapacity;
134166 -       BYTE *op = ostart;
134167 -       size_t remainingSize = *srcSizePtr;
134169 -       /* check */
134170 -       if (remainingSize < ZSTD_frameHeaderSize_min + ZSTD_blockHeaderSize)
134171 -               return ERROR(srcSize_wrong);
134173 -       /* Frame Header */
134174 -       {
134175 -               size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix);
134176 -               if (ZSTD_isError(frameHeaderSize))
134177 -                       return frameHeaderSize;
134178 -               if (remainingSize < frameHeaderSize + ZSTD_blockHeaderSize)
134179 -                       return ERROR(srcSize_wrong);
134180 -               CHECK_F(ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize));
134181 -               ip += frameHeaderSize;
134182 -               remainingSize -= frameHeaderSize;
134183 -       }
134185 -       /* Loop on each block */
134186 -       while (1) {
134187 -               size_t decodedSize;
134188 -               blockProperties_t blockProperties;
134189 -               size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
134190 -               if (ZSTD_isError(cBlockSize))
134191 -                       return cBlockSize;
134193 -               ip += ZSTD_blockHeaderSize;
134194 -               remainingSize -= ZSTD_blockHeaderSize;
134195 -               if (cBlockSize > remainingSize)
134196 -                       return ERROR(srcSize_wrong);
134198 -               switch (blockProperties.blockType) {
134199 -               case bt_compressed: decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend - op, ip, cBlockSize); break;
134200 -               case bt_raw: decodedSize = ZSTD_copyRawBlock(op, oend - op, ip, cBlockSize); break;
134201 -               case bt_rle: decodedSize = ZSTD_generateNxBytes(op, oend - op, *ip, blockProperties.origSize); break;
134202 -               case bt_reserved:
134203 -               default: return ERROR(corruption_detected);
134204 -               }
134206 -               if (ZSTD_isError(decodedSize))
134207 -                       return decodedSize;
134208 -               if (dctx->fParams.checksumFlag)
134209 -                       xxh64_update(&dctx->xxhState, op, decodedSize);
134210 -               op += decodedSize;
134211 -               ip += cBlockSize;
134212 -               remainingSize -= cBlockSize;
134213 -               if (blockProperties.lastBlock)
134214 -                       break;
134215 -       }
134217 -       if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
134218 -               U32 const checkCalc = (U32)xxh64_digest(&dctx->xxhState);
134219 -               U32 checkRead;
134220 -               if (remainingSize < 4)
134221 -                       return ERROR(checksum_wrong);
134222 -               checkRead = ZSTD_readLE32(ip);
134223 -               if (checkRead != checkCalc)
134224 -                       return ERROR(checksum_wrong);
134225 -               ip += 4;
134226 -               remainingSize -= 4;
134227 -       }
134229 -       /* Allow caller to get size read */
134230 -       *srcPtr = ip;
134231 -       *srcSizePtr = remainingSize;
134232 -       return op - ostart;
134233 -}
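/* [editor's note] The frame checksum verified above is the low 32 bits
 * of XXH64 (seed 0) over the decoded output, stored little-endian after
 * the last block.  Minimal sketch using the same kernel xxhash API the
 * removed code calls; frame_checksum_ok is a hypothetical helper. */
#include <linux/types.h>
#include <linux/xxhash.h>

static int frame_checksum_ok(const struct xxh64_state *state, const u8 *stored)
{
	u32 const calc = (u32)xxh64_digest(state);
	u32 const read = (u32)stored[0] | ((u32)stored[1] << 8) |
			 ((u32)stored[2] << 16) | ((u32)stored[3] << 24);
	return calc == read;
}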
134235 -static const void *ZSTD_DDictDictContent(const ZSTD_DDict *ddict);
134236 -static size_t ZSTD_DDictDictSize(const ZSTD_DDict *ddict);
134238 -static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
134239 -                                       const ZSTD_DDict *ddict)
134240 -{
134241 -       void *const dststart = dst;
134243 -       if (ddict) {
134244 -               if (dict) {
134245 -                       /* programmer error, these two cases should be mutually exclusive */
134246 -                       return ERROR(GENERIC);
134247 -               }
134249 -               dict = ZSTD_DDictDictContent(ddict);
134250 -               dictSize = ZSTD_DDictDictSize(ddict);
134251 -       }
134253 -       while (srcSize >= ZSTD_frameHeaderSize_prefix) {
134254 -               U32 magicNumber;
134256 -               magicNumber = ZSTD_readLE32(src);
134257 -               if (magicNumber != ZSTD_MAGICNUMBER) {
134258 -                       if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
134259 -                               size_t skippableSize;
134260 -                               if (srcSize < ZSTD_skippableHeaderSize)
134261 -                                       return ERROR(srcSize_wrong);
134262 -                               skippableSize = ZSTD_readLE32((const BYTE *)src + 4) + ZSTD_skippableHeaderSize;
134263 -                               if (srcSize < skippableSize) {
134264 -                                       return ERROR(srcSize_wrong);
134265 -                               }
134267 -                               src = (const BYTE *)src + skippableSize;
134268 -                               srcSize -= skippableSize;
134269 -                               continue;
134270 -                       } else {
134271 -                               return ERROR(prefix_unknown);
134272 -                       }
134273 -               }
134275 -               if (ddict) {
134276 -                       /* we were called from ZSTD_decompress_usingDDict */
134277 -                       ZSTD_refDDict(dctx, ddict);
134278 -               } else {
134279 -                       /* this will initialize correctly with no dict if dict == NULL, so
134280 -                        * use this in all cases but ddict */
134281 -                       CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
134282 -               }
134283 -               ZSTD_checkContinuity(dctx, dst);
134285 -               {
134286 -                       const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity, &src, &srcSize);
134287 -                       if (ZSTD_isError(res))
134288 -                               return res;
134289 -                       /* don't need to bounds check this, ZSTD_decompressFrame will have
134290 -                        * already */
134291 -                       dst = (BYTE *)dst + res;
134292 -                       dstCapacity -= res;
134293 -               }
134294 -       }
134296 -       if (srcSize)
134297 -               return ERROR(srcSize_wrong); /* input not entirely consumed */
134299 -       return (BYTE *)dst - (BYTE *)dststart;
134300 -}
134302 -size_t ZSTD_decompress_usingDict(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize)
134303 -{
134304 -       return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL);
134305 -}
134307 -size_t ZSTD_decompressDCtx(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
134308 -{
134309 -       return ZSTD_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);
134310 -}
134312 -/*-**************************************
134313 -*   Advanced Streaming Decompression API
134314 -*   Bufferless and synchronous
134315 -****************************************/
134316 -size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx) { return dctx->expected; }
134318 -ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx)
134319 -{
134320 -       switch (dctx->stage) {
134321 -       default: /* should not happen */
134322 -       case ZSTDds_getFrameHeaderSize:
134323 -       case ZSTDds_decodeFrameHeader: return ZSTDnit_frameHeader;
134324 -       case ZSTDds_decodeBlockHeader: return ZSTDnit_blockHeader;
134325 -       case ZSTDds_decompressBlock: return ZSTDnit_block;
134326 -       case ZSTDds_decompressLastBlock: return ZSTDnit_lastBlock;
134327 -       case ZSTDds_checkChecksum: return ZSTDnit_checksum;
134328 -       case ZSTDds_decodeSkippableHeader:
134329 -       case ZSTDds_skipFrame: return ZSTDnit_skippableFrame;
134330 -       }
134333 -int ZSTD_isSkipFrame(ZSTD_DCtx *dctx) { return dctx->stage == ZSTDds_skipFrame; } /* for zbuff */
134335 -/** ZSTD_decompressContinue() :
134336 -*   @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity`)
134337 -*             or an error code, which can be tested using ZSTD_isError() */
134338 -size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
134340 -       /* Sanity check */
134341 -       if (srcSize != dctx->expected)
134342 -               return ERROR(srcSize_wrong);
134343 -       if (dstCapacity)
134344 -               ZSTD_checkContinuity(dctx, dst);
134346 -       switch (dctx->stage) {
134347 -       case ZSTDds_getFrameHeaderSize:
134348 -               if (srcSize != ZSTD_frameHeaderSize_prefix)
134349 -                       return ERROR(srcSize_wrong);                                    /* impossible */
134350 -               if ((ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
134351 -                       memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix);
134352 -                       dctx->expected = ZSTD_skippableHeaderSize - ZSTD_frameHeaderSize_prefix; /* magic number + skippable frame length */
134353 -                       dctx->stage = ZSTDds_decodeSkippableHeader;
134354 -                       return 0;
134355 -               }
134356 -               dctx->headerSize = ZSTD_frameHeaderSize(src, ZSTD_frameHeaderSize_prefix);
134357 -               if (ZSTD_isError(dctx->headerSize))
134358 -                       return dctx->headerSize;
134359 -               memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix);
134360 -               if (dctx->headerSize > ZSTD_frameHeaderSize_prefix) {
134361 -                       dctx->expected = dctx->headerSize - ZSTD_frameHeaderSize_prefix;
134362 -                       dctx->stage = ZSTDds_decodeFrameHeader;
134363 -                       return 0;
134364 -               }
134365 -               dctx->expected = 0; /* not necessary to copy more */
134366 -               fallthrough;
134368 -       case ZSTDds_decodeFrameHeader:
134369 -               memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected);
134370 -               CHECK_F(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));
134371 -               dctx->expected = ZSTD_blockHeaderSize;
134372 -               dctx->stage = ZSTDds_decodeBlockHeader;
134373 -               return 0;
134375 -       case ZSTDds_decodeBlockHeader: {
134376 -               blockProperties_t bp;
134377 -               size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
134378 -               if (ZSTD_isError(cBlockSize))
134379 -                       return cBlockSize;
134380 -               dctx->expected = cBlockSize;
134381 -               dctx->bType = bp.blockType;
134382 -               dctx->rleSize = bp.origSize;
134383 -               if (cBlockSize) {
134384 -                       dctx->stage = bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock;
134385 -                       return 0;
134386 -               }
134387 -               /* empty block */
134388 -               if (bp.lastBlock) {
134389 -                       if (dctx->fParams.checksumFlag) {
134390 -                               dctx->expected = 4;
134391 -                               dctx->stage = ZSTDds_checkChecksum;
134392 -                       } else {
134393 -                               dctx->expected = 0; /* end of frame */
134394 -                               dctx->stage = ZSTDds_getFrameHeaderSize;
134395 -                       }
134396 -               } else {
134397 -                       dctx->expected = 3; /* go directly to next header */
134398 -                       dctx->stage = ZSTDds_decodeBlockHeader;
134399 -               }
134400 -               return 0;
134401 -       }
134402 -       case ZSTDds_decompressLastBlock:
134403 -       case ZSTDds_decompressBlock: {
134404 -               size_t rSize;
134405 -               switch (dctx->bType) {
134406 -               case bt_compressed: rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize); break;
134407 -               case bt_raw: rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); break;
134408 -               case bt_rle: rSize = ZSTD_setRleBlock(dst, dstCapacity, src, srcSize, dctx->rleSize); break;
134409 -               case bt_reserved: /* should never happen */
134410 -               default: return ERROR(corruption_detected);
134411 -               }
134412 -               if (ZSTD_isError(rSize))
134413 -                       return rSize;
134414 -               if (dctx->fParams.checksumFlag)
134415 -                       xxh64_update(&dctx->xxhState, dst, rSize);
134417 -               if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */
134418 -                       if (dctx->fParams.checksumFlag) {       /* another round for frame checksum */
134419 -                               dctx->expected = 4;
134420 -                               dctx->stage = ZSTDds_checkChecksum;
134421 -                       } else {
134422 -                               dctx->expected = 0; /* ends here */
134423 -                               dctx->stage = ZSTDds_getFrameHeaderSize;
134424 -                       }
134425 -               } else {
134426 -                       dctx->stage = ZSTDds_decodeBlockHeader;
134427 -                       dctx->expected = ZSTD_blockHeaderSize;
134428 -                       dctx->previousDstEnd = (char *)dst + rSize;
134429 -               }
134430 -               return rSize;
134431 -       }
134432 -       case ZSTDds_checkChecksum: {
134433 -               U32 const h32 = (U32)xxh64_digest(&dctx->xxhState);
134434 -               U32 const check32 = ZSTD_readLE32(src); /* srcSize == 4, guaranteed by dctx->expected */
134435 -               if (check32 != h32)
134436 -                       return ERROR(checksum_wrong);
134437 -               dctx->expected = 0;
134438 -               dctx->stage = ZSTDds_getFrameHeaderSize;
134439 -               return 0;
134440 -       }
134441 -       case ZSTDds_decodeSkippableHeader: {
134442 -               memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected);
134443 -               dctx->expected = ZSTD_readLE32(dctx->headerBuffer + 4);
134444 -               dctx->stage = ZSTDds_skipFrame;
134445 -               return 0;
134446 -       }
134447 -       case ZSTDds_skipFrame: {
134448 -               dctx->expected = 0;
134449 -               dctx->stage = ZSTDds_getFrameHeaderSize;
134450 -               return 0;
134451 -       }
134452 -       default:
134453 -               return ERROR(GENERIC); /* impossible */
134454 -       }
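The state machine above reduces to a simple driver loop: ask ZSTD_nextSrcSizeToDecompress() how many bytes to feed, hand exactly that many to ZSTD_decompressContinue(), and append whatever it produces. A hedged sketch, where nextChunk() is a hypothetical callback returning a pointer to exactly `len` input bytes:

static size_t example_bufferless_loop(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *(*nextChunk)(size_t len))
{
	BYTE *op = (BYTE *)dst;
	BYTE *const oend = (BYTE *)dst + dstCapacity;
	size_t toRead;

	CHECK_F(ZSTD_decompressBegin(dctx));
	while ((toRead = ZSTD_nextSrcSizeToDecompress(dctx)) != 0) {
		size_t const produced = ZSTD_decompressContinue(dctx, op, (size_t)(oend - op), nextChunk(toRead), toRead);
		if (ZSTD_isError(produced))
			return produced;
		op += produced; /* decoded blocks must stay contiguous; see ZSTD_checkContinuity() */
	}
	return (size_t)(op - (BYTE *)dst); /* one frame decoded */
}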
134457 -static size_t ZSTD_refDictContent(ZSTD_DCtx *dctx, const void *dict, size_t dictSize)
134459 -       dctx->dictEnd = dctx->previousDstEnd;
134460 -       dctx->vBase = (const char *)dict - ((const char *)(dctx->previousDstEnd) - (const char *)(dctx->base));
134461 -       dctx->base = dict;
134462 -       dctx->previousDstEnd = (const char *)dict + dictSize;
134463 -       return 0;
134466 -/* ZSTD_loadEntropy() :
134467 - * dict : must point at beginning of a valid zstd dictionary
134468 - * @return : size of entropy tables read */
134469 -static size_t ZSTD_loadEntropy(ZSTD_entropyTables_t *entropy, const void *const dict, size_t const dictSize)
134471 -       const BYTE *dictPtr = (const BYTE *)dict;
134472 -       const BYTE *const dictEnd = dictPtr + dictSize;
134474 -       if (dictSize <= 8)
134475 -               return ERROR(dictionary_corrupted);
134476 -       dictPtr += 8; /* skip header = magic + dictID */
134478 -       {
134479 -               size_t const hSize = HUF_readDTableX4_wksp(entropy->hufTable, dictPtr, dictEnd - dictPtr, entropy->workspace, sizeof(entropy->workspace));
134480 -               if (HUF_isError(hSize))
134481 -                       return ERROR(dictionary_corrupted);
134482 -               dictPtr += hSize;
134483 -       }
134485 -       {
134486 -               short offcodeNCount[MaxOff + 1];
134487 -               U32 offcodeMaxValue = MaxOff, offcodeLog;
134488 -               size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd - dictPtr);
134489 -               if (FSE_isError(offcodeHeaderSize))
134490 -                       return ERROR(dictionary_corrupted);
134491 -               if (offcodeLog > OffFSELog)
134492 -                       return ERROR(dictionary_corrupted);
134493 -               CHECK_E(FSE_buildDTable_wksp(entropy->OFTable, offcodeNCount, offcodeMaxValue, offcodeLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted);
134494 -               dictPtr += offcodeHeaderSize;
134495 -       }
134497 -       {
134498 -               short matchlengthNCount[MaxML + 1];
134499 -               unsigned matchlengthMaxValue = MaxML, matchlengthLog;
134500 -               size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd - dictPtr);
134501 -               if (FSE_isError(matchlengthHeaderSize))
134502 -                       return ERROR(dictionary_corrupted);
134503 -               if (matchlengthLog > MLFSELog)
134504 -                       return ERROR(dictionary_corrupted);
134505 -               CHECK_E(FSE_buildDTable_wksp(entropy->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted);
134506 -               dictPtr += matchlengthHeaderSize;
134507 -       }
134509 -       {
134510 -               short litlengthNCount[MaxLL + 1];
134511 -               unsigned litlengthMaxValue = MaxLL, litlengthLog;
134512 -               size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd - dictPtr);
134513 -               if (FSE_isError(litlengthHeaderSize))
134514 -                       return ERROR(dictionary_corrupted);
134515 -               if (litlengthLog > LLFSELog)
134516 -                       return ERROR(dictionary_corrupted);
134517 -               CHECK_E(FSE_buildDTable_wksp(entropy->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted);
134518 -               dictPtr += litlengthHeaderSize;
134519 -       }
134521 -       if (dictPtr + 12 > dictEnd)
134522 -               return ERROR(dictionary_corrupted);
134523 -       {
134524 -               int i;
134525 -               size_t const dictContentSize = (size_t)(dictEnd - (dictPtr + 12));
134526 -               for (i = 0; i < 3; i++) {
134527 -                       U32 const rep = ZSTD_readLE32(dictPtr);
134528 -                       dictPtr += 4;
134529 -                       if (rep == 0 || rep >= dictContentSize)
134530 -                               return ERROR(dictionary_corrupted);
134531 -                       entropy->rep[i] = rep;
134532 -               }
134533 -       }
134535 -       return dictPtr - (const BYTE *)dict;
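Put together, the layout this parser expects for a conformant dictionary is:

	offset 0  : 4 bytes   ZSTD_DICT_MAGIC
	offset 4  : 4 bytes   dictID (little-endian)
	offset 8  :           Huffman literals table (HUF_readDTableX4_wksp)
	                      FSE offset-code table (log <= OffFSELog)
	                      FSE match-length table (log <= MLFSELog)
	                      FSE literal-length table (log <= LLFSELog)
	next 12   : 3 x 4 bytes  initial repeat offsets, each non-zero and
	                      smaller than the remaining content size
	remainder :           raw dictionary content (referenced, not parsed)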
134538 -static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx *dctx, const void *dict, size_t dictSize)
134540 -       if (dictSize < 8)
134541 -               return ZSTD_refDictContent(dctx, dict, dictSize);
134542 -       {
134543 -               U32 const magic = ZSTD_readLE32(dict);
134544 -               if (magic != ZSTD_DICT_MAGIC) {
134545 -                       return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */
134546 -               }
134547 -       }
134548 -       dctx->dictID = ZSTD_readLE32((const char *)dict + 4);
134550 -       /* load entropy tables */
134551 -       {
134552 -               size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize);
134553 -               if (ZSTD_isError(eSize))
134554 -                       return ERROR(dictionary_corrupted);
134555 -               dict = (const char *)dict + eSize;
134556 -               dictSize -= eSize;
134557 -       }
134558 -       dctx->litEntropy = dctx->fseEntropy = 1;
134560 -       /* reference dictionary content */
134561 -       return ZSTD_refDictContent(dctx, dict, dictSize);
134564 -size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict, size_t dictSize)
134566 -       CHECK_F(ZSTD_decompressBegin(dctx));
134567 -       if (dict && dictSize)
134568 -               CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted);
134569 -       return 0;
134572 -/* ======   ZSTD_DDict   ====== */
134574 -struct ZSTD_DDict_s {
134575 -       void *dictBuffer;
134576 -       const void *dictContent;
134577 -       size_t dictSize;
134578 -       ZSTD_entropyTables_t entropy;
134579 -       U32 dictID;
134580 -       U32 entropyPresent;
134581 -       ZSTD_customMem cMem;
134582 -}; /* typedef'd to ZSTD_DDict within "zstd.h" */
134584 -size_t ZSTD_DDictWorkspaceBound(void) { return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_DDict)); }
134586 -static const void *ZSTD_DDictDictContent(const ZSTD_DDict *ddict) { return ddict->dictContent; }
134588 -static size_t ZSTD_DDictDictSize(const ZSTD_DDict *ddict) { return ddict->dictSize; }
134590 -static void ZSTD_refDDict(ZSTD_DCtx *dstDCtx, const ZSTD_DDict *ddict)
134592 -       ZSTD_decompressBegin(dstDCtx); /* init */
134593 -       if (ddict) {                   /* support refDDict on NULL */
134594 -               dstDCtx->dictID = ddict->dictID;
134595 -               dstDCtx->base = ddict->dictContent;
134596 -               dstDCtx->vBase = ddict->dictContent;
134597 -               dstDCtx->dictEnd = (const BYTE *)ddict->dictContent + ddict->dictSize;
134598 -               dstDCtx->previousDstEnd = dstDCtx->dictEnd;
134599 -               if (ddict->entropyPresent) {
134600 -                       dstDCtx->litEntropy = 1;
134601 -                       dstDCtx->fseEntropy = 1;
134602 -                       dstDCtx->LLTptr = ddict->entropy.LLTable;
134603 -                       dstDCtx->MLTptr = ddict->entropy.MLTable;
134604 -                       dstDCtx->OFTptr = ddict->entropy.OFTable;
134605 -                       dstDCtx->HUFptr = ddict->entropy.hufTable;
134606 -                       dstDCtx->entropy.rep[0] = ddict->entropy.rep[0];
134607 -                       dstDCtx->entropy.rep[1] = ddict->entropy.rep[1];
134608 -                       dstDCtx->entropy.rep[2] = ddict->entropy.rep[2];
134609 -               } else {
134610 -                       dstDCtx->litEntropy = 0;
134611 -                       dstDCtx->fseEntropy = 0;
134612 -               }
134613 -       }
134616 -static size_t ZSTD_loadEntropy_inDDict(ZSTD_DDict *ddict)
134618 -       ddict->dictID = 0;
134619 -       ddict->entropyPresent = 0;
134620 -       if (ddict->dictSize < 8)
134621 -               return 0;
134622 -       {
134623 -               U32 const magic = ZSTD_readLE32(ddict->dictContent);
134624 -               if (magic != ZSTD_DICT_MAGIC)
134625 -                       return 0; /* pure content mode */
134626 -       }
134627 -       ddict->dictID = ZSTD_readLE32((const char *)ddict->dictContent + 4);
134629 -       /* load entropy tables */
134630 -       CHECK_E(ZSTD_loadEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize), dictionary_corrupted);
134631 -       ddict->entropyPresent = 1;
134632 -       return 0;
134635 -static ZSTD_DDict *ZSTD_createDDict_advanced(const void *dict, size_t dictSize, unsigned byReference, ZSTD_customMem customMem)
134637 -       if (!customMem.customAlloc || !customMem.customFree)
134638 -               return NULL;
134640 -       {
134641 -               ZSTD_DDict *const ddict = (ZSTD_DDict *)ZSTD_malloc(sizeof(ZSTD_DDict), customMem);
134642 -               if (!ddict)
134643 -                       return NULL;
134644 -               ddict->cMem = customMem;
134646 -               if ((byReference) || (!dict) || (!dictSize)) {
134647 -                       ddict->dictBuffer = NULL;
134648 -                       ddict->dictContent = dict;
134649 -               } else {
134650 -                       void *const internalBuffer = ZSTD_malloc(dictSize, customMem);
134651 -                       if (!internalBuffer) {
134652 -                               ZSTD_freeDDict(ddict);
134653 -                               return NULL;
134654 -                       }
134655 -                       memcpy(internalBuffer, dict, dictSize);
134656 -                       ddict->dictBuffer = internalBuffer;
134657 -                       ddict->dictContent = internalBuffer;
134658 -               }
134659 -               ddict->dictSize = dictSize;
134660 -               ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
134661 -               /* parse dictionary content */
134662 -               {
134663 -                       size_t const errorCode = ZSTD_loadEntropy_inDDict(ddict);
134664 -                       if (ZSTD_isError(errorCode)) {
134665 -                               ZSTD_freeDDict(ddict);
134666 -                               return NULL;
134667 -                       }
134668 -               }
134670 -               return ddict;
134671 -       }
134674 -/*! ZSTD_initDDict() :
134675 -*   Create a digested dictionary, to start decompression without startup delay.
134676 -*   Note : `dict` content is referenced, not copied (byReference == 1 below), so
134677 -*   `dict` must remain valid for the whole lifetime of the `ZSTD_DDict` */
134678 -ZSTD_DDict *ZSTD_initDDict(const void *dict, size_t dictSize, void *workspace, size_t workspaceSize)
134680 -       ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
134681 -       return ZSTD_createDDict_advanced(dict, dictSize, 1, stackMem);
134684 -size_t ZSTD_freeDDict(ZSTD_DDict *ddict)
134686 -       if (ddict == NULL)
134687 -               return 0; /* support free on NULL */
134688 -       {
134689 -               ZSTD_customMem const cMem = ddict->cMem;
134690 -               ZSTD_free(ddict->dictBuffer, cMem);
134691 -               ZSTD_free(ddict, cMem);
134692 -               return 0;
134693 -       }
134696 -/*! ZSTD_getDictID_fromDict() :
134697 - *  Provides the dictID stored within the dictionary.
134698 - *  If @return == 0, the dictionary is not conformant with the Zstandard specification.
134699 - *  It can still be loaded, but as a content-only dictionary. */
134700 -unsigned ZSTD_getDictID_fromDict(const void *dict, size_t dictSize)
134702 -       if (dictSize < 8)
134703 -               return 0;
134704 -       if (ZSTD_readLE32(dict) != ZSTD_DICT_MAGIC)
134705 -               return 0;
134706 -       return ZSTD_readLE32((const char *)dict + 4);
134709 -/*! ZSTD_getDictID_fromDDict() :
134710 - *  Provides the dictID of the dictionary loaded into `ddict`.
134711 - *  If @return == 0, the dictionary is not conformant to the Zstandard specification, or empty.
134712 - *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
134713 -unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict)
134715 -       if (ddict == NULL)
134716 -               return 0;
134717 -       return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
134720 -/*! ZSTD_getDictID_fromFrame() :
134721 - *  Provides the dictID required to decompress the frame stored within `src`.
134722 - *  If @return == 0, the dictID could not be decoded.
134723 - *  This could be for one of the following reasons :
134724 - *  - The frame does not require a dictionary to be decoded (most common case).
134725 - *  - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is hidden information.
134726 - *    Note : this use case also happens when using a non-conformant dictionary.
134727 - *  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
134728 - *  - This is not a Zstandard frame.
134729 - *  To identify the exact failure cause, it's possible to use ZSTD_getFrameParams(), which will provide a more precise error code. */
134730 -unsigned ZSTD_getDictID_fromFrame(const void *src, size_t srcSize)
134732 -       ZSTD_frameParams zfp = {0, 0, 0, 0};
134733 -       size_t const hError = ZSTD_getFrameParams(&zfp, src, srcSize);
134734 -       if (ZSTD_isError(hError))
134735 -               return 0;
134736 -       return zfp.dictID;
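A hedged sketch of the intended use: compare the frame's dictID against the loaded dictionary before decompressing. Only a positive mismatch is conclusive, since 0 means "unknown" on either side:

static int example_dict_matches(const void *src, size_t srcSize, const void *dict, size_t dictSize)
{
	unsigned const frameID = ZSTD_getDictID_fromFrame(src, srcSize);
	unsigned const dictID = ZSTD_getDictID_fromDict(dict, dictSize);
	return frameID == 0 || dictID == 0 || frameID == dictID;
}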
134739 -/*! ZSTD_decompress_usingDDict() :
134740 -*   Decompression using a pre-digested Dictionary
134741 -*   Use dictionary without significant overhead. */
134742 -size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_DDict *ddict)
134744 -       /* pass content and size in case legacy frames are encountered */
134745 -       return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, NULL, 0, ddict);
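A hedged sketch of reusing a digested dictionary across many frames; names and allocation are illustrative. Since ZSTD_initDDict() references rather than copies, `dict` must stay valid while the DDict is in use:

static size_t example_decompress_with_ddict(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize)
{
	size_t const wkspSize = ZSTD_DDictWorkspaceBound();
	void *wksp = kmalloc(wkspSize, GFP_KERNEL);
	ZSTD_DDict *ddict;
	size_t ret;

	if (!wksp)
		return ERROR(memory_allocation);
	ddict = ZSTD_initDDict(dict, dictSize, wksp, wkspSize);
	if (!ddict) {
		kfree(wksp);
		return ERROR(dictionary_corrupted);
	}
	ret = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ddict);
	kfree(wksp); /* the DDict lives inside wksp */
	return ret;
}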
134748 -/*=====================================
134749 -*   Streaming decompression
134750 -*====================================*/
134752 -typedef enum { zdss_init, zdss_loadHeader, zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
134754 -/* *** Resource management *** */
134755 -struct ZSTD_DStream_s {
134756 -       ZSTD_DCtx *dctx;
134757 -       ZSTD_DDict *ddictLocal;
134758 -       const ZSTD_DDict *ddict;
134759 -       ZSTD_frameParams fParams;
134760 -       ZSTD_dStreamStage stage;
134761 -       char *inBuff;
134762 -       size_t inBuffSize;
134763 -       size_t inPos;
134764 -       size_t maxWindowSize;
134765 -       char *outBuff;
134766 -       size_t outBuffSize;
134767 -       size_t outStart;
134768 -       size_t outEnd;
134769 -       size_t blockSize;
134770 -       BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; /* tmp buffer to store frame header */
134771 -       size_t lhSize;
134772 -       ZSTD_customMem customMem;
134773 -       void *legacyContext;
134774 -       U32 previousLegacyVersion;
134775 -       U32 legacyVersion;
134776 -       U32 hostageByte;
134777 -}; /* typedef'd to ZSTD_DStream within "zstd.h" */
134779 -size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize)
134781 -       size_t const blockSize = MIN(maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX);
134782 -       size_t const inBuffSize = blockSize;
134783 -       size_t const outBuffSize = maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
134784 -       return ZSTD_DCtxWorkspaceBound() + ZSTD_ALIGN(sizeof(ZSTD_DStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize);
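As a worked example, assuming the v1.3-era constants ZSTD_BLOCKSIZE_ABSOLUTEMAX = 128 KiB and WILDCOPY_OVERLENGTH = 8 (both assumptions of this illustration), maxWindowSize = 1 MiB gives:

	blockSize   = MIN(1048576, 131072)     = 131072
	inBuffSize  = blockSize                = 131072
	outBuffSize = 1048576 + 131072 + 2*8   = 1179664
	bound       = ZSTD_DCtxWorkspaceBound()
	              + ZSTD_ALIGN(sizeof(ZSTD_DStream))
	              + ZSTD_ALIGN(131072) + ZSTD_ALIGN(1179664)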
134787 -static ZSTD_DStream *ZSTD_createDStream_advanced(ZSTD_customMem customMem)
134789 -       ZSTD_DStream *zds;
134791 -       if (!customMem.customAlloc || !customMem.customFree)
134792 -               return NULL;
134794 -       zds = (ZSTD_DStream *)ZSTD_malloc(sizeof(ZSTD_DStream), customMem);
134795 -       if (zds == NULL)
134796 -               return NULL;
134797 -       memset(zds, 0, sizeof(ZSTD_DStream));
134798 -       memcpy(&zds->customMem, &customMem, sizeof(ZSTD_customMem));
134799 -       zds->dctx = ZSTD_createDCtx_advanced(customMem);
134800 -       if (zds->dctx == NULL) {
134801 -               ZSTD_freeDStream(zds);
134802 -               return NULL;
134803 -       }
134804 -       zds->stage = zdss_init;
134805 -       zds->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
134806 -       return zds;
134809 -ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace, size_t workspaceSize)
134811 -       ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
134812 -       ZSTD_DStream *zds = ZSTD_createDStream_advanced(stackMem);
134813 -       if (!zds) {
134814 -               return NULL;
134815 -       }
134817 -       zds->maxWindowSize = maxWindowSize;
134818 -       zds->stage = zdss_loadHeader;
134819 -       zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
134820 -       ZSTD_freeDDict(zds->ddictLocal);
134821 -       zds->ddictLocal = NULL;
134822 -       zds->ddict = zds->ddictLocal;
134823 -       zds->legacyVersion = 0;
134824 -       zds->hostageByte = 0;
134826 -       {
134827 -               size_t const blockSize = MIN(zds->maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX);
134828 -               size_t const neededOutSize = zds->maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
134830 -               zds->inBuff = (char *)ZSTD_malloc(blockSize, zds->customMem);
134831 -               zds->inBuffSize = blockSize;
134832 -               zds->outBuff = (char *)ZSTD_malloc(neededOutSize, zds->customMem);
134833 -               zds->outBuffSize = neededOutSize;
134834 -               if (zds->inBuff == NULL || zds->outBuff == NULL) {
134835 -                       ZSTD_freeDStream(zds);
134836 -                       return NULL;
134837 -               }
134838 -       }
134839 -       return zds;
134842 -ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize, const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize)
134844 -       ZSTD_DStream *zds = ZSTD_initDStream(maxWindowSize, workspace, workspaceSize);
134845 -       if (zds) {
134846 -               zds->ddict = ddict;
134847 -       }
134848 -       return zds;
134851 -size_t ZSTD_freeDStream(ZSTD_DStream *zds)
134853 -       if (zds == NULL)
134854 -               return 0; /* support free on null */
134855 -       {
134856 -               ZSTD_customMem const cMem = zds->customMem;
134857 -               ZSTD_freeDCtx(zds->dctx);
134858 -               zds->dctx = NULL;
134859 -               ZSTD_freeDDict(zds->ddictLocal);
134860 -               zds->ddictLocal = NULL;
134861 -               ZSTD_free(zds->inBuff, cMem);
134862 -               zds->inBuff = NULL;
134863 -               ZSTD_free(zds->outBuff, cMem);
134864 -               zds->outBuff = NULL;
134865 -               ZSTD_free(zds, cMem);
134866 -               return 0;
134867 -       }
134870 -/* *** Initialization *** */
134872 -size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX + ZSTD_blockHeaderSize; }
134873 -size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; }
134875 -size_t ZSTD_resetDStream(ZSTD_DStream *zds)
134877 -       zds->stage = zdss_loadHeader;
134878 -       zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
134879 -       zds->legacyVersion = 0;
134880 -       zds->hostageByte = 0;
134881 -       return ZSTD_frameHeaderSize_prefix;
134884 -/* *****   Decompression   ***** */
134886 -ZSTD_STATIC size_t ZSTD_limitCopy(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
134888 -       size_t const length = MIN(dstCapacity, srcSize);
134889 -       memcpy(dst, src, length);
134890 -       return length;
134893 -size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inBuffer *input)
134895 -       const char *const istart = (const char *)(input->src) + input->pos;
134896 -       const char *const iend = (const char *)(input->src) + input->size;
134897 -       const char *ip = istart;
134898 -       char *const ostart = (char *)(output->dst) + output->pos;
134899 -       char *const oend = (char *)(output->dst) + output->size;
134900 -       char *op = ostart;
134901 -       U32 someMoreWork = 1;
134903 -       while (someMoreWork) {
134904 -               switch (zds->stage) {
134905 -               case zdss_init:
134906 -                       ZSTD_resetDStream(zds); /* transparent reset on starting decoding a new frame */
134907 -                       fallthrough;
134909 -               case zdss_loadHeader: {
134910 -                       size_t const hSize = ZSTD_getFrameParams(&zds->fParams, zds->headerBuffer, zds->lhSize);
134911 -                       if (ZSTD_isError(hSize))
134912 -                               return hSize;
134913 -                       if (hSize != 0) {                                  /* need more input */
134914 -                               size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */
134915 -                               if (toLoad > (size_t)(iend - ip)) {     /* not enough input to load full header */
134916 -                                       memcpy(zds->headerBuffer + zds->lhSize, ip, iend - ip);
134917 -                                       zds->lhSize += iend - ip;
134918 -                                       input->pos = input->size;
134919 -                                       return (MAX(ZSTD_frameHeaderSize_min, hSize) - zds->lhSize) +
134920 -                                              ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
134921 -                               }
134922 -                               memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad);
134923 -                               zds->lhSize = hSize;
134924 -                               ip += toLoad;
134925 -                               break;
134926 -                       }
134928 -                       /* check for single-pass mode opportunity */
134929 -                       if (zds->fParams.frameContentSize && zds->fParams.windowSize /* skippable frame if == 0 */
134930 -                           && (U64)(size_t)(oend - op) >= zds->fParams.frameContentSize) {
134931 -                               size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend - istart);
134932 -                               if (cSize <= (size_t)(iend - istart)) {
134933 -                                       size_t const decompressedSize = ZSTD_decompress_usingDDict(zds->dctx, op, oend - op, istart, cSize, zds->ddict);
134934 -                                       if (ZSTD_isError(decompressedSize))
134935 -                                               return decompressedSize;
134936 -                                       ip = istart + cSize;
134937 -                                       op += decompressedSize;
134938 -                                       zds->dctx->expected = 0;
134939 -                                       zds->stage = zdss_init;
134940 -                                       someMoreWork = 0;
134941 -                                       break;
134942 -                               }
134943 -                       }
134945 -                       /* Consume header */
134946 -                       ZSTD_refDDict(zds->dctx, zds->ddict);
134947 -                       {
134948 -                               size_t const h1Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); /* == ZSTD_frameHeaderSize_prefix */
134949 -                               CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer, h1Size));
134950 -                               {
134951 -                                       size_t const h2Size = ZSTD_nextSrcSizeToDecompress(zds->dctx);
134952 -                                       CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer + h1Size, h2Size));
134953 -                               }
134954 -                       }
134956 -                       zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
134957 -                       if (zds->fParams.windowSize > zds->maxWindowSize)
134958 -                               return ERROR(frameParameter_windowTooLarge);
134960 -                       /* Buffers are preallocated, but double check */
134961 -                       {
134962 -                               size_t const blockSize = MIN(zds->maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX);
134963 -                               size_t const neededOutSize = zds->maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
134964 -                               if (zds->inBuffSize < blockSize) {
134965 -                                       return ERROR(GENERIC);
134966 -                               }
134967 -                               if (zds->outBuffSize < neededOutSize) {
134968 -                                       return ERROR(GENERIC);
134969 -                               }
134970 -                               zds->blockSize = blockSize;
134971 -                       }
134972 -                       zds->stage = zdss_read;
134973 -               }
134974 -                       fallthrough;
134976 -               case zdss_read: {
134977 -                       size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
134978 -                       if (neededInSize == 0) { /* end of frame */
134979 -                               zds->stage = zdss_init;
134980 -                               someMoreWork = 0;
134981 -                               break;
134982 -                       }
134983 -                       if ((size_t)(iend - ip) >= neededInSize) { /* decode directly from src */
134984 -                               const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx);
134985 -                               size_t const decodedSize = ZSTD_decompressContinue(zds->dctx, zds->outBuff + zds->outStart,
134986 -                                                                                  (isSkipFrame ? 0 : zds->outBuffSize - zds->outStart), ip, neededInSize);
134987 -                               if (ZSTD_isError(decodedSize))
134988 -                                       return decodedSize;
134989 -                               ip += neededInSize;
134990 -                               if (!decodedSize && !isSkipFrame)
134991 -                                       break; /* this was just a header */
134992 -                               zds->outEnd = zds->outStart + decodedSize;
134993 -                               zds->stage = zdss_flush;
134994 -                               break;
134995 -                       }
134996 -                       if (ip == iend) {
134997 -                               someMoreWork = 0;
134998 -                               break;
134999 -                       } /* no more input */
135000 -                       zds->stage = zdss_load;
135001 -                       /* pass-through */
135002 -               }
135003 -                       fallthrough;
135005 -               case zdss_load: {
135006 -                       size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
135007 -                       size_t const toLoad = neededInSize - zds->inPos; /* should always be <= remaining space within inBuff */
135008 -                       size_t loadedSize;
135009 -                       if (toLoad > zds->inBuffSize - zds->inPos)
135010 -                               return ERROR(corruption_detected); /* should never happen */
135011 -                       loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend - ip);
135012 -                       ip += loadedSize;
135013 -                       zds->inPos += loadedSize;
135014 -                       if (loadedSize < toLoad) {
135015 -                               someMoreWork = 0;
135016 -                               break;
135017 -                       } /* not enough input, wait for more */
135019 -                       /* decode loaded input */
135020 -                       {
135021 -                               const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx);
135022 -                               size_t const decodedSize = ZSTD_decompressContinue(zds->dctx, zds->outBuff + zds->outStart, zds->outBuffSize - zds->outStart,
135023 -                                                                                  zds->inBuff, neededInSize);
135024 -                               if (ZSTD_isError(decodedSize))
135025 -                                       return decodedSize;
135026 -                               zds->inPos = 0; /* input is consumed */
135027 -                               if (!decodedSize && !isSkipFrame) {
135028 -                                       zds->stage = zdss_read;
135029 -                                       break;
135030 -                               } /* this was just a header */
135031 -                               zds->outEnd = zds->outStart + decodedSize;
135032 -                               zds->stage = zdss_flush;
135033 -                               /* pass-through */
135034 -                       }
135035 -               }
135036 -                       fallthrough;
135038 -               case zdss_flush: {
135039 -                       size_t const toFlushSize = zds->outEnd - zds->outStart;
135040 -                       size_t const flushedSize = ZSTD_limitCopy(op, oend - op, zds->outBuff + zds->outStart, toFlushSize);
135041 -                       op += flushedSize;
135042 -                       zds->outStart += flushedSize;
135043 -                       if (flushedSize == toFlushSize) { /* flush completed */
135044 -                               zds->stage = zdss_read;
135045 -                               if (zds->outStart + zds->blockSize > zds->outBuffSize)
135046 -                                       zds->outStart = zds->outEnd = 0;
135047 -                               break;
135048 -                       }
135049 -                       /* cannot complete flush */
135050 -                       someMoreWork = 0;
135051 -                       break;
135052 -               }
135053 -               default:
135054 -                       return ERROR(GENERIC); /* impossible */
135055 -               }
135056 -       }
135058 -       /* result */
135059 -       input->pos += (size_t)(ip - istart);
135060 -       output->pos += (size_t)(op - ostart);
135061 -       {
135062 -               size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds->dctx);
135063 -               if (!nextSrcSizeHint) {                     /* frame fully decoded */
135064 -                       if (zds->outEnd == zds->outStart) { /* output fully flushed */
135065 -                               if (zds->hostageByte) {
135066 -                                       if (input->pos >= input->size) {
135067 -                                               zds->stage = zdss_read;
135068 -                                               return 1;
135069 -                                       }            /* can't release hostage (not present) */
135070 -                                       input->pos++; /* release hostage */
135071 -                               }
135072 -                               return 0;
135073 -                       }
135074 -                       if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */
135075 -                               input->pos--;    /* note : pos > 0, otherwise, impossible to finish reading last block */
135076 -                               zds->hostageByte = 1;
135077 -                       }
135078 -                       return 1;
135079 -               }
135080 -               nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds->dctx) == ZSTDnit_block); /* preload header of next block */
135081 -               if (zds->inPos > nextSrcSizeHint)
135082 -                       return ERROR(GENERIC); /* should never happen */
135083 -               nextSrcSizeHint -= zds->inPos; /* already loaded*/
135084 -               return nextSrcSizeHint;
135085 -       }
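A hedged sketch of consuming the streaming API above in one pass over a buffer; per the return convention above, 0 means the frame is fully decoded and flushed, while a positive value is a hint of how many more input bytes are wanted. kmalloc is an assumption here (a large maxWindowSize may need vmalloc):

static size_t example_decompress_stream(void *dst, size_t dstCapacity, const void *src, size_t srcSize, size_t maxWindowSize)
{
	size_t const wkspSize = ZSTD_DStreamWorkspaceBound(maxWindowSize);
	void *wksp = kmalloc(wkspSize, GFP_KERNEL);
	ZSTD_inBuffer in = { src, srcSize, 0 };
	ZSTD_outBuffer out = { dst, dstCapacity, 0 };
	ZSTD_DStream *zds;
	size_t ret = 0;

	if (!wksp)
		return ERROR(memory_allocation);
	zds = ZSTD_initDStream(maxWindowSize, wksp, wkspSize);
	if (!zds) {
		kfree(wksp);
		return ERROR(GENERIC);
	}
	while (in.pos < in.size) {
		ret = ZSTD_decompressStream(zds, &out, &in);
		if (ZSTD_isError(ret) || ret == 0)
			break; /* error, or frame done and fully flushed */
	}
	kfree(wksp);
	return ZSTD_isError(ret) ? ret : out.pos;
}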
135088 -EXPORT_SYMBOL(ZSTD_DCtxWorkspaceBound);
135089 -EXPORT_SYMBOL(ZSTD_initDCtx);
135090 -EXPORT_SYMBOL(ZSTD_decompressDCtx);
135091 -EXPORT_SYMBOL(ZSTD_decompress_usingDict);
135093 -EXPORT_SYMBOL(ZSTD_DDictWorkspaceBound);
135094 -EXPORT_SYMBOL(ZSTD_initDDict);
135095 -EXPORT_SYMBOL(ZSTD_decompress_usingDDict);
135097 -EXPORT_SYMBOL(ZSTD_DStreamWorkspaceBound);
135098 -EXPORT_SYMBOL(ZSTD_initDStream);
135099 -EXPORT_SYMBOL(ZSTD_initDStream_usingDDict);
135100 -EXPORT_SYMBOL(ZSTD_resetDStream);
135101 -EXPORT_SYMBOL(ZSTD_decompressStream);
135102 -EXPORT_SYMBOL(ZSTD_DStreamInSize);
135103 -EXPORT_SYMBOL(ZSTD_DStreamOutSize);
135105 -EXPORT_SYMBOL(ZSTD_findFrameCompressedSize);
135106 -EXPORT_SYMBOL(ZSTD_getFrameContentSize);
135107 -EXPORT_SYMBOL(ZSTD_findDecompressedSize);
135109 -EXPORT_SYMBOL(ZSTD_isFrame);
135110 -EXPORT_SYMBOL(ZSTD_getDictID_fromDict);
135111 -EXPORT_SYMBOL(ZSTD_getDictID_fromDDict);
135112 -EXPORT_SYMBOL(ZSTD_getDictID_fromFrame);
135114 -EXPORT_SYMBOL(ZSTD_getFrameParams);
135115 -EXPORT_SYMBOL(ZSTD_decompressBegin);
135116 -EXPORT_SYMBOL(ZSTD_decompressBegin_usingDict);
135117 -EXPORT_SYMBOL(ZSTD_copyDCtx);
135118 -EXPORT_SYMBOL(ZSTD_nextSrcSizeToDecompress);
135119 -EXPORT_SYMBOL(ZSTD_decompressContinue);
135120 -EXPORT_SYMBOL(ZSTD_nextInputType);
135122 -EXPORT_SYMBOL(ZSTD_decompressBlock);
135123 -EXPORT_SYMBOL(ZSTD_insertBlock);
135125 -MODULE_LICENSE("Dual BSD/GPL");
135126 -MODULE_DESCRIPTION("Zstd Decompressor");
135127 diff --git a/lib/zstd/decompress/huf_decompress.c b/lib/zstd/decompress/huf_decompress.c
135128 new file mode 100644
135129 index 000000000000..dee939434873
135130 --- /dev/null
135131 +++ b/lib/zstd/decompress/huf_decompress.c
135132 @@ -0,0 +1,1205 @@
135133 +/* ******************************************************************
135134 + * huff0 huffman decoder,
135135 + * part of Finite State Entropy library
135136 + * Copyright (c) Yann Collet, Facebook, Inc.
135138 + *  You can contact the author at :
135139 + *  - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
135141 + * This source code is licensed under both the BSD-style license (found in the
135142 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
135143 + * in the COPYING file in the root directory of this source tree).
135144 + * You may select, at your option, one of the above-listed licenses.
135145 +****************************************************************** */
135147 +/* **************************************************************
135148 +*  Dependencies
135149 +****************************************************************/
135150 +#include "../common/zstd_deps.h"  /* ZSTD_memcpy, ZSTD_memset */
135151 +#include "../common/compiler.h"
135152 +#include "../common/bitstream.h"  /* BIT_* */
135153 +#include "../common/fse.h"        /* to compress headers */
135154 +#define HUF_STATIC_LINKING_ONLY
135155 +#include "../common/huf.h"
135156 +#include "../common/error_private.h"
135158 +/* **************************************************************
135159 +*  Macros
135160 +****************************************************************/
135162 +/* These two optional macros each force the use of one of the two Huffman
135163 + * decompression implementations. They cannot both be defined at the
135164 + * same time.
135165 + */
135166 +#if defined(HUF_FORCE_DECOMPRESS_X1) && \
135167 +    defined(HUF_FORCE_DECOMPRESS_X2)
135168 +#error "Cannot force the use of the X1 and X2 decoders at the same time!"
135169 +#endif
135172 +/* **************************************************************
135173 +*  Error Management
135174 +****************************************************************/
135175 +#define HUF_isError ERR_isError
135178 +/* **************************************************************
135179 +*  Byte alignment for workSpace management
135180 +****************************************************************/
135181 +#define HUF_ALIGN(x, a)         HUF_ALIGN_MASK((x), (a) - 1)
135182 +#define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
135185 +/* **************************************************************
135186 +*  BMI2 Variant Wrappers
135187 +****************************************************************/
135188 +#if DYNAMIC_BMI2
135190 +#define HUF_DGEN(fn)                                                        \
135191 +                                                                            \
135192 +    static size_t fn##_default(                                             \
135193 +                  void* dst,  size_t dstSize,                               \
135194 +            const void* cSrc, size_t cSrcSize,                              \
135195 +            const HUF_DTable* DTable)                                       \
135196 +    {                                                                       \
135197 +        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
135198 +    }                                                                       \
135199 +                                                                            \
135200 +    static TARGET_ATTRIBUTE("bmi2") size_t fn##_bmi2(                       \
135201 +                  void* dst,  size_t dstSize,                               \
135202 +            const void* cSrc, size_t cSrcSize,                              \
135203 +            const HUF_DTable* DTable)                                       \
135204 +    {                                                                       \
135205 +        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
135206 +    }                                                                       \
135207 +                                                                            \
135208 +    static size_t fn(void* dst, size_t dstSize, void const* cSrc,           \
135209 +                     size_t cSrcSize, HUF_DTable const* DTable, int bmi2)   \
135210 +    {                                                                       \
135211 +        if (bmi2) {                                                         \
135212 +            return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);         \
135213 +        }                                                                   \
135214 +        return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable);          \
135215 +    }
135217 +#else
135219 +#define HUF_DGEN(fn)                                                        \
135220 +    static size_t fn(void* dst, size_t dstSize, void const* cSrc,           \
135221 +                     size_t cSrcSize, HUF_DTable const* DTable, int bmi2)   \
135222 +    {                                                                       \
135223 +        (void)bmi2;                                                         \
135224 +        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
135225 +    }
135227 +#endif
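Concretely, each use site defines a fn##_body worker and then instantiates the wrapper; the X1 decoder later in this file is wired up along these lines (placement illustrative):

/* given a HUF_decompress1X1_usingDTable_internal_body() definition ... */
HUF_DGEN(HUF_decompress1X1_usingDTable_internal)
/* ... this emits fn##_default, fn##_bmi2 (TARGET_ATTRIBUTE("bmi2")) and a
 * dispatcher fn(dst, dstSize, cSrc, cSrcSize, DTable, bmi2) that selects
 * between them at run time when DYNAMIC_BMI2 is enabled. */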
135230 +/*-***************************/
135231 +/*  generic DTableDesc       */
135232 +/*-***************************/
135233 +typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc;
135235 +static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
135237 +    DTableDesc dtd;
135238 +    ZSTD_memcpy(&dtd, table, sizeof(dtd));
135239 +    return dtd;
135243 +#ifndef HUF_FORCE_DECOMPRESS_X2
135245 +/*-***************************/
135246 +/*  single-symbol decoding   */
135247 +/*-***************************/
135248 +typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX1;   /* single-symbol decoding */
135251 + * Packs 4 HUF_DEltX1 structs into a U64. This is used to lay down 4 entries at
135252 + * a time.
135253 + */
135254 +static U64 HUF_DEltX1_set4(BYTE symbol, BYTE nbBits) {
135255 +    U64 D4;
135256 +    if (MEM_isLittleEndian()) {
135257 +        D4 = symbol + (nbBits << 8);
135258 +    } else {
135259 +        D4 = (symbol << 8) + nbBits;
135260 +    }
135261 +    D4 *= 0x0001000100010001ULL;
135262 +    return D4;
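/* Worked illustration with hypothetical values, little-endian case: for
 * symbol 0x41 with nbBits 5, the 16-bit cell is 0x41 + (5 << 8) = 0x0541;
 * multiplying by 0x0001000100010001ULL replicates it into all four lanes,
 * D4 = 0x0541054105410541, i.e. four identical {byte, nbBits} entries. */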
135265 +typedef struct {
135266 +        U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];
135267 +        U32 rankStart[HUF_TABLELOG_ABSOLUTEMAX + 1];
135268 +        U32 statsWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
135269 +        BYTE symbols[HUF_SYMBOLVALUE_MAX + 1];
135270 +        BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];
135271 +} HUF_ReadDTableX1_Workspace;
135274 +size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)
135276 +    return HUF_readDTableX1_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0);
135279 +size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2)
135281 +    U32 tableLog = 0;
135282 +    U32 nbSymbols = 0;
135283 +    size_t iSize;
135284 +    void* const dtPtr = DTable + 1;
135285 +    HUF_DEltX1* const dt = (HUF_DEltX1*)dtPtr;
135286 +    HUF_ReadDTableX1_Workspace* wksp = (HUF_ReadDTableX1_Workspace*)workSpace;
135288 +    DEBUG_STATIC_ASSERT(HUF_DECOMPRESS_WORKSPACE_SIZE >= sizeof(*wksp));
135289 +    if (sizeof(*wksp) > wkspSize) return ERROR(tableLog_tooLarge);
135291 +    DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
135292 +    /* ZSTD_memset(huffWeight, 0, sizeof(huffWeight)); */   /* not necessary, even though some analyzers complain ... */
135294 +    iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), bmi2);
135295 +    if (HUF_isError(iSize)) return iSize;
135297 +    /* Table header */
135298 +    {   DTableDesc dtd = HUF_getDTableDesc(DTable);
135299 +        if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge);   /* DTable too small, Huffman tree cannot fit */
135300 +        dtd.tableType = 0;
135301 +        dtd.tableLog = (BYTE)tableLog;
135302 +        ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
135303 +    }
135305 +    /* Compute symbols and rankStart given rankVal:
135306 +     *
135307 +     * rankVal already contains the number of values of each weight.
135308 +     *
135309 +     * symbols contains the symbols ordered by weight. First are the rankVal[0]
135310 +     * weight 0 symbols, followed by the rankVal[1] weight 1 symbols, and so on.
135311 +     * symbols[0] is filled (but unused) to avoid a branch.
135312 +     *
135313 +     * rankStart contains the offset where each rank belongs in the DTable.
135314 +     * rankStart[0] is not filled because there are no entries in the table for
135315 +     * weight 0.
135316 +     */
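    /* Hypothetical illustration with nbSymbols = 4 and weights {1, 2, 0, 2}:
     * rankVal   = {1, 1, 2}     (one weight-0, one weight-1, two weight-2 symbols)
     * rankStart = {0, 1, 2}     (exclusive prefix sums of rankVal)
     * symbols   = {2, 0, 1, 3}  (the weight-0 symbol first, then by rising weight)
     */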
135317 +    {
135318 +        int n;
135319 +        int nextRankStart = 0;
135320 +        int const unroll = 4;
135321 +        int const nLimit = (int)nbSymbols - unroll + 1;
135322 +        for (n=0; n<(int)tableLog+1; n++) {
135323 +            U32 const curr = nextRankStart;
135324 +            nextRankStart += wksp->rankVal[n];
135325 +            wksp->rankStart[n] = curr;
135326 +        }
135327 +        for (n=0; n < nLimit; n += unroll) {
135328 +            int u;
135329 +            for (u=0; u < unroll; ++u) {
135330 +                size_t const w = wksp->huffWeight[n+u];
135331 +                wksp->symbols[wksp->rankStart[w]++] = (BYTE)(n+u);
135332 +            }
135333 +        }
135334 +        for (; n < (int)nbSymbols; ++n) {
135335 +            size_t const w = wksp->huffWeight[n];
135336 +            wksp->symbols[wksp->rankStart[w]++] = (BYTE)n;
135337 +        }
135338 +    }
135340 +    /* fill DTable
135341 +     * We fill all entries of each weight in order.
135342 +     * That way length is a constant for each iteration of the outer loop.
135343 +     * We can switch based on the length to a different inner loop which is
135344 +     * optimized for that particular case.
135345 +     */
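    /* Hypothetical illustration with tableLog = 11: a weight-w symbol uses
     * nbBits = 12 - w and fills length = 2^(w-1) consecutive entries, so
     * w = 3 gives nbBits = 9, length = 4 (one MEM_write64 of a packed D4),
     * while w = 8 gives nbBits = 4, length = 128 (the 16-per-iteration default).
     */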
135346 +    {
135347 +        U32 w;
135348 +        int symbol=wksp->rankVal[0];
135349 +        int rankStart=0;
135350 +        for (w=1; w<tableLog+1; ++w) {
135351 +            int const symbolCount = wksp->rankVal[w];
135352 +            int const length = (1 << w) >> 1;
135353 +            int uStart = rankStart;
135354 +            BYTE const nbBits = (BYTE)(tableLog + 1 - w);
135355 +            int s;
135356 +            int u;
135357 +            switch (length) {
135358 +            case 1:
135359 +                for (s=0; s<symbolCount; ++s) {
135360 +                    HUF_DEltX1 D;
135361 +                    D.byte = wksp->symbols[symbol + s];
135362 +                    D.nbBits = nbBits;
135363 +                    dt[uStart] = D;
135364 +                    uStart += 1;
135365 +                }
135366 +                break;
135367 +            case 2:
135368 +                for (s=0; s<symbolCount; ++s) {
135369 +                    HUF_DEltX1 D;
135370 +                    D.byte = wksp->symbols[symbol + s];
135371 +                    D.nbBits = nbBits;
135372 +                    dt[uStart+0] = D;
135373 +                    dt[uStart+1] = D;
135374 +                    uStart += 2;
135375 +                }
135376 +                break;
135377 +            case 4:
135378 +                for (s=0; s<symbolCount; ++s) {
135379 +                    U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
135380 +                    MEM_write64(dt + uStart, D4);
135381 +                    uStart += 4;
135382 +                }
135383 +                break;
135384 +            case 8:
135385 +                for (s=0; s<symbolCount; ++s) {
135386 +                    U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
135387 +                    MEM_write64(dt + uStart, D4);
135388 +                    MEM_write64(dt + uStart + 4, D4);
135389 +                    uStart += 8;
135390 +                }
135391 +                break;
135392 +            default:
135393 +                for (s=0; s<symbolCount; ++s) {
135394 +                    U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
135395 +                    for (u=0; u < length; u += 16) {
135396 +                        MEM_write64(dt + uStart + u + 0, D4);
135397 +                        MEM_write64(dt + uStart + u + 4, D4);
135398 +                        MEM_write64(dt + uStart + u + 8, D4);
135399 +                        MEM_write64(dt + uStart + u + 12, D4);
135400 +                    }
135401 +                    assert(u == length);
135402 +                    uStart += length;
135403 +                }
135404 +                break;
135405 +            }
135406 +            symbol += symbolCount;
135407 +            rankStart += symbolCount * length;
135408 +        }
135409 +    }
135410 +    return iSize;
135413 +FORCE_INLINE_TEMPLATE BYTE
135414 +HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog)
135416 +    size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
135417 +    BYTE const c = dt[val].byte;
135418 +    BIT_skipBits(Dstream, dt[val].nbBits);
135419 +    return c;
135422 +#define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) \
135423 +    *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog)
135425 +#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr)  \
135426 +    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
135427 +        HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
135429 +#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \
135430 +    if (MEM_64bits()) \
135431 +        HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
135433 +HINT_INLINE size_t
135434 +HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog)
135436 +    BYTE* const pStart = p;
135438 +    /* up to 4 symbols at a time */
135439 +    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) {
135440 +        HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
135441 +        HUF_DECODE_SYMBOLX1_1(p, bitDPtr);
135442 +        HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
135443 +        HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
135444 +    }
135446 +    /* [0-3] symbols remaining */
135447 +    if (MEM_32bits())
135448 +        while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd))
135449 +            HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
135451 +    /* no more data to retrieve from bitstream, no need to reload */
135452 +    while (p < pEnd)
135453 +        HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
135455 +    return pEnd-pStart;
135458 +FORCE_INLINE_TEMPLATE size_t
135459 +HUF_decompress1X1_usingDTable_internal_body(
135460 +          void* dst,  size_t dstSize,
135461 +    const void* cSrc, size_t cSrcSize,
135462 +    const HUF_DTable* DTable)
135464 +    BYTE* op = (BYTE*)dst;
135465 +    BYTE* const oend = op + dstSize;
135466 +    const void* dtPtr = DTable + 1;
135467 +    const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
135468 +    BIT_DStream_t bitD;
135469 +    DTableDesc const dtd = HUF_getDTableDesc(DTable);
135470 +    U32 const dtLog = dtd.tableLog;
135472 +    CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
135474 +    HUF_decodeStreamX1(op, &bitD, oend, dt, dtLog);
135476 +    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
135478 +    return dstSize;
135481 +FORCE_INLINE_TEMPLATE size_t
135482 +HUF_decompress4X1_usingDTable_internal_body(
135483 +          void* dst,  size_t dstSize,
135484 +    const void* cSrc, size_t cSrcSize,
135485 +    const HUF_DTable* DTable)
135487 +    /* Check */
135488 +    if (cSrcSize < 10) return ERROR(corruption_detected);  /* strict minimum : jump table + 1 byte per stream */
135490 +    {   const BYTE* const istart = (const BYTE*) cSrc;
135491 +        BYTE* const ostart = (BYTE*) dst;
135492 +        BYTE* const oend = ostart + dstSize;
135493 +        BYTE* const olimit = oend - 3;
135494 +        const void* const dtPtr = DTable + 1;
135495 +        const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
135497 +        /* Init */
135498 +        BIT_DStream_t bitD1;
135499 +        BIT_DStream_t bitD2;
135500 +        BIT_DStream_t bitD3;
135501 +        BIT_DStream_t bitD4;
135502 +        size_t const length1 = MEM_readLE16(istart);
135503 +        size_t const length2 = MEM_readLE16(istart+2);
135504 +        size_t const length3 = MEM_readLE16(istart+4);
135505 +        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
135506 +        const BYTE* const istart1 = istart + 6;  /* jumpTable */
135507 +        const BYTE* const istart2 = istart1 + length1;
135508 +        const BYTE* const istart3 = istart2 + length2;
135509 +        const BYTE* const istart4 = istart3 + length3;
135510 +        const size_t segmentSize = (dstSize+3) / 4;
135511 +        BYTE* const opStart2 = ostart + segmentSize;
135512 +        BYTE* const opStart3 = opStart2 + segmentSize;
135513 +        BYTE* const opStart4 = opStart3 + segmentSize;
135514 +        BYTE* op1 = ostart;
135515 +        BYTE* op2 = opStart2;
135516 +        BYTE* op3 = opStart3;
135517 +        BYTE* op4 = opStart4;
135518 +        DTableDesc const dtd = HUF_getDTableDesc(DTable);
135519 +        U32 const dtLog = dtd.tableLog;
135520 +        U32 endSignal = 1;
135522 +        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
135523 +        CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
135524 +        CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
135525 +        CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
135526 +        CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
135528 +        /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */
135529 +        for ( ; (endSignal) & (op4 < olimit) ; ) {
135530 +            HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
135531 +            HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
135532 +            HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
135533 +            HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
135534 +            HUF_DECODE_SYMBOLX1_1(op1, &bitD1);
135535 +            HUF_DECODE_SYMBOLX1_1(op2, &bitD2);
135536 +            HUF_DECODE_SYMBOLX1_1(op3, &bitD3);
135537 +            HUF_DECODE_SYMBOLX1_1(op4, &bitD4);
135538 +            HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
135539 +            HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
135540 +            HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
135541 +            HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
135542 +            HUF_DECODE_SYMBOLX1_0(op1, &bitD1);
135543 +            HUF_DECODE_SYMBOLX1_0(op2, &bitD2);
135544 +            HUF_DECODE_SYMBOLX1_0(op3, &bitD3);
135545 +            HUF_DECODE_SYMBOLX1_0(op4, &bitD4);
135546 +            endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
135547 +            endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
135548 +            endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
135549 +            endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
135550 +        }
135552 +        /* check corruption */
135553 +        /* note : should not be necessary : the op# pointers advance in lock step, and we control op4,
135554 +         *        but curiously, the binary generated by gcc 7.2 & 7.3 with -mbmi2 runs faster when at least one test is present */
135555 +        if (op1 > opStart2) return ERROR(corruption_detected);
135556 +        if (op2 > opStart3) return ERROR(corruption_detected);
135557 +        if (op3 > opStart4) return ERROR(corruption_detected);
135558 +        /* note : op4 is already verified within the main loop */
135560 +        /* finish bitStreams one by one */
135561 +        HUF_decodeStreamX1(op1, &bitD1, opStart2, dt, dtLog);
135562 +        HUF_decodeStreamX1(op2, &bitD2, opStart3, dt, dtLog);
135563 +        HUF_decodeStreamX1(op3, &bitD3, opStart4, dt, dtLog);
135564 +        HUF_decodeStreamX1(op4, &bitD4, oend,     dt, dtLog);
135566 +        /* check */
135567 +        { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
135568 +          if (!endCheck) return ERROR(corruption_detected); }
135570 +        /* decoded size */
135571 +        return dstSize;
135572 +    }
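+/* A minimal standalone sketch (hypothetical helper, not upstream code) of the
+ * 4-stream framing decoded above : three little-endian 16-bit lengths form a
+ * 6-byte jump table, and the fourth stream runs to the end of the block. */
+static int HUF_splitStreams_sketch(const unsigned char* src, size_t srcSize,
+                                   size_t lengths[4])
+{
+    if (srcSize < 10) return -1;    /* jump table + 1 byte per stream */
+    lengths[0] = (size_t)src[0] | ((size_t)src[1] << 8);
+    lengths[1] = (size_t)src[2] | ((size_t)src[3] << 8);
+    lengths[2] = (size_t)src[4] | ((size_t)src[5] << 8);
+    if (lengths[0] + lengths[1] + lengths[2] + 6 > srcSize)
+        return -1;                  /* lengths overflow the source : corrupt */
+    lengths[3] = srcSize - (lengths[0] + lengths[1] + lengths[2] + 6);
+    return 0;
+}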
135576 +typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize,
135577 +                                               const void *cSrc,
135578 +                                               size_t cSrcSize,
135579 +                                               const HUF_DTable *DTable);
135581 +HUF_DGEN(HUF_decompress1X1_usingDTable_internal)
135582 +HUF_DGEN(HUF_decompress4X1_usingDTable_internal)
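+/* note : HUF_DGEN (defined earlier in this file) expands each *_body template
+ * above into the callable *_usingDTable_internal functions used below; when
+ * dynamic BMI2 dispatch is compiled in, it builds both a portable and a
+ * BMI2-targeted version of the body and picks one at run time through the
+ * trailing `bmi2` argument. */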
135586 +size_t HUF_decompress1X1_usingDTable(
135587 +          void* dst,  size_t dstSize,
135588 +    const void* cSrc, size_t cSrcSize,
135589 +    const HUF_DTable* DTable)
135591 +    DTableDesc dtd = HUF_getDTableDesc(DTable);
135592 +    if (dtd.tableType != 0) return ERROR(GENERIC);
135593 +    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
135596 +size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
135597 +                                   const void* cSrc, size_t cSrcSize,
135598 +                                   void* workSpace, size_t wkspSize)
135600 +    const BYTE* ip = (const BYTE*) cSrc;
135602 +    size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
135603 +    if (HUF_isError(hSize)) return hSize;
135604 +    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
135605 +    ip += hSize; cSrcSize -= hSize;
135607 +    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
135611 +size_t HUF_decompress4X1_usingDTable(
135612 +          void* dst,  size_t dstSize,
135613 +    const void* cSrc, size_t cSrcSize,
135614 +    const HUF_DTable* DTable)
135616 +    DTableDesc dtd = HUF_getDTableDesc(DTable);
135617 +    if (dtd.tableType != 0) return ERROR(GENERIC);
135618 +    return HUF_decompress4X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
135621 +static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
135622 +                                   const void* cSrc, size_t cSrcSize,
135623 +                                   void* workSpace, size_t wkspSize, int bmi2)
135625 +    const BYTE* ip = (const BYTE*) cSrc;
135627 +    size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
135628 +    if (HUF_isError(hSize)) return hSize;
135629 +    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
135630 +    ip += hSize; cSrcSize -= hSize;
135632 +    return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
135635 +size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
135636 +                                   const void* cSrc, size_t cSrcSize,
135637 +                                   void* workSpace, size_t wkspSize)
135639 +    return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0);
135643 +#endif /* HUF_FORCE_DECOMPRESS_X2 */
135646 +#ifndef HUF_FORCE_DECOMPRESS_X1
135648 +/* *************************/
135649 +/* double-symbols decoding */
135650 +/* *************************/
135652 +typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2;  /* double-symbols decoding */
135653 +typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
135654 +typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
135655 +typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX];
135658 +/* HUF_fillDTableX2Level2() :
135659 + * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
135660 +static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 sizeLog, const U32 consumed,
135661 +                           const U32* rankValOrigin, const int minWeight,
135662 +                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
135663 +                           U32 nbBitsBaseline, U16 baseSeq, U32* wksp, size_t wkspSize)
135665 +    HUF_DEltX2 DElt;
135666 +    U32* rankVal = wksp;
135668 +    assert(wkspSize >= HUF_TABLELOG_MAX + 1);
135669 +    (void)wkspSize;
135670 +    /* get pre-calculated rankVal */
135671 +    ZSTD_memcpy(rankVal, rankValOrigin, sizeof(U32) * (HUF_TABLELOG_MAX + 1));
135673 +    /* fill skipped values */
135674 +    if (minWeight>1) {
135675 +        U32 i, skipSize = rankVal[minWeight];
135676 +        MEM_writeLE16(&(DElt.sequence), baseSeq);
135677 +        DElt.nbBits   = (BYTE)(consumed);
135678 +        DElt.length   = 1;
135679 +        for (i = 0; i < skipSize; i++)
135680 +            DTable[i] = DElt;
135681 +    }
135683 +    /* fill DTable */
135684 +    {   U32 s; for (s=0; s<sortedListSize; s++) {   /* note : sortedSymbols already skipped */
135685 +            const U32 symbol = sortedSymbols[s].symbol;
135686 +            const U32 weight = sortedSymbols[s].weight;
135687 +            const U32 nbBits = nbBitsBaseline - weight;
135688 +            const U32 length = 1 << (sizeLog-nbBits);
135689 +            const U32 start = rankVal[weight];
135690 +            U32 i = start;
135691 +            const U32 end = start + length;
135693 +            MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
135694 +            DElt.nbBits = (BYTE)(nbBits + consumed);
135695 +            DElt.length = 2;
135696 +            do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */
135698 +            rankVal[weight] += length;
135699 +    }   }
135703 +static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog,
135704 +                           const sortedSymbol_t* sortedList, const U32 sortedListSize,
135705 +                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
135706 +                           const U32 nbBitsBaseline, U32* wksp, size_t wkspSize)
135708 +    U32* rankVal = wksp;
135709 +    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */
135710 +    const U32 minBits  = nbBitsBaseline - maxWeight;
135711 +    U32 s;
135713 +    assert(wkspSize >= HUF_TABLELOG_MAX + 1);
135714 +    wksp += HUF_TABLELOG_MAX + 1;
135715 +    wkspSize -= HUF_TABLELOG_MAX + 1;
135717 +    ZSTD_memcpy(rankVal, rankValOrigin, sizeof(U32) * (HUF_TABLELOG_MAX + 1));
135719 +    /* fill DTable */
135720 +    for (s=0; s<sortedListSize; s++) {
135721 +        const U16 symbol = sortedList[s].symbol;
135722 +        const U32 weight = sortedList[s].weight;
135723 +        const U32 nbBits = nbBitsBaseline - weight;
135724 +        const U32 start = rankVal[weight];
135725 +        const U32 length = 1 << (targetLog-nbBits);
135727 +        if (targetLog-nbBits >= minBits) {   /* enough room for a second symbol */
135728 +            U32 sortedRank;
135729 +            int minWeight = nbBits + scaleLog;
135730 +            if (minWeight < 1) minWeight = 1;
135731 +            sortedRank = rankStart[minWeight];
135732 +            HUF_fillDTableX2Level2(DTable+start, targetLog-nbBits, nbBits,
135733 +                           rankValOrigin[nbBits], minWeight,
135734 +                           sortedList+sortedRank, sortedListSize-sortedRank,
135735 +                           nbBitsBaseline, symbol, wksp, wkspSize);
135736 +        } else {
135737 +            HUF_DEltX2 DElt;
135738 +            MEM_writeLE16(&(DElt.sequence), symbol);
135739 +            DElt.nbBits = (BYTE)(nbBits);
135740 +            DElt.length = 1;
135741 +            {   U32 const end = start + length;
135742 +                U32 u;
135743 +                for (u = start; u < end; u++) DTable[u] = DElt;
135744 +        }   }
135745 +        rankVal[weight] += length;
135746 +    }
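+/* worked example (not from the upstream sources) : with tableLog = 11 the
+ * caller passes nbBitsBaseline = 12; if maxWeight = 11 then
+ * minBits = 12 - 11 = 1. A weight-11 symbol decodes in nbBits = 1 bit, so a
+ * targetLog = 11 table leaves 11 - 1 = 10 >= minBits spare bits, and
+ * HUF_fillDTableX2Level2() packs a second symbol into the same entries. */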
135749 +typedef struct {
135750 +    rankValCol_t rankVal[HUF_TABLELOG_MAX];
135751 +    U32 rankStats[HUF_TABLELOG_MAX + 1];
135752 +    U32 rankStart0[HUF_TABLELOG_MAX + 2];
135753 +    sortedSymbol_t sortedSymbol[HUF_SYMBOLVALUE_MAX + 1];
135754 +    BYTE weightList[HUF_SYMBOLVALUE_MAX + 1];
135755 +    U32 calleeWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
135756 +} HUF_ReadDTableX2_Workspace;
135758 +size_t HUF_readDTableX2_wksp(HUF_DTable* DTable,
135759 +                       const void* src, size_t srcSize,
135760 +                             void* workSpace, size_t wkspSize)
135762 +    U32 tableLog, maxW, sizeOfSort, nbSymbols;
135763 +    DTableDesc dtd = HUF_getDTableDesc(DTable);
135764 +    U32 const maxTableLog = dtd.maxTableLog;
135765 +    size_t iSize;
135766 +    void* dtPtr = DTable+1;   /* force compiler to avoid strict-aliasing */
135767 +    HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
135768 +    U32 *rankStart;
135770 +    HUF_ReadDTableX2_Workspace* const wksp = (HUF_ReadDTableX2_Workspace*)workSpace;
135772 +    if (sizeof(*wksp) > wkspSize) return ERROR(GENERIC);
135774 +    rankStart = wksp->rankStart0 + 1;
135775 +    ZSTD_memset(wksp->rankStats, 0, sizeof(wksp->rankStats));
135776 +    ZSTD_memset(wksp->rankStart0, 0, sizeof(wksp->rankStart0));
135778 +    DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(HUF_DTable));   /* if compiler fails here, assertion is wrong */
135779 +    if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
135780 +    /* ZSTD_memset(weightList, 0, sizeof(weightList)); */  /* not necessary, even though some analyzers complain ... */
135782 +    iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), /* bmi2 */ 0);
135783 +    if (HUF_isError(iSize)) return iSize;
135785 +    /* check result */
135786 +    if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */
135788 +    /* find maxWeight */
135789 +    for (maxW = tableLog; wksp->rankStats[maxW]==0; maxW--) {}  /* necessarily finds a solution before 0 */
135791 +    /* Get start index of each weight */
135792 +    {   U32 w, nextRankStart = 0;
135793 +        for (w=1; w<maxW+1; w++) {
135794 +            U32 curr = nextRankStart;
135795 +            nextRankStart += wksp->rankStats[w];
135796 +            rankStart[w] = curr;
135797 +        }
135798 +        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of the sorted list */
135799 +        sizeOfSort = nextRankStart;
135800 +    }
135802 +    /* sort symbols by weight */
135803 +    {   U32 s;
135804 +        for (s=0; s<nbSymbols; s++) {
135805 +            U32 const w = wksp->weightList[s];
135806 +            U32 const r = rankStart[w]++;
135807 +            wksp->sortedSymbol[r].symbol = (BYTE)s;
135808 +            wksp->sortedSymbol[r].weight = (BYTE)w;
135809 +        }
135810 +        rankStart[0] = 0;   /* forget 0w symbols; this is the beginning of weight(1) */
135811 +    }
135813 +    /* Build rankVal */
135814 +    {   U32* const rankVal0 = wksp->rankVal[0];
135815 +        {   int const rescale = (maxTableLog-tableLog) - 1;   /* tableLog <= maxTableLog */
135816 +            U32 nextRankVal = 0;
135817 +            U32 w;
135818 +            for (w=1; w<maxW+1; w++) {
135819 +                U32 curr = nextRankVal;
135820 +                nextRankVal += wksp->rankStats[w] << (w+rescale);
135821 +                rankVal0[w] = curr;
135822 +        }   }
135823 +        {   U32 const minBits = tableLog+1 - maxW;
135824 +            U32 consumed;
135825 +            for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
135826 +                U32* const rankValPtr = wksp->rankVal[consumed];
135827 +                U32 w;
135828 +                for (w = 1; w < maxW+1; w++) {
135829 +                    rankValPtr[w] = rankVal0[w] >> consumed;
135830 +    }   }   }   }
135832 +    HUF_fillDTableX2(dt, maxTableLog,
135833 +                   wksp->sortedSymbol, sizeOfSort,
135834 +                   wksp->rankStart0, wksp->rankVal, maxW,
135835 +                   tableLog+1,
135836 +                   wksp->calleeWksp, sizeof(wksp->calleeWksp) / sizeof(U32));
135838 +    dtd.tableLog = (BYTE)maxTableLog;
135839 +    dtd.tableType = 1;
135840 +    ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
135841 +    return iSize;
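+/* usage sketch (not upstream code) : the workspace must provide at least
+ * sizeof(HUF_ReadDTableX2_Workspace) bytes, as checked at function entry :
+ *
+ *     HUF_ReadDTableX2_Workspace wksp;
+ *     size_t const hSize = HUF_readDTableX2_wksp(DTable, src, srcSize,
+ *                                                &wksp, sizeof(wksp));
+ *     if (HUF_isError(hSize)) { ... }
+ */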
135845 +FORCE_INLINE_TEMPLATE U32
135846 +HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
135848 +    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
135849 +    ZSTD_memcpy(op, dt+val, 2);
135850 +    BIT_skipBits(DStream, dt[val].nbBits);
135851 +    return dt[val].length;
135854 +FORCE_INLINE_TEMPLATE U32
135855 +HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
135857 +    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
135858 +    ZSTD_memcpy(op, dt+val, 1);
135859 +    if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
135860 +    else {
135861 +        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
135862 +            BIT_skipBits(DStream, dt[val].nbBits);
135863 +            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
135864 +                /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
135865 +                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);
135866 +    }   }
135867 +    return 1;
135870 +#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
135871 +    ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
135873 +#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
135874 +    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
135875 +        ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
135877 +#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
135878 +    if (MEM_64bits()) \
135879 +        ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
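+/* note : the same reload-budget gating as the X1 macros applies here; the
+ * difference is that each X2 lookup may emit up to two decoded bytes. */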
135881 +HINT_INLINE size_t
135882 +HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd,
135883 +                const HUF_DEltX2* const dt, const U32 dtLog)
135885 +    BYTE* const pStart = p;
135887 +    /* up to 8 symbols at a time */
135888 +    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
135889 +        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
135890 +        HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
135891 +        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
135892 +        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
135893 +    }
135895 +    /* closer to end : up to 2 symbols at a time */
135896 +    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
135897 +        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
135899 +    while (p <= pEnd-2)
135900 +        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
135902 +    if (p < pEnd)
135903 +        p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog);
135905 +    return p-pStart;
135908 +FORCE_INLINE_TEMPLATE size_t
135909 +HUF_decompress1X2_usingDTable_internal_body(
135910 +          void* dst,  size_t dstSize,
135911 +    const void* cSrc, size_t cSrcSize,
135912 +    const HUF_DTable* DTable)
135914 +    BIT_DStream_t bitD;
135916 +    /* Init */
135917 +    CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
135919 +    /* decode */
135920 +    {   BYTE* const ostart = (BYTE*) dst;
135921 +        BYTE* const oend = ostart + dstSize;
135922 +        const void* const dtPtr = DTable+1;   /* force compiler to not use strict-aliasing */
135923 +        const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
135924 +        DTableDesc const dtd = HUF_getDTableDesc(DTable);
135925 +        HUF_decodeStreamX2(ostart, &bitD, oend, dt, dtd.tableLog);
135926 +    }
135928 +    /* check */
135929 +    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
135931 +    /* decoded size */
135932 +    return dstSize;
135935 +FORCE_INLINE_TEMPLATE size_t
135936 +HUF_decompress4X2_usingDTable_internal_body(
135937 +          void* dst,  size_t dstSize,
135938 +    const void* cSrc, size_t cSrcSize,
135939 +    const HUF_DTable* DTable)
135941 +    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
135943 +    {   const BYTE* const istart = (const BYTE*) cSrc;
135944 +        BYTE* const ostart = (BYTE*) dst;
135945 +        BYTE* const oend = ostart + dstSize;
135946 +        BYTE* const olimit = oend - (sizeof(size_t)-1);
135947 +        const void* const dtPtr = DTable+1;
135948 +        const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
135950 +        /* Init */
135951 +        BIT_DStream_t bitD1;
135952 +        BIT_DStream_t bitD2;
135953 +        BIT_DStream_t bitD3;
135954 +        BIT_DStream_t bitD4;
135955 +        size_t const length1 = MEM_readLE16(istart);
135956 +        size_t const length2 = MEM_readLE16(istart+2);
135957 +        size_t const length3 = MEM_readLE16(istart+4);
135958 +        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
135959 +        const BYTE* const istart1 = istart + 6;  /* jumpTable */
135960 +        const BYTE* const istart2 = istart1 + length1;
135961 +        const BYTE* const istart3 = istart2 + length2;
135962 +        const BYTE* const istart4 = istart3 + length3;
135963 +        size_t const segmentSize = (dstSize+3) / 4;
135964 +        BYTE* const opStart2 = ostart + segmentSize;
135965 +        BYTE* const opStart3 = opStart2 + segmentSize;
135966 +        BYTE* const opStart4 = opStart3 + segmentSize;
135967 +        BYTE* op1 = ostart;
135968 +        BYTE* op2 = opStart2;
135969 +        BYTE* op3 = opStart3;
135970 +        BYTE* op4 = opStart4;
135971 +        U32 endSignal = 1;
135972 +        DTableDesc const dtd = HUF_getDTableDesc(DTable);
135973 +        U32 const dtLog = dtd.tableLog;
135975 +        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
135976 +        CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
135977 +        CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
135978 +        CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
135979 +        CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
135981 +        /* 16-32 symbols per loop (4-8 symbols per stream) */
135982 +        for ( ; (endSignal) & (op4 < olimit); ) {
135983 +#if defined(__clang__) && (defined(__x86_64__) || defined(__i386__))
135984 +            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
135985 +            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
135986 +            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
135987 +            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
135988 +            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
135989 +            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
135990 +            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
135991 +            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
135992 +            endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
135993 +            endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
135994 +            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
135995 +            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
135996 +            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
135997 +            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
135998 +            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
135999 +            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
136000 +            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
136001 +            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
136002 +            endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
136003 +            endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
136004 +#else
136005 +            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
136006 +            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
136007 +            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
136008 +            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
136009 +            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
136010 +            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
136011 +            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
136012 +            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
136013 +            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
136014 +            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
136015 +            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
136016 +            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
136017 +            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
136018 +            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
136019 +            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
136020 +            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
136021 +            endSignal = (U32)LIKELY(
136022 +                        (BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished)
136023 +                      & (BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished)
136024 +                      & (BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished)
136025 +                      & (BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished));
136026 +#endif
136027 +        }
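+        /* note : the clang/x86 branch above interleaves the reload checks
+         * between stream pairs instead of batching all sixteen decodes,
+         * presumably to help those compilers' instruction scheduling; both
+         * branches perform the same work. */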
136029 +        /* check corruption */
136030 +        if (op1 > opStart2) return ERROR(corruption_detected);
136031 +        if (op2 > opStart3) return ERROR(corruption_detected);
136032 +        if (op3 > opStart4) return ERROR(corruption_detected);
136033 +        /* note : op4 already verified within main loop */
136035 +        /* finish bitStreams one by one */
136036 +        HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
136037 +        HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
136038 +        HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
136039 +        HUF_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
136041 +        /* check */
136042 +        { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
136043 +          if (!endCheck) return ERROR(corruption_detected); }
136045 +        /* decoded size */
136046 +        return dstSize;
136047 +    }
136050 +HUF_DGEN(HUF_decompress1X2_usingDTable_internal)
136051 +HUF_DGEN(HUF_decompress4X2_usingDTable_internal)
136053 +size_t HUF_decompress1X2_usingDTable(
136054 +          void* dst,  size_t dstSize,
136055 +    const void* cSrc, size_t cSrcSize,
136056 +    const HUF_DTable* DTable)
136058 +    DTableDesc dtd = HUF_getDTableDesc(DTable);
136059 +    if (dtd.tableType != 1) return ERROR(GENERIC);
136060 +    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
136063 +size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
136064 +                                   const void* cSrc, size_t cSrcSize,
136065 +                                   void* workSpace, size_t wkspSize)
136067 +    const BYTE* ip = (const BYTE*) cSrc;
136069 +    size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize,
136070 +                                               workSpace, wkspSize);
136071 +    if (HUF_isError(hSize)) return hSize;
136072 +    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
136073 +    ip += hSize; cSrcSize -= hSize;
136075 +    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
136079 +size_t HUF_decompress4X2_usingDTable(
136080 +          void* dst,  size_t dstSize,
136081 +    const void* cSrc, size_t cSrcSize,
136082 +    const HUF_DTable* DTable)
136084 +    DTableDesc dtd = HUF_getDTableDesc(DTable);
136085 +    if (dtd.tableType != 1) return ERROR(GENERIC);
136086 +    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
136089 +static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
136090 +                                   const void* cSrc, size_t cSrcSize,
136091 +                                   void* workSpace, size_t wkspSize, int bmi2)
136093 +    const BYTE* ip = (const BYTE*) cSrc;
136095 +    size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize,
136096 +                                         workSpace, wkspSize);
136097 +    if (HUF_isError(hSize)) return hSize;
136098 +    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
136099 +    ip += hSize; cSrcSize -= hSize;
136101 +    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
136104 +size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
136105 +                                   const void* cSrc, size_t cSrcSize,
136106 +                                   void* workSpace, size_t wkspSize)
136108 +    return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0);
136112 +#endif /* HUF_FORCE_DECOMPRESS_X1 */
136115 +/* ***********************************/
136116 +/* Universal decompression selectors */
136117 +/* ***********************************/
136119 +size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize,
136120 +                                    const void* cSrc, size_t cSrcSize,
136121 +                                    const HUF_DTable* DTable)
136123 +    DTableDesc const dtd = HUF_getDTableDesc(DTable);
136124 +#if defined(HUF_FORCE_DECOMPRESS_X1)
136125 +    (void)dtd;
136126 +    assert(dtd.tableType == 0);
136127 +    return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
136128 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
136129 +    (void)dtd;
136130 +    assert(dtd.tableType == 1);
136131 +    return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
136132 +#else
136133 +    return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
136134 +                           HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
136135 +#endif
136138 +size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,
136139 +                                    const void* cSrc, size_t cSrcSize,
136140 +                                    const HUF_DTable* DTable)
136142 +    DTableDesc const dtd = HUF_getDTableDesc(DTable);
136143 +#if defined(HUF_FORCE_DECOMPRESS_X1)
136144 +    (void)dtd;
136145 +    assert(dtd.tableType == 0);
136146 +    return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
136147 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
136148 +    (void)dtd;
136149 +    assert(dtd.tableType == 1);
136150 +    return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
136151 +#else
136152 +    return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
136153 +                           HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
136154 +#endif
136158 +#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)
136159 +typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
136160 +static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
136162 +    /* single, double, quad */
136163 +    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */
136164 +    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */
136165 +    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */
136166 +    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */
136167 +    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */
136168 +    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */
136169 +    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */
136170 +    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */
136171 +    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */
136172 +    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */
136173 +    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
136174 +    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
136175 +    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
136176 +    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */
136177 +    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */
136178 +    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */
136180 +#endif
136182 +/** HUF_selectDecoder() :
136183 + *  Tells which decoder is likely to decode faster,
136184 + *  based on a set of pre-computed metrics.
136185 + * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2.
136186 + *  Assumption : 0 < dstSize <= 128 KB */
136187 +U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
136189 +    assert(dstSize > 0);
136190 +    assert(dstSize <= 128*1024);
136191 +#if defined(HUF_FORCE_DECOMPRESS_X1)
136192 +    (void)dstSize;
136193 +    (void)cSrcSize;
136194 +    return 0;
136195 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
136196 +    (void)dstSize;
136197 +    (void)cSrcSize;
136198 +    return 1;
136199 +#else
136200 +    /* decoder timing evaluation */
136201 +    {   U32 const Q = (cSrcSize >= dstSize) ? 15 : (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 */
136202 +        U32 const D256 = (U32)(dstSize >> 8);
136203 +        U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
136204 +        U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
136205 +        DTime1 += DTime1 >> 3;  /* advantage to algorithm using less memory, to reduce cache eviction */
136206 +        return DTime1 < DTime0;
136207 +    }
136208 +#endif
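+/* worked example (not from the upstream sources) : decompressing 64 KB from
+ * 32 KB of input gives Q = 32768*16/65536 = 8 and D256 = 65536>>8 = 256.
+ * Reading the Q == 8 row of algoTime :
+ *     DTime0 =  926 + 128*256 = 33694
+ *     DTime1 = 1613 +  75*256 = 20813, plus the DTime1>>3 handicap -> 23414
+ * 23414 < 33694, so the double-symbols decoder (X2) is selected. */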
136212 +size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
136213 +                                     size_t dstSize, const void* cSrc,
136214 +                                     size_t cSrcSize, void* workSpace,
136215 +                                     size_t wkspSize)
136217 +    /* validation checks */
136218 +    if (dstSize == 0) return ERROR(dstSize_tooSmall);
136219 +    if (cSrcSize == 0) return ERROR(corruption_detected);
136221 +    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
136222 +#if defined(HUF_FORCE_DECOMPRESS_X1)
136223 +        (void)algoNb;
136224 +        assert(algoNb == 0);
136225 +        return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
136226 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
136227 +        (void)algoNb;
136228 +        assert(algoNb == 1);
136229 +        return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
136230 +#else
136231 +        return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
136232 +                            cSrcSize, workSpace, wkspSize):
136233 +                        HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
136234 +#endif
136235 +    }
136238 +size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
136239 +                                  const void* cSrc, size_t cSrcSize,
136240 +                                  void* workSpace, size_t wkspSize)
136242 +    /* validation checks */
136243 +    if (dstSize == 0) return ERROR(dstSize_tooSmall);
136244 +    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
136245 +    if (cSrcSize == dstSize) { ZSTD_memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
136246 +    if (cSrcSize == 1) { ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
136248 +    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
136249 +#if defined(HUF_FORCE_DECOMPRESS_X1)
136250 +        (void)algoNb;
136251 +        assert(algoNb == 0);
136252 +        return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
136253 +                                cSrcSize, workSpace, wkspSize);
136254 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
136255 +        (void)algoNb;
136256 +        assert(algoNb == 1);
136257 +        return HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
136258 +                                cSrcSize, workSpace, wkspSize);
136259 +#else
136260 +        return algoNb ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
136261 +                                cSrcSize, workSpace, wkspSize):
136262 +                        HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
136263 +                                cSrcSize, workSpace, wkspSize);
136264 +#endif
136265 +    }
136269 +size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
136271 +    DTableDesc const dtd = HUF_getDTableDesc(DTable);
136272 +#if defined(HUF_FORCE_DECOMPRESS_X1)
136273 +    (void)dtd;
136274 +    assert(dtd.tableType == 0);
136275 +    return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
136276 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
136277 +    (void)dtd;
136278 +    assert(dtd.tableType == 1);
136279 +    return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
136280 +#else
136281 +    return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
136282 +                           HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
136283 +#endif
136286 +#ifndef HUF_FORCE_DECOMPRESS_X2
136287 +size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
136289 +    const BYTE* ip = (const BYTE*) cSrc;
136291 +    size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
136292 +    if (HUF_isError(hSize)) return hSize;
136293 +    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
136294 +    ip += hSize; cSrcSize -= hSize;
136296 +    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
136298 +#endif
136300 +size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
136302 +    DTableDesc const dtd = HUF_getDTableDesc(DTable);
136303 +#if defined(HUF_FORCE_DECOMPRESS_X1)
136304 +    (void)dtd;
136305 +    assert(dtd.tableType == 0);
136306 +    return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
136307 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
136308 +    (void)dtd;
136309 +    assert(dtd.tableType == 1);
136310 +    return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
136311 +#else
136312 +    return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
136313 +                           HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
136314 +#endif
136317 +size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
136319 +    /* validation checks */
136320 +    if (dstSize == 0) return ERROR(dstSize_tooSmall);
136321 +    if (cSrcSize == 0) return ERROR(corruption_detected);
136323 +    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
136324 +#if defined(HUF_FORCE_DECOMPRESS_X1)
136325 +        (void)algoNb;
136326 +        assert(algoNb == 0);
136327 +        return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
136328 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
136329 +        (void)algoNb;
136330 +        assert(algoNb == 1);
136331 +        return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
136332 +#else
136333 +        return algoNb ? HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) :
136334 +                        HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
136335 +#endif
136336 +    }
136338 diff --git a/lib/zstd/decompress/zstd_ddict.c b/lib/zstd/decompress/zstd_ddict.c
136339 new file mode 100644
136340 index 000000000000..dbbc7919de53
136341 --- /dev/null
136342 +++ b/lib/zstd/decompress/zstd_ddict.c
136343 @@ -0,0 +1,241 @@
136345 + * Copyright (c) Yann Collet, Facebook, Inc.
136346 + * All rights reserved.
136348 + * This source code is licensed under both the BSD-style license (found in the
136349 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
136350 + * in the COPYING file in the root directory of this source tree).
136351 + * You may select, at your option, one of the above-listed licenses.
136352 + */
136354 +/* zstd_ddict.c :
136355 + * concentrates all logic that needs to know the internals of the ZSTD_DDict object */
136357 +/*-*******************************************************
136358 +*  Dependencies
136359 +*********************************************************/
136360 +#include "../common/zstd_deps.h"   /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
136361 +#include "../common/cpu.h"         /* bmi2 */
136362 +#include "../common/mem.h"         /* low level memory routines */
136363 +#define FSE_STATIC_LINKING_ONLY
136364 +#include "../common/fse.h"
136365 +#define HUF_STATIC_LINKING_ONLY
136366 +#include "../common/huf.h"
136367 +#include "zstd_decompress_internal.h"
136368 +#include "zstd_ddict.h"
136373 +/*-*******************************************************
136374 +*  Types
136375 +*********************************************************/
136376 +struct ZSTD_DDict_s {
136377 +    void* dictBuffer;
136378 +    const void* dictContent;
136379 +    size_t dictSize;
136380 +    ZSTD_entropyDTables_t entropy;
136381 +    U32 dictID;
136382 +    U32 entropyPresent;
136383 +    ZSTD_customMem cMem;
136384 +};  /* typedef'd to ZSTD_DDict within "zstd.h" */
136386 +const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict)
136388 +    assert(ddict != NULL);
136389 +    return ddict->dictContent;
136392 +size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict)
136394 +    assert(ddict != NULL);
136395 +    return ddict->dictSize;
136398 +void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
136400 +    DEBUGLOG(4, "ZSTD_copyDDictParameters");
136401 +    assert(dctx != NULL);
136402 +    assert(ddict != NULL);
136403 +    dctx->dictID = ddict->dictID;
136404 +    dctx->prefixStart = ddict->dictContent;
136405 +    dctx->virtualStart = ddict->dictContent;
136406 +    dctx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
136407 +    dctx->previousDstEnd = dctx->dictEnd;
136408 +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
136409 +    dctx->dictContentBeginForFuzzing = dctx->prefixStart;
136410 +    dctx->dictContentEndForFuzzing = dctx->previousDstEnd;
136411 +#endif
136412 +    if (ddict->entropyPresent) {
136413 +        dctx->litEntropy = 1;
136414 +        dctx->fseEntropy = 1;
136415 +        dctx->LLTptr = ddict->entropy.LLTable;
136416 +        dctx->MLTptr = ddict->entropy.MLTable;
136417 +        dctx->OFTptr = ddict->entropy.OFTable;
136418 +        dctx->HUFptr = ddict->entropy.hufTable;
136419 +        dctx->entropy.rep[0] = ddict->entropy.rep[0];
136420 +        dctx->entropy.rep[1] = ddict->entropy.rep[1];
136421 +        dctx->entropy.rep[2] = ddict->entropy.rep[2];
136422 +    } else {
136423 +        dctx->litEntropy = 0;
136424 +        dctx->fseEntropy = 0;
136425 +    }
136429 +static size_t
136430 +ZSTD_loadEntropy_intoDDict(ZSTD_DDict* ddict,
136431 +                           ZSTD_dictContentType_e dictContentType)
136433 +    ddict->dictID = 0;
136434 +    ddict->entropyPresent = 0;
136435 +    if (dictContentType == ZSTD_dct_rawContent) return 0;
136437 +    if (ddict->dictSize < 8) {
136438 +        if (dictContentType == ZSTD_dct_fullDict)
136439 +            return ERROR(dictionary_corrupted);   /* only accept specified dictionaries */
136440 +        return 0;   /* pure content mode */
136441 +    }
136442 +    {   U32 const magic = MEM_readLE32(ddict->dictContent);
136443 +        if (magic != ZSTD_MAGIC_DICTIONARY) {
136444 +            if (dictContentType == ZSTD_dct_fullDict)
136445 +                return ERROR(dictionary_corrupted);   /* only accept specified dictionaries */
136446 +            return 0;   /* pure content mode */
136447 +        }
136448 +    }
136449 +    ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE);
136451 +    /* load entropy tables */
136452 +    RETURN_ERROR_IF(ZSTD_isError(ZSTD_loadDEntropy(
136453 +            &ddict->entropy, ddict->dictContent, ddict->dictSize)),
136454 +        dictionary_corrupted, "");
136455 +    ddict->entropyPresent = 1;
136456 +    return 0;
136460 +static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,
136461 +                                      const void* dict, size_t dictSize,
136462 +                                      ZSTD_dictLoadMethod_e dictLoadMethod,
136463 +                                      ZSTD_dictContentType_e dictContentType)
136465 +    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) {
136466 +        ddict->dictBuffer = NULL;
136467 +        ddict->dictContent = dict;
136468 +        if (!dict) dictSize = 0;
136469 +    } else {
136470 +        void* const internalBuffer = ZSTD_customMalloc(dictSize, ddict->cMem);
136471 +        ddict->dictBuffer = internalBuffer;
136472 +        ddict->dictContent = internalBuffer;
136473 +        if (!internalBuffer) return ERROR(memory_allocation);
136474 +        ZSTD_memcpy(internalBuffer, dict, dictSize);
136475 +    }
136476 +    ddict->dictSize = dictSize;
136477 +    ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001);  /* cover both little and big endian */
136479 +    /* parse dictionary content */
136480 +    FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) , "");
136482 +    return 0;
136485 +ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
136486 +                                      ZSTD_dictLoadMethod_e dictLoadMethod,
136487 +                                      ZSTD_dictContentType_e dictContentType,
136488 +                                      ZSTD_customMem customMem)
136490 +    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
136492 +    {   ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_customMalloc(sizeof(ZSTD_DDict), customMem);
136493 +        if (ddict == NULL) return NULL;
136494 +        ddict->cMem = customMem;
136495 +        {   size_t const initResult = ZSTD_initDDict_internal(ddict,
136496 +                                            dict, dictSize,
136497 +                                            dictLoadMethod, dictContentType);
136498 +            if (ZSTD_isError(initResult)) {
136499 +                ZSTD_freeDDict(ddict);
136500 +                return NULL;
136501 +        }   }
136502 +        return ddict;
136503 +    }
136506 +/*! ZSTD_createDDict() :
136507 +*   Create a digested dictionary, to start decompression without startup delay.
136508 +*   `dict` content is copied inside DDict.
136509 +*   Consequently, `dict` can be released after `ZSTD_DDict` creation */
136510 +ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)
136512 +    ZSTD_customMem const allocator = { NULL, NULL, NULL };
136513 +    return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, allocator);
136516 +/*! ZSTD_createDDict_byReference() :
136517 + *  Create a digested dictionary, to start decompression without startup delay.
136518 + *  Dictionary content is simply referenced, and will be accessed during decompression.
136519 + *  Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */
136520 +ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize)
136522 +    ZSTD_customMem const allocator = { NULL, NULL, NULL };
136523 +    return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, allocator);
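+/* usage sketch (not upstream code) : lifetime contrast between the two
+ * constructors above :
+ *
+ *     ZSTD_DDict* const a = ZSTD_createDDict(buf, bufSize);
+ *     kfree(buf);              // fine : the content was copied into the DDict
+ *
+ *     ZSTD_DDict* const b = ZSTD_createDDict_byReference(buf2, buf2Size);
+ *     // ... use b ...
+ *     ZSTD_freeDDict(b);       // must happen before buf2 is released
+ *     kfree(buf2);
+ */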
136527 +const ZSTD_DDict* ZSTD_initStaticDDict(
136528 +                                void* sBuffer, size_t sBufferSize,
136529 +                                const void* dict, size_t dictSize,
136530 +                                ZSTD_dictLoadMethod_e dictLoadMethod,
136531 +                                ZSTD_dictContentType_e dictContentType)
136533 +    size_t const neededSpace = sizeof(ZSTD_DDict)
136534 +                             + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
136535 +    ZSTD_DDict* const ddict = (ZSTD_DDict*)sBuffer;
136536 +    assert(sBuffer != NULL);
136537 +    assert(dict != NULL);
136538 +    if ((size_t)sBuffer & 7) return NULL;   /* must be 8-byte aligned */
136539 +    if (sBufferSize < neededSpace) return NULL;
136540 +    if (dictLoadMethod == ZSTD_dlm_byCopy) {
136541 +        ZSTD_memcpy(ddict+1, dict, dictSize);  /* local copy */
136542 +        dict = ddict+1;
136543 +    }
136544 +    if (ZSTD_isError( ZSTD_initDDict_internal(ddict,
136545 +                                              dict, dictSize,
136546 +                                              ZSTD_dlm_byRef, dictContentType) ))
136547 +        return NULL;
136548 +    return ddict;
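+/* usage sketch (not upstream code) : pairing ZSTD_estimateDDictSize() (below)
+ * with ZSTD_initStaticDDict() to place a DDict in caller-provided storage;
+ * the buffer must be 8-byte aligned and at least the estimated size :
+ *
+ *     size_t const need = ZSTD_estimateDDictSize(dictSize, ZSTD_dlm_byCopy);
+ *     void* const mem = kmalloc(need, GFP_KERNEL);  // kmalloc is 8-byte aligned
+ *     const ZSTD_DDict* const ddict = (mem == NULL) ? NULL :
+ *         ZSTD_initStaticDDict(mem, need, dict, dictSize,
+ *                              ZSTD_dlm_byCopy, ZSTD_dct_auto);
+ *     if (ddict == NULL) kfree(mem);   // kfree(NULL) is a no-op
+ */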
136552 +size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
136554 +    if (ddict==NULL) return 0;   /* support free on NULL */
136555 +    {   ZSTD_customMem const cMem = ddict->cMem;
136556 +        ZSTD_customFree(ddict->dictBuffer, cMem);
136557 +        ZSTD_customFree(ddict, cMem);
136558 +        return 0;
136559 +    }
136562 +/*! ZSTD_estimateDDictSize() :
136563 + *  Estimate amount of memory that will be needed to create a dictionary for decompression.
136564 + *  Note : dictionaries created by reference using ZSTD_dlm_byRef are smaller */
136565 +size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod)
136567 +    return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
136570 +size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
136572 +    if (ddict==NULL) return 0;   /* support sizeof on NULL */
136573 +    return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0) ;
136576 +/*! ZSTD_getDictID_fromDDict() :
136577 + *  Provides the dictID of the dictionary loaded into `ddict`.
136578 + *  If @return == 0, the dictionary is not conformant to the Zstandard specification, or is empty.
136579 + *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
136580 +unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
136582 +    if (ddict==NULL) return 0;
136583 +    return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
136585 diff --git a/lib/zstd/decompress/zstd_ddict.h b/lib/zstd/decompress/zstd_ddict.h
136586 new file mode 100644
136587 index 000000000000..8c1a79d666f8
136588 --- /dev/null
136589 +++ b/lib/zstd/decompress/zstd_ddict.h
136590 @@ -0,0 +1,44 @@
136592 + * Copyright (c) Yann Collet, Facebook, Inc.
136593 + * All rights reserved.
136595 + * This source code is licensed under both the BSD-style license (found in the
136596 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
136597 + * in the COPYING file in the root directory of this source tree).
136598 + * You may select, at your option, one of the above-listed licenses.
136599 + */
136602 +#ifndef ZSTD_DDICT_H
136603 +#define ZSTD_DDICT_H
136605 +/*-*******************************************************
136606 + *  Dependencies
136607 + *********************************************************/
136608 +#include "../common/zstd_deps.h"   /* size_t */
136609 +#include <linux/zstd.h>     /* ZSTD_DDict, and several public functions */
136612 +/*-*******************************************************
136613 + *  Interface
136614 + *********************************************************/
136616 +/* note: several prototypes are already published in `zstd.h` :
136617 + * ZSTD_createDDict()
136618 + * ZSTD_createDDict_byReference()
136619 + * ZSTD_createDDict_advanced()
136620 + * ZSTD_freeDDict()
136621 + * ZSTD_initStaticDDict()
136622 + * ZSTD_sizeof_DDict()
136623 + * ZSTD_estimateDDictSize()
136624 + * ZSTD_getDictID_fromDict()
136625 + */
136627 +const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict);
136628 +size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict);
136630 +void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
136634 +#endif /* ZSTD_DDICT_H */
136635 diff --git a/lib/zstd/decompress/zstd_decompress.c b/lib/zstd/decompress/zstd_decompress.c
136636 new file mode 100644
136637 index 000000000000..16b4ea795a7e
136638 --- /dev/null
136639 +++ b/lib/zstd/decompress/zstd_decompress.c
136640 @@ -0,0 +1,2075 @@
136642 + * Copyright (c) Yann Collet, Facebook, Inc.
136643 + * All rights reserved.
136645 + * This source code is licensed under both the BSD-style license (found in the
136646 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
136647 + * in the COPYING file in the root directory of this source tree).
136648 + * You may select, at your option, one of the above-listed licenses.
136649 + */
136652 +/* ***************************************************************
136653 +*  Tuning parameters
136654 +*****************************************************************/
136656 + * HEAPMODE :
136657 + * Select how the default decompression function ZSTD_decompress() allocates its context :
136658 + * on the stack (0), or on the heap (1, default; requires malloc()).
136659 + * Note that functions with explicit context such as ZSTD_decompressDCtx() are unaffected.
136660 + */
136661 +#ifndef ZSTD_HEAPMODE
136662 +#  define ZSTD_HEAPMODE 1
136663 +#endif
136666 +*  LEGACY_SUPPORT :
136667 +*  if set to 1+, ZSTD_decompress() can decode older formats (v0.1+)
136671 + *  MAXWINDOWSIZE_DEFAULT :
136672 + *  maximum window size accepted by DStream __by default__.
136673 + *  Frames requiring more memory will be rejected.
136674 + *  It's possible to set a different limit using ZSTD_DCtx_setMaxWindowSize().
136675 + */
136676 +#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
136677 +#  define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) + 1)
136678 +#endif
136681 + *  NO_FORWARD_PROGRESS_MAX :
136682 + *  maximum allowed nb of calls to ZSTD_decompressStream()
136683 + *  without any forward progress
136684 + *  (defined as: no byte read from input, and no byte flushed to output)
136685 + *  before triggering an error.
136686 + */
136687 +#ifndef ZSTD_NO_FORWARD_PROGRESS_MAX
136688 +#  define ZSTD_NO_FORWARD_PROGRESS_MAX 16
136689 +#endif
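+/* note (illustrative) : each knob above is wrapped in #ifndef, so it can be
+ * overridden from the build system rather than by editing this file, e.g.
+ * with hypothetical compile flags such as :
+ *     -DZSTD_NO_FORWARD_PROGRESS_MAX=32 -DZSTD_HEAPMODE=0
+ */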
136692 +/*-*******************************************************
136693 +*  Dependencies
136694 +*********************************************************/
136695 +#include "../common/zstd_deps.h"   /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
136696 +#include "../common/cpu.h"         /* bmi2 */
136697 +#include "../common/mem.h"         /* low level memory routines */
136698 +#define FSE_STATIC_LINKING_ONLY
136699 +#include "../common/fse.h"
136700 +#define HUF_STATIC_LINKING_ONLY
136701 +#include "../common/huf.h"
136702 +#include <linux/xxhash.h> /* xxh64_reset, xxh64_update, xxh64_digest, XXH64 */
136703 +#include "../common/zstd_internal.h"  /* blockProperties_t */
136704 +#include "zstd_decompress_internal.h"   /* ZSTD_DCtx */
136705 +#include "zstd_ddict.h"  /* ZSTD_DDictDictContent */
136706 +#include "zstd_decompress_block.h"   /* ZSTD_decompressBlock_internal */
136711 +/*************************************
136712 + * Multiple DDicts Hashset internals *
136713 + *************************************/
136715 +#define DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT 4
136716 +#define DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT 3   /* These two constants represent a SIZE_MULT/COUNT_MULT load factor without using a float.
136717 +                                                     * Currently, that means a 0.75 load factor.
136718 +                                                     * So, if (count * COUNT_MULT) / (size * SIZE_MULT) != 0, then we've exceeded
136719 +                                                     * the load factor of the ddict hash set.
136720 +                                                     */
136722 +#define DDICT_HASHSET_TABLE_BASE_SIZE 64
136723 +#define DDICT_HASHSET_RESIZE_FACTOR 2
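Editor's note: concretely, with these 4/3 constants and the parenthesization in the comment above, a resize triggers once count * 4 >= size * 3. For the base table size of 64 that is count >= 48 (48/64 = 0.75), after which the table doubles to 128.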
136725 +/* Hash function to determine starting position of dict insertion within the table
136726 + * Returns an index in [0, hashSet->ddictPtrTableSize)
136727 + */
136728 +static size_t ZSTD_DDictHashSet_getIndex(const ZSTD_DDictHashSet* hashSet, U32 dictID) {
136729 +    const U64 hash = xxh64(&dictID, sizeof(U32), 0);
136730 +    /* DDict ptr table size is a power of 2, use size - 1 as mask to get an index within [0, hashSet->ddictPtrTableSize) */
136731 +    return hash & (hashSet->ddictPtrTableSize - 1);
136734 +/* Adds DDict to a hashset without resizing it.
136735 + * If inserting a DDict with a dictID that already exists in the set, replaces the one in the set.
136736 + * Returns 0 if successful, or a zstd error code if something went wrong.
136737 + */
136738 +static size_t ZSTD_DDictHashSet_emplaceDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict) {
136739 +    const U32 dictID = ZSTD_getDictID_fromDDict(ddict);
136740 +    size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
136741 +    const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1;
136742 +    RETURN_ERROR_IF(hashSet->ddictPtrCount == hashSet->ddictPtrTableSize, GENERIC, "Hash set is full!");
136743 +    DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx);
136744 +    while (hashSet->ddictPtrTable[idx] != NULL) {
136745 +        /* Replace existing ddict if inserting ddict with same dictID */
136746 +        if (ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]) == dictID) {
136747 +            DEBUGLOG(4, "DictID already exists, replacing rather than adding");
136748 +            hashSet->ddictPtrTable[idx] = ddict;
136749 +            return 0;
136750 +        }
136751 +        idx++;
136752 +        idx &= idxRangeMask;   /* wrap to the start of the table past the last slot */
136753 +    }
136754 +    DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx);
136755 +    hashSet->ddictPtrTable[idx] = ddict;
136756 +    hashSet->ddictPtrCount++;
136757 +    return 0;
136760 +/* Expands the hash table by a factor of DDICT_HASHSET_RESIZE_FACTOR:
136761 + * allocates a new table, rehashes all values into it, then frees the old table.
136762 + * Returns 0 on success, otherwise a zstd error code.
136763 + */
136764 +static size_t ZSTD_DDictHashSet_expand(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) {
136765 +    size_t newTableSize = hashSet->ddictPtrTableSize * DDICT_HASHSET_RESIZE_FACTOR;
136766 +    const ZSTD_DDict** newTable = (const ZSTD_DDict**)ZSTD_customCalloc(sizeof(ZSTD_DDict*) * newTableSize, customMem);
136767 +    const ZSTD_DDict** oldTable = hashSet->ddictPtrTable;
136768 +    size_t oldTableSize = hashSet->ddictPtrTableSize;
136769 +    size_t i;
136771 +    DEBUGLOG(4, "Expanding DDict hash table! Old size: %zu new size: %zu", oldTableSize, newTableSize);
136772 +    RETURN_ERROR_IF(!newTable, memory_allocation, "Expanded hashset allocation failed!");
136773 +    hashSet->ddictPtrTable = newTable;
136774 +    hashSet->ddictPtrTableSize = newTableSize;
136775 +    hashSet->ddictPtrCount = 0;
136776 +    for (i = 0; i < oldTableSize; ++i) {
136777 +        if (oldTable[i] != NULL) {
136778 +            FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, oldTable[i]), "");
136779 +        }
136780 +    }
136781 +    ZSTD_customFree((void*)oldTable, customMem);
136782 +    DEBUGLOG(4, "Finished re-hash");
136783 +    return 0;
136786 +/* Fetches a DDict with the given dictID
136787 + * Returns the ZSTD_DDict* with the requested dictID. If it doesn't exist, then returns NULL.
136788 + */
136789 +static const ZSTD_DDict* ZSTD_DDictHashSet_getDDict(ZSTD_DDictHashSet* hashSet, U32 dictID) {
136790 +    size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
136791 +    const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1;
136792 +    DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx);
136793 +    for (;;) {
136794 +        size_t currDictID = ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]);
136795 +        if (currDictID == dictID || currDictID == 0) {
136796 +            /* currDictID == 0 implies a NULL ddict entry */
136797 +            break;
136798 +        } else {
136799 +            idx++;
136800 +            idx &= idxRangeMask;    /* Goes to start of table when we reach the end */
136801 +        }
136802 +    }
136803 +    DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx);
136804 +    return hashSet->ddictPtrTable[idx];
136807 +/* Allocates space for and returns a ddict hash set
136808 + * The hash set's ZSTD_DDict* table has all values automatically set to NULL to begin with.
136809 + * Returns NULL if allocation failed.
136810 + */
136811 +static ZSTD_DDictHashSet* ZSTD_createDDictHashSet(ZSTD_customMem customMem) {
136812 +    ZSTD_DDictHashSet* ret = (ZSTD_DDictHashSet*)ZSTD_customMalloc(sizeof(ZSTD_DDictHashSet), customMem);
136813 +    DEBUGLOG(4, "Allocating new hash set");
136814 +    if (!ret) return NULL;   /* check the set itself before dereferencing it */
136815 +    ret->ddictPtrTable = (const ZSTD_DDict**)ZSTD_customCalloc(DDICT_HASHSET_TABLE_BASE_SIZE * sizeof(ZSTD_DDict*), customMem);
136816 +    ret->ddictPtrTableSize = DDICT_HASHSET_TABLE_BASE_SIZE;
136817 +    ret->ddictPtrCount = 0;
136818 +    if (!ret->ddictPtrTable) {
136819 +        ZSTD_customFree(ret, customMem);   /* don't leak the set when the table allocation fails */
136820 +        return NULL;
136821 +    }
136822 +    return ret;
136823 +/* Frees the table of ZSTD_DDict* within a hashset, then frees the hashset itself.
136824 + * Note: The ZSTD_DDict* within the table are NOT freed.
136825 + */
136826 +static void ZSTD_freeDDictHashSet(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) {
136827 +    DEBUGLOG(4, "Freeing ddict hash set");
136828 +    if (hashSet && hashSet->ddictPtrTable) {
136829 +        ZSTD_customFree((void*)hashSet->ddictPtrTable, customMem);
136830 +    }
136831 +    if (hashSet) {
136832 +        ZSTD_customFree(hashSet, customMem);
136833 +    }
136836 +/* Public function: Adds a DDict into the ZSTD_DDictHashSet, possibly triggering a resize of the hash set.
136837 + * Returns 0 on success, or a ZSTD error.
136838 + */
136839 +static size_t ZSTD_DDictHashSet_addDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict, ZSTD_customMem customMem) {
136840 +    DEBUGLOG(4, "Adding dict ID: %u to hashset with - Count: %zu Tablesize: %zu", ZSTD_getDictID_fromDDict(ddict), hashSet->ddictPtrCount, hashSet->ddictPtrTableSize);
136841 +    if ((hashSet->ddictPtrCount * DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT) / (hashSet->ddictPtrTableSize * DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT) != 0) {
136842 +        FORWARD_IF_ERROR(ZSTD_DDictHashSet_expand(hashSet, customMem), "");
136843 +    }
136844 +    FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, ddict), "");
136845 +    return 0;
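Editor's note: for context, a sketch of how a caller ends up exercising this hash set (experimental multiple-DDict API; the parameter name ZSTD_d_refMultipleDDicts is an assumption inferred from the ZSTD_rmd_* enum used in this file):

    ZSTD_DCtx_setParameter(dctx, ZSTD_d_refMultipleDDicts, ZSTD_rmd_refMultipleDDicts);
    ZSTD_DCtx_refDDict(dctx, ddictA);   /* each call lands in ZSTD_DDictHashSet_addDDict() */
    ZSTD_DCtx_refDDict(dctx, ddictB);
    /* later, ZSTD_decodeFrameHeader() picks the DDict matching the frame's dictID */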
136848 +/*-*************************************************************
136849 +*   Context management
136850 +***************************************************************/
136851 +size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx)
136853 +    if (dctx==NULL) return 0;   /* support sizeof NULL */
136854 +    return sizeof(*dctx)
136855 +           + ZSTD_sizeof_DDict(dctx->ddictLocal)
136856 +           + dctx->inBuffSize + dctx->outBuffSize;
136859 +size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); }
136862 +static size_t ZSTD_startingInputLength(ZSTD_format_e format)
136864 +    size_t const startingInputLength = ZSTD_FRAMEHEADERSIZE_PREFIX(format);
136865 +    /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */
136866 +    assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) );
136867 +    return startingInputLength;
136870 +static void ZSTD_DCtx_resetParameters(ZSTD_DCtx* dctx)
136872 +    assert(dctx->streamStage == zdss_init);
136873 +    dctx->format = ZSTD_f_zstd1;
136874 +    dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
136875 +    dctx->outBufferMode = ZSTD_bm_buffered;
136876 +    dctx->forceIgnoreChecksum = ZSTD_d_validateChecksum;
136877 +    dctx->refMultipleDDicts = ZSTD_rmd_refSingleDDict;
136880 +static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
136882 +    dctx->staticSize  = 0;
136883 +    dctx->ddict       = NULL;
136884 +    dctx->ddictLocal  = NULL;
136885 +    dctx->dictEnd     = NULL;
136886 +    dctx->ddictIsCold = 0;
136887 +    dctx->dictUses = ZSTD_dont_use;
136888 +    dctx->inBuff      = NULL;
136889 +    dctx->inBuffSize  = 0;
136890 +    dctx->outBuffSize = 0;
136891 +    dctx->streamStage = zdss_init;
136892 +    dctx->legacyContext = NULL;
136893 +    dctx->previousLegacyVersion = 0;
136894 +    dctx->noForwardProgress = 0;
136895 +    dctx->oversizedDuration = 0;
136896 +    dctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
136897 +    dctx->ddictSet = NULL;
136898 +    ZSTD_DCtx_resetParameters(dctx);
136899 +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
136900 +    dctx->dictContentEndForFuzzing = NULL;
136901 +#endif
136904 +ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize)
136906 +    ZSTD_DCtx* const dctx = (ZSTD_DCtx*) workspace;
136908 +    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
136909 +    if (workspaceSize < sizeof(ZSTD_DCtx)) return NULL;  /* minimum size */
136911 +    ZSTD_initDCtx_internal(dctx);
136912 +    dctx->staticSize = workspaceSize;
136913 +    dctx->inBuff = (char*)(dctx+1);
136914 +    return dctx;
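Editor's note: a usage sketch for the static initializer above. The workspace size is an arbitrary assumption; real callers should size it from ZSTD_estimateDCtxSize() plus any streaming buffers they need:

    static char workspace[64 * 1024] __aligned(8);   /* kernel-style alignment attribute */
    ZSTD_DCtx* const dctx = ZSTD_initStaticDCtx(workspace, sizeof(workspace));
    if (dctx == NULL) { /* workspace misaligned or smaller than sizeof(ZSTD_DCtx) */ }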
136917 +ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
136919 +    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
136921 +    {   ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_customMalloc(sizeof(*dctx), customMem);
136922 +        if (!dctx) return NULL;
136923 +        dctx->customMem = customMem;
136924 +        ZSTD_initDCtx_internal(dctx);
136925 +        return dctx;
136926 +    }
136929 +ZSTD_DCtx* ZSTD_createDCtx(void)
136931 +    DEBUGLOG(3, "ZSTD_createDCtx");
136932 +    return ZSTD_createDCtx_advanced(ZSTD_defaultCMem);
136935 +static void ZSTD_clearDict(ZSTD_DCtx* dctx)
136937 +    ZSTD_freeDDict(dctx->ddictLocal);
136938 +    dctx->ddictLocal = NULL;
136939 +    dctx->ddict = NULL;
136940 +    dctx->dictUses = ZSTD_dont_use;
136943 +size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
136945 +    if (dctx==NULL) return 0;   /* support free on NULL */
136946 +    RETURN_ERROR_IF(dctx->staticSize, memory_allocation, "not compatible with static DCtx");
136947 +    {   ZSTD_customMem const cMem = dctx->customMem;
136948 +        ZSTD_clearDict(dctx);
136949 +        ZSTD_customFree(dctx->inBuff, cMem);
136950 +        dctx->inBuff = NULL;
136951 +        if (dctx->ddictSet) {
136952 +            ZSTD_freeDDictHashSet(dctx->ddictSet, cMem);
136953 +            dctx->ddictSet = NULL;
136954 +        }
136955 +        ZSTD_customFree(dctx, cMem);
136956 +        return 0;
136957 +    }
136960 +/* no longer useful */
136961 +void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
136963 +    size_t const toCopy = (size_t)((char*)(&dstDCtx->inBuff) - (char*)dstDCtx);
136964 +    ZSTD_memcpy(dstDCtx, srcDCtx, toCopy);  /* no need to copy workspace */
136967 +/* Given a dctx with a digested frame params, re-selects the correct ZSTD_DDict based on
136968 + * the requested dict ID from the frame. If there exists a reference to the correct ZSTD_DDict, then
136969 + * accordingly sets the ddict to be used to decompress the frame.
136971 + * If no DDict is found, then no action is taken, and the ZSTD_DCtx::ddict remains as-is.
136973 + * ZSTD_d_refMultipleDDicts must be enabled for this function to be called.
136974 + */
136975 +static void ZSTD_DCtx_selectFrameDDict(ZSTD_DCtx* dctx) {
136976 +    assert(dctx->refMultipleDDicts && dctx->ddictSet);
136977 +    DEBUGLOG(4, "Adjusting DDict based on requested dict ID from frame");
136978 +    if (dctx->ddict) {
136979 +        const ZSTD_DDict* frameDDict = ZSTD_DDictHashSet_getDDict(dctx->ddictSet, dctx->fParams.dictID);
136980 +        if (frameDDict) {
136981 +            DEBUGLOG(4, "DDict found!");
136982 +            ZSTD_clearDict(dctx);
136983 +            dctx->dictID = dctx->fParams.dictID;
136984 +            dctx->ddict = frameDDict;
136985 +            dctx->dictUses = ZSTD_use_indefinitely;
136986 +        }
136987 +    }
136991 +/*-*************************************************************
136992 + *   Frame header decoding
136993 + ***************************************************************/
136995 +/*! ZSTD_isFrame() :
136996 + *  Tells if the content of `buffer` starts with a valid Frame Identifier.
136997 + *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
136998 + *  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
136999 + *  Note 3 : Skippable Frame Identifiers are considered valid. */
137000 +unsigned ZSTD_isFrame(const void* buffer, size_t size)
137002 +    if (size < ZSTD_FRAMEIDSIZE) return 0;
137003 +    {   U32 const magic = MEM_readLE32(buffer);
137004 +        if (magic == ZSTD_MAGICNUMBER) return 1;
137005 +        if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
137006 +    }
137007 +    return 0;
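Editor's note: for reference, ZSTD_MAGICNUMBER is 0xFD2FB528 stored little-endian, so every standard frame begins with the same four bytes:

    unsigned char const hdr[4] = { 0x28, 0xB5, 0x2F, 0xFD };  /* LE encoding of 0xFD2FB528 */
    /* ZSTD_isFrame(hdr, sizeof(hdr)) == 1 */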
137010 +/** ZSTD_frameHeaderSize_internal() :
137011 + *  srcSize must be large enough to reach header size fields.
137012 + *  note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless.
137013 + * @return : size of the Frame Header
137014 + *           or an error code, which can be tested with ZSTD_isError() */
137015 +static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format)
137017 +    size_t const minInputSize = ZSTD_startingInputLength(format);
137018 +    RETURN_ERROR_IF(srcSize < minInputSize, srcSize_wrong, "");
137020 +    {   BYTE const fhd = ((const BYTE*)src)[minInputSize-1];
137021 +        U32 const dictID= fhd & 3;
137022 +        U32 const singleSegment = (fhd >> 5) & 1;
137023 +        U32 const fcsId = fhd >> 6;
137024 +        return minInputSize + !singleSegment
137025 +             + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId]
137026 +             + (singleSegment && !fcsId);
137027 +    }
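Editor's note: worked example, using the standard field-width tables ZSTD_did_fieldSize = {0,1,2,4} and ZSTD_fcs_fieldSize = {0,2,4,8}: fhd = 0x42 gives dictID code 2, singleSegment 0, fcsId 1, so the header occupies 5 (magic + descriptor byte) + 1 (window byte) + 2 (dictID) + 2 (content size) = 10 bytes.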
137030 +/** ZSTD_frameHeaderSize() :
137031 + *  srcSize must be >= ZSTD_frameHeaderSize_prefix.
137032 + * @return : size of the Frame Header,
137033 + *           or an error code (if srcSize is too small) */
137034 +size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
137036 +    return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1);
137040 +/** ZSTD_getFrameHeader_advanced() :
137041 + *  decode Frame Header, or require larger `srcSize`.
137042 + *  note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless
137043 + * @return : 0, `zfhPtr` is correctly filled,
137044 + *          >0, `srcSize` is too small, the returned value is the `srcSize` amount needed,
137045 + *           or an error code, which can be tested using ZSTD_isError() */
137046 +size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
137048 +    const BYTE* ip = (const BYTE*)src;
137049 +    size_t const minInputSize = ZSTD_startingInputLength(format);
137051 +    ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr));   /* not strictly necessary, but static analyzers do not understand that zfhPtr will only be read if the return value is zero, since they are 2 different signals */
137052 +    if (srcSize < minInputSize) return minInputSize;
137053 +    RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter");
137055 +    if ( (format != ZSTD_f_zstd1_magicless)
137056 +      && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
137057 +        if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
137058 +            /* skippable frame */
137059 +            if (srcSize < ZSTD_SKIPPABLEHEADERSIZE)
137060 +                return ZSTD_SKIPPABLEHEADERSIZE; /* magic number + frame length */
137061 +            ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr));
137062 +            zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE);
137063 +            zfhPtr->frameType = ZSTD_skippableFrame;
137064 +            return 0;
137065 +        }
137066 +        RETURN_ERROR(prefix_unknown, "");
137067 +    }
137069 +    /* ensure there is enough `srcSize` to fully read/decode frame header */
137070 +    {   size_t const fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format);
137071 +        if (srcSize < fhsize) return fhsize;
137072 +        zfhPtr->headerSize = (U32)fhsize;
137073 +    }
137075 +    {   BYTE const fhdByte = ip[minInputSize-1];
137076 +        size_t pos = minInputSize;
137077 +        U32 const dictIDSizeCode = fhdByte&3;
137078 +        U32 const checksumFlag = (fhdByte>>2)&1;
137079 +        U32 const singleSegment = (fhdByte>>5)&1;
137080 +        U32 const fcsID = fhdByte>>6;
137081 +        U64 windowSize = 0;
137082 +        U32 dictID = 0;
137083 +        U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN;
137084 +        RETURN_ERROR_IF((fhdByte & 0x08) != 0, frameParameter_unsupported,
137085 +                        "reserved bits, must be zero");
137087 +        if (!singleSegment) {
137088 +            BYTE const wlByte = ip[pos++];
137089 +            U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
137090 +            RETURN_ERROR_IF(windowLog > ZSTD_WINDOWLOG_MAX, frameParameter_windowTooLarge, "");
137091 +            windowSize = (1ULL << windowLog);
137092 +            windowSize += (windowSize >> 3) * (wlByte&7);
137093 +        }
137094 +        switch(dictIDSizeCode)
137095 +        {
137096 +            default: assert(0);  /* impossible */
137097 +            case 0 : break;
137098 +            case 1 : dictID = ip[pos]; pos++; break;
137099 +            case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break;
137100 +            case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break;
137101 +        }
137102 +        switch(fcsID)
137103 +        {
137104 +            default: assert(0);  /* impossible */
137105 +            case 0 : if (singleSegment) frameContentSize = ip[pos]; break;
137106 +            case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break;
137107 +            case 2 : frameContentSize = MEM_readLE32(ip+pos); break;
137108 +            case 3 : frameContentSize = MEM_readLE64(ip+pos); break;
137109 +        }
137110 +        if (singleSegment) windowSize = frameContentSize;
137112 +        zfhPtr->frameType = ZSTD_frame;
137113 +        zfhPtr->frameContentSize = frameContentSize;
137114 +        zfhPtr->windowSize = windowSize;
137115 +        zfhPtr->blockSizeMax = (unsigned) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
137116 +        zfhPtr->dictID = dictID;
137117 +        zfhPtr->checksumFlag = checksumFlag;
137118 +    }
137119 +    return 0;
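Editor's note: worked example for the window-descriptor branch above: wlByte = 0x61 gives exponent 0x61 >> 3 = 12 and mantissa 0x61 & 7 = 1, so windowLog = 12 + ZSTD_WINDOWLOG_ABSOLUTEMIN (10) = 22 and windowSize = (1 << 22) + (1 << 19) * 1 = 4.5 MiB.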
137122 +/** ZSTD_getFrameHeader() :
137123 + *  decode Frame Header, or require larger `srcSize`.
137124 + *  note : this function does not consume input, it only reads it.
137125 + * @return : 0, `zfhPtr` is correctly filled,
137126 + *          >0, `srcSize` is too small, the returned value is the `srcSize` amount needed,
137127 + *           or an error code, which can be tested using ZSTD_isError() */
137128 +size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)
137130 +    return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1);
137134 +/** ZSTD_getFrameContentSize() :
137135 + *  compatible with legacy mode
137136 + * @return : decompressed size of the single frame pointed to by `src` if known, otherwise
137137 + *         - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
137138 + *         - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
137139 +unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
137141 +    {   ZSTD_frameHeader zfh;
137142 +        if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0)
137143 +            return ZSTD_CONTENTSIZE_ERROR;
137144 +        if (zfh.frameType == ZSTD_skippableFrame) {
137145 +            return 0;
137146 +        } else {
137147 +            return zfh.frameContentSize;
137148 +    }   }
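Editor's note: a sketch of the usual sizing pattern built on this function (user-space style allocation; in-kernel callers would allocate differently):

    unsigned long long const csize = ZSTD_getFrameContentSize(src, srcSize);
    if (csize == ZSTD_CONTENTSIZE_ERROR)        { /* not a zstd frame */ }
    else if (csize == ZSTD_CONTENTSIZE_UNKNOWN) { /* size absent: decompress by streaming instead */ }
    else                                        { /* allocate csize bytes, then ZSTD_decompress() */ }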
137151 +static size_t readSkippableFrameSize(void const* src, size_t srcSize)
137153 +    size_t const skippableHeaderSize = ZSTD_SKIPPABLEHEADERSIZE;
137154 +    U32 sizeU32;
137156 +    RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong, "");
137158 +    sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE);
137159 +    RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32,
137160 +                    frameParameter_unsupported, "");
137161 +    {
137162 +        size_t const skippableSize = skippableHeaderSize + sizeU32;
137163 +        RETURN_ERROR_IF(skippableSize > srcSize, srcSize_wrong, "");
137164 +        return skippableSize;
137165 +    }
137168 +/** ZSTD_findDecompressedSize() :
137169 + *  compatible with legacy mode
137170 + *  `srcSize` must be the exact length of some number of ZSTD compressed and/or
137171 + *      skippable frames
137172 + *  @return : decompressed size of the frames contained */
137173 +unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
137175 +    unsigned long long totalDstSize = 0;
137177 +    while (srcSize >= ZSTD_startingInputLength(ZSTD_f_zstd1)) {
137178 +        U32 const magicNumber = MEM_readLE32(src);
137180 +        if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
137181 +            size_t const skippableSize = readSkippableFrameSize(src, srcSize);
137182 +            if (ZSTD_isError(skippableSize)) {
137183 +                return ZSTD_CONTENTSIZE_ERROR;
137184 +            }
137185 +            assert(skippableSize <= srcSize);
137187 +            src = (const BYTE *)src + skippableSize;
137188 +            srcSize -= skippableSize;
137189 +            continue;
137190 +        }
137192 +        {   unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
137193 +            if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret;
137195 +            /* check for overflow */
137196 +            if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR;
137197 +            totalDstSize += ret;
137198 +        }
137199 +        {   size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
137200 +            if (ZSTD_isError(frameSrcSize)) {
137201 +                return ZSTD_CONTENTSIZE_ERROR;
137202 +            }
137204 +            src = (const BYTE *)src + frameSrcSize;
137205 +            srcSize -= frameSrcSize;
137206 +        }
137207 +    }  /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
137209 +    if (srcSize) return ZSTD_CONTENTSIZE_ERROR;
137211 +    return totalDstSize;
137214 +/** ZSTD_getDecompressedSize() :
137215 + *  compatible with legacy mode
137216 + * @return : decompressed size if known, 0 otherwise
137217 +             note : 0 can mean any of the following :
137218 +                   - frame content is empty
137219 +                   - decompressed size field is not present in frame header
137220 +                   - frame header unknown / not supported
137221 +                   - frame header not complete (`srcSize` too small) */
137222 +unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize)
137224 +    unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
137225 +    ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_ERROR < ZSTD_CONTENTSIZE_UNKNOWN);
137226 +    return (ret >= ZSTD_CONTENTSIZE_ERROR) ? 0 : ret;
137230 +/** ZSTD_decodeFrameHeader() :
137231 + * `headerSize` must be the size provided by ZSTD_frameHeaderSize().
137232 + * If multiple DDict references are enabled, also will choose the correct DDict to use.
137233 + * @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
137234 +static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize)
137236 +    size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format);
137237 +    if (ZSTD_isError(result)) return result;    /* invalid header */
137238 +    RETURN_ERROR_IF(result>0, srcSize_wrong, "headerSize too small");
137240 +    /* Reference DDict requested by frame if dctx references multiple ddicts */
137241 +    if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts && dctx->ddictSet) {
137242 +        ZSTD_DCtx_selectFrameDDict(dctx);
137243 +    }
137245 +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
137246 +    /* Skip the dictID check in fuzzing mode, because it makes the search
137247 +     * harder.
137248 +     */
137249 +    RETURN_ERROR_IF(dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID),
137250 +                    dictionary_wrong, "");
137251 +#endif
137252 +    dctx->validateChecksum = (dctx->fParams.checksumFlag && !dctx->forceIgnoreChecksum) ? 1 : 0;
137253 +    if (dctx->validateChecksum) xxh64_reset(&dctx->xxhState, 0);
137254 +    dctx->processedCSize += headerSize;
137255 +    return 0;
137258 +static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret)
137260 +    ZSTD_frameSizeInfo frameSizeInfo;
137261 +    frameSizeInfo.compressedSize = ret;
137262 +    frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;
137263 +    return frameSizeInfo;
137266 +static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize)
137268 +    ZSTD_frameSizeInfo frameSizeInfo;
137269 +    ZSTD_memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo));
137272 +    if ((srcSize >= ZSTD_SKIPPABLEHEADERSIZE)
137273 +        && (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
137274 +        frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize);
137275 +        assert(ZSTD_isError(frameSizeInfo.compressedSize) ||
137276 +               frameSizeInfo.compressedSize <= srcSize);
137277 +        return frameSizeInfo;
137278 +    } else {
137279 +        const BYTE* ip = (const BYTE*)src;
137280 +        const BYTE* const ipstart = ip;
137281 +        size_t remainingSize = srcSize;
137282 +        size_t nbBlocks = 0;
137283 +        ZSTD_frameHeader zfh;
137285 +        /* Extract Frame Header */
137286 +        {   size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
137287 +            if (ZSTD_isError(ret))
137288 +                return ZSTD_errorFrameSizeInfo(ret);
137289 +            if (ret > 0)
137290 +                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
137291 +        }
137293 +        ip += zfh.headerSize;
137294 +        remainingSize -= zfh.headerSize;
137296 +        /* Iterate over each block */
137297 +        while (1) {
137298 +            blockProperties_t blockProperties;
137299 +            size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
137300 +            if (ZSTD_isError(cBlockSize))
137301 +                return ZSTD_errorFrameSizeInfo(cBlockSize);
137303 +            if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
137304 +                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
137306 +            ip += ZSTD_blockHeaderSize + cBlockSize;
137307 +            remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
137308 +            nbBlocks++;
137310 +            if (blockProperties.lastBlock) break;
137311 +        }
137313 +        /* Final frame content checksum */
137314 +        if (zfh.checksumFlag) {
137315 +            if (remainingSize < 4)
137316 +                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
137317 +            ip += 4;
137318 +        }
137320 +        frameSizeInfo.compressedSize = (size_t)(ip - ipstart);
137321 +        frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN)
137322 +                                        ? zfh.frameContentSize
137323 +                                        : nbBlocks * zfh.blockSizeMax;
137324 +        return frameSizeInfo;
137325 +    }
137328 +/** ZSTD_findFrameCompressedSize() :
137329 + *  compatible with legacy mode
137330 + *  `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
137331 + *  `srcSize` must be at least as large as the frame contained
137332 + *  @return : the compressed size of the frame starting at `src` */
137333 +size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
137335 +    ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
137336 +    return frameSizeInfo.compressedSize;
137339 +/** ZSTD_decompressBound() :
137340 + *  compatible with legacy mode
137341 + *  `src` must point to the start of a ZSTD frame or a skippable frame
137342 + *  `srcSize` must be at least as large as the frame contained
137343 + *  @return : the maximum decompressed size of the compressed source
137344 + */
137345 +unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize)
137347 +    unsigned long long bound = 0;
137348 +    /* Iterate over each frame */
137349 +    while (srcSize > 0) {
137350 +        ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
137351 +        size_t const compressedSize = frameSizeInfo.compressedSize;
137352 +        unsigned long long const decompressedBound = frameSizeInfo.decompressedBound;
137353 +        if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR)
137354 +            return ZSTD_CONTENTSIZE_ERROR;
137355 +        assert(srcSize >= compressedSize);
137356 +        src = (const BYTE*)src + compressedSize;
137357 +        srcSize -= compressedSize;
137358 +        bound += decompressedBound;
137359 +    }
137360 +    return bound;
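Editor's note: usage sketch. When the frame headers omit the content size, this bound (nbBlocks * blockSizeMax per frame in the worst case) still yields a destination capacity that is guaranteed sufficient:

    unsigned long long const bound = ZSTD_decompressBound(src, srcSize);
    if (bound != ZSTD_CONTENTSIZE_ERROR) {
        /* any dstCapacity >= bound is large enough for the full decompression */
    }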
137364 +/*-*************************************************************
137365 + *   Frame decoding
137366 + ***************************************************************/
137368 +/** ZSTD_insertBlock() :
137369 + *  insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
137370 +size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)
137372 +    DEBUGLOG(5, "ZSTD_insertBlock: %u bytes", (unsigned)blockSize);
137373 +    ZSTD_checkContinuity(dctx, blockStart, blockSize);
137374 +    dctx->previousDstEnd = (const char*)blockStart + blockSize;
137375 +    return blockSize;
137379 +static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
137380 +                          const void* src, size_t srcSize)
137382 +    DEBUGLOG(5, "ZSTD_copyRawBlock");
137383 +    RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall, "");
137384 +    if (dst == NULL) {
137385 +        if (srcSize == 0) return 0;
137386 +        RETURN_ERROR(dstBuffer_null, "");
137387 +    }
137388 +    ZSTD_memcpy(dst, src, srcSize);
137389 +    return srcSize;
137392 +static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
137393 +                               BYTE b,
137394 +                               size_t regenSize)
137396 +    RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall, "");
137397 +    if (dst == NULL) {
137398 +        if (regenSize == 0) return 0;
137399 +        RETURN_ERROR(dstBuffer_null, "");
137400 +    }
137401 +    ZSTD_memset(dst, b, regenSize);
137402 +    return regenSize;
137405 +static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, unsigned streaming)
137407 +    (void)dctx;
137408 +    (void)uncompressedSize;
137409 +    (void)compressedSize;
137410 +    (void)streaming;
137414 +/*! ZSTD_decompressFrame() :
137415 + * @dctx must be properly initialized
137416 + *  will update *srcPtr and *srcSizePtr,
137417 + *  to make *srcPtr progress by one frame. */
137418 +static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
137419 +                                   void* dst, size_t dstCapacity,
137420 +                             const void** srcPtr, size_t *srcSizePtr)
137422 +    const BYTE* const istart = (const BYTE*)(*srcPtr);
137423 +    const BYTE* ip = istart;
137424 +    BYTE* const ostart = (BYTE*)dst;
137425 +    BYTE* const oend = dstCapacity != 0 ? ostart + dstCapacity : ostart;
137426 +    BYTE* op = ostart;
137427 +    size_t remainingSrcSize = *srcSizePtr;
137429 +    DEBUGLOG(4, "ZSTD_decompressFrame (srcSize:%i)", (int)*srcSizePtr);
137431 +    /* check */
137432 +    RETURN_ERROR_IF(
137433 +        remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN(dctx->format)+ZSTD_blockHeaderSize,
137434 +        srcSize_wrong, "");
137436 +    /* Frame Header */
137437 +    {   size_t const frameHeaderSize = ZSTD_frameHeaderSize_internal(
137438 +                ip, ZSTD_FRAMEHEADERSIZE_PREFIX(dctx->format), dctx->format);
137439 +        if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
137440 +        RETURN_ERROR_IF(remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize,
137441 +                        srcSize_wrong, "");
137442 +        FORWARD_IF_ERROR( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) , "");
137443 +        ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize;
137444 +    }
137446 +    /* Loop on each block */
137447 +    while (1) {
137448 +        size_t decodedSize;
137449 +        blockProperties_t blockProperties;
137450 +        size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties);
137451 +        if (ZSTD_isError(cBlockSize)) return cBlockSize;
137453 +        ip += ZSTD_blockHeaderSize;
137454 +        remainingSrcSize -= ZSTD_blockHeaderSize;
137455 +        RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong, "");
137457 +        switch(blockProperties.blockType)
137458 +        {
137459 +        case bt_compressed:
137460 +            decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oend-op), ip, cBlockSize, /* frame */ 1);
137461 +            break;
137462 +        case bt_raw :
137463 +            decodedSize = ZSTD_copyRawBlock(op, (size_t)(oend-op), ip, cBlockSize);
137464 +            break;
137465 +        case bt_rle :
137466 +            decodedSize = ZSTD_setRleBlock(op, (size_t)(oend-op), *ip, blockProperties.origSize);
137467 +            break;
137468 +        case bt_reserved :
137469 +        default:
137470 +            RETURN_ERROR(corruption_detected, "invalid block type");
137471 +        }
137473 +        if (ZSTD_isError(decodedSize)) return decodedSize;
137474 +        if (dctx->validateChecksum)
137475 +            xxh64_update(&dctx->xxhState, op, decodedSize);
137476 +        if (decodedSize != 0)
137477 +            op += decodedSize;
137478 +        assert(ip != NULL);
137479 +        ip += cBlockSize;
137480 +        remainingSrcSize -= cBlockSize;
137481 +        if (blockProperties.lastBlock) break;
137482 +    }
137484 +    if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
137485 +        RETURN_ERROR_IF((U64)(op-ostart) != dctx->fParams.frameContentSize,
137486 +                        corruption_detected, "");
137487 +    }
137488 +    if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
137489 +        RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong, "");
137490 +        if (!dctx->forceIgnoreChecksum) {
137491 +            U32 const checkCalc = (U32)xxh64_digest(&dctx->xxhState);
137492 +            U32 checkRead;
137493 +            checkRead = MEM_readLE32(ip);
137494 +            RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong, "");
137495 +        }
137496 +        ip += 4;
137497 +        remainingSrcSize -= 4;
137498 +    }
137499 +    ZSTD_DCtx_trace_end(dctx, (U64)(op-ostart), (U64)(ip-istart), /* streaming */ 0);
137500 +    /* Allow caller to get size read */
137501 +    *srcPtr = ip;
137502 +    *srcSizePtr = remainingSrcSize;
137503 +    return (size_t)(op-ostart);
137506 +static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
137507 +                                        void* dst, size_t dstCapacity,
137508 +                                  const void* src, size_t srcSize,
137509 +                                  const void* dict, size_t dictSize,
137510 +                                  const ZSTD_DDict* ddict)
137512 +    void* const dststart = dst;
137513 +    int moreThan1Frame = 0;
137515 +    DEBUGLOG(5, "ZSTD_decompressMultiFrame");
137516 +    assert(dict==NULL || ddict==NULL);  /* either dict or ddict set, not both */
137518 +    if (ddict) {
137519 +        dict = ZSTD_DDict_dictContent(ddict);
137520 +        dictSize = ZSTD_DDict_dictSize(ddict);
137521 +    }
137523 +    while (srcSize >= ZSTD_startingInputLength(dctx->format)) {
137526 +        {   U32 const magicNumber = MEM_readLE32(src);
137527 +            DEBUGLOG(4, "reading magic number %08X (expecting %08X)",
137528 +                        (unsigned)magicNumber, ZSTD_MAGICNUMBER);
137529 +            if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
137530 +                size_t const skippableSize = readSkippableFrameSize(src, srcSize);
137531 +                FORWARD_IF_ERROR(skippableSize, "readSkippableFrameSize failed");
137532 +                assert(skippableSize <= srcSize);
137534 +                src = (const BYTE *)src + skippableSize;
137535 +                srcSize -= skippableSize;
137536 +                continue;
137537 +        }   }
137539 +        if (ddict) {
137540 +            /* we were called from ZSTD_decompress_usingDDict */
137541 +            FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(dctx, ddict), "");
137542 +        } else {
137543 +            /* this will initialize correctly with no dict if dict == NULL, so
137544 +             * use this in all cases but ddict */
137545 +            FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize), "");
137546 +        }
137547 +        ZSTD_checkContinuity(dctx, dst, dstCapacity);
137549 +        {   const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
137550 +                                                    &src, &srcSize);
137551 +            RETURN_ERROR_IF(
137552 +                (ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown)
137553 +             && (moreThan1Frame==1),
137554 +                srcSize_wrong,
137555 +                "At least one frame successfully completed, "
137556 +                "but following bytes are garbage: "
137557 +                "it's more likely to be a srcSize error, "
137558 +                "specifying more input bytes than size of frame(s). "
137559 +                "Note: one could be unlucky, it might be a corruption error instead, "
137560 +                "happening right at the place where we expect zstd magic bytes. "
137561 +                "But this is _much_ less likely than a srcSize field error.");
137562 +            if (ZSTD_isError(res)) return res;
137563 +            assert(res <= dstCapacity);
137564 +            if (res != 0)
137565 +                dst = (BYTE*)dst + res;
137566 +            dstCapacity -= res;
137567 +        }
137568 +        moreThan1Frame = 1;
137569 +    }  /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
137571 +    RETURN_ERROR_IF(srcSize, srcSize_wrong, "input not entirely consumed");
137573 +    return (size_t)((BYTE*)dst - (BYTE*)dststart);
137576 +size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
137577 +                                 void* dst, size_t dstCapacity,
137578 +                           const void* src, size_t srcSize,
137579 +                           const void* dict, size_t dictSize)
137581 +    return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL);
137585 +static ZSTD_DDict const* ZSTD_getDDict(ZSTD_DCtx* dctx)
137587 +    switch (dctx->dictUses) {
137588 +    default:
137589 +        assert(0 /* Impossible */);
137590 +        /* fall-through */
137591 +    case ZSTD_dont_use:
137592 +        ZSTD_clearDict(dctx);
137593 +        return NULL;
137594 +    case ZSTD_use_indefinitely:
137595 +        return dctx->ddict;
137596 +    case ZSTD_use_once:
137597 +        dctx->dictUses = ZSTD_dont_use;
137598 +        return dctx->ddict;
137599 +    }
137602 +size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
137604 +    return ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ZSTD_getDDict(dctx));
137608 +size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
137610 +#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1)
137611 +    size_t regenSize;
137612 +    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
137613 +    RETURN_ERROR_IF(dctx==NULL, memory_allocation, "NULL pointer!");
137614 +    regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
137615 +    ZSTD_freeDCtx(dctx);
137616 +    return regenSize;
137617 +#else   /* stack mode */
137618 +    ZSTD_DCtx dctx;
137619 +    ZSTD_initDCtx_internal(&dctx);
137620 +    return ZSTD_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);
137621 +#endif
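Editor's note: an end-to-end sketch of the one-shot entry point above, with kernel-style error reporting (the log call is illustrative only):

    size_t const dSize = ZSTD_decompress(dst, dstCapacity, src, srcSize);
    if (ZSTD_isError(dSize))
        pr_err("zstd: decompression failed: %s\n", ZSTD_getErrorName(dSize));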
137625 +/*-**************************************
137626 +*   Advanced Streaming Decompression API
137627 +*   Bufferless and synchronous
137628 +****************************************/
137629 +size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; }
137632 + * Similar to ZSTD_nextSrcSizeToDecompress(), but when a block input can be streamed,
137633 + * we allow taking a partial block as the input. Currently only raw uncompressed blocks can
137634 + * be streamed.
137636 + * For blocks that can be streamed, this allows us to reduce the latency until we produce
137637 + * output, and avoid copying the input.
137639 + * @param inputSize - The total amount of input that the caller currently has.
137640 + */
137641 +static size_t ZSTD_nextSrcSizeToDecompressWithInputSize(ZSTD_DCtx* dctx, size_t inputSize) {
137642 +    if (!(dctx->stage == ZSTDds_decompressBlock || dctx->stage == ZSTDds_decompressLastBlock))
137643 +        return dctx->expected;
137644 +    if (dctx->bType != bt_raw)
137645 +        return dctx->expected;
137646 +    return MIN(MAX(inputSize, 1), dctx->expected);
137649 +ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) {
137650 +    switch(dctx->stage)
137651 +    {
137652 +    default:   /* should not happen */
137653 +        assert(0);
137654 +    case ZSTDds_getFrameHeaderSize:
137655 +    case ZSTDds_decodeFrameHeader:
137656 +        return ZSTDnit_frameHeader;
137657 +    case ZSTDds_decodeBlockHeader:
137658 +        return ZSTDnit_blockHeader;
137659 +    case ZSTDds_decompressBlock:
137660 +        return ZSTDnit_block;
137661 +    case ZSTDds_decompressLastBlock:
137662 +        return ZSTDnit_lastBlock;
137663 +    case ZSTDds_checkChecksum:
137664 +        return ZSTDnit_checksum;
137665 +    case ZSTDds_decodeSkippableHeader:
137666 +    case ZSTDds_skipFrame:
137667 +        return ZSTDnit_skippableFrame;
137668 +    }
137671 +static int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; }
137673 +/** ZSTD_decompressContinue() :
137674 + *  srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress())
137675 + *  @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity`)
137676 + *            or an error code, which can be tested using ZSTD_isError() */
137677 +size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
137679 +    DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize);
137680 +    /* Sanity check */
137681 +    RETURN_ERROR_IF(srcSize != ZSTD_nextSrcSizeToDecompressWithInputSize(dctx, srcSize), srcSize_wrong, "not allowed");
137682 +    ZSTD_checkContinuity(dctx, dst, dstCapacity);
137684 +    dctx->processedCSize += srcSize;
137686 +    switch (dctx->stage)
137687 +    {
137688 +    case ZSTDds_getFrameHeaderSize :
137689 +        assert(src != NULL);
137690 +        if (dctx->format == ZSTD_f_zstd1) {  /* allows header */
137691 +            assert(srcSize >= ZSTD_FRAMEIDSIZE);  /* to read skippable magic number */
137692 +            if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {        /* skippable frame */
137693 +                ZSTD_memcpy(dctx->headerBuffer, src, srcSize);
137694 +                dctx->expected = ZSTD_SKIPPABLEHEADERSIZE - srcSize;  /* remaining to load to get full skippable frame header */
137695 +                dctx->stage = ZSTDds_decodeSkippableHeader;
137696 +                return 0;
137697 +        }   }
137698 +        dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format);
137699 +        if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize;
137700 +        ZSTD_memcpy(dctx->headerBuffer, src, srcSize);
137701 +        dctx->expected = dctx->headerSize - srcSize;
137702 +        dctx->stage = ZSTDds_decodeFrameHeader;
137703 +        return 0;
137705 +    case ZSTDds_decodeFrameHeader:
137706 +        assert(src != NULL);
137707 +        ZSTD_memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize);
137708 +        FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize), "");
137709 +        dctx->expected = ZSTD_blockHeaderSize;
137710 +        dctx->stage = ZSTDds_decodeBlockHeader;
137711 +        return 0;
137713 +    case ZSTDds_decodeBlockHeader:
137714 +        {   blockProperties_t bp;
137715 +            size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
137716 +            if (ZSTD_isError(cBlockSize)) return cBlockSize;
137717 +            RETURN_ERROR_IF(cBlockSize > dctx->fParams.blockSizeMax, corruption_detected, "Block Size Exceeds Maximum");
137718 +            dctx->expected = cBlockSize;
137719 +            dctx->bType = bp.blockType;
137720 +            dctx->rleSize = bp.origSize;
137721 +            if (cBlockSize) {
137722 +                dctx->stage = bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock;
137723 +                return 0;
137724 +            }
137725 +            /* empty block */
137726 +            if (bp.lastBlock) {
137727 +                if (dctx->fParams.checksumFlag) {
137728 +                    dctx->expected = 4;
137729 +                    dctx->stage = ZSTDds_checkChecksum;
137730 +                } else {
137731 +                    dctx->expected = 0; /* end of frame */
137732 +                    dctx->stage = ZSTDds_getFrameHeaderSize;
137733 +                }
137734 +            } else {
137735 +                dctx->expected = ZSTD_blockHeaderSize;  /* jump to next header */
137736 +                dctx->stage = ZSTDds_decodeBlockHeader;
137737 +            }
137738 +            return 0;
137739 +        }
137741 +    case ZSTDds_decompressLastBlock:
137742 +    case ZSTDds_decompressBlock:
137743 +        DEBUGLOG(5, "ZSTD_decompressContinue: case ZSTDds_decompressBlock");
137744 +        {   size_t rSize;
137745 +            switch(dctx->bType)
137746 +            {
137747 +            case bt_compressed:
137748 +                DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed");
137749 +                rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1);
137750 +                dctx->expected = 0;  /* Streaming not supported */
137751 +                break;
137752 +            case bt_raw :
137753 +                assert(srcSize <= dctx->expected);
137754 +                rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize);
137755 +                FORWARD_IF_ERROR(rSize, "ZSTD_copyRawBlock failed");
137756 +                assert(rSize == srcSize);
137757 +                dctx->expected -= rSize;
137758 +                break;
137759 +            case bt_rle :
137760 +                rSize = ZSTD_setRleBlock(dst, dstCapacity, *(const BYTE*)src, dctx->rleSize);
137761 +                dctx->expected = 0;  /* Streaming not supported */
137762 +                break;
137763 +            case bt_reserved :   /* should never happen */
137764 +            default:
137765 +                RETURN_ERROR(corruption_detected, "invalid block type");
137766 +            }
137767 +            FORWARD_IF_ERROR(rSize, "");
137768 +            RETURN_ERROR_IF(rSize > dctx->fParams.blockSizeMax, corruption_detected, "Decompressed Block Size Exceeds Maximum");
137769 +            DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize);
137770 +            dctx->decodedSize += rSize;
137771 +            if (dctx->validateChecksum) xxh64_update(&dctx->xxhState, dst, rSize);
137772 +            dctx->previousDstEnd = (char*)dst + rSize;
137774 +            /* Stay on the same stage until we are finished streaming the block. */
137775 +            if (dctx->expected > 0) {
137776 +                return rSize;
137777 +            }
137779 +            if (dctx->stage == ZSTDds_decompressLastBlock) {   /* end of frame */
137780 +                DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (unsigned)dctx->decodedSize);
137781 +                RETURN_ERROR_IF(
137782 +                    dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
137783 +                 && dctx->decodedSize != dctx->fParams.frameContentSize,
137784 +                    corruption_detected, "");
137785 +                if (dctx->fParams.checksumFlag) {  /* another round for frame checksum */
137786 +                    dctx->expected = 4;
137787 +                    dctx->stage = ZSTDds_checkChecksum;
137788 +                } else {
137789 +                    ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, /* streaming */ 1);
137790 +                    dctx->expected = 0;   /* ends here */
137791 +                    dctx->stage = ZSTDds_getFrameHeaderSize;
137792 +                }
137793 +            } else {
137794 +                dctx->stage = ZSTDds_decodeBlockHeader;
137795 +                dctx->expected = ZSTD_blockHeaderSize;
137796 +            }
137797 +            return rSize;
137798 +        }
137800 +    case ZSTDds_checkChecksum:
137801 +        assert(srcSize == 4);  /* guaranteed by dctx->expected */
137802 +        {
137803 +            if (dctx->validateChecksum) {
137804 +                U32 const h32 = (U32)xxh64_digest(&dctx->xxhState);
137805 +                U32 const check32 = MEM_readLE32(src);
137806 +                DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32);
137807 +                RETURN_ERROR_IF(check32 != h32, checksum_wrong, "");
137808 +            }
137809 +            ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, /* streaming */ 1);
137810 +            dctx->expected = 0;
137811 +            dctx->stage = ZSTDds_getFrameHeaderSize;
137812 +            return 0;
137813 +        }
137815 +    case ZSTDds_decodeSkippableHeader:
137816 +        assert(src != NULL);
137817 +        assert(srcSize <= ZSTD_SKIPPABLEHEADERSIZE);
137818 +        ZSTD_memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize);   /* complete skippable header */
137819 +        dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE);   /* note : dctx->expected can grow seriously large, beyond local buffer size */
137820 +        dctx->stage = ZSTDds_skipFrame;
137821 +        return 0;
137823 +    case ZSTDds_skipFrame:
137824 +        dctx->expected = 0;
137825 +        dctx->stage = ZSTDds_getFrameHeaderSize;
137826 +        return 0;
137828 +    default:
137829 +        assert(0);   /* impossible */
137830 +        RETURN_ERROR(GENERIC, "impossible to reach");   /* some compiler require default to do something */
137831 +    }
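Editor's note: together with ZSTD_nextSrcSizeToDecompress() above, this function defines the bufferless protocol: always feed exactly the number of bytes the context asks for. A minimal caller sketch, where readExact() is a hypothetical helper that delivers exactly `next` bytes into `inBuf`:

    size_t next;
    if (ZSTD_isError(ZSTD_decompressBegin(dctx))) { /* handle error */ }
    while ((next = ZSTD_nextSrcSizeToDecompress(dctx)) != 0) {
        readExact(in, inBuf, next);            /* hypothetical I/O */
        {   size_t const got = ZSTD_decompressContinue(dctx, op, (size_t)(oend - op), inBuf, next);
            if (ZSTD_isError(got)) break;
            op += got;                         /* headers yield 0 bytes; blocks append output */
        }
    }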
137835 +static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
137837 +    dctx->dictEnd = dctx->previousDstEnd;
137838 +    dctx->virtualStart = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
137839 +    dctx->prefixStart = dict;
137840 +    dctx->previousDstEnd = (const char*)dict + dictSize;
137841 +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
137842 +    dctx->dictContentBeginForFuzzing = dctx->prefixStart;
137843 +    dctx->dictContentEndForFuzzing = dctx->previousDstEnd;
137844 +#endif
137845 +    return 0;
137848 +/*! ZSTD_loadDEntropy() :
137849 + *  dict : must point at beginning of a valid zstd dictionary.
137850 + * @return : size of entropy tables read */
137851 +size_t
137852 +ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
137853 +                  const void* const dict, size_t const dictSize)
137855 +    const BYTE* dictPtr = (const BYTE*)dict;
137856 +    const BYTE* const dictEnd = dictPtr + dictSize;
137858 +    RETURN_ERROR_IF(dictSize <= 8, dictionary_corrupted, "dict is too small");
137859 +    assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY);   /* dict must be valid */
137860 +    dictPtr += 8;   /* skip header = magic + dictID */
137862 +    ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, OFTable) == offsetof(ZSTD_entropyDTables_t, LLTable) + sizeof(entropy->LLTable));
137863 +    ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, MLTable) == offsetof(ZSTD_entropyDTables_t, OFTable) + sizeof(entropy->OFTable));
137864 +    ZSTD_STATIC_ASSERT(sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable) >= HUF_DECOMPRESS_WORKSPACE_SIZE);
137865 +    {   void* const workspace = &entropy->LLTable;   /* use fse tables as temporary workspace; implies fse tables are grouped together */
137866 +        size_t const workspaceSize = sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable);
137867 +#ifdef HUF_FORCE_DECOMPRESS_X1
137868 +        /* in minimal huffman, we always use X1 variants */
137869 +        size_t const hSize = HUF_readDTableX1_wksp(entropy->hufTable,
137870 +                                                dictPtr, dictEnd - dictPtr,
137871 +                                                workspace, workspaceSize);
137872 +#else
137873 +        size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable,
137874 +                                                dictPtr, (size_t)(dictEnd - dictPtr),
137875 +                                                workspace, workspaceSize);
137876 +#endif
137877 +        RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted, "");
137878 +        dictPtr += hSize;
137879 +    }
137881 +    {   short offcodeNCount[MaxOff+1];
137882 +        unsigned offcodeMaxValue = MaxOff, offcodeLog;
137883 +        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, (size_t)(dictEnd-dictPtr));
137884 +        RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
137885 +        RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted, "");
137886 +        RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
137887 +        ZSTD_buildFSETable( entropy->OFTable,
137888 +                            offcodeNCount, offcodeMaxValue,
137889 +                            OF_base, OF_bits,
137890 +                            offcodeLog,
137891 +                            entropy->workspace, sizeof(entropy->workspace),
137892 +                            /* bmi2 */0);
137893 +        dictPtr += offcodeHeaderSize;
137894 +    }
137896 +    {   short matchlengthNCount[MaxML+1];
137897 +        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
137898 +        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));
137899 +        RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
137900 +        RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted, "");
137901 +        RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
137902 +        ZSTD_buildFSETable( entropy->MLTable,
137903 +                            matchlengthNCount, matchlengthMaxValue,
137904 +                            ML_base, ML_bits,
137905 +                            matchlengthLog,
137906 +                            entropy->workspace, sizeof(entropy->workspace),
137907 +                            /* bmi2 */ 0);
137908 +        dictPtr += matchlengthHeaderSize;
137909 +    }
137911 +    {   short litlengthNCount[MaxLL+1];
137912 +        unsigned litlengthMaxValue = MaxLL, litlengthLog;
137913 +        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));
137914 +        RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
137915 +        RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted, "");
137916 +        RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
137917 +        ZSTD_buildFSETable( entropy->LLTable,
137918 +                            litlengthNCount, litlengthMaxValue,
137919 +                            LL_base, LL_bits,
137920 +                            litlengthLog,
137921 +                            entropy->workspace, sizeof(entropy->workspace),
137922 +                            /* bmi2 */ 0);
137923 +        dictPtr += litlengthHeaderSize;
137924 +    }
137926 +    RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
137927 +    {   int i;
137928 +        size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
137929 +        for (i=0; i<3; i++) {
137930 +            U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
137931 +            RETURN_ERROR_IF(rep==0 || rep > dictContentSize,
137932 +                            dictionary_corrupted, "");
137933 +            entropy->rep[i] = rep;
137934 +    }   }
137936 +    return (size_t)(dictPtr - (const BYTE*)dict);
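
[Editor's note] For orientation, a sketch of the dictionary layout that ZSTD_loadDEntropy() walks above. This block is illustrative, not part of the patch; read_dict_id is a hypothetical helper mirroring ZSTD_getDictID_fromDict() further below, minus the magic check.

#include <stddef.h>   /* size_t */

/* zstd dictionary layout, as consumed by ZSTD_loadDEntropy() :
 *   bytes 0..3   magic = ZSTD_MAGIC_DICTIONARY (little-endian)
 *   bytes 4..7   dictID (little-endian)
 *   bytes 8..    Huffman literals table, then OF, ML and LL FSE tables
 *   next 12 B    3 x 4-byte little-endian starting repcodes
 *   remainder    raw dictionary content */
static unsigned read_dict_id(const unsigned char* dict, size_t dictSize)
{
    if (dictSize < 8) return 0;   /* too small to carry the 8-byte header */
    return (unsigned)dict[4] | ((unsigned)dict[5] << 8)
         | ((unsigned)dict[6] << 16) | ((unsigned)dict[7] << 24);
}
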
137939 +static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
137941 +    if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize);
137942 +    {   U32 const magic = MEM_readLE32(dict);
137943 +        if (magic != ZSTD_MAGIC_DICTIONARY) {
137944 +            return ZSTD_refDictContent(dctx, dict, dictSize);   /* pure content mode */
137945 +    }   }
137946 +    dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
137948 +    /* load entropy tables */
137949 +    {   size_t const eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize);
137950 +        RETURN_ERROR_IF(ZSTD_isError(eSize), dictionary_corrupted, "");
137951 +        dict = (const char*)dict + eSize;
137952 +        dictSize -= eSize;
137953 +    }
137954 +    dctx->litEntropy = dctx->fseEntropy = 1;
137956 +    /* reference dictionary content */
137957 +    return ZSTD_refDictContent(dctx, dict, dictSize);
137960 +size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
137962 +    assert(dctx != NULL);
137963 +    dctx->expected = ZSTD_startingInputLength(dctx->format);  /* dctx->format must be properly set */
137964 +    dctx->stage = ZSTDds_getFrameHeaderSize;
137965 +    dctx->processedCSize = 0;
137966 +    dctx->decodedSize = 0;
137967 +    dctx->previousDstEnd = NULL;
137968 +    dctx->prefixStart = NULL;
137969 +    dctx->virtualStart = NULL;
137970 +    dctx->dictEnd = NULL;
137971 +    dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001);  /* cover both little and big endian */
137972 +    dctx->litEntropy = dctx->fseEntropy = 0;
137973 +    dctx->dictID = 0;
137974 +    dctx->bType = bt_reserved;
137975 +    ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
137976 +    ZSTD_memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue));  /* initial repcodes */
137977 +    dctx->LLTptr = dctx->entropy.LLTable;
137978 +    dctx->MLTptr = dctx->entropy.MLTable;
137979 +    dctx->OFTptr = dctx->entropy.OFTable;
137980 +    dctx->HUFptr = dctx->entropy.hufTable;
137981 +    return 0;
137984 +size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
137986 +    FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , "");
137987 +    if (dict && dictSize)
137988 +        RETURN_ERROR_IF(
137989 +            ZSTD_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize)),
137990 +            dictionary_corrupted, "");
137991 +    return 0;
137995 +/* ======   ZSTD_DDict   ====== */
137997 +size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
137999 +    DEBUGLOG(4, "ZSTD_decompressBegin_usingDDict");
138000 +    assert(dctx != NULL);
138001 +    if (ddict) {
138002 +        const char* const dictStart = (const char*)ZSTD_DDict_dictContent(ddict);
138003 +        size_t const dictSize = ZSTD_DDict_dictSize(ddict);
138004 +        const void* const dictEnd = dictStart + dictSize;
138005 +        dctx->ddictIsCold = (dctx->dictEnd != dictEnd);
138006 +        DEBUGLOG(4, "DDict is %s",
138007 +                    dctx->ddictIsCold ? "~cold~" : "hot!");
138008 +    }
138009 +    FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , "");
138010 +    if (ddict) {   /* NULL ddict is equivalent to no dictionary */
138011 +        ZSTD_copyDDictParameters(dctx, ddict);
138012 +    }
138013 +    return 0;
138016 +/*! ZSTD_getDictID_fromDict() :
138017 + *  Provides the dictID stored within dictionary.
138018 + *  If @return == 0, the dictionary is not conformant to the Zstandard specification.
138019 + *  It can still be loaded, but as a content-only dictionary. */
138020 +unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
138022 +    if (dictSize < 8) return 0;
138023 +    if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0;
138024 +    return MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
138027 +/*! ZSTD_getDictID_fromFrame() :
138028 + *  Provides the dictID required to decompress frame stored within `src`.
138029 + *  If @return == 0, the dictID could not be decoded.
138030 + *  This could be for one of the following reasons :
138031 + *  - The frame does not require a dictionary (most common case).
138032 + *  - The frame was built with the dictID intentionally removed.
138033 + *    The needed dictionary is then hidden information.
138034 + *    Note : this use case also happens when using a non-conformant dictionary.
138035 + *  - `srcSize` is too small, and as a result, frame header could not be decoded.
138036 + *    Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`.
138037 + *  - This is not a Zstandard frame.
138038 + *  To identify the exact failure cause, use
138039 + *  ZSTD_getFrameHeader(), which provides a more precise error code. */
138040 +unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize)
138042 +    ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 };
138043 +    size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize);
138044 +    if (ZSTD_isError(hError)) return 0;
138045 +    return zfp.dictID;
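
[Editor's note] A hedged usage sketch, not part of the patch, combining the two dictID helpers documented above; it assumes only the public zstd.h API.

#include <zstd.h>

/* Returns 1 if `dict` is the dictionary this frame declares it needs.
 * frameID == 0 means "unknown" : the frame may still decode, possibly without a dict. */
static int dict_matches_frame(const void* dict, size_t dictSize,
                              const void* frame, size_t frameSize)
{
    unsigned const dictID  = ZSTD_getDictID_fromDict(dict, dictSize);
    unsigned const frameID = ZSTD_getDictID_fromFrame(frame, frameSize);
    return (frameID != 0) && (dictID == frameID);
}
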
138049 +/*! ZSTD_decompress_usingDDict() :
138050 +*   Decompression using a pre-digested dictionary.
138051 +*   Uses the dictionary without significant per-call overhead. */
138052 +size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
138053 +                                  void* dst, size_t dstCapacity,
138054 +                            const void* src, size_t srcSize,
138055 +                            const ZSTD_DDict* ddict)
138057 +    /* pass content and size in case legacy frames are encountered */
138058 +    return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize,
138059 +                                     NULL, 0,
138060 +                                     ddict);
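
[Editor's note] A usage sketch, not part of the patch, for ZSTD_decompress_usingDDict(); allocation error handling is elided and the helper name is illustrative.

#include <zstd.h>

static size_t decompress_with_dict(void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize,
                                   const void* dict, size_t dictSize)
{
    ZSTD_DDict* const ddict = ZSTD_createDDict(dict, dictSize);   /* digest once */
    ZSTD_DCtx*  const dctx  = ZSTD_createDCtx();
    size_t const dSize = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity,
                                                    src, srcSize, ddict);
    ZSTD_freeDCtx(dctx);
    ZSTD_freeDDict(ddict);
    return dSize;   /* decompressed size, or an error code (check ZSTD_isError) */
}
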
138064 +/*=====================================
138065 +*   Streaming decompression
138066 +*====================================*/
138068 +ZSTD_DStream* ZSTD_createDStream(void)
138070 +    DEBUGLOG(3, "ZSTD_createDStream");
138071 +    return ZSTD_createDStream_advanced(ZSTD_defaultCMem);
138074 +ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize)
138076 +    return ZSTD_initStaticDCtx(workspace, workspaceSize);
138079 +ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem)
138081 +    return ZSTD_createDCtx_advanced(customMem);
138084 +size_t ZSTD_freeDStream(ZSTD_DStream* zds)
138086 +    return ZSTD_freeDCtx(zds);
138090 +/* ***  Initialization  *** */
138092 +size_t ZSTD_DStreamInSize(void)  { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; }
138093 +size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; }
138095 +size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx,
138096 +                                   const void* dict, size_t dictSize,
138097 +                                         ZSTD_dictLoadMethod_e dictLoadMethod,
138098 +                                         ZSTD_dictContentType_e dictContentType)
138100 +    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
138101 +    ZSTD_clearDict(dctx);
138102 +    if (dict && dictSize != 0) {
138103 +        dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);
138104 +        RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation, "NULL pointer!");
138105 +        dctx->ddict = dctx->ddictLocal;
138106 +        dctx->dictUses = ZSTD_use_indefinitely;
138107 +    }
138108 +    return 0;
138111 +size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
138113 +    return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
138116 +size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
138118 +    return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
138121 +size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
138123 +    FORWARD_IF_ERROR(ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType), "");
138124 +    dctx->dictUses = ZSTD_use_once;
138125 +    return 0;
138128 +size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize)
138130 +    return ZSTD_DCtx_refPrefix_advanced(dctx, prefix, prefixSize, ZSTD_dct_rawContent);
138134 +/* ZSTD_initDStream_usingDict() :
138135 + * return : expected size, aka ZSTD_startingInputLength().
138136 + * this function cannot fail */
138137 +size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize)
138139 +    DEBUGLOG(4, "ZSTD_initDStream_usingDict");
138140 +    FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) , "");
138141 +    FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) , "");
138142 +    return ZSTD_startingInputLength(zds->format);
138145 +/* note : this variant can't fail */
138146 +size_t ZSTD_initDStream(ZSTD_DStream* zds)
138148 +    DEBUGLOG(4, "ZSTD_initDStream");
138149 +    return ZSTD_initDStream_usingDDict(zds, NULL);
138152 +/* ZSTD_initDStream_usingDDict() :
138153 + * ddict is only referenced, and must outlive the decompression session;
138154 + * this function cannot fail */
138155 +size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
138157 +    FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) , "");
138158 +    FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) , "");
138159 +    return ZSTD_startingInputLength(dctx->format);
138162 +/* ZSTD_resetDStream() :
138163 + * return : expected size, aka ZSTD_startingInputLength().
138164 + * this function cannot fail */
138165 +size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
138167 +    FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only), "");
138168 +    return ZSTD_startingInputLength(dctx->format);
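
[Editor's note] A usage sketch, not part of the patch, for the init family above; all variants reset the session and return the same first-read hint.

#include <zstd.h>

/* Creates a stream and returns the recommended size of the first read.
 * The hint is advisory : smaller reads also work, just less efficiently.
 * Error handling (NULL stream) is elided. */
static size_t start_streaming(ZSTD_DStream** zdsPtr)
{
    *zdsPtr = ZSTD_createDStream();
    return ZSTD_initDStream(*zdsPtr);   /* == ZSTD_startingInputLength(format) */
}
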
138172 +size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
138174 +    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
138175 +    ZSTD_clearDict(dctx);
138176 +    if (ddict) {
138177 +        dctx->ddict = ddict;
138178 +        dctx->dictUses = ZSTD_use_indefinitely;
138179 +        if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts) {
138180 +            if (dctx->ddictSet == NULL) {
138181 +                dctx->ddictSet = ZSTD_createDDictHashSet(dctx->customMem);
138182 +                if (!dctx->ddictSet) {
138183 +                    RETURN_ERROR(memory_allocation, "Failed to allocate memory for hash set!");
138184 +                }
138185 +            }
138186 +            assert(!dctx->staticSize);  /* Impossible: ddictSet cannot have been allocated if static dctx */
138187 +            FORWARD_IF_ERROR(ZSTD_DDictHashSet_addDDict(dctx->ddictSet, ddict, dctx->customMem), "");
138188 +        }
138189 +    }
138190 +    return 0;
138193 +/* ZSTD_DCtx_setMaxWindowSize() :
138194 + * note : no direct equivalence in ZSTD_DCtx_setParameter,
138195 + * since this version sets windowSize, and the other sets windowLog */
138196 +size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)
138198 +    ZSTD_bounds const bounds = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax);
138199 +    size_t const min = (size_t)1 << bounds.lowerBound;
138200 +    size_t const max = (size_t)1 << bounds.upperBound;
138201 +    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
138202 +    RETURN_ERROR_IF(maxWindowSize < min, parameter_outOfBound, "");
138203 +    RETURN_ERROR_IF(maxWindowSize > max, parameter_outOfBound, "");
138204 +    dctx->maxWindowSize = maxWindowSize;
138205 +    return 0;
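
[Editor's note] A sketch, not part of the patch, of the windowLog/windowSize equivalence noted above; ZSTD_DCtx_setMaxWindowSize() belongs to the advanced (static-linking) API, while the parameter route is the portable one.

#include <zstd.h>

static void cap_window_128mb(ZSTD_DCtx* dctx)   /* dctx assumed freshly reset */
{
    /* the parameter takes a log2 of the cap ... */
    ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 27);   /* cap = 1 << 27 */
    /* ... while the direct setter takes a byte size; both enforce the same cap :
     * ZSTD_DCtx_setMaxWindowSize(dctx, (size_t)1 << 27); */
}
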
138208 +size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format)
138210 +    return ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, (int)format);
138213 +ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam)
138215 +    ZSTD_bounds bounds = { 0, 0, 0 };
138216 +    switch(dParam) {
138217 +        case ZSTD_d_windowLogMax:
138218 +            bounds.lowerBound = ZSTD_WINDOWLOG_ABSOLUTEMIN;
138219 +            bounds.upperBound = ZSTD_WINDOWLOG_MAX;
138220 +            return bounds;
138221 +        case ZSTD_d_format:
138222 +            bounds.lowerBound = (int)ZSTD_f_zstd1;
138223 +            bounds.upperBound = (int)ZSTD_f_zstd1_magicless;
138224 +            ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
138225 +            return bounds;
138226 +        case ZSTD_d_stableOutBuffer:
138227 +            bounds.lowerBound = (int)ZSTD_bm_buffered;
138228 +            bounds.upperBound = (int)ZSTD_bm_stable;
138229 +            return bounds;
138230 +        case ZSTD_d_forceIgnoreChecksum:
138231 +            bounds.lowerBound = (int)ZSTD_d_validateChecksum;
138232 +            bounds.upperBound = (int)ZSTD_d_ignoreChecksum;
138233 +            return bounds;
138234 +        case ZSTD_d_refMultipleDDicts:
138235 +            bounds.lowerBound = (int)ZSTD_rmd_refSingleDDict;
138236 +            bounds.upperBound = (int)ZSTD_rmd_refMultipleDDicts;
138237 +            return bounds;
138238 +        default:;
138239 +    }
138240 +    bounds.error = ERROR(parameter_unsupported);
138241 +    return bounds;
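
[Editor's note] A usage sketch, not part of the patch: querying the bounds before setting a value, mirroring what CHECK_DBOUNDS does internally below.

#include <zstd.h>

static int window_log_is_valid(int wlog)
{
    ZSTD_bounds const b = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax);
    if (ZSTD_isError(b.error)) return 0;
    return (wlog >= b.lowerBound) && (wlog <= b.upperBound);
}
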
138244 +/* ZSTD_dParam_withinBounds:
138245 + * @return 1 if value is within dParam bounds,
138246 + * 0 otherwise */
138247 +static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value)
138249 +    ZSTD_bounds const bounds = ZSTD_dParam_getBounds(dParam);
138250 +    if (ZSTD_isError(bounds.error)) return 0;
138251 +    if (value < bounds.lowerBound) return 0;
138252 +    if (value > bounds.upperBound) return 0;
138253 +    return 1;
138256 +#define CHECK_DBOUNDS(p,v) {                \
138257 +    RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound, ""); \
138260 +size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value)
138262 +    switch (param) {
138263 +        case ZSTD_d_windowLogMax:
138264 +            *value = (int)ZSTD_highbit32((U32)dctx->maxWindowSize);
138265 +            return 0;
138266 +        case ZSTD_d_format:
138267 +            *value = (int)dctx->format;
138268 +            return 0;
138269 +        case ZSTD_d_stableOutBuffer:
138270 +            *value = (int)dctx->outBufferMode;
138271 +            return 0;
138272 +        case ZSTD_d_forceIgnoreChecksum:
138273 +            *value = (int)dctx->forceIgnoreChecksum;
138274 +            return 0;
138275 +        case ZSTD_d_refMultipleDDicts:
138276 +            *value = (int)dctx->refMultipleDDicts;
138277 +            return 0;
138278 +        default:;
138279 +    }
138280 +    RETURN_ERROR(parameter_unsupported, "");
138283 +size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value)
138285 +    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
138286 +    switch(dParam) {
138287 +        case ZSTD_d_windowLogMax:
138288 +            if (value == 0) value = ZSTD_WINDOWLOG_LIMIT_DEFAULT;
138289 +            CHECK_DBOUNDS(ZSTD_d_windowLogMax, value);
138290 +            dctx->maxWindowSize = ((size_t)1) << value;
138291 +            return 0;
138292 +        case ZSTD_d_format:
138293 +            CHECK_DBOUNDS(ZSTD_d_format, value);
138294 +            dctx->format = (ZSTD_format_e)value;
138295 +            return 0;
138296 +        case ZSTD_d_stableOutBuffer:
138297 +            CHECK_DBOUNDS(ZSTD_d_stableOutBuffer, value);
138298 +            dctx->outBufferMode = (ZSTD_bufferMode_e)value;
138299 +            return 0;
138300 +        case ZSTD_d_forceIgnoreChecksum:
138301 +            CHECK_DBOUNDS(ZSTD_d_forceIgnoreChecksum, value);
138302 +            dctx->forceIgnoreChecksum = (ZSTD_forceIgnoreChecksum_e)value;
138303 +            return 0;
138304 +        case ZSTD_d_refMultipleDDicts:
138305 +            CHECK_DBOUNDS(ZSTD_d_refMultipleDDicts, value);
138306 +            if (dctx->staticSize != 0) {
138307 +                RETURN_ERROR(parameter_unsupported, "Static dctx does not support multiple DDicts!");
138308 +            }
138309 +            dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value;
138310 +            return 0;
138311 +        default:;
138312 +    }
138313 +    RETURN_ERROR(parameter_unsupported, "");
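
[Editor's note] A usage sketch, not part of the patch: a set/get round-trip. Parameters can only be changed while the stream is in zdss_init, i.e. before any data has been pushed.

#include <zstd.h>

static size_t configure_magicless(ZSTD_DCtx* dctx)
{
    int v = 0;
    size_t const err = ZSTD_DCtx_setParameter(dctx, ZSTD_d_format,
                                              (int)ZSTD_f_zstd1_magicless);
    if (ZSTD_isError(err)) return err;
    ZSTD_DCtx_getParameter(dctx, ZSTD_d_format, &v);   /* v == ZSTD_f_zstd1_magicless */
    return 0;
}
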
138316 +size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset)
138318 +    if ( (reset == ZSTD_reset_session_only)
138319 +      || (reset == ZSTD_reset_session_and_parameters) ) {
138320 +        dctx->streamStage = zdss_init;
138321 +        dctx->noForwardProgress = 0;
138322 +    }
138323 +    if ( (reset == ZSTD_reset_parameters)
138324 +      || (reset == ZSTD_reset_session_and_parameters) ) {
138325 +        RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
138326 +        ZSTD_clearDict(dctx);
138327 +        ZSTD_DCtx_resetParameters(dctx);
138328 +    }
138329 +    return 0;
138333 +size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx)
138335 +    return ZSTD_sizeof_DCtx(dctx);
138338 +size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize)
138340 +    size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
138341 +    unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2);
138342 +    unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
138343 +    size_t const minRBSize = (size_t) neededSize;
138344 +    RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize,
138345 +                    frameParameter_windowTooLarge, "");
138346 +    return minRBSize;
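
[Editor's note] A worked example, not part of the patch, of the formula above, assuming ZSTD_BLOCKSIZE_MAX = 131072 (128 KB) and WILDCOPY_OVERLENGTH = 32, their values in this patch:

/* windowSize = 1 MB, frameContentSize unknown (treated as very large) :
 *   blockSize    = MIN(1048576, 131072)    = 131072
 *   neededRBSize = 1048576 + 131072 + 2*32 = 1179712   (~1.13 MB)
 *   minRBSize    = 1179712
 * A small known frame caps it : frameContentSize = 4096 gives minRBSize = 4096. */
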
138349 +size_t ZSTD_estimateDStreamSize(size_t windowSize)
138351 +    size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
138352 +    size_t const inBuffSize = blockSize;  /* no block can be larger */
138353 +    size_t const outBuffSize = ZSTD_decodingBufferSize_min(windowSize, ZSTD_CONTENTSIZE_UNKNOWN);
138354 +    return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize;
138357 +size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
138359 +    U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX;   /* note : should be user-selectable, but requires an additional parameter (or a dctx) */
138360 +    ZSTD_frameHeader zfh;
138361 +    size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
138362 +    if (ZSTD_isError(err)) return err;
138363 +    RETURN_ERROR_IF(err>0, srcSize_wrong, "");
138364 +    RETURN_ERROR_IF(zfh.windowSize > windowSizeMax,
138365 +                    frameParameter_windowTooLarge, "");
138366 +    return ZSTD_estimateDStreamSize((size_t)zfh.windowSize);
138370 +/* *****   Decompression   ***** */
138372 +static int ZSTD_DCtx_isOverflow(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize)
138374 +    return (zds->inBuffSize + zds->outBuffSize) >= (neededInBuffSize + neededOutBuffSize) * ZSTD_WORKSPACETOOLARGE_FACTOR;
138377 +static void ZSTD_DCtx_updateOversizedDuration(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize)
138379 +    if (ZSTD_DCtx_isOverflow(zds, neededInBuffSize, neededOutBuffSize))
138380 +        zds->oversizedDuration++;
138381 +    else
138382 +        zds->oversizedDuration = 0;
138385 +static int ZSTD_DCtx_isOversizedTooLong(ZSTD_DStream* zds)
138387 +    return zds->oversizedDuration >= ZSTD_WORKSPACETOOLARGE_MAXDURATION;
138390 +/* Checks that the output buffer hasn't changed if ZSTD_bm_stable is used. */
138391 +static size_t ZSTD_checkOutBuffer(ZSTD_DStream const* zds, ZSTD_outBuffer const* output)
138393 +    ZSTD_outBuffer const expect = zds->expectedOutBuffer;
138394 +    /* No requirement when ZSTD_obm_stable is not enabled. */
138395 +    if (zds->outBufferMode != ZSTD_bm_stable)
138396 +        return 0;
138397 +    /* Any buffer is allowed in zdss_init; after that, the same buffer must be
138398 +     * passed on every call until the context is reset.
138399 +     */
138400 +    if (zds->streamStage == zdss_init)
138401 +        return 0;
138402 +    /* The buffer must match our expectation exactly. */
138403 +    if (expect.dst == output->dst && expect.pos == output->pos && expect.size == output->size)
138404 +        return 0;
138405 +    RETURN_ERROR(dstBuffer_wrong, "ZSTD_d_stableOutBuffer enabled but output differs!");
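
[Editor's note] A usage sketch, not part of the patch, of the stable-output-buffer contract enforced above: the same ZSTD_outBuffer must be passed on every call, and it must be large enough for the whole frame.

#include <zstd.h>

static size_t decompress_stable(ZSTD_DCtx* dctx, void* dst, size_t dstCap,
                                const void* src, size_t srcSize)
{
    ZSTD_outBuffer out = { dst, dstCap, 0 };   /* must not change across calls */
    ZSTD_inBuffer  in  = { src, srcSize, 0 };
    ZSTD_DCtx_setParameter(dctx, ZSTD_d_stableOutBuffer, 1);
    while (in.pos < in.size) {
        size_t const ret = ZSTD_decompressStream(dctx, &out, &in);
        if (ZSTD_isError(ret)) return ret;
        if (ret == 0) break;   /* frame fully decoded and flushed */
    }
    return out.pos;
}
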
138408 +/* Calls ZSTD_decompressContinue() with the right parameters for ZSTD_decompressStream()
138409 + * and updates the stage and the output buffer state. This call is extracted so it can be
138410 + * used both when reading directly from the ZSTD_inBuffer, and in buffered input mode.
138411 + * NOTE: You must break after calling this function since the streamStage is modified.
138412 + */
138413 +static size_t ZSTD_decompressContinueStream(
138414 +            ZSTD_DStream* zds, char** op, char* oend,
138415 +            void const* src, size_t srcSize) {
138416 +    int const isSkipFrame = ZSTD_isSkipFrame(zds);
138417 +    if (zds->outBufferMode == ZSTD_bm_buffered) {
138418 +        size_t const dstSize = isSkipFrame ? 0 : zds->outBuffSize - zds->outStart;
138419 +        size_t const decodedSize = ZSTD_decompressContinue(zds,
138420 +                zds->outBuff + zds->outStart, dstSize, src, srcSize);
138421 +        FORWARD_IF_ERROR(decodedSize, "");
138422 +        if (!decodedSize && !isSkipFrame) {
138423 +            zds->streamStage = zdss_read;
138424 +        } else {
138425 +            zds->outEnd = zds->outStart + decodedSize;
138426 +            zds->streamStage = zdss_flush;
138427 +        }
138428 +    } else {
138429 +        /* Write directly into the output buffer */
138430 +        size_t const dstSize = isSkipFrame ? 0 : (size_t)(oend - *op);
138431 +        size_t const decodedSize = ZSTD_decompressContinue(zds, *op, dstSize, src, srcSize);
138432 +        FORWARD_IF_ERROR(decodedSize, "");
138433 +        *op += decodedSize;
138434 +        /* Flushing is not needed. */
138435 +        zds->streamStage = zdss_read;
138436 +        assert(*op <= oend);
138437 +        assert(zds->outBufferMode == ZSTD_bm_stable);
138438 +    }
138439 +    return 0;
138442 +size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
138444 +    const char* const src = (const char*)input->src;
138445 +    const char* const istart = input->pos != 0 ? src + input->pos : src;
138446 +    const char* const iend = input->size != 0 ? src + input->size : src;
138447 +    const char* ip = istart;
138448 +    char* const dst = (char*)output->dst;
138449 +    char* const ostart = output->pos != 0 ? dst + output->pos : dst;
138450 +    char* const oend = output->size != 0 ? dst + output->size : dst;
138451 +    char* op = ostart;
138452 +    U32 someMoreWork = 1;
138454 +    DEBUGLOG(5, "ZSTD_decompressStream");
138455 +    RETURN_ERROR_IF(
138456 +        input->pos > input->size,
138457 +        srcSize_wrong,
138458 +        "forbidden. in: pos: %u   vs size: %u",
138459 +        (U32)input->pos, (U32)input->size);
138460 +    RETURN_ERROR_IF(
138461 +        output->pos > output->size,
138462 +        dstSize_tooSmall,
138463 +        "forbidden. out: pos: %u   vs size: %u",
138464 +        (U32)output->pos, (U32)output->size);
138465 +    DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos));
138466 +    FORWARD_IF_ERROR(ZSTD_checkOutBuffer(zds, output), "");
138468 +    while (someMoreWork) {
138469 +        switch(zds->streamStage)
138470 +        {
138471 +        case zdss_init :
138472 +            DEBUGLOG(5, "stage zdss_init => transparent reset ");
138473 +            zds->streamStage = zdss_loadHeader;
138474 +            zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
138475 +            zds->legacyVersion = 0;
138476 +            zds->hostageByte = 0;
138477 +            zds->expectedOutBuffer = *output;
138478 +            /* fall-through */
138480 +        case zdss_loadHeader :
138481 +            DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip));
138482 +            {   size_t const hSize = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format);
138483 +                if (zds->refMultipleDDicts && zds->ddictSet) {
138484 +                    ZSTD_DCtx_selectFrameDDict(zds);
138485 +                }
138486 +                DEBUGLOG(5, "header size : %u", (U32)hSize);
138487 +                if (ZSTD_isError(hSize)) {
138488 +                    return hSize;   /* error */
138489 +                }
138490 +                if (hSize != 0) {   /* need more input */
138491 +                    size_t const toLoad = hSize - zds->lhSize;   /* if hSize!=0, hSize > zds->lhSize */
138492 +                    size_t const remainingInput = (size_t)(iend-ip);
138493 +                    assert(iend >= ip);
138494 +                    if (toLoad > remainingInput) {   /* not enough input to load full header */
138495 +                        if (remainingInput > 0) {
138496 +                            ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, remainingInput);
138497 +                            zds->lhSize += remainingInput;
138498 +                        }
138499 +                        input->pos = input->size;
138500 +                        return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize;   /* remaining header bytes + next block header */
138501 +                    }
138502 +                    assert(ip != NULL);
138503 +                    ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad;
138504 +                    break;
138505 +            }   }
138507 +            /* check for single-pass mode opportunity */
138508 +            if (zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
138509 +                && zds->fParams.frameType != ZSTD_skippableFrame
138510 +                && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) {
138511 +                size_t const cSize = ZSTD_findFrameCompressedSize(istart, (size_t)(iend-istart));
138512 +                if (cSize <= (size_t)(iend-istart)) {
138513 +                    /* shortcut : using single-pass mode */
138514 +                    size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, (size_t)(oend-op), istart, cSize, ZSTD_getDDict(zds));
138515 +                    if (ZSTD_isError(decompressedSize)) return decompressedSize;
138516 +                    DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()");
138517 +                    ip = istart + cSize;
138518 +                    op += decompressedSize;
138519 +                    zds->expected = 0;
138520 +                    zds->streamStage = zdss_init;
138521 +                    someMoreWork = 0;
138522 +                    break;
138523 +            }   }
138525 +            /* Check output buffer is large enough for ZSTD_bm_stable. */
138526 +            if (zds->outBufferMode == ZSTD_bm_stable
138527 +                && zds->fParams.frameType != ZSTD_skippableFrame
138528 +                && zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
138529 +                && (U64)(size_t)(oend-op) < zds->fParams.frameContentSize) {
138530 +                RETURN_ERROR(dstSize_tooSmall, "ZSTD_d_stableOutBuffer enabled but ZSTD_outBuffer is too small");
138531 +            }
138533 +            /* Consume header (see ZSTDds_decodeFrameHeader) */
138534 +            DEBUGLOG(4, "Consume header");
138535 +            FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)), "");
138537 +            if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {  /* skippable frame */
138538 +                zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE);
138539 +                zds->stage = ZSTDds_skipFrame;
138540 +            } else {
138541 +                FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize), "");
138542 +                zds->expected = ZSTD_blockHeaderSize;
138543 +                zds->stage = ZSTDds_decodeBlockHeader;
138544 +            }
138546 +            /* control buffer memory usage */
138547 +            DEBUGLOG(4, "Control max memory usage (%u KB <= max %u KB)",
138548 +                        (U32)(zds->fParams.windowSize >>10),
138549 +                        (U32)(zds->maxWindowSize >> 10) );
138550 +            zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
138551 +            RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize,
138552 +                            frameParameter_windowTooLarge, "");
138554 +            /* Adapt buffer sizes to frame header instructions */
138555 +            {   size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
138556 +                size_t const neededOutBuffSize = zds->outBufferMode == ZSTD_bm_buffered
138557 +                        ? ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize)
138558 +                        : 0;
138560 +                ZSTD_DCtx_updateOversizedDuration(zds, neededInBuffSize, neededOutBuffSize);
138562 +                {   int const tooSmall = (zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize);
138563 +                    int const tooLarge = ZSTD_DCtx_isOversizedTooLong(zds);
138565 +                    if (tooSmall || tooLarge) {
138566 +                        size_t const bufferSize = neededInBuffSize + neededOutBuffSize;
138567 +                        DEBUGLOG(4, "inBuff  : from %u to %u",
138568 +                                    (U32)zds->inBuffSize, (U32)neededInBuffSize);
138569 +                        DEBUGLOG(4, "outBuff : from %u to %u",
138570 +                                    (U32)zds->outBuffSize, (U32)neededOutBuffSize);
138571 +                        if (zds->staticSize) {  /* static DCtx */
138572 +                            DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize);
138573 +                            assert(zds->staticSize >= sizeof(ZSTD_DCtx));  /* controlled at init */
138574 +                            RETURN_ERROR_IF(
138575 +                                bufferSize > zds->staticSize - sizeof(ZSTD_DCtx),
138576 +                                memory_allocation, "");
138577 +                        } else {
138578 +                            ZSTD_customFree(zds->inBuff, zds->customMem);
138579 +                            zds->inBuffSize = 0;
138580 +                            zds->outBuffSize = 0;
138581 +                            zds->inBuff = (char*)ZSTD_customMalloc(bufferSize, zds->customMem);
138582 +                            RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation, "");
138583 +                        }
138584 +                        zds->inBuffSize = neededInBuffSize;
138585 +                        zds->outBuff = zds->inBuff + zds->inBuffSize;
138586 +                        zds->outBuffSize = neededOutBuffSize;
138587 +            }   }   }
138588 +            zds->streamStage = zdss_read;
138589 +            /* fall-through */
138591 +        case zdss_read:
138592 +            DEBUGLOG(5, "stage zdss_read");
138593 +            {   size_t const neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (size_t)(iend - ip));
138594 +                DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize);
138595 +                if (neededInSize==0) {  /* end of frame */
138596 +                    zds->streamStage = zdss_init;
138597 +                    someMoreWork = 0;
138598 +                    break;
138599 +                }
138600 +                if ((size_t)(iend-ip) >= neededInSize) {  /* decode directly from src */
138601 +                    FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, ip, neededInSize), "");
138602 +                    ip += neededInSize;
138603 +                    /* Function modifies the stage so we must break */
138604 +                    break;
138605 +            }   }
138606 +            if (ip==iend) { someMoreWork = 0; break; }   /* no more input */
138607 +            zds->streamStage = zdss_load;
138608 +            /* fall-through */
138610 +        case zdss_load:
138611 +            {   size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);
138612 +                size_t const toLoad = neededInSize - zds->inPos;
138613 +                int const isSkipFrame = ZSTD_isSkipFrame(zds);
138614 +                size_t loadedSize;
138615 +                /* At this point we shouldn't be decompressing a block that we can stream. */
138616 +                assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, iend - ip));
138617 +                if (isSkipFrame) {
138618 +                    loadedSize = MIN(toLoad, (size_t)(iend-ip));
138619 +                } else {
138620 +                    RETURN_ERROR_IF(toLoad > zds->inBuffSize - zds->inPos,
138621 +                                    corruption_detected,
138622 +                                    "should never happen");
138623 +                    loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, (size_t)(iend-ip));
138624 +                }
138625 +                ip += loadedSize;
138626 +                zds->inPos += loadedSize;
138627 +                if (loadedSize < toLoad) { someMoreWork = 0; break; }   /* not enough input, wait for more */
138629 +                /* decode loaded input */
138630 +                zds->inPos = 0;   /* input is consumed */
138631 +                FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, zds->inBuff, neededInSize), "");
138632 +                /* Function modifies the stage so we must break */
138633 +                break;
138634 +            }
138635 +        case zdss_flush:
138636 +            {   size_t const toFlushSize = zds->outEnd - zds->outStart;
138637 +                size_t const flushedSize = ZSTD_limitCopy(op, (size_t)(oend-op), zds->outBuff + zds->outStart, toFlushSize);
138638 +                op += flushedSize;
138639 +                zds->outStart += flushedSize;
138640 +                if (flushedSize == toFlushSize) {  /* flush completed */
138641 +                    zds->streamStage = zdss_read;
138642 +                    if ( (zds->outBuffSize < zds->fParams.frameContentSize)
138643 +                      && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) {
138644 +                        DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)",
138645 +                                (int)(zds->outBuffSize - zds->outStart),
138646 +                                (U32)zds->fParams.blockSizeMax);
138647 +                        zds->outStart = zds->outEnd = 0;
138648 +                    }
138649 +                    break;
138650 +            }   }
138651 +            /* cannot complete flush */
138652 +            someMoreWork = 0;
138653 +            break;
138655 +        default:
138656 +            assert(0);    /* impossible */
138657 +            RETURN_ERROR(GENERIC, "impossible to reach");   /* some compilers require the default case to do something */
138658 +    }   }
138660 +    /* result */
138661 +    input->pos = (size_t)(ip - (const char*)(input->src));
138662 +    output->pos = (size_t)(op - (char*)(output->dst));
138664 +    /* Update the expected output buffer for ZSTD_bm_stable. */
138665 +    zds->expectedOutBuffer = *output;
138667 +    if ((ip==istart) && (op==ostart)) {  /* no forward progress */
138668 +        zds->noForwardProgress ++;
138669 +        if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) {
138670 +            RETURN_ERROR_IF(op==oend, dstSize_tooSmall, "");
138671 +            RETURN_ERROR_IF(ip==iend, srcSize_wrong, "");
138672 +            assert(0);
138673 +        }
138674 +    } else {
138675 +        zds->noForwardProgress = 0;
138676 +    }
138677 +    {   size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds);
138678 +        if (!nextSrcSizeHint) {   /* frame fully decoded */
138679 +            if (zds->outEnd == zds->outStart) {  /* output fully flushed */
138680 +                if (zds->hostageByte) {
138681 +                    if (input->pos >= input->size) {
138682 +                        /* can't release hostage (not present) */
138683 +                        zds->streamStage = zdss_read;
138684 +                        return 1;
138685 +                    }
138686 +                    input->pos++;  /* release hostage */
138687 +                }   /* zds->hostageByte */
138688 +                return 0;
138689 +            }  /* zds->outEnd == zds->outStart */
138690 +            if (!zds->hostageByte) { /* output not fully flushed; keep the last byte as hostage; it will be released once all output is flushed */
138691 +                input->pos--;   /* note : pos > 0, otherwise it would be impossible to finish reading the last block */
138692 +                zds->hostageByte=1;
138693 +            }
138694 +            return 1;
138695 +        }  /* nextSrcSizeHint==0 */
138696 +        nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds) == ZSTDnit_block);   /* preload header of next block */
138697 +        assert(zds->inPos <= nextSrcSizeHint);
138698 +        nextSrcSizeHint -= zds->inPos;   /* part already loaded*/
138699 +        return nextSrcSizeHint;
138700 +    }
138703 +size_t ZSTD_decompressStream_simpleArgs (
138704 +                            ZSTD_DCtx* dctx,
138705 +                            void* dst, size_t dstCapacity, size_t* dstPos,
138706 +                      const void* src, size_t srcSize, size_t* srcPos)
138708 +    ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
138709 +    ZSTD_inBuffer  input  = { src, srcSize, *srcPos };
138710 +    /* ZSTD_decompressStream() will check validity of dstPos and srcPos */
138711 +    size_t const cErr = ZSTD_decompressStream(dctx, &output, &input);
138712 +    *dstPos = output.pos;
138713 +    *srcPos = input.pos;
138714 +    return cErr;
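
[Editor's note] A usage sketch, not part of the patch: the _simpleArgs variant is a thin wrapper for bindings that cannot pass structs; both position arguments are in/out.

#include <zstd.h>

static size_t pump(ZSTD_DCtx* dctx, void* dst, size_t dstCap,
                   const void* src, size_t srcSize)
{
    size_t dstPos = 0, srcPos = 0;
    while (srcPos < srcSize) {
        size_t const hint = ZSTD_decompressStream_simpleArgs(dctx,
                                dst, dstCap, &dstPos, src, srcSize, &srcPos);
        if (ZSTD_isError(hint)) return hint;
        if (hint == 0) break;          /* a frame finished and is fully flushed */
        if (dstPos == dstCap) break;   /* output full : caller must drain dst */
    }
    return dstPos;
}
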
138716 diff --git a/lib/zstd/decompress/zstd_decompress_block.c b/lib/zstd/decompress/zstd_decompress_block.c
138717 new file mode 100644
138718 index 000000000000..cd6eba55a21c
138719 --- /dev/null
138720 +++ b/lib/zstd/decompress/zstd_decompress_block.c
138721 @@ -0,0 +1,1540 @@
138723 + * Copyright (c) Yann Collet, Facebook, Inc.
138724 + * All rights reserved.
138726 + * This source code is licensed under both the BSD-style license (found in the
138727 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
138728 + * in the COPYING file in the root directory of this source tree).
138729 + * You may select, at your option, one of the above-listed licenses.
138730 + */
138732 +/* zstd_decompress_block :
138733 + * this module takes care of decompressing _compressed_ blocks */
138735 +/*-*******************************************************
138736 +*  Dependencies
138737 +*********************************************************/
138738 +#include "../common/zstd_deps.h"   /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
138739 +#include "../common/compiler.h"    /* prefetch */
138740 +#include "../common/cpu.h"         /* bmi2 */
138741 +#include "../common/mem.h"         /* low level memory routines */
138742 +#define FSE_STATIC_LINKING_ONLY
138743 +#include "../common/fse.h"
138744 +#define HUF_STATIC_LINKING_ONLY
138745 +#include "../common/huf.h"
138746 +#include "../common/zstd_internal.h"
138747 +#include "zstd_decompress_internal.h"   /* ZSTD_DCtx */
138748 +#include "zstd_ddict.h"  /* ZSTD_DDictDictContent */
138749 +#include "zstd_decompress_block.h"
138751 +/*_*******************************************************
138752 +*  Macros
138753 +**********************************************************/
138755 +/* These two optional macros force the use of one or the other of the two
138756 + * ZSTD_decompressSequences implementations. They cannot both be defined
138757 + * at the same time.
138758 + */
138759 +#if defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
138760 +    defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
138761 +#error "Cannot force the use of the short and the long ZSTD_decompressSequences variants!"
138762 +#endif
138765 +/*_*******************************************************
138766 +*  Memory operations
138767 +**********************************************************/
138768 +static void ZSTD_copy4(void* dst, const void* src) { ZSTD_memcpy(dst, src, 4); }
138771 +/*-*************************************************************
138772 + *   Block decoding
138773 + ***************************************************************/
138775 +/*! ZSTD_getcBlockSize() :
138776 + *  Provides the size of compressed block from block header `src` */
138777 +size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
138778 +                          blockProperties_t* bpPtr)
138780 +    RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong, "");
138782 +    {   U32 const cBlockHeader = MEM_readLE24(src);
138783 +        U32 const cSize = cBlockHeader >> 3;
138784 +        bpPtr->lastBlock = cBlockHeader & 1;
138785 +        bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
138786 +        bpPtr->origSize = cSize;   /* only useful for RLE */
138787 +        if (bpPtr->blockType == bt_rle) return 1;
138788 +        RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected, "");
138789 +        return cSize;
138790 +    }
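
[Editor's note] A worked example, not part of the patch, of the block-header decoding above; the byte values are made up for illustration.

/* header bytes { 0x25, 0x01, 0x00 }  ->  cBlockHeader = MEM_readLE24() = 0x000125
 *   lastBlock = bit 0      = 1                    (this is the last block)
 *   blockType = bits 1..2  = (0x125 >> 1) & 3 = 2 = bt_compressed
 *   cSize     = bits 3..23 = 0x125 >> 3       = 36 bytes of compressed payload
 * For bt_rle the function returns 1 instead : a single byte, repeated origSize times. */
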
138794 +/* Hidden declaration for fullbench */
138795 +size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
138796 +                          const void* src, size_t srcSize);
138797 +/*! ZSTD_decodeLiteralsBlock() :
138798 + * @return : number of bytes read from src (< srcSize)
138799 + *  note : symbol not declared but exposed for fullbench */
138800 +size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
138801 +                          const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */
138803 +    DEBUGLOG(5, "ZSTD_decodeLiteralsBlock");
138804 +    RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, "");
138806 +    {   const BYTE* const istart = (const BYTE*) src;
138807 +        symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
138809 +        switch(litEncType)
138810 +        {
138811 +        case set_repeat:
138812 +            DEBUGLOG(5, "set_repeat flag : re-using stats from previous compressed literals block");
138813 +            RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted, "");
138814 +            /* fall-through */
138816 +        case set_compressed:
138817 +            RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3");
138818 +            {   size_t lhSize, litSize, litCSize;
138819 +                U32 singleStream=0;
138820 +                U32 const lhlCode = (istart[0] >> 2) & 3;
138821 +                U32 const lhc = MEM_readLE32(istart);
138822 +                size_t hufSuccess;
138823 +                switch(lhlCode)
138824 +                {
138825 +                case 0: case 1: default:   /* note : default is impossible, since lhlCode is in [0..3] */
138826 +                    /* 2 - 2 - 10 - 10 */
138827 +                    singleStream = !lhlCode;
138828 +                    lhSize = 3;
138829 +                    litSize  = (lhc >> 4) & 0x3FF;
138830 +                    litCSize = (lhc >> 14) & 0x3FF;
138831 +                    break;
138832 +                case 2:
138833 +                    /* 2 - 2 - 14 - 14 */
138834 +                    lhSize = 4;
138835 +                    litSize  = (lhc >> 4) & 0x3FFF;
138836 +                    litCSize = lhc >> 18;
138837 +                    break;
138838 +                case 3:
138839 +                    /* 2 - 2 - 18 - 18 */
138840 +                    lhSize = 5;
138841 +                    litSize  = (lhc >> 4) & 0x3FFFF;
138842 +                    litCSize = (lhc >> 22) + ((size_t)istart[4] << 10);
138843 +                    break;
138844 +                }
138845 +                RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
138846 +                RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, "");
138848 +                /* prefetch huffman table if cold */
138849 +                if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {
138850 +                    PREFETCH_AREA(dctx->HUFptr, sizeof(dctx->entropy.hufTable));
138851 +                }
138853 +                if (litEncType==set_repeat) {
138854 +                    if (singleStream) {
138855 +                        hufSuccess = HUF_decompress1X_usingDTable_bmi2(
138856 +                            dctx->litBuffer, litSize, istart+lhSize, litCSize,
138857 +                            dctx->HUFptr, dctx->bmi2);
138858 +                    } else {
138859 +                        hufSuccess = HUF_decompress4X_usingDTable_bmi2(
138860 +                            dctx->litBuffer, litSize, istart+lhSize, litCSize,
138861 +                            dctx->HUFptr, dctx->bmi2);
138862 +                    }
138863 +                } else {
138864 +                    if (singleStream) {
138865 +#if defined(HUF_FORCE_DECOMPRESS_X2)
138866 +                        hufSuccess = HUF_decompress1X_DCtx_wksp(
138867 +                            dctx->entropy.hufTable, dctx->litBuffer, litSize,
138868 +                            istart+lhSize, litCSize, dctx->workspace,
138869 +                            sizeof(dctx->workspace));
138870 +#else
138871 +                        hufSuccess = HUF_decompress1X1_DCtx_wksp_bmi2(
138872 +                            dctx->entropy.hufTable, dctx->litBuffer, litSize,
138873 +                            istart+lhSize, litCSize, dctx->workspace,
138874 +                            sizeof(dctx->workspace), dctx->bmi2);
138875 +#endif
138876 +                    } else {
138877 +                        hufSuccess = HUF_decompress4X_hufOnly_wksp_bmi2(
138878 +                            dctx->entropy.hufTable, dctx->litBuffer, litSize,
138879 +                            istart+lhSize, litCSize, dctx->workspace,
138880 +                            sizeof(dctx->workspace), dctx->bmi2);
138881 +                    }
138882 +                }
138884 +                RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected, "");
138886 +                dctx->litPtr = dctx->litBuffer;
138887 +                dctx->litSize = litSize;
138888 +                dctx->litEntropy = 1;
138889 +                if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable;
138890 +                ZSTD_memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
138891 +                return litCSize + lhSize;
138892 +            }
138894 +        case set_basic:
138895 +            {   size_t litSize, lhSize;
138896 +                U32 const lhlCode = ((istart[0]) >> 2) & 3;
138897 +                switch(lhlCode)
138898 +                {
138899 +                case 0: case 2: default:   /* note : default is impossible, since lhlCode is in [0..3] */
138900 +                    lhSize = 1;
138901 +                    litSize = istart[0] >> 3;
138902 +                    break;
138903 +                case 1:
138904 +                    lhSize = 2;
138905 +                    litSize = MEM_readLE16(istart) >> 4;
138906 +                    break;
138907 +                case 3:
138908 +                    lhSize = 3;
138909 +                    litSize = MEM_readLE24(istart) >> 4;
138910 +                    break;
138911 +                }
138913 +                if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) {  /* risk reading beyond src buffer with wildcopy */
138914 +                    RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected, "");
138915 +                    ZSTD_memcpy(dctx->litBuffer, istart+lhSize, litSize);
138916 +                    dctx->litPtr = dctx->litBuffer;
138917 +                    dctx->litSize = litSize;
138918 +                    ZSTD_memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
138919 +                    return lhSize+litSize;
138920 +                }
138921 +                /* direct reference into compressed stream */
138922 +                dctx->litPtr = istart+lhSize;
138923 +                dctx->litSize = litSize;
138924 +                return lhSize+litSize;
138925 +            }
138927 +        case set_rle:
138928 +            {   U32 const lhlCode = ((istart[0]) >> 2) & 3;
138929 +                size_t litSize, lhSize;
138930 +                switch(lhlCode)
138931 +                {
138932 +                case 0: case 2: default:   /* note : default is impossible, since lhlCode is in [0..3] */
138933 +                    lhSize = 1;
138934 +                    litSize = istart[0] >> 3;
138935 +                    break;
138936 +                case 1:
138937 +                    lhSize = 2;
138938 +                    litSize = MEM_readLE16(istart) >> 4;
138939 +                    break;
138940 +                case 3:
138941 +                    lhSize = 3;
138942 +                    litSize = MEM_readLE24(istart) >> 4;
138943 +                    RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4");
138944 +                    break;
138945 +                }
138946 +                RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
138947 +                ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
138948 +                dctx->litPtr = dctx->litBuffer;
138949 +                dctx->litSize = litSize;
138950 +                return lhSize+1;
138951 +            }
138952 +        default:
138953 +            RETURN_ERROR(corruption_detected, "impossible");
138954 +        }
138955 +    }
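
[Editor's note] A worked example, not part of the patch, of the literals-section header decoding above, for the set_basic path; the byte values are made up for illustration.

/* istart = { 0x84, 0x03, ... }, istart[0] = 0x84 = 10000100b
 *   litEncType = bits 0..1 = 0  -> set_basic (raw literals)
 *   lhlCode    = bits 2..3 = 1  -> lhSize = 2, litSize = MEM_readLE16(istart) >> 4
 *   MEM_readLE16(istart) = 0x0384, litSize = 0x38 = 56
 * => 56 raw literal bytes follow the 2-byte header. */
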
138958 +/* Default FSE distribution tables.
138959 + * These are pre-calculated FSE decoding tables using the default distributions as defined in the specification :
138960 + * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#default-distributions
138961 + * They were generated programmatically with the following method :
138962 + * - start from the default distributions, present in /lib/common/zstd_internal.h
138963 + * - generate the tables normally, using ZSTD_buildFSETable()
138964 + * - print out the content of the tables
138965 + * - prettify the output, reproduce it below, and test with a fuzzer to ensure correctness */
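
[Editor's note] A sketch, not part of the patch, of the generation procedure described above; LL_defaultNorm, LL_base, LL_bits and the workspace macro come from zstd's internal headers, and the printing step is only indicated.

static void print_default_LL_table(void)
{
    ZSTD_seqSymbol dt[(1 << LL_DEFAULTNORMLOG) + 1];
    U32 wksp[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32];
    ZSTD_buildFSETable(dt, LL_defaultNorm, MaxLL,
                       LL_base, LL_bits, LL_DEFAULTNORMLOG,
                       wksp, sizeof(wksp), /* bmi2 */ 0);
    /* then dump dt[0..tableSize] as { nextState, nbAddBits, nbBits, baseVal },
     * which yields the LL_defaultDTable below */
}
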
138967 +/* Default FSE distribution table for Literal Lengths */
138968 +static const ZSTD_seqSymbol LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = {
138969 +     {  1,  1,  1, LL_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
138970 +     /* nextState, nbAddBits, nbBits, baseVal */
138971 +     {  0,  0,  4,    0},  { 16,  0,  4,    0},
138972 +     { 32,  0,  5,    1},  {  0,  0,  5,    3},
138973 +     {  0,  0,  5,    4},  {  0,  0,  5,    6},
138974 +     {  0,  0,  5,    7},  {  0,  0,  5,    9},
138975 +     {  0,  0,  5,   10},  {  0,  0,  5,   12},
138976 +     {  0,  0,  6,   14},  {  0,  1,  5,   16},
138977 +     {  0,  1,  5,   20},  {  0,  1,  5,   22},
138978 +     {  0,  2,  5,   28},  {  0,  3,  5,   32},
138979 +     {  0,  4,  5,   48},  { 32,  6,  5,   64},
138980 +     {  0,  7,  5,  128},  {  0,  8,  6,  256},
138981 +     {  0, 10,  6, 1024},  {  0, 12,  6, 4096},
138982 +     { 32,  0,  4,    0},  {  0,  0,  4,    1},
138983 +     {  0,  0,  5,    2},  { 32,  0,  5,    4},
138984 +     {  0,  0,  5,    5},  { 32,  0,  5,    7},
138985 +     {  0,  0,  5,    8},  { 32,  0,  5,   10},
138986 +     {  0,  0,  5,   11},  {  0,  0,  6,   13},
138987 +     { 32,  1,  5,   16},  {  0,  1,  5,   18},
138988 +     { 32,  1,  5,   22},  {  0,  2,  5,   24},
138989 +     { 32,  3,  5,   32},  {  0,  3,  5,   40},
138990 +     {  0,  6,  4,   64},  { 16,  6,  4,   64},
138991 +     { 32,  7,  5,  128},  {  0,  9,  6,  512},
138992 +     {  0, 11,  6, 2048},  { 48,  0,  4,    0},
138993 +     { 16,  0,  4,    1},  { 32,  0,  5,    2},
138994 +     { 32,  0,  5,    3},  { 32,  0,  5,    5},
138995 +     { 32,  0,  5,    6},  { 32,  0,  5,    8},
138996 +     { 32,  0,  5,    9},  { 32,  0,  5,   11},
138997 +     { 32,  0,  5,   12},  {  0,  0,  6,   15},
138998 +     { 32,  1,  5,   18},  { 32,  1,  5,   20},
138999 +     { 32,  2,  5,   24},  { 32,  2,  5,   28},
139000 +     { 32,  3,  5,   40},  { 32,  4,  5,   48},
139001 +     {  0, 16,  6,65536},  {  0, 15,  6,32768},
139002 +     {  0, 14,  6,16384},  {  0, 13,  6, 8192},
139003 +};   /* LL_defaultDTable */
139005 +/* Default FSE distribution table for Offset Codes */
139006 +static const ZSTD_seqSymbol OF_defaultDTable[(1<<OF_DEFAULTNORMLOG)+1] = {
139007 +    {  1,  1,  1, OF_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
139008 +    /* nextState, nbAddBits, nbBits, baseVal */
139009 +    {  0,  0,  5,    0},     {  0,  6,  4,   61},
139010 +    {  0,  9,  5,  509},     {  0, 15,  5,32765},
139011 +    {  0, 21,  5,2097149},   {  0,  3,  5,    5},
139012 +    {  0,  7,  4,  125},     {  0, 12,  5, 4093},
139013 +    {  0, 18,  5,262141},    {  0, 23,  5,8388605},
139014 +    {  0,  5,  5,   29},     {  0,  8,  4,  253},
139015 +    {  0, 14,  5,16381},     {  0, 20,  5,1048573},
139016 +    {  0,  2,  5,    1},     { 16,  7,  4,  125},
139017 +    {  0, 11,  5, 2045},     {  0, 17,  5,131069},
139018 +    {  0, 22,  5,4194301},   {  0,  4,  5,   13},
139019 +    { 16,  8,  4,  253},     {  0, 13,  5, 8189},
139020 +    {  0, 19,  5,524285},    {  0,  1,  5,    1},
139021 +    { 16,  6,  4,   61},     {  0, 10,  5, 1021},
139022 +    {  0, 16,  5,65533},     {  0, 28,  5,268435453},
139023 +    {  0, 27,  5,134217725}, {  0, 26,  5,67108861},
139024 +    {  0, 25,  5,33554429},  {  0, 24,  5,16777213},
139025 +};   /* OF_defaultDTable */
139028 +/* Default FSE distribution table for Match Lengths */
139029 +static const ZSTD_seqSymbol ML_defaultDTable[(1<<ML_DEFAULTNORMLOG)+1] = {
139030 +    {  1,  1,  1, ML_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
139031 +    /* nextState, nbAddBits, nbBits, baseVal */
139032 +    {  0,  0,  6,    3},  {  0,  0,  4,    4},
139033 +    { 32,  0,  5,    5},  {  0,  0,  5,    6},
139034 +    {  0,  0,  5,    8},  {  0,  0,  5,    9},
139035 +    {  0,  0,  5,   11},  {  0,  0,  6,   13},
139036 +    {  0,  0,  6,   16},  {  0,  0,  6,   19},
139037 +    {  0,  0,  6,   22},  {  0,  0,  6,   25},
139038 +    {  0,  0,  6,   28},  {  0,  0,  6,   31},
139039 +    {  0,  0,  6,   34},  {  0,  1,  6,   37},
139040 +    {  0,  1,  6,   41},  {  0,  2,  6,   47},
139041 +    {  0,  3,  6,   59},  {  0,  4,  6,   83},
139042 +    {  0,  7,  6,  131},  {  0,  9,  6,  515},
139043 +    { 16,  0,  4,    4},  {  0,  0,  4,    5},
139044 +    { 32,  0,  5,    6},  {  0,  0,  5,    7},
139045 +    { 32,  0,  5,    9},  {  0,  0,  5,   10},
139046 +    {  0,  0,  6,   12},  {  0,  0,  6,   15},
139047 +    {  0,  0,  6,   18},  {  0,  0,  6,   21},
139048 +    {  0,  0,  6,   24},  {  0,  0,  6,   27},
139049 +    {  0,  0,  6,   30},  {  0,  0,  6,   33},
139050 +    {  0,  1,  6,   35},  {  0,  1,  6,   39},
139051 +    {  0,  2,  6,   43},  {  0,  3,  6,   51},
139052 +    {  0,  4,  6,   67},  {  0,  5,  6,   99},
139053 +    {  0,  8,  6,  259},  { 32,  0,  4,    4},
139054 +    { 48,  0,  4,    4},  { 16,  0,  4,    5},
139055 +    { 32,  0,  5,    7},  { 32,  0,  5,    8},
139056 +    { 32,  0,  5,   10},  { 32,  0,  5,   11},
139057 +    {  0,  0,  6,   14},  {  0,  0,  6,   17},
139058 +    {  0,  0,  6,   20},  {  0,  0,  6,   23},
139059 +    {  0,  0,  6,   26},  {  0,  0,  6,   29},
139060 +    {  0,  0,  6,   32},  {  0, 16,  6,65539},
139061 +    {  0, 15,  6,32771},  {  0, 14,  6,16387},
139062 +    {  0, 13,  6, 8195},  {  0, 12,  6, 4099},
139063 +    {  0, 11,  6, 2051},  {  0, 10,  6, 1027},
139064 +};   /* ML_defaultDTable */
139067 +static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U32 nbAddBits)
139068 +{
139069 +    void* ptr = dt;
139070 +    ZSTD_seqSymbol_header* const DTableH = (ZSTD_seqSymbol_header*)ptr;
139071 +    ZSTD_seqSymbol* const cell = dt + 1;
139073 +    DTableH->tableLog = 0;
139074 +    DTableH->fastMode = 0;
139076 +    cell->nbBits = 0;
139077 +    cell->nextState = 0;
139078 +    assert(nbAddBits < 255);
139079 +    cell->nbAdditionalBits = (BYTE)nbAddBits;
139080 +    cell->baseValue = baseValue;
139081 +}
139084 +/* ZSTD_buildFSETable() :
139085 + * generate FSE decoding table for one symbol (ll, ml or off)
139086 + * cannot fail if input is valid =>
139087 + * all inputs are presumed validated at this stage */
139088 +FORCE_INLINE_TEMPLATE
139089 +void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt,
139090 +            const short* normalizedCounter, unsigned maxSymbolValue,
139091 +            const U32* baseValue, const U32* nbAdditionalBits,
139092 +            unsigned tableLog, void* wksp, size_t wkspSize)
139093 +{
139094 +    ZSTD_seqSymbol* const tableDecode = dt+1;
139095 +    U32 const maxSV1 = maxSymbolValue + 1;
139096 +    U32 const tableSize = 1 << tableLog;
139098 +    U16* symbolNext = (U16*)wksp;
139099 +    BYTE* spread = (BYTE*)(symbolNext + MaxSeq + 1);
139100 +    U32 highThreshold = tableSize - 1;
139103 +    /* Sanity Checks */
139104 +    assert(maxSymbolValue <= MaxSeq);
139105 +    assert(tableLog <= MaxFSELog);
139106 +    assert(wkspSize >= ZSTD_BUILD_FSE_TABLE_WKSP_SIZE);
139107 +    (void)wkspSize;
139108 +    /* Init, lay down lowprob symbols */
139109 +    {   ZSTD_seqSymbol_header DTableH;
139110 +        DTableH.tableLog = tableLog;
139111 +        DTableH.fastMode = 1;
139112 +        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
139113 +            U32 s;
139114 +            for (s=0; s<maxSV1; s++) {
139115 +                if (normalizedCounter[s]==-1) {
139116 +                    tableDecode[highThreshold--].baseValue = s;
139117 +                    symbolNext[s] = 1;
139118 +                } else {
139119 +                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
139120 +                    assert(normalizedCounter[s]>=0);
139121 +                    symbolNext[s] = (U16)normalizedCounter[s];
139122 +        }   }   }
139123 +        ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
139124 +    }
139126 +    /* Spread symbols */
139127 +    assert(tableSize <= 512);
139128 +    /* Specialized symbol spreading for the case when there are no
139129 +     * low-probability (-1 count) symbols. When compressing small blocks,
139130 +     * the encoder avoids low-probability symbols so that decoding hits
139131 +     * this fast case, since header decoding speed matters more.
139132 +     */
139133 +    if (highThreshold == tableSize - 1) {
139134 +        size_t const tableMask = tableSize-1;
139135 +        size_t const step = FSE_TABLESTEP(tableSize);
139136 +        /* First lay down the symbols in order.
139137 +         * We use a uint64_t to lay down 8 bytes at a time. This reduces branch
139138 +         * misses since small blocks generally have small table logs, so nearly
139139 +         * all symbols have counts <= 8. We ensure we have 8 bytes at the end of
139140 +         * our buffer to handle the over-write.
139141 +         */
139142 +        {
139143 +            U64 const add = 0x0101010101010101ull;
139144 +            size_t pos = 0;
139145 +            U64 sv = 0;
139146 +            U32 s;
139147 +            for (s=0; s<maxSV1; ++s, sv += add) {
139148 +                int i;
139149 +                int const n = normalizedCounter[s];
139150 +                MEM_write64(spread + pos, sv);
139151 +                for (i = 8; i < n; i += 8) {
139152 +                    MEM_write64(spread + pos + i, sv);
139153 +                }
139154 +                pos += n;
139155 +            }
139156 +        }
139157 +        /* Now we spread those positions across the table.
139158 +         * The benefit of doing it in two stages is that we avoid the
139159 +         * variable-size inner loop, which caused lots of branch misses.
139160 +         * Now we can run through all the positions without any branch misses.
139161 +         * We unroll the loop twice, since that is what empirically worked best.
139162 +         */
139163 +        {
139164 +            size_t position = 0;
139165 +            size_t s;
139166 +            size_t const unroll = 2;
139167 +            assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
139168 +            for (s = 0; s < (size_t)tableSize; s += unroll) {
139169 +                size_t u;
139170 +                for (u = 0; u < unroll; ++u) {
139171 +                    size_t const uPosition = (position + (u * step)) & tableMask;
139172 +                    tableDecode[uPosition].baseValue = spread[s + u];
139173 +                }
139174 +                position = (position + (unroll * step)) & tableMask;
139175 +            }
139176 +            assert(position == 0);
139177 +        }
139178 +    } else {
139179 +        U32 const tableMask = tableSize-1;
139180 +        U32 const step = FSE_TABLESTEP(tableSize);
139181 +        U32 s, position = 0;
139182 +        for (s=0; s<maxSV1; s++) {
139183 +            int i;
139184 +            int const n = normalizedCounter[s];
139185 +            for (i=0; i<n; i++) {
139186 +                tableDecode[position].baseValue = s;
139187 +                position = (position + step) & tableMask;
139188 +                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
139189 +        }   }
139190 +        assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
139191 +    }
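+    /* Aside (worked example, assuming the usual FSE_TABLESTEP definition
+     * (tableSize>>1) + (tableSize>>3) + 3): for tableSize = 64 the step is
+     * 32 + 8 + 3 = 43. An odd step is coprime with the power-of-two table
+     * size, so "position = (position + step) & tableMask" visits every cell
+     * exactly once before returning to 0, which is what the asserts above
+     * verify. */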
139193 +    /* Build Decoding table */
139194 +    {
139195 +        U32 u;
139196 +        for (u=0; u<tableSize; u++) {
139197 +            U32 const symbol = tableDecode[u].baseValue;
139198 +            U32 const nextState = symbolNext[symbol]++;
139199 +            tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
139200 +            tableDecode[u].nextState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
139201 +            assert(nbAdditionalBits[symbol] < 255);
139202 +            tableDecode[u].nbAdditionalBits = (BYTE)nbAdditionalBits[symbol];
139203 +            tableDecode[u].baseValue = baseValue[symbol];
139204 +        }
139205 +    }
139206 +}
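+/* Aside (worked example for the loop above): in a 64-cell table (tableLog=6),
+ * a symbol with normalized count 4 owns 4 cells whose nextState values count
+ * up 4,5,6,7. BIT_highbit32() of each is 2, so every cell gets
+ * nbBits = 6 - 2 = 4, and nextState becomes (4..7 << 4) - 64, i.e. 0, 16, 32
+ * or 48: the four 16-state windows the decoder can land in after reading
+ * 4 fresh bits. */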
139208 +/* Avoids the FORCE_INLINE of the _body() function. */
139209 +static void ZSTD_buildFSETable_body_default(ZSTD_seqSymbol* dt,
139210 +            const short* normalizedCounter, unsigned maxSymbolValue,
139211 +            const U32* baseValue, const U32* nbAdditionalBits,
139212 +            unsigned tableLog, void* wksp, size_t wkspSize)
139213 +{
139214 +    ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue,
139215 +            baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
139216 +}
139218 +#if DYNAMIC_BMI2
139219 +TARGET_ATTRIBUTE("bmi2") static void ZSTD_buildFSETable_body_bmi2(ZSTD_seqSymbol* dt,
139220 +            const short* normalizedCounter, unsigned maxSymbolValue,
139221 +            const U32* baseValue, const U32* nbAdditionalBits,
139222 +            unsigned tableLog, void* wksp, size_t wkspSize)
139223 +{
139224 +    ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue,
139225 +            baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
139226 +}
139227 +#endif
139229 +void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
139230 +            const short* normalizedCounter, unsigned maxSymbolValue,
139231 +            const U32* baseValue, const U32* nbAdditionalBits,
139232 +            unsigned tableLog, void* wksp, size_t wkspSize, int bmi2)
139233 +{
139234 +#if DYNAMIC_BMI2
139235 +    if (bmi2) {
139236 +        ZSTD_buildFSETable_body_bmi2(dt, normalizedCounter, maxSymbolValue,
139237 +                baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
139238 +        return;
139239 +    }
139240 +#endif
139241 +    (void)bmi2;
139242 +    ZSTD_buildFSETable_body_default(dt, normalizedCounter, maxSymbolValue,
139243 +            baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
139244 +}
139247 +/*! ZSTD_buildSeqTable() :
139248 + * @return : nb bytes read from src,
139249 + *           or an error code if it fails */
139250 +static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr,
139251 +                                 symbolEncodingType_e type, unsigned max, U32 maxLog,
139252 +                                 const void* src, size_t srcSize,
139253 +                                 const U32* baseValue, const U32* nbAdditionalBits,
139254 +                                 const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable,
139255 +                                 int ddictIsCold, int nbSeq, U32* wksp, size_t wkspSize,
139256 +                                 int bmi2)
139257 +{
139258 +    switch(type)
139259 +    {
139260 +    case set_rle :
139261 +        RETURN_ERROR_IF(!srcSize, srcSize_wrong, "");
139262 +        RETURN_ERROR_IF((*(const BYTE*)src) > max, corruption_detected, "");
139263 +        {   U32 const symbol = *(const BYTE*)src;
139264 +            U32 const baseline = baseValue[symbol];
139265 +            U32 const nbBits = nbAdditionalBits[symbol];
139266 +            ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits);
139267 +        }
139268 +        *DTablePtr = DTableSpace;
139269 +        return 1;
139270 +    case set_basic :
139271 +        *DTablePtr = defaultTable;
139272 +        return 0;
139273 +    case set_repeat:
139274 +        RETURN_ERROR_IF(!flagRepeatTable, corruption_detected, "");
139275 +        /* prefetch FSE table if used */
139276 +        if (ddictIsCold && (nbSeq > 24 /* heuristic */)) {
139277 +            const void* const pStart = *DTablePtr;
139278 +            size_t const pSize = sizeof(ZSTD_seqSymbol) * (SEQSYMBOL_TABLE_SIZE(maxLog));
139279 +            PREFETCH_AREA(pStart, pSize);
139280 +        }
139281 +        return 0;
139282 +    case set_compressed :
139283 +        {   unsigned tableLog;
139284 +            S16 norm[MaxSeq+1];
139285 +            size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
139286 +            RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected, "");
139287 +            RETURN_ERROR_IF(tableLog > maxLog, corruption_detected, "");
139288 +            ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog, wksp, wkspSize, bmi2);
139289 +            *DTablePtr = DTableSpace;
139290 +            return headerSize;
139291 +        }
139292 +    default :
139293 +        assert(0);
139294 +        RETURN_ERROR(GENERIC, "impossible");
139295 +    }
139296 +}
139298 +size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
139299 +                             const void* src, size_t srcSize)
139300 +{
139301 +    const BYTE* const istart = (const BYTE*)src;
139302 +    const BYTE* const iend = istart + srcSize;
139303 +    const BYTE* ip = istart;
139304 +    int nbSeq;
139305 +    DEBUGLOG(5, "ZSTD_decodeSeqHeaders");
139307 +    /* check */
139308 +    RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong, "");
139310 +    /* SeqHead */
139311 +    nbSeq = *ip++;
139312 +    if (!nbSeq) {
139313 +        *nbSeqPtr=0;
139314 +        RETURN_ERROR_IF(srcSize != 1, srcSize_wrong, "");
139315 +        return 1;
139316 +    }
139317 +    if (nbSeq > 0x7F) {
139318 +        if (nbSeq == 0xFF) {
139319 +            RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, "");
139320 +            nbSeq = MEM_readLE16(ip) + LONGNBSEQ;
139321 +            ip+=2;
139322 +        } else {
139323 +            RETURN_ERROR_IF(ip >= iend, srcSize_wrong, "");
139324 +            nbSeq = ((nbSeq-0x80)<<8) + *ip++;
139325 +        }
139326 +    }
139327 +    *nbSeqPtr = nbSeq;
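+    /* Aside (illustrative, per the zstd frame format): the sequence count
+     * above takes 1, 2 or 3 bytes. byte0 < 0x80 : nbSeq = byte0 (e.g. 0x54
+     * -> 84); 0x80 <= byte0 < 0xFF : nbSeq = ((byte0-0x80)<<8) + byte1
+     * (e.g. 0x81 0x20 -> 0x120 = 288); byte0 == 0xFF : nbSeq =
+     * LE16(byte1,byte2) + LONGNBSEQ (0x7F00), for counts up to 0x17EFF. */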
139329 +    /* FSE table descriptors */
139330 +    RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong, ""); /* minimum possible size: 1 byte for symbol encoding types */
139331 +    {   symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
139332 +        symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
139333 +        symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
139334 +        ip++;
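+        /* Aside (illustrative): the byte consumed above packs three 2-bit
+         * symbolEncodingType_e fields: LL type in bits 7-6, OF type in bits
+         * 5-4, ML type in bits 3-2; bits 1-0 are reserved. With set_basic=0,
+         * set_rle=1, set_compressed=2 and set_repeat=3, a value of 0x58
+         * (0b01011000) would mean RLE literal-length codes, RLE offset codes
+         * and FSE-compressed match-length codes. */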
139336 +        /* Build DTables */
139337 +        {   size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,
139338 +                                                      LLtype, MaxLL, LLFSELog,
139339 +                                                      ip, iend-ip,
139340 +                                                      LL_base, LL_bits,
139341 +                                                      LL_defaultDTable, dctx->fseEntropy,
139342 +                                                      dctx->ddictIsCold, nbSeq,
139343 +                                                      dctx->workspace, sizeof(dctx->workspace),
139344 +                                                      dctx->bmi2);
139345 +            RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected, "ZSTD_buildSeqTable failed");
139346 +            ip += llhSize;
139347 +        }
139349 +        {   size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,
139350 +                                                      OFtype, MaxOff, OffFSELog,
139351 +                                                      ip, iend-ip,
139352 +                                                      OF_base, OF_bits,
139353 +                                                      OF_defaultDTable, dctx->fseEntropy,
139354 +                                                      dctx->ddictIsCold, nbSeq,
139355 +                                                      dctx->workspace, sizeof(dctx->workspace),
139356 +                                                      dctx->bmi2);
139357 +            RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected, "ZSTD_buildSeqTable failed");
139358 +            ip += ofhSize;
139359 +        }
139361 +        {   size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,
139362 +                                                      MLtype, MaxML, MLFSELog,
139363 +                                                      ip, iend-ip,
139364 +                                                      ML_base, ML_bits,
139365 +                                                      ML_defaultDTable, dctx->fseEntropy,
139366 +                                                      dctx->ddictIsCold, nbSeq,
139367 +                                                      dctx->workspace, sizeof(dctx->workspace),
139368 +                                                      dctx->bmi2);
139369 +            RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected, "ZSTD_buildSeqTable failed");
139370 +            ip += mlhSize;
139371 +        }
139372 +    }
139374 +    return ip-istart;
139375 +}
139378 +typedef struct {
139379 +    size_t litLength;
139380 +    size_t matchLength;
139381 +    size_t offset;
139382 +    const BYTE* match;
139383 +} seq_t;
139385 +typedef struct {
139386 +    size_t state;
139387 +    const ZSTD_seqSymbol* table;
139388 +} ZSTD_fseState;
139390 +typedef struct {
139391 +    BIT_DStream_t DStream;
139392 +    ZSTD_fseState stateLL;
139393 +    ZSTD_fseState stateOffb;
139394 +    ZSTD_fseState stateML;
139395 +    size_t prevOffset[ZSTD_REP_NUM];
139396 +    const BYTE* prefixStart;
139397 +    const BYTE* dictEnd;
139398 +    size_t pos;
139399 +} seqState_t;
139401 +/*! ZSTD_overlapCopy8() :
139402 + *  Copies 8 bytes from ip to op and updates op and ip where ip <= op.
139403 + *  If the offset is < 8 then the offset is spread to at least 8 bytes.
139405 + *  Precondition: *ip <= *op
139406 + *  Postcondition: *op - *ip >= 8
139407 + */
139408 +HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) {
139409 +    assert(*ip <= *op);
139410 +    if (offset < 8) {
139411 +        /* close range match, overlap */
139412 +        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
139413 +        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
139414 +        int const sub2 = dec64table[offset];
139415 +        (*op)[0] = (*ip)[0];
139416 +        (*op)[1] = (*ip)[1];
139417 +        (*op)[2] = (*ip)[2];
139418 +        (*op)[3] = (*ip)[3];
139419 +        *ip += dec32table[offset];
139420 +        ZSTD_copy4(*op+4, *ip);
139421 +        *ip -= sub2;
139422 +    } else {
139423 +        ZSTD_copy8(*op, *ip);
139424 +    }
139425 +    *ip += 8;
139426 +    *op += 8;
139427 +    assert(*op - *ip >= 8);
139428 +}
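+/* Aside (worked example for the close-range branch above): with offset = 2
+ * and the source holding "ab", the four byte copies emit "abab";
+ * *ip += dec32table[2] (= 2) then points ip at that output so ZSTD_copy4()
+ * appends another "abab"; *ip -= dec64table[2] (= 8) rewinds ip, and the
+ * final *ip += 8 / *op += 8 leave the pointers exactly 8 bytes apart, so
+ * the caller may continue with plain 8-byte copies. */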
139430 +/*! ZSTD_safecopy() :
139431 + *  Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer
139432 + *  and write up to 16 bytes past oend_w (op >= oend_w is allowed).
139433 + *  This function is only called in the uncommon case where the sequence is near the end of the block. It
139434 + *  should be fast for a single long sequence, but can be slow for several short sequences.
139436 + *  @param ovtype controls the overlap detection
139437 + *         - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
139438 + *         - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart.
139439 + *           The src buffer must be before the dst buffer.
139440 + */
139441 +static void ZSTD_safecopy(BYTE* op, BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) {
139442 +    ptrdiff_t const diff = op - ip;
139443 +    BYTE* const oend = op + length;
139445 +    assert((ovtype == ZSTD_no_overlap && (diff <= -8 || diff >= 8 || op >= oend_w)) ||
139446 +           (ovtype == ZSTD_overlap_src_before_dst && diff >= 0));
139448 +    if (length < 8) {
139449 +        /* Handle short lengths. */
139450 +        while (op < oend) *op++ = *ip++;
139451 +        return;
139452 +    }
139453 +    if (ovtype == ZSTD_overlap_src_before_dst) {
139454 +        /* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */
139455 +        assert(length >= 8);
139456 +        ZSTD_overlapCopy8(&op, &ip, diff);
139457 +        assert(op - ip >= 8);
139458 +        assert(op <= oend);
139459 +    }
139461 +    if (oend <= oend_w) {
139462 +        /* No risk of overwrite. */
139463 +        ZSTD_wildcopy(op, ip, length, ovtype);
139464 +        return;
139465 +    }
139466 +    if (op <= oend_w) {
139467 +        /* Wildcopy until we get close to the end. */
139468 +        assert(oend > oend_w);
139469 +        ZSTD_wildcopy(op, ip, oend_w - op, ovtype);
139470 +        ip += oend_w - op;
139471 +        op = oend_w;
139472 +    }
139473 +    /* Handle the leftovers. */
139474 +    while (op < oend) *op++ = *ip++;
139475 +}
139477 +/* ZSTD_execSequenceEnd():
139478 + * This version handles cases that are near the end of the output buffer. It requires
139479 + * more careful checks to make sure there is no overflow. By separating out these hard
139480 + * and unlikely cases, we can speed up the common cases.
139482 + * NOTE: This function needs to be fast for a single long sequence, but doesn't need
139483 + * to be optimized for many small sequences, since those fall into ZSTD_execSequence().
139484 + */
139485 +FORCE_NOINLINE
139486 +size_t ZSTD_execSequenceEnd(BYTE* op,
139487 +                            BYTE* const oend, seq_t sequence,
139488 +                            const BYTE** litPtr, const BYTE* const litLimit,
139489 +                            const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
139490 +{
139491 +    BYTE* const oLitEnd = op + sequence.litLength;
139492 +    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
139493 +    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
139494 +    const BYTE* match = oLitEnd - sequence.offset;
139495 +    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
139497 +    /* bounds checks : careful of address space overflow in 32-bit mode */
139498 +    RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer");
139499 +    RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer");
139500 +    assert(op < op + sequenceLength);
139501 +    assert(oLitEnd < op + sequenceLength);
139503 +    /* copy literals */
139504 +    ZSTD_safecopy(op, oend_w, *litPtr, sequence.litLength, ZSTD_no_overlap);
139505 +    op = oLitEnd;
139506 +    *litPtr = iLitEnd;
139508 +    /* copy Match */
139509 +    if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
139510 +        /* offset beyond prefix */
139511 +        RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, "");
139512 +        match = dictEnd - (prefixStart-match);
139513 +        if (match + sequence.matchLength <= dictEnd) {
139514 +            ZSTD_memmove(oLitEnd, match, sequence.matchLength);
139515 +            return sequenceLength;
139516 +        }
139517 +        /* span extDict & currentPrefixSegment */
139518 +        {   size_t const length1 = dictEnd - match;
139519 +            ZSTD_memmove(oLitEnd, match, length1);
139520 +            op = oLitEnd + length1;
139521 +            sequence.matchLength -= length1;
139522 +            match = prefixStart;
139523 +    }   }
139524 +    ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst);
139525 +    return sequenceLength;
139526 +}
139528 +HINT_INLINE
139529 +size_t ZSTD_execSequence(BYTE* op,
139530 +                         BYTE* const oend, seq_t sequence,
139531 +                         const BYTE** litPtr, const BYTE* const litLimit,
139532 +                         const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
139533 +{
139534 +    BYTE* const oLitEnd = op + sequence.litLength;
139535 +    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
139536 +    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
139537 +    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;   /* risk : address space underflow on oend=NULL */
139538 +    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
139539 +    const BYTE* match = oLitEnd - sequence.offset;
139541 +    assert(op != NULL /* Precondition */);
139542 +    assert(oend_w < oend /* No underflow */);
139543 +    /* Handle edge cases in a slow path:
139544 +     *   - Read beyond end of literals
139545 +     *   - Match end is within WILDCOPY_OVERLENGTH of oend
139546 +     *   - 32-bit mode and the match length overflows
139547 +     */
139548 +    if (UNLIKELY(
139549 +            iLitEnd > litLimit ||
139550 +            oMatchEnd > oend_w ||
139551 +            (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH)))
139552 +        return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
139554 +    /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */
139555 +    assert(op <= oLitEnd /* No overflow */);
139556 +    assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */);
139557 +    assert(oMatchEnd <= oend /* No underflow */);
139558 +    assert(iLitEnd <= litLimit /* Literal length is in bounds */);
139559 +    assert(oLitEnd <= oend_w /* Can wildcopy literals */);
139560 +    assert(oMatchEnd <= oend_w /* Can wildcopy matches */);
139562 +    /* Copy Literals:
139563 +     * Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9.
139564 +     * We likely don't need the full 32-byte wildcopy.
139565 +     */
139566 +    assert(WILDCOPY_OVERLENGTH >= 16);
139567 +    ZSTD_copy16(op, (*litPtr));
139568 +    if (UNLIKELY(sequence.litLength > 16)) {
139569 +        ZSTD_wildcopy(op+16, (*litPtr)+16, sequence.litLength-16, ZSTD_no_overlap);
139570 +    }
139571 +    op = oLitEnd;
139572 +    *litPtr = iLitEnd;   /* update for next sequence */
139574 +    /* Copy Match */
139575 +    if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
139576 +        /* offset beyond prefix -> go into extDict */
139577 +        RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, "");
139578 +        match = dictEnd + (match - prefixStart);
139579 +        if (match + sequence.matchLength <= dictEnd) {
139580 +            ZSTD_memmove(oLitEnd, match, sequence.matchLength);
139581 +            return sequenceLength;
139582 +        }
139583 +        /* span extDict & currentPrefixSegment */
139584 +        {   size_t const length1 = dictEnd - match;
139585 +            ZSTD_memmove(oLitEnd, match, length1);
139586 +            op = oLitEnd + length1;
139587 +            sequence.matchLength -= length1;
139588 +            match = prefixStart;
139589 +    }   }
139590 +    /* Match within prefix of 1 or more bytes */
139591 +    assert(op <= oMatchEnd);
139592 +    assert(oMatchEnd <= oend_w);
139593 +    assert(match >= prefixStart);
139594 +    assert(sequence.matchLength >= 1);
139596 +    /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy
139597 +     * without overlap checking.
139598 +     */
139599 +    if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) {
139600 +        /* We bet on a full wildcopy for matches, since we expect matches to be
139601 +         * longer than literals (in general). In silesia, ~10% of matches are longer
139602 +         * than 16 bytes.
139603 +         */
139604 +        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap);
139605 +        return sequenceLength;
139606 +    }
139607 +    assert(sequence.offset < WILDCOPY_VECLEN);
139609 +    /* Copy 8 bytes and spread the offset to be >= 8. */
139610 +    ZSTD_overlapCopy8(&op, &match, sequence.offset);
139612 +    /* If the match length is > 8 bytes, then continue with the wildcopy. */
139613 +    if (sequence.matchLength > 8) {
139614 +        assert(op < oMatchEnd);
139615 +        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst);
139616 +    }
139617 +    return sequenceLength;
139618 +}
139620 +static void
139621 +ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt)
139622 +{
139623 +    const void* ptr = dt;
139624 +    const ZSTD_seqSymbol_header* const DTableH = (const ZSTD_seqSymbol_header*)ptr;
139625 +    DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
139626 +    DEBUGLOG(6, "ZSTD_initFseState : val=%u using %u bits",
139627 +                (U32)DStatePtr->state, DTableH->tableLog);
139628 +    BIT_reloadDStream(bitD);
139629 +    DStatePtr->table = dt + 1;
139630 +}
139632 +FORCE_INLINE_TEMPLATE void
139633 +ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD)
139634 +{
139635 +    ZSTD_seqSymbol const DInfo = DStatePtr->table[DStatePtr->state];
139636 +    U32 const nbBits = DInfo.nbBits;
139637 +    size_t const lowBits = BIT_readBits(bitD, nbBits);
139638 +    DStatePtr->state = DInfo.nextState + lowBits;
139639 +}
139641 +FORCE_INLINE_TEMPLATE void
139642 +ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, ZSTD_seqSymbol const DInfo)
139643 +{
139644 +    U32 const nbBits = DInfo.nbBits;
139645 +    size_t const lowBits = BIT_readBits(bitD, nbBits);
139646 +    DStatePtr->state = DInfo.nextState + lowBits;
139647 +}
139649 +/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
139650 + * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
139651 + * bits before reloading. This value is the maximum number of bits we read
139652 + * after reloading when we are decoding long offsets.
139653 + */
139654 +#define LONG_OFFSETS_MAX_EXTRA_BITS_32                       \
139655 +    (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32       \
139656 +        ? ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32  \
139657 +        : 0)
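+/* Aside (worked numbers, assuming the standard constants): with
+ * ZSTD_WINDOWLOG_MAX_32 = 30 and STREAM_ACCUMULATOR_MIN_32 = 25 this macro
+ * evaluates to 5, which is exactly what the ZSTD_STATIC_ASSERT in
+ * ZSTD_decodeSequence() checks below: on 32-bit targets at most 5 extra
+ * offset bits ever remain to be read after a reload. */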
139659 +typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e;
139660 +typedef enum { ZSTD_p_noPrefetch=0, ZSTD_p_prefetch=1 } ZSTD_prefetch_e;
139662 +FORCE_INLINE_TEMPLATE seq_t
139663 +ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets, const ZSTD_prefetch_e prefetch)
139664 +{
139665 +    seq_t seq;
139666 +    ZSTD_seqSymbol const llDInfo = seqState->stateLL.table[seqState->stateLL.state];
139667 +    ZSTD_seqSymbol const mlDInfo = seqState->stateML.table[seqState->stateML.state];
139668 +    ZSTD_seqSymbol const ofDInfo = seqState->stateOffb.table[seqState->stateOffb.state];
139669 +    U32 const llBase = llDInfo.baseValue;
139670 +    U32 const mlBase = mlDInfo.baseValue;
139671 +    U32 const ofBase = ofDInfo.baseValue;
139672 +    BYTE const llBits = llDInfo.nbAdditionalBits;
139673 +    BYTE const mlBits = mlDInfo.nbAdditionalBits;
139674 +    BYTE const ofBits = ofDInfo.nbAdditionalBits;
139675 +    BYTE const totalBits = llBits+mlBits+ofBits;
139677 +    /* sequence */
139678 +    {   size_t offset;
139679 +        if (ofBits > 1) {
139680 +            ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
139681 +            ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
139682 +            assert(ofBits <= MaxOff);
139683 +            if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) {
139684 +                U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed);
139685 +                offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
139686 +                BIT_reloadDStream(&seqState->DStream);
139687 +                if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
139688 +                assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32);   /* to avoid another reload */
139689 +            } else {
139690 +                offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/);   /* <=  (ZSTD_WINDOWLOG_MAX-1) bits */
139691 +                if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
139692 +            }
139693 +            seqState->prevOffset[2] = seqState->prevOffset[1];
139694 +            seqState->prevOffset[1] = seqState->prevOffset[0];
139695 +            seqState->prevOffset[0] = offset;
139696 +        } else {
139697 +            U32 const ll0 = (llBase == 0);
139698 +            if (LIKELY((ofBits == 0))) {
139699 +                if (LIKELY(!ll0))
139700 +                    offset = seqState->prevOffset[0];
139701 +                else {
139702 +                    offset = seqState->prevOffset[1];
139703 +                    seqState->prevOffset[1] = seqState->prevOffset[0];
139704 +                    seqState->prevOffset[0] = offset;
139705 +                }
139706 +            } else {
139707 +                offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1);
139708 +                {   size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
139709 +                    temp += !temp;   /* 0 is not valid; input is corrupted; force offset to 1 */
139710 +                    if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
139711 +                    seqState->prevOffset[1] = seqState->prevOffset[0];
139712 +                    seqState->prevOffset[0] = offset = temp;
139713 +        }   }   }
139714 +        seq.offset = offset;
139715 +    }
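+    /* Aside (illustrative, zstd repeat-offset convention): when ofBits == 1
+     * above, the decoded value (ofBase + ll0 + 1 read bit) lands in {1,2,3}
+     * and selects a recent offset rather than coding one: 1 -> prevOffset[1],
+     * 2 -> prevOffset[2], 3 -> prevOffset[0] - 1. The ll0 bump exists because
+     * repeating prevOffset[0] verbatim after zero literals would be
+     * redundant, so the indices shift by one. "temp += !temp" forces a
+     * corrupted offset of 0 up to 1 rather than using an invalid value. */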
139717 +    seq.matchLength = mlBase;
139718 +    if (mlBits > 0)
139719 +        seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/);
139721 +    if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
139722 +        BIT_reloadDStream(&seqState->DStream);
139723 +    if (MEM_64bits() && UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
139724 +        BIT_reloadDStream(&seqState->DStream);
139725 +    /* Ensure there are enough bits to read the rest of data in 64-bit mode. */
139726 +    ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
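+    /* Aside (worked numbers for the assert above, assuming LLFSELog=9,
+     * MLFSELog=9, OffFSELog=8): after the 64-bit reload, at most 16
+     * litLength extra bits remain, plus three FSE state updates of at most
+     * 9+9+8 bits; 16+9+9+8 = 42 < STREAM_ACCUMULATOR_MIN_64 (57), so one
+     * reload always suffices. */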
139728 +    seq.litLength = llBase;
139729 +    if (llBits > 0)
139730 +        seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/);
139732 +    if (MEM_32bits())
139733 +        BIT_reloadDStream(&seqState->DStream);
139735 +    DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
139736 +                (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
139738 +    if (prefetch == ZSTD_p_prefetch) {
139739 +        size_t const pos = seqState->pos + seq.litLength;
139740 +        const BYTE* const matchBase = (seq.offset > pos) ? seqState->dictEnd : seqState->prefixStart;
139741 +        seq.match = matchBase + pos - seq.offset;  /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
139742 +                                                    * No consequence though : no memory access will occur, offset is only used for prefetching */
139743 +        seqState->pos = pos + seq.matchLength;
139744 +    }
139746 +    /* ANS state update
139747 +     * gcc-9.0.0 does 2.5% worse with ZSTD_updateFseStateWithDInfo().
139748 +     * clang-9.2.0 does 7% worse with ZSTD_updateFseState().
139749 +     * Naturally it seems like ZSTD_updateFseStateWithDInfo() should be the
139750 +     * better option, so it is the default for clang. But, if you
139751 +     * measure that it is worse, please put up a pull request.
139752 +     */
139753 +    {
139754 +#if !defined(__clang__)
139755 +        const int kUseUpdateFseState = 1;
139756 +#else
139757 +        const int kUseUpdateFseState = 0;
139758 +#endif
139759 +        if (kUseUpdateFseState) {
139760 +            ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream);    /* <=  9 bits */
139761 +            ZSTD_updateFseState(&seqState->stateML, &seqState->DStream);    /* <=  9 bits */
139762 +            if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);    /* <= 18 bits */
139763 +            ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream);  /* <=  8 bits */
139764 +        } else {
139765 +            ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llDInfo);    /* <=  9 bits */
139766 +            ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlDInfo);    /* <=  9 bits */
139767 +            if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);    /* <= 18 bits */
139768 +            ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofDInfo);  /* <=  8 bits */
139769 +        }
139770 +    }
139772 +    return seq;
139773 +}
139775 +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
139776 +MEM_STATIC int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefixStart, BYTE const* oLitEnd)
139777 +{
139778 +    size_t const windowSize = dctx->fParams.windowSize;
139779 +    /* No dictionary used. */
139780 +    if (dctx->dictContentEndForFuzzing == NULL) return 0;
139781 +    /* Dictionary is our prefix. */
139782 +    if (prefixStart == dctx->dictContentBeginForFuzzing) return 1;
139783 +    /* Dictionary is not our ext-dict. */
139784 +    if (dctx->dictEnd != dctx->dictContentEndForFuzzing) return 0;
139785 +    /* Dictionary is not within our window size. */
139786 +    if ((size_t)(oLitEnd - prefixStart) >= windowSize) return 0;
139787 +    /* Dictionary is active. */
139788 +    return 1;
139789 +}
139791 +MEM_STATIC void ZSTD_assertValidSequence(
139792 +        ZSTD_DCtx const* dctx,
139793 +        BYTE const* op, BYTE const* oend,
139794 +        seq_t const seq,
139795 +        BYTE const* prefixStart, BYTE const* virtualStart)
139796 +{
139797 +#if DEBUGLEVEL >= 1
139798 +    size_t const windowSize = dctx->fParams.windowSize;
139799 +    size_t const sequenceSize = seq.litLength + seq.matchLength;
139800 +    BYTE const* const oLitEnd = op + seq.litLength;
139801 +    DEBUGLOG(6, "Checking sequence: litL=%u matchL=%u offset=%u",
139802 +            (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
139803 +    assert(op <= oend);
139804 +    assert((size_t)(oend - op) >= sequenceSize);
139805 +    assert(sequenceSize <= ZSTD_BLOCKSIZE_MAX);
139806 +    if (ZSTD_dictionaryIsActive(dctx, prefixStart, oLitEnd)) {
139807 +        size_t const dictSize = (size_t)((char const*)dctx->dictContentEndForFuzzing - (char const*)dctx->dictContentBeginForFuzzing);
139808 +        /* Offset must be within the dictionary. */
139809 +        assert(seq.offset <= (size_t)(oLitEnd - virtualStart));
139810 +        assert(seq.offset <= windowSize + dictSize);
139811 +    } else {
139812 +        /* Offset must be within our window. */
139813 +        assert(seq.offset <= windowSize);
139814 +    }
139815 +#else
139816 +    (void)dctx, (void)op, (void)oend, (void)seq, (void)prefixStart, (void)virtualStart;
139817 +#endif
139818 +}
139819 +#endif
139821 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
139822 +FORCE_INLINE_TEMPLATE size_t
139823 +DONT_VECTORIZE
139824 +ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
139825 +                               void* dst, size_t maxDstSize,
139826 +                         const void* seqStart, size_t seqSize, int nbSeq,
139827 +                         const ZSTD_longOffset_e isLongOffset,
139828 +                         const int frame)
139829 +{
139830 +    const BYTE* ip = (const BYTE*)seqStart;
139831 +    const BYTE* const iend = ip + seqSize;
139832 +    BYTE* const ostart = (BYTE*)dst;
139833 +    BYTE* const oend = ostart + maxDstSize;
139834 +    BYTE* op = ostart;
139835 +    const BYTE* litPtr = dctx->litPtr;
139836 +    const BYTE* const litEnd = litPtr + dctx->litSize;
139837 +    const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
139838 +    const BYTE* const vBase = (const BYTE*) (dctx->virtualStart);
139839 +    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
139840 +    DEBUGLOG(5, "ZSTD_decompressSequences_body");
139841 +    (void)frame;
139843 +    /* Regen sequences */
139844 +    if (nbSeq) {
139845 +        seqState_t seqState;
139846 +        size_t error = 0;
139847 +        dctx->fseEntropy = 1;
139848 +        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
139849 +        RETURN_ERROR_IF(
139850 +            ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
139851 +            corruption_detected, "");
139852 +        ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
139853 +        ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
139854 +        ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
139855 +        assert(dst != NULL);
139857 +        ZSTD_STATIC_ASSERT(
139858 +                BIT_DStream_unfinished < BIT_DStream_completed &&
139859 +                BIT_DStream_endOfBuffer < BIT_DStream_completed &&
139860 +                BIT_DStream_completed < BIT_DStream_overflow);
139862 +#if defined(__x86_64__)
139863 +        /* Align the decompression loop to 32 + 16 bytes.
139864 +         *
139865 +         * zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression
139866 +         * speed swings based on the alignment of the decompression loop. This
139867 +         * performance swing is caused by parts of the decompression loop falling
139868 +         * out of the DSB. The entire decompression loop should fit in the DSB,
139869 +         * when it can't we get much worse performance. You can measure if you've
139870 +         * hit the good case or the bad case with this perf command for some
139871 +         * compressed file test.zst:
139872 +         *
139873 +         *   perf stat -e cycles -e instructions -e idq.all_dsb_cycles_any_uops \
139874 +         *             -e idq.all_mite_cycles_any_uops -- ./zstd -tq test.zst
139875 +         *
139876 +         * If you see most cycles served out of the MITE you've hit the bad case.
139877 +         * If you see most cycles served out of the DSB you've hit the good case.
139878 +         * If it is pretty even then you may be in an okay case.
139879 +         *
139880 +         * I've been able to reproduce this issue on the following CPUs:
139881 +         *   - Kabylake: Macbook Pro (15-inch, 2019) 2.4 GHz Intel Core i9
139882 +         *               Use Instruments->Counters to get DSB/MITE cycles.
139883 +         *               I never got performance swings, but I was able to
139884 +         *               go from the good case of mostly DSB to half of the
139885 +         *               cycles served from MITE.
139886 +         *   - Coffeelake: Intel i9-9900k
139887 +         *
139888 +         * I haven't been able to reproduce the instability or DSB misses on any
139889 +         * of the following CPUs:
139890 +         *   - Haswell
139891 +         *   - Broadwell: Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GHz
139892 +         *   - Skylake
139893 +         *
139894 +         * If you are seeing performance instability, this script can help test.
139895 +         * It tests on 4 commits in zstd where I saw performance change.
139896 +         *
139897 +         *   https://gist.github.com/terrelln/9889fc06a423fd5ca6e99351564473f4
139898 +         */
139899 +        __asm__(".p2align 5");
139900 +        __asm__("nop");
139901 +        __asm__(".p2align 4");
139902 +#endif
139903 +        for ( ; ; ) {
139904 +            seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_noPrefetch);
139905 +            size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd);
139906 +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
139907 +            assert(!ZSTD_isError(oneSeqSize));
139908 +            if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
139909 +#endif
139910 +            DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
139911 +            BIT_reloadDStream(&(seqState.DStream));
139912 +            op += oneSeqSize;
139913 +            /* gcc and clang both don't like early returns in this loop.
139914 +             * Instead break and check for an error at the end of the loop.
139915 +             */
139916 +            if (UNLIKELY(ZSTD_isError(oneSeqSize))) {
139917 +                error = oneSeqSize;
139918 +                break;
139919 +            }
139920 +            if (UNLIKELY(!--nbSeq)) break;
139921 +        }
139923 +        /* check if reached exact end */
139924 +        DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
139925 +        if (ZSTD_isError(error)) return error;
139926 +        RETURN_ERROR_IF(nbSeq, corruption_detected, "");
139927 +        RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, "");
139928 +        /* save reps for next block */
139929 +        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
139930 +    }
139932 +    /* last literal segment */
139933 +    {   size_t const lastLLSize = litEnd - litPtr;
139934 +        RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
139935 +        if (op != NULL) {
139936 +            ZSTD_memcpy(op, litPtr, lastLLSize);
139937 +            op += lastLLSize;
139938 +        }
139939 +    }
139941 +    return op-ostart;
139942 +}
139944 +static size_t
139945 +ZSTD_decompressSequences_default(ZSTD_DCtx* dctx,
139946 +                                 void* dst, size_t maxDstSize,
139947 +                           const void* seqStart, size_t seqSize, int nbSeq,
139948 +                           const ZSTD_longOffset_e isLongOffset,
139949 +                           const int frame)
139950 +{
139951 +    return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
139952 +}
139953 +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
139955 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
139956 +FORCE_INLINE_TEMPLATE size_t
139957 +ZSTD_decompressSequencesLong_body(
139958 +                               ZSTD_DCtx* dctx,
139959 +                               void* dst, size_t maxDstSize,
139960 +                         const void* seqStart, size_t seqSize, int nbSeq,
139961 +                         const ZSTD_longOffset_e isLongOffset,
139962 +                         const int frame)
139963 +{
139964 +    const BYTE* ip = (const BYTE*)seqStart;
139965 +    const BYTE* const iend = ip + seqSize;
139966 +    BYTE* const ostart = (BYTE*)dst;
139967 +    BYTE* const oend = ostart + maxDstSize;
139968 +    BYTE* op = ostart;
139969 +    const BYTE* litPtr = dctx->litPtr;
139970 +    const BYTE* const litEnd = litPtr + dctx->litSize;
139971 +    const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
139972 +    const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart);
139973 +    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
139974 +    (void)frame;
139976 +    /* Regen sequences */
139977 +    if (nbSeq) {
139978 +#define STORED_SEQS 4
139979 +#define STORED_SEQS_MASK (STORED_SEQS-1)
139980 +#define ADVANCED_SEQS 4
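+        /* Aside (illustrative): sequences[] acts as a 4-deep software
+         * pipeline: sequence N is decoded (and its match prefetched) while
+         * sequence N-ADVANCED_SEQS is executed, giving the prefetched match
+         * bytes several sequences' worth of time to arrive from memory. The
+         * ring buffer is indexed with (seqNb & STORED_SEQS_MASK). */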
139981 +        seq_t sequences[STORED_SEQS];
139982 +        int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
139983 +        seqState_t seqState;
139984 +        int seqNb;
139985 +        dctx->fseEntropy = 1;
139986 +        { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
139987 +        seqState.prefixStart = prefixStart;
139988 +        seqState.pos = (size_t)(op-prefixStart);
139989 +        seqState.dictEnd = dictEnd;
139990 +        assert(dst != NULL);
139991 +        assert(iend >= ip);
139992 +        RETURN_ERROR_IF(
139993 +            ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
139994 +            corruption_detected, "");
139995 +        ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
139996 +        ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
139997 +        ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
139999 +        /* prepare in advance */
140000 +        for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) {
140001 +            sequences[seqNb] = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_prefetch);
140002 +            PREFETCH_L1(sequences[seqNb].match); PREFETCH_L1(sequences[seqNb].match + sequences[seqNb].matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
140003 +        }
140004 +        RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected, "");
140006 +        /* decode and decompress */
140007 +        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {
140008 +            seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_prefetch);
140009 +            size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
140010 +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
140011 +            assert(!ZSTD_isError(oneSeqSize));
140012 +            if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
140013 +#endif
140014 +            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
140015 +            PREFETCH_L1(sequence.match); PREFETCH_L1(sequence.match + sequence.matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
140016 +            sequences[seqNb & STORED_SEQS_MASK] = sequence;
140017 +            op += oneSeqSize;
140018 +        }
140019 +        RETURN_ERROR_IF(seqNb<nbSeq, corruption_detected, "");
140021 +        /* finish queue */
140022 +        seqNb -= seqAdvance;
140023 +        for ( ; seqNb<nbSeq ; seqNb++) {
140024 +            size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[seqNb&STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
140025 +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
140026 +            assert(!ZSTD_isError(oneSeqSize));
140027 +            if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
140028 +#endif
140029 +            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
140030 +            op += oneSeqSize;
140031 +        }
140033 +        /* save reps for next block */
140034 +        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
140035 +    }
140037 +    /* last literal segment */
140038 +    {   size_t const lastLLSize = litEnd - litPtr;
140039 +        RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
140040 +        if (op != NULL) {
140041 +            ZSTD_memcpy(op, litPtr, lastLLSize);
140042 +            op += lastLLSize;
140043 +        }
140044 +    }
140046 +    return op-ostart;
140047 +}
140049 +static size_t
140050 +ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx,
140051 +                                 void* dst, size_t maxDstSize,
140052 +                           const void* seqStart, size_t seqSize, int nbSeq,
140053 +                           const ZSTD_longOffset_e isLongOffset,
140054 +                           const int frame)
140055 +{
140056 +    return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
140057 +}
140058 +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
140062 +#if DYNAMIC_BMI2
140064 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
140065 +static TARGET_ATTRIBUTE("bmi2") size_t
140066 +DONT_VECTORIZE
140067 +ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx,
140068 +                                 void* dst, size_t maxDstSize,
140069 +                           const void* seqStart, size_t seqSize, int nbSeq,
140070 +                           const ZSTD_longOffset_e isLongOffset,
140071 +                           const int frame)
140072 +{
140073 +    return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
140074 +}
140075 +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
140077 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
140078 +static TARGET_ATTRIBUTE("bmi2") size_t
140079 +ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx,
140080 +                                 void* dst, size_t maxDstSize,
140081 +                           const void* seqStart, size_t seqSize, int nbSeq,
140082 +                           const ZSTD_longOffset_e isLongOffset,
140083 +                           const int frame)
140084 +{
140085 +    return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
140086 +}
140087 +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
140089 +#endif /* DYNAMIC_BMI2 */
140091 +typedef size_t (*ZSTD_decompressSequences_t)(
140092 +                            ZSTD_DCtx* dctx,
140093 +                            void* dst, size_t maxDstSize,
140094 +                            const void* seqStart, size_t seqSize, int nbSeq,
140095 +                            const ZSTD_longOffset_e isLongOffset,
140096 +                            const int frame);
140098 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
140099 +static size_t
140100 +ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
140101 +                   const void* seqStart, size_t seqSize, int nbSeq,
140102 +                   const ZSTD_longOffset_e isLongOffset,
140103 +                   const int frame)
140104 +{
140105 +    DEBUGLOG(5, "ZSTD_decompressSequences");
140106 +#if DYNAMIC_BMI2
140107 +    if (dctx->bmi2) {
140108 +        return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
140109 +    }
140110 +#endif
140111 +  return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
140112 +}
140113 +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
140116 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
140117 +/* ZSTD_decompressSequencesLong() :
140118 + * decompression function triggered when a minimum share of offsets is considered "long",
140119 + * aka out of cache.
140120 + * note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes meaning "farther than memory cache distance".
140121 + * This function will try to mitigate main memory latency through the use of prefetching */
140122 +static size_t
140123 +ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
140124 +                             void* dst, size_t maxDstSize,
140125 +                             const void* seqStart, size_t seqSize, int nbSeq,
140126 +                             const ZSTD_longOffset_e isLongOffset,
140127 +                             const int frame)
140128 +{
140129 +    DEBUGLOG(5, "ZSTD_decompressSequencesLong");
140130 +#if DYNAMIC_BMI2
140131 +    if (dctx->bmi2) {
140132 +        return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
140133 +    }
140134 +#endif
140135 +  return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
140136 +}
140137 +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
140141 +#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
140142 +    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
140143 +/* ZSTD_getLongOffsetsShare() :
140144 + * condition : offTable must be valid
140145 + * @return : "share" of long offsets (arbitrarily defined as > (1<<23))
140146 + *           compared to maximum possible of (1<<OffFSELog) */
140147 +static unsigned
140148 +ZSTD_getLongOffsetsShare(const ZSTD_seqSymbol* offTable)
140149 +{
140150 +    const void* ptr = offTable;
140151 +    U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog;
140152 +    const ZSTD_seqSymbol* table = offTable + 1;
140153 +    U32 const max = 1 << tableLog;
140154 +    U32 u, total = 0;
140155 +    DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog);
140157 +    assert(max <= (1 << OffFSELog));  /* max not too large */
140158 +    for (u=0; u<max; u++) {
140159 +        if (table[u].nbAdditionalBits > 22) total += 1;
140160 +    }
140162 +    assert(tableLog <= OffFSELog);
140163 +    total <<= (OffFSELog - tableLog);  /* scale to OffFSELog */
140165 +    return total;
140166 +}
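+/* Aside (worked numbers): nbAdditionalBits > 22 means the decoded offset is
+ * at least 2^23 (8 MiB), the "long" threshold mentioned above. The share is
+ * scaled to a denominator of 1<<OffFSELog = 256, so the minShare heuristics
+ * used below (7 and 20) correspond to 7/256 = 2.73% and 20/256 = 7.81% of
+ * offset codes being long. */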
140167 +#endif
140169 +size_t
140170 +ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
140171 +                              void* dst, size_t dstCapacity,
140172 +                        const void* src, size_t srcSize, const int frame)
140173 +{   /* blockType == blockCompressed */
140174 +    const BYTE* ip = (const BYTE*)src;
140175 +    /* isLongOffset must be true if there are long offsets.
140176 +     * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN.
140177 +     * We don't expect that to be the case in 64-bit mode.
140178 +     * In block mode, window size is not known, so we have to be conservative.
140179 +     * (note: but it could be evaluated from current-lowLimit)
140180 +     */
140181 +    ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN))));
140182 +    DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
140184 +    RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong, "");
140186 +    /* Decode literals section */
140187 +    {   size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
140188 +        DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize);
140189 +        if (ZSTD_isError(litCSize)) return litCSize;
140190 +        ip += litCSize;
140191 +        srcSize -= litCSize;
140192 +    }
140194 +    /* Build Decoding Tables */
140195 +    {
140196 +        /* These macros control at build-time which decompressor implementation
140197 +         * we use. If neither is defined, we do some inspection and dispatch at
140198 +         * runtime.
140199 +         */
140200 +#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
140201 +    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
140202 +        int usePrefetchDecoder = dctx->ddictIsCold;
140203 +#endif
140204 +        int nbSeq;
140205 +        size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize);
140206 +        if (ZSTD_isError(seqHSize)) return seqHSize;
140207 +        ip += seqHSize;
140208 +        srcSize -= seqHSize;
140210 +        RETURN_ERROR_IF(dst == NULL && nbSeq > 0, dstSize_tooSmall, "NULL not handled");
140212 +#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
140213 +    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
140214 +        if ( !usePrefetchDecoder
140215 +          && (!frame || (dctx->fParams.windowSize > (1<<24)))
140216 +          && (nbSeq>ADVANCED_SEQS) ) {  /* could probably use a larger nbSeq limit */
140217 +            U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr);
140218 +            U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */
140219 +            usePrefetchDecoder = (shareLongOffsets >= minShare);
140220 +        }
140221 +#endif
140223 +        dctx->ddictIsCold = 0;
140225 +#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
140226 +    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
140227 +        if (usePrefetchDecoder)
140228 +#endif
140229 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
140230 +            return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
140231 +#endif
140233 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
140234 +        /* else */
140235 +        return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
140236 +#endif
140237 +    }
140238 +}
140241 +void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize)
140242 +{
140243 +    if (dst != dctx->previousDstEnd && dstSize > 0) {   /* not contiguous */
140244 +        dctx->dictEnd = dctx->previousDstEnd;
140245 +        dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
140246 +        dctx->prefixStart = dst;
140247 +        dctx->previousDstEnd = dst;
140248 +    }
140249 +}
140252 +size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
140253 +                            void* dst, size_t dstCapacity,
140254 +                      const void* src, size_t srcSize)
140255 +{
140256 +    size_t dSize;
140257 +    ZSTD_checkContinuity(dctx, dst, dstCapacity);
140258 +    dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0);
140259 +    dctx->previousDstEnd = (char*)dst + dSize;
140260 +    return dSize;
140261 +}
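
For illustration only (this sketch is not part of the patch): the dispatch above scales the number of long-offset table entries up to a fixed 1<<OffFSELog range and compares the result against minShare. A standalone C program showing that arithmetic; OffFSELog == 8 is an assumption matching zstd's common headers, and the input values are made up:

#include <stdio.h>

#define OffFSELog 8  /* assumption: matches lib/zstd/common/zstd_internal.h */

int main(void)
{
    unsigned tableLog = 5;      /* example offset-table log read from the FSE header */
    unsigned longEntries = 2;   /* entries with nbAdditionalBits > 22, as counted above */
    /* scale to the 1<<OffFSELog range, as ZSTD_getLongOffsetsShare() does */
    unsigned share = longEntries << (OffFSELog - tableLog);          /* 16 */
    unsigned minShare64 = 7, minShare32 = 20;  /* 7/256 = 2.73%, 20/256 = 7.81% */
    printf("share=%u/256 -> prefetch decoder: %d (64-bit), %d (32-bit)\n",
           share, share >= minShare64, share >= minShare32);
    return 0;
}

With these inputs a 64-bit build would pick the prefetching (long) decoder while a 32-bit build would not, which is the trade-off the two minShare values encode.
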
140262 diff --git a/lib/zstd/decompress/zstd_decompress_block.h b/lib/zstd/decompress/zstd_decompress_block.h
140263 new file mode 100644
140264 index 000000000000..e7f5f6689459
140265 --- /dev/null
140266 +++ b/lib/zstd/decompress/zstd_decompress_block.h
140267 @@ -0,0 +1,62 @@
140268 +/*
140269 + * Copyright (c) Yann Collet, Facebook, Inc.
140270 + * All rights reserved.
140272 + * This source code is licensed under both the BSD-style license (found in the
140273 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
140274 + * in the COPYING file in the root directory of this source tree).
140275 + * You may select, at your option, one of the above-listed licenses.
140276 + */
140279 +#ifndef ZSTD_DEC_BLOCK_H
140280 +#define ZSTD_DEC_BLOCK_H
140282 +/*-*******************************************************
140283 + *  Dependencies
140284 + *********************************************************/
140285 +#include "../common/zstd_deps.h"   /* size_t */
140286 +#include <linux/zstd.h>    /* DCtx, and some public functions */
140287 +#include "../common/zstd_internal.h"  /* blockProperties_t, and some public functions */
140288 +#include "zstd_decompress_internal.h"  /* ZSTD_seqSymbol */
140291 +/* ===   Prototypes   === */
140293 +/* note: prototypes already published within `zstd.h` :
140294 + * ZSTD_decompressBlock()
140295 + */
140297 +/* note: prototypes already published within `zstd_internal.h` :
140298 + * ZSTD_getcBlockSize()
140299 + * ZSTD_decodeSeqHeaders()
140300 + */
140303 +/* ZSTD_decompressBlock_internal() :
140304 + * decompress block, starting at `src`,
140305 + * into destination buffer `dst`.
140306 + * @return : decompressed block size,
140307 + *           or an error code (which can be tested using ZSTD_isError())
140308 + */
140309 +size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
140310 +                               void* dst, size_t dstCapacity,
140311 +                         const void* src, size_t srcSize, const int frame);
140313 +/* ZSTD_buildFSETable() :
140314 + * generate FSE decoding table for one symbol (ll, ml or off)
140315 + * this function must be called with valid parameters only
140316 + * (dt is large enough, normalizedCounter distribution total is a power of 2, max is within range, etc.)
140317 + * in which case it cannot fail.
140318 + * The workspace must be 4-byte aligned and at least ZSTD_BUILD_FSE_TABLE_WKSP_SIZE bytes, which is
140319 + * defined in zstd_decompress_internal.h.
140320 + * Internal use only.
140321 + */
140322 +void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
140323 +             const short* normalizedCounter, unsigned maxSymbolValue,
140324 +             const U32* baseValue, const U32* nbAdditionalBits,
140325 +                   unsigned tableLog, void* wksp, size_t wkspSize,
140326 +                   int bmi2);
140329 +#endif /* ZSTD_DEC_BLOCK_H */
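
For illustration only (not part of the patch): a minimal sketch of how an in-library caller might satisfy the workspace contract documented above. build_seq_table_sketch is a hypothetical name, the table arguments are placeholders, and the zstd internal headers are assumed to be in scope:

/* sketch: build one FSE decoding table with a correctly sized workspace */
static void build_seq_table_sketch(ZSTD_seqSymbol *dt,
                                   const short *normalizedCounter,
                                   unsigned maxSymbolValue, unsigned tableLog,
                                   const U32 *baseValue, const U32 *nbAdditionalBits,
                                   int bmi2)
{
    /* a U32 array is 4-byte aligned and covers ZSTD_BUILD_FSE_TABLE_WKSP_SIZE bytes */
    U32 wksp[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32];
    ZSTD_buildFSETable(dt, normalizedCounter, maxSymbolValue,
                       baseValue, nbAdditionalBits,
                       tableLog, wksp, sizeof(wksp), bmi2);
}

Sizing the buffer in U32 units makes both requirements (alignment and minimum size) hold at once.
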
140330 diff --git a/lib/zstd/decompress/zstd_decompress_internal.h b/lib/zstd/decompress/zstd_decompress_internal.h
140331 new file mode 100644
140332 index 000000000000..4b9052f68755
140333 --- /dev/null
140334 +++ b/lib/zstd/decompress/zstd_decompress_internal.h
140335 @@ -0,0 +1,202 @@
140336 +/*
140337 + * Copyright (c) Yann Collet, Facebook, Inc.
140338 + * All rights reserved.
140340 + * This source code is licensed under both the BSD-style license (found in the
140341 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
140342 + * in the COPYING file in the root directory of this source tree).
140343 + * You may select, at your option, one of the above-listed licenses.
140344 + */
140347 +/* zstd_decompress_internal:
140348 + * objects and definitions shared within lib/decompress modules */
140350 + #ifndef ZSTD_DECOMPRESS_INTERNAL_H
140351 + #define ZSTD_DECOMPRESS_INTERNAL_H
140354 +/*-*******************************************************
140355 + *  Dependencies
140356 + *********************************************************/
140357 +#include "../common/mem.h"             /* BYTE, U16, U32 */
140358 +#include "../common/zstd_internal.h"   /* ZSTD_seqSymbol */
140362 +/*-*******************************************************
140363 + *  Constants
140364 + *********************************************************/
140365 +static UNUSED_ATTR const U32 LL_base[MaxLL+1] = {
140366 +                 0,    1,    2,     3,     4,     5,     6,      7,
140367 +                 8,    9,   10,    11,    12,    13,    14,     15,
140368 +                16,   18,   20,    22,    24,    28,    32,     40,
140369 +                48,   64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
140370 +                0x2000, 0x4000, 0x8000, 0x10000 };
140372 +static UNUSED_ATTR const U32 OF_base[MaxOff+1] = {
140373 +                 0,        1,       1,       5,     0xD,     0x1D,     0x3D,     0x7D,
140374 +                 0xFD,   0x1FD,   0x3FD,   0x7FD,   0xFFD,   0x1FFD,   0x3FFD,   0x7FFD,
140375 +                 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
140376 +                 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD };
140378 +static UNUSED_ATTR const U32 OF_bits[MaxOff+1] = {
140379 +                     0,  1,  2,  3,  4,  5,  6,  7,
140380 +                     8,  9, 10, 11, 12, 13, 14, 15,
140381 +                    16, 17, 18, 19, 20, 21, 22, 23,
140382 +                    24, 25, 26, 27, 28, 29, 30, 31 };
140384 +static UNUSED_ATTR const U32 ML_base[MaxML+1] = {
140385 +                     3,  4,  5,    6,     7,     8,     9,    10,
140386 +                    11, 12, 13,   14,    15,    16,    17,    18,
140387 +                    19, 20, 21,   22,    23,    24,    25,    26,
140388 +                    27, 28, 29,   30,    31,    32,    33,    34,
140389 +                    35, 37, 39,   41,    43,    47,    51,    59,
140390 +                    67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
140391 +                    0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };
140394 +/*-*******************************************************
140395 + *  Decompression types
140396 + *********************************************************/
140397 + typedef struct {
140398 +     U32 fastMode;
140399 +     U32 tableLog;
140400 + } ZSTD_seqSymbol_header;
140402 + typedef struct {
140403 +     U16  nextState;
140404 +     BYTE nbAdditionalBits;
140405 +     BYTE nbBits;
140406 +     U32  baseValue;
140407 + } ZSTD_seqSymbol;
140409 + #define SEQSYMBOL_TABLE_SIZE(log)   (1 + (1 << (log)))
140411 +#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE (sizeof(S16) * (MaxSeq + 1) + (1u << MaxFSELog) + sizeof(U64))
140412 +#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32 ((ZSTD_BUILD_FSE_TABLE_WKSP_SIZE + sizeof(U32) - 1) / sizeof(U32))
140414 +typedef struct {
140415 +    ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)];    /* Note : Space reserved for FSE Tables */
140416 +    ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)];   /* is also used as temporary workspace while building hufTable during DDict creation */
140417 +    ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)];    /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */
140418 +    HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)];  /* can accommodate HUF_decompress4X */
140419 +    U32 rep[ZSTD_REP_NUM];
140420 +    U32 workspace[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32];
140421 +} ZSTD_entropyDTables_t;
140423 +typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
140424 +               ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock,
140425 +               ZSTDds_decompressLastBlock, ZSTDds_checkChecksum,
140426 +               ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage;
140428 +typedef enum { zdss_init=0, zdss_loadHeader,
140429 +               zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
140431 +typedef enum {
140432 +    ZSTD_use_indefinitely = -1,  /* Use the dictionary indefinitely */
140433 +    ZSTD_dont_use = 0,           /* Do not use the dictionary (if one exists free it) */
140434 +    ZSTD_use_once = 1            /* Use the dictionary once and set to ZSTD_dont_use */
140435 +} ZSTD_dictUses_e;
140437 +/* Hashset for storing references to multiple ZSTD_DDict within ZSTD_DCtx */
140438 +typedef struct {
140439 +    const ZSTD_DDict** ddictPtrTable;
140440 +    size_t ddictPtrTableSize;
140441 +    size_t ddictPtrCount;
140442 +} ZSTD_DDictHashSet;
140444 +struct ZSTD_DCtx_s
140445 +{
140446 +    const ZSTD_seqSymbol* LLTptr;
140447 +    const ZSTD_seqSymbol* MLTptr;
140448 +    const ZSTD_seqSymbol* OFTptr;
140449 +    const HUF_DTable* HUFptr;
140450 +    ZSTD_entropyDTables_t entropy;
140451 +    U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];   /* space needed when building huffman tables */
140452 +    const void* previousDstEnd;   /* detect continuity */
140453 +    const void* prefixStart;      /* start of current segment */
140454 +    const void* virtualStart;     /* virtual start of previous segment if it was just before current one */
140455 +    const void* dictEnd;          /* end of previous segment */
140456 +    size_t expected;
140457 +    ZSTD_frameHeader fParams;
140458 +    U64 processedCSize;
140459 +    U64 decodedSize;
140460 +    blockType_e bType;            /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */
140461 +    ZSTD_dStage stage;
140462 +    U32 litEntropy;
140463 +    U32 fseEntropy;
140464 +    struct xxh64_state xxhState;
140465 +    size_t headerSize;
140466 +    ZSTD_format_e format;
140467 +    ZSTD_forceIgnoreChecksum_e forceIgnoreChecksum;   /* User specified: if == 1, will ignore checksums in compressed frame. Default == 0 */
140468 +    U32 validateChecksum;         /* if == 1, will validate checksum. Is == 1 if (fParams.checksumFlag == 1) and (forceIgnoreChecksum == 0). */
140469 +    const BYTE* litPtr;
140470 +    ZSTD_customMem customMem;
140471 +    size_t litSize;
140472 +    size_t rleSize;
140473 +    size_t staticSize;
140474 +    int bmi2;                     /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
140476 +    /* dictionary */
140477 +    ZSTD_DDict* ddictLocal;
140478 +    const ZSTD_DDict* ddict;     /* set by ZSTD_initDStream_usingDDict(), or ZSTD_DCtx_refDDict() */
140479 +    U32 dictID;
140480 +    int ddictIsCold;             /* if == 1 : dictionary is "new" for working context, and presumed "cold" (not in cpu cache) */
140481 +    ZSTD_dictUses_e dictUses;
140482 +    ZSTD_DDictHashSet* ddictSet;                    /* Hash set for multiple ddicts */
140483 +    ZSTD_refMultipleDDicts_e refMultipleDDicts;     /* User specified: if == 1, will allow references to multiple DDicts. Default == 0 (disabled) */
140485 +    /* streaming */
140486 +    ZSTD_dStreamStage streamStage;
140487 +    char*  inBuff;
140488 +    size_t inBuffSize;
140489 +    size_t inPos;
140490 +    size_t maxWindowSize;
140491 +    char*  outBuff;
140492 +    size_t outBuffSize;
140493 +    size_t outStart;
140494 +    size_t outEnd;
140495 +    size_t lhSize;
140496 +    void* legacyContext;
140497 +    U32 previousLegacyVersion;
140498 +    U32 legacyVersion;
140499 +    U32 hostageByte;
140500 +    int noForwardProgress;
140501 +    ZSTD_bufferMode_e outBufferMode;
140502 +    ZSTD_outBuffer expectedOutBuffer;
140504 +    /* workspace */
140505 +    BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH];
140506 +    BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
140508 +    size_t oversizedDuration;
140510 +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
140511 +    void const* dictContentBeginForFuzzing;
140512 +    void const* dictContentEndForFuzzing;
140513 +#endif
140515 +    /* Tracing */
140516 +};  /* typedef'd to ZSTD_DCtx within "zstd.h" */
140519 +/*-*******************************************************
140520 + *  Shared internal functions
140521 + *********************************************************/
140523 +/*! ZSTD_loadDEntropy() :
140524 + *  dict : must point at beginning of a valid zstd dictionary.
140525 + * @return : size of dictionary header (size of magic number + dict ID + entropy tables) */
140526 +size_t ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
140527 +                   const void* const dict, size_t const dictSize);
140529 +/*! ZSTD_checkContinuity() :
140530 + *  check if next `dst` follows previous position, where decompression ended.
140531 + *  If yes, do nothing (continue on current segment).
140532 + *  If not, classify previous segment as "external dictionary", and start a new segment.
140533 + *  This function cannot fail. */
140534 +void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize);
140537 +#endif /* ZSTD_DECOMPRESS_INTERNAL_H */
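
For illustration only (not part of the patch): during sequence decoding, an offset code selects a base value from OF_base and an extra-bit count from OF_bits; the decoded offset is the base plus the extra bits read from the bitstream. A standalone sketch using the first eight entries of the tables above; the code and extra-bit values are made up:

#include <stdio.h>

/* abbreviated copies of OF_base/OF_bits above, codes 0..7 only */
static const unsigned of_base[] = { 0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D };
static const unsigned of_bits[] = { 0, 1, 2, 3, 4,   5,    6,    7 };

int main(void)
{
    unsigned code = 5;
    unsigned extra = 0x11;  /* pretend of_bits[code] == 5 bits were read: 0b10001 */
    unsigned offset = of_base[code] + extra;   /* 0x1D + 0x11 = 0x2E */
    printf("code=%u (%u extra bits) -> offset=0x%X\n", code, of_bits[code], offset);
    return 0;
}

The same base-plus-extra-bits pattern applies to LL_base and ML_base for literal lengths and match lengths.
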
140538 diff --git a/lib/zstd/decompress_sources.h b/lib/zstd/decompress_sources.h
140539 new file mode 100644
140540 index 000000000000..f35bef03eb22
140541 --- /dev/null
140542 +++ b/lib/zstd/decompress_sources.h
140543 @@ -0,0 +1,28 @@
140544 +/* SPDX-License-Identifier: GPL-2.0-only */
140545 +/*
140546 + * Copyright (c) Facebook, Inc.
140547 + * All rights reserved.
140549 + * This source code is licensed under both the BSD-style license (found in the
140550 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
140551 + * in the COPYING file in the root directory of this source tree).
140552 + * You may select, at your option, one of the above-listed licenses.
140553 + */
140555 +/*
140556 + * This file includes every .c file needed for decompression.
140557 + * It is used by lib/decompress_unzstd.c to include the decompression
140558 + * source into the translation-unit, so it can be used for kernel
140559 + * decompression.
140560 + */
140562 +#include "common/debug.c"
140563 +#include "common/entropy_common.c"
140564 +#include "common/error_private.c"
140565 +#include "common/fse_decompress.c"
140566 +#include "common/zstd_common.c"
140567 +#include "decompress/huf_decompress.c"
140568 +#include "decompress/zstd_ddict.c"
140569 +#include "decompress/zstd_decompress.c"
140570 +#include "decompress/zstd_decompress_block.c"
140571 +#include "zstd_decompress_module.c"
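
As the header comment says, this file exists so a single translation unit can carry the whole decoder. A minimal sketch of the intended consumer (hedged: the real lib/decompress_unzstd.c contains more setup than this):

/* sketch: a kernel-decompression translation unit pulling in all decompress sources */
#include "zstd/decompress_sources.h"
/* ...the boot-time unzstd entry points would follow here... */
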
140572 diff --git a/lib/zstd/entropy_common.c b/lib/zstd/entropy_common.c
140573 deleted file mode 100644
140574 index 2b0a643c32c4..000000000000
140575 --- a/lib/zstd/entropy_common.c
140576 +++ /dev/null
140577 @@ -1,243 +0,0 @@
140578 -/*
140579 - * Common functions of New Generation Entropy library
140580 - * Copyright (C) 2016, Yann Collet.
140582 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
140584 - * Redistribution and use in source and binary forms, with or without
140585 - * modification, are permitted provided that the following conditions are
140586 - * met:
140588 - *   * Redistributions of source code must retain the above copyright
140589 - * notice, this list of conditions and the following disclaimer.
140590 - *   * Redistributions in binary form must reproduce the above
140591 - * copyright notice, this list of conditions and the following disclaimer
140592 - * in the documentation and/or other materials provided with the
140593 - * distribution.
140595 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
140596 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
140597 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
140598 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
140599 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
140600 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
140601 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
140602 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
140603 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
140604 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
140605 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
140607 - * This program is free software; you can redistribute it and/or modify it under
140608 - * the terms of the GNU General Public License version 2 as published by the
140609 - * Free Software Foundation. This program is dual-licensed; you may select
140610 - * either version 2 of the GNU General Public License ("GPL") or BSD license
140611 - * ("BSD").
140613 - * You can contact the author at :
140614 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
140615 - */
140617 -/* *************************************
140618 -*  Dependencies
140619 -***************************************/
140620 -#include "error_private.h" /* ERR_*, ERROR */
140621 -#include "fse.h"
140622 -#include "huf.h"
140623 -#include "mem.h"
140625 -/*===   Version   ===*/
140626 -unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
140628 -/*===   Error Management   ===*/
140629 -unsigned FSE_isError(size_t code) { return ERR_isError(code); }
140631 -unsigned HUF_isError(size_t code) { return ERR_isError(code); }
140633 -/*-**************************************************************
140634 -*  FSE NCount encoding-decoding
140635 -****************************************************************/
140636 -size_t FSE_readNCount(short *normalizedCounter, unsigned *maxSVPtr, unsigned *tableLogPtr, const void *headerBuffer, size_t hbSize)
140637 -{
140638 -       const BYTE *const istart = (const BYTE *)headerBuffer;
140639 -       const BYTE *const iend = istart + hbSize;
140640 -       const BYTE *ip = istart;
140641 -       int nbBits;
140642 -       int remaining;
140643 -       int threshold;
140644 -       U32 bitStream;
140645 -       int bitCount;
140646 -       unsigned charnum = 0;
140647 -       int previous0 = 0;
140649 -       if (hbSize < 4)
140650 -               return ERROR(srcSize_wrong);
140651 -       bitStream = ZSTD_readLE32(ip);
140652 -       nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */
140653 -       if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX)
140654 -               return ERROR(tableLog_tooLarge);
140655 -       bitStream >>= 4;
140656 -       bitCount = 4;
140657 -       *tableLogPtr = nbBits;
140658 -       remaining = (1 << nbBits) + 1;
140659 -       threshold = 1 << nbBits;
140660 -       nbBits++;
140662 -       while ((remaining > 1) & (charnum <= *maxSVPtr)) {
140663 -               if (previous0) {
140664 -                       unsigned n0 = charnum;
140665 -                       while ((bitStream & 0xFFFF) == 0xFFFF) {
140666 -                               n0 += 24;
140667 -                               if (ip < iend - 5) {
140668 -                                       ip += 2;
140669 -                                       bitStream = ZSTD_readLE32(ip) >> bitCount;
140670 -                               } else {
140671 -                                       bitStream >>= 16;
140672 -                                       bitCount += 16;
140673 -                               }
140674 -                       }
140675 -                       while ((bitStream & 3) == 3) {
140676 -                               n0 += 3;
140677 -                               bitStream >>= 2;
140678 -                               bitCount += 2;
140679 -                       }
140680 -                       n0 += bitStream & 3;
140681 -                       bitCount += 2;
140682 -                       if (n0 > *maxSVPtr)
140683 -                               return ERROR(maxSymbolValue_tooSmall);
140684 -                       while (charnum < n0)
140685 -                               normalizedCounter[charnum++] = 0;
140686 -                       if ((ip <= iend - 7) || (ip + (bitCount >> 3) <= iend - 4)) {
140687 -                               ip += bitCount >> 3;
140688 -                               bitCount &= 7;
140689 -                               bitStream = ZSTD_readLE32(ip) >> bitCount;
140690 -                       } else {
140691 -                               bitStream >>= 2;
140692 -                       }
140693 -               }
140694 -               {
140695 -                       int const max = (2 * threshold - 1) - remaining;
140696 -                       int count;
140698 -                       if ((bitStream & (threshold - 1)) < (U32)max) {
140699 -                               count = bitStream & (threshold - 1);
140700 -                               bitCount += nbBits - 1;
140701 -                       } else {
140702 -                               count = bitStream & (2 * threshold - 1);
140703 -                               if (count >= threshold)
140704 -                                       count -= max;
140705 -                               bitCount += nbBits;
140706 -                       }
140708 -                       count--;                                 /* extra accuracy */
140709 -                       remaining -= count < 0 ? -count : count; /* -1 means +1 */
140710 -                       normalizedCounter[charnum++] = (short)count;
140711 -                       previous0 = !count;
140712 -                       while (remaining < threshold) {
140713 -                               nbBits--;
140714 -                               threshold >>= 1;
140715 -                       }
140717 -                       if ((ip <= iend - 7) || (ip + (bitCount >> 3) <= iend - 4)) {
140718 -                               ip += bitCount >> 3;
140719 -                               bitCount &= 7;
140720 -                       } else {
140721 -                               bitCount -= (int)(8 * (iend - 4 - ip));
140722 -                               ip = iend - 4;
140723 -                       }
140724 -                       bitStream = ZSTD_readLE32(ip) >> (bitCount & 31);
140725 -               }
140726 -       } /* while ((remaining>1) & (charnum<=*maxSVPtr)) */
140727 -       if (remaining != 1)
140728 -               return ERROR(corruption_detected);
140729 -       if (bitCount > 32)
140730 -               return ERROR(corruption_detected);
140731 -       *maxSVPtr = charnum - 1;
140733 -       ip += (bitCount + 7) >> 3;
140734 -       return ip - istart;
140735 -}
140737 -/*! HUF_readStats() :
140738 -       Read compact Huffman tree, saved by HUF_writeCTable().
140739 -       `huffWeight` is destination buffer.
140740 -       `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
140741 -       @return : size read from `src` , or an error Code .
140742 -       Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
140743 -*/
140744 -size_t HUF_readStats_wksp(BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
140745 -{
140746 -       U32 weightTotal;
140747 -       const BYTE *ip = (const BYTE *)src;
140748 -       size_t iSize;
140749 -       size_t oSize;
140751 -       if (!srcSize)
140752 -               return ERROR(srcSize_wrong);
140753 -       iSize = ip[0];
140754 -       /* memset(huffWeight, 0, hwSize);   */ /* is not necessary, even though some analyzer complain ... */
140756 -       if (iSize >= 128) { /* special header */
140757 -               oSize = iSize - 127;
140758 -               iSize = ((oSize + 1) / 2);
140759 -               if (iSize + 1 > srcSize)
140760 -                       return ERROR(srcSize_wrong);
140761 -               if (oSize >= hwSize)
140762 -                       return ERROR(corruption_detected);
140763 -               ip += 1;
140764 -               {
140765 -                       U32 n;
140766 -                       for (n = 0; n < oSize; n += 2) {
140767 -                               huffWeight[n] = ip[n / 2] >> 4;
140768 -                               huffWeight[n + 1] = ip[n / 2] & 15;
140769 -                       }
140770 -               }
140771 -       } else {                                                 /* header compressed with FSE (normal case) */
140772 -               if (iSize + 1 > srcSize)
140773 -                       return ERROR(srcSize_wrong);
140774 -               oSize = FSE_decompress_wksp(huffWeight, hwSize - 1, ip + 1, iSize, 6, workspace, workspaceSize); /* max (hwSize-1) values decoded, as last one is implied */
140775 -               if (FSE_isError(oSize))
140776 -                       return oSize;
140777 -       }
140779 -       /* collect weight stats */
140780 -       memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
140781 -       weightTotal = 0;
140782 -       {
140783 -               U32 n;
140784 -               for (n = 0; n < oSize; n++) {
140785 -                       if (huffWeight[n] >= HUF_TABLELOG_MAX)
140786 -                               return ERROR(corruption_detected);
140787 -                       rankStats[huffWeight[n]]++;
140788 -                       weightTotal += (1 << huffWeight[n]) >> 1;
140789 -               }
140790 -       }
140791 -       if (weightTotal == 0)
140792 -               return ERROR(corruption_detected);
140794 -       /* get last non-null symbol weight (implied, total must be 2^n) */
140795 -       {
140796 -               U32 const tableLog = BIT_highbit32(weightTotal) + 1;
140797 -               if (tableLog > HUF_TABLELOG_MAX)
140798 -                       return ERROR(corruption_detected);
140799 -               *tableLogPtr = tableLog;
140800 -               /* determine last weight */
140801 -               {
140802 -                       U32 const total = 1 << tableLog;
140803 -                       U32 const rest = total - weightTotal;
140804 -                       U32 const verif = 1 << BIT_highbit32(rest);
140805 -                       U32 const lastWeight = BIT_highbit32(rest) + 1;
140806 -                       if (verif != rest)
140807 -                               return ERROR(corruption_detected); /* last value must be a clean power of 2 */
140808 -                       huffWeight[oSize] = (BYTE)lastWeight;
140809 -                       rankStats[lastWeight]++;
140810 -               }
140811 -       }
140813 -       /* check tree construction validity */
140814 -       if ((rankStats[1] < 2) || (rankStats[1] & 1))
140815 -               return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
140817 -       /* results */
140818 -       *nbSymbolsPtr = (U32)(oSize + 1);
140819 -       return iSize + 1;
140820 -}
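
For illustration only (not part of the patch): the implied-last-weight logic above works because the running sum of (1 << weight) >> 1 must complete to a power of two. A standalone sketch with made-up numbers; highbit32() is a portable stand-in for BIT_highbit32():

#include <stdio.h>

static unsigned highbit32(unsigned v)  /* stand-in for BIT_highbit32() */
{
    unsigned r = 0;
    while (v >>= 1) r++;
    return r;
}

int main(void)
{
    unsigned weightTotal = 96;  /* sum of (1 << huffWeight[n]) >> 1 over read symbols */
    unsigned tableLog = highbit32(weightTotal) + 1;  /* 7 */
    unsigned total = 1u << tableLog;                 /* 128 */
    unsigned rest = total - weightTotal;             /* 32; must be a power of 2 */
    unsigned verif = 1u << highbit32(rest);          /* 32 == rest, so stream is sane */
    unsigned lastWeight = highbit32(rest) + 1;       /* 6 */
    printf("tableLog=%u rest=%u verif=%u lastWeight=%u\n",
           tableLog, rest, verif, lastWeight);
    return 0;
}

If rest were not a clean power of two, the function above rejects the stream as corruption_detected.
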
140821 diff --git a/lib/zstd/error_private.h b/lib/zstd/error_private.h
140822 deleted file mode 100644
140823 index 1a60b31f706c..000000000000
140824 --- a/lib/zstd/error_private.h
140825 +++ /dev/null
140826 @@ -1,53 +0,0 @@
140827 -/*
140828 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
140829 - * All rights reserved.
140831 - * This source code is licensed under the BSD-style license found in the
140832 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
140833 - * An additional grant of patent rights can be found in the PATENTS file in the
140834 - * same directory.
140836 - * This program is free software; you can redistribute it and/or modify it under
140837 - * the terms of the GNU General Public License version 2 as published by the
140838 - * Free Software Foundation. This program is dual-licensed; you may select
140839 - * either version 2 of the GNU General Public License ("GPL") or BSD license
140840 - * ("BSD").
140841 - */
140843 -/* Note : this module is expected to remain private, do not expose it */
140845 -#ifndef ERROR_H_MODULE
140846 -#define ERROR_H_MODULE
140848 -/* ****************************************
140849 -*  Dependencies
140850 -******************************************/
140851 -#include <linux/types.h> /* size_t */
140852 -#include <linux/zstd.h>  /* enum list */
140854 -/* ****************************************
140855 -*  Compiler-specific
140856 -******************************************/
140857 -#define ERR_STATIC static __attribute__((unused))
140859 -/*-****************************************
140860 -*  Customization (error_public.h)
140861 -******************************************/
140862 -typedef ZSTD_ErrorCode ERR_enum;
140863 -#define PREFIX(name) ZSTD_error_##name
140865 -/*-****************************************
140866 -*  Error codes handling
140867 -******************************************/
140868 -#define ERROR(name) ((size_t)-PREFIX(name))
140870 -ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
140872 -ERR_STATIC ERR_enum ERR_getErrorCode(size_t code)
140873 -{
140874 -       if (!ERR_isError(code))
140875 -               return (ERR_enum)0;
140876 -       return (ERR_enum)(0 - code);
140877 -}
140879 -#endif /* ERROR_H_MODULE */
140880 diff --git a/lib/zstd/fse.h b/lib/zstd/fse.h
140881 deleted file mode 100644
140882 index 7460ab04b191..000000000000
140883 --- a/lib/zstd/fse.h
140884 +++ /dev/null
140885 @@ -1,575 +0,0 @@
140886 -/*
140887 - * FSE : Finite State Entropy codec
140888 - * Public Prototypes declaration
140889 - * Copyright (C) 2013-2016, Yann Collet.
140891 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
140893 - * Redistribution and use in source and binary forms, with or without
140894 - * modification, are permitted provided that the following conditions are
140895 - * met:
140897 - *   * Redistributions of source code must retain the above copyright
140898 - * notice, this list of conditions and the following disclaimer.
140899 - *   * Redistributions in binary form must reproduce the above
140900 - * copyright notice, this list of conditions and the following disclaimer
140901 - * in the documentation and/or other materials provided with the
140902 - * distribution.
140904 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
140905 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
140906 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
140907 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
140908 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
140909 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
140910 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
140911 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
140912 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
140913 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
140914 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
140916 - * This program is free software; you can redistribute it and/or modify it under
140917 - * the terms of the GNU General Public License version 2 as published by the
140918 - * Free Software Foundation. This program is dual-licensed; you may select
140919 - * either version 2 of the GNU General Public License ("GPL") or BSD license
140920 - * ("BSD").
140922 - * You can contact the author at :
140923 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
140924 - */
140925 -#ifndef FSE_H
140926 -#define FSE_H
140928 -/*-*****************************************
140929 -*  Dependencies
140930 -******************************************/
140931 -#include <linux/types.h> /* size_t, ptrdiff_t */
140933 -/*-*****************************************
140934 -*  FSE_PUBLIC_API : control library symbols visibility
140935 -******************************************/
140936 -#define FSE_PUBLIC_API
140938 -/*------   Version   ------*/
140939 -#define FSE_VERSION_MAJOR 0
140940 -#define FSE_VERSION_MINOR 9
140941 -#define FSE_VERSION_RELEASE 0
140943 -#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
140944 -#define FSE_QUOTE(str) #str
140945 -#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
140946 -#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)
140948 -#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR * 100 * 100 + FSE_VERSION_MINOR * 100 + FSE_VERSION_RELEASE)
140949 -FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */
140951 -/*-*****************************************
140952 -*  Tool functions
140953 -******************************************/
140954 -FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */
140956 -/* Error Management */
140957 -FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */
140959 -/*-*****************************************
140960 -*  FSE detailed API
140961 -******************************************/
140962 -/*!
140963 -FSE_compress() does the following:
140964 -1. count symbol occurrence from source[] into table count[]
140965 -2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
140966 -3. save normalized counters to memory buffer using writeNCount()
140967 -4. build encoding table 'CTable' from normalized counters
140968 -5. encode the data stream using encoding table 'CTable'
140970 -FSE_decompress() does the following:
140971 -1. read normalized counters with readNCount()
140972 -2. build decoding table 'DTable' from normalized counters
140973 -3. decode the data stream using decoding table 'DTable'
140975 -The following API allows targeting specific sub-functions for advanced tasks.
140976 -For example, it's possible to compress several blocks using the same 'CTable',
140977 -or to save and provide normalized distribution using external method.
140978 -*/
140980 -/* *** COMPRESSION *** */
140981 -/*! FSE_optimalTableLog():
140982 -       dynamically downsize 'tableLog' when conditions are met.
140983 -       It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
140984 -       @return : recommended tableLog (necessarily <= 'maxTableLog') */
140985 -FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
140987 -/*! FSE_normalizeCount():
140988 -       normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
140989 -       'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
140990 -       @return : tableLog,
140991 -                         or an errorCode, which can be tested using FSE_isError() */
140992 -FSE_PUBLIC_API size_t FSE_normalizeCount(short *normalizedCounter, unsigned tableLog, const unsigned *count, size_t srcSize, unsigned maxSymbolValue);
140994 -/*! FSE_NCountWriteBound():
140995 -       Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
140996 -       Typically useful for allocation purpose. */
140997 -FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);
140999 -/*! FSE_writeNCount():
141000 -       Compactly save 'normalizedCounter' into 'buffer'.
141001 -       @return : size of the compressed table,
141002 -                         or an errorCode, which can be tested using FSE_isError(). */
141003 -FSE_PUBLIC_API size_t FSE_writeNCount(void *buffer, size_t bufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
141005 -/*! Constructor and Destructor of FSE_CTable.
141006 -       Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
141007 -typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */
141009 -/*! FSE_compress_usingCTable():
141010 -       Compress `src` using `ct` into `dst` which must be already allocated.
141011 -       @return : size of compressed data (<= `dstCapacity`),
141012 -                         or 0 if compressed data could not fit into `dst`,
141013 -                         or an errorCode, which can be tested using FSE_isError() */
141014 -FSE_PUBLIC_API size_t FSE_compress_usingCTable(void *dst, size_t dstCapacity, const void *src, size_t srcSize, const FSE_CTable *ct);
141016 -/*!
141017 -Tutorial :
141018 -----------
141019 -The first step is to count all symbols. FSE_count() does this job very fast.
141020 -Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.
141021 -'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0]
141022 -maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value)
141023 -FSE_count() will return the number of occurrence of the most frequent symbol.
141024 -This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.
141025 -If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
141027 -The next step is to normalize the frequencies.
141028 -FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'.
141029 -It also guarantees a minimum of 1 to any Symbol with frequency >= 1.
141030 -You can use 'tableLog'==0 to mean "use default tableLog value".
141031 -If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),
141032 -which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").
141034 -The result of FSE_normalizeCount() will be saved into a table,
141035 -called 'normalizedCounter', which is a table of signed short.
141036 -'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.
141037 -The return value is tableLog if everything proceeded as expected.
141038 -It is 0 if there is a single symbol within distribution.
141039 -If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).
141041 -'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().
141042 -'buffer' must be already allocated.
141043 -For guaranteed success, buffer size must be at least FSE_headerBound().
141044 -The result of the function is the number of bytes written into 'buffer'.
141045 -If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).
141047 -'normalizedCounter' can then be used to create the compression table 'CTable'.
141048 -The space required by 'CTable' must be already allocated, using FSE_createCTable().
141049 -You can then use FSE_buildCTable() to fill 'CTable'.
141050 -If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).
141052 -'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().
141053 -Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize'
141054 -The function returns the size of compressed data (without header), necessarily <= `dstCapacity`.
141055 -If it returns '0', compressed data could not fit into 'dst'.
141056 -If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
141057 -*/
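
For illustration only (not part of the patch): a condensed sketch of the tutorial above, using only prototypes declared in this header. fse_compress_sketch is a hypothetical name, the FSE_writeNCount() header step is skipped, and the buffer sizes are illustrative assumptions:

/* sketch: count -> normalize -> build CTable -> compress, per the tutorial */
static size_t fse_compress_sketch(void *dst, size_t dstCapacity,
                                  const void *src, size_t srcSize)
{
    unsigned count[256], wksp[1024];   /* FSE_count_wksp() wants >= 1024 unsigned */
    unsigned maxSymbolValue = 255;
    short norm[256];
    FSE_CTable ct[FSE_CTABLE_SIZE_U32(10, 255)];
    unsigned ctWksp[1 << 10];          /* >= (1<<tableLog) for FSE_buildCTable_wksp() */
    unsigned tableLog;
    size_t e;

    e = FSE_count_wksp(count, &maxSymbolValue, src, srcSize, wksp);
    if (FSE_isError(e)) return e;
    tableLog = FSE_optimalTableLog(10, srcSize, maxSymbolValue);
    e = FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue);
    if (FSE_isError(e)) return e;
    e = FSE_buildCTable_wksp(ct, norm, maxSymbolValue, tableLog, ctWksp, sizeof(ctWksp));
    if (FSE_isError(e)) return e;
    return FSE_compress_usingCTable(dst, dstCapacity, src, srcSize, ct);
}
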
141059 -/* *** DECOMPRESSION *** */
141061 -/*! FSE_readNCount():
141062 -       Read compactly saved 'normalizedCounter' from 'rBuffer'.
141063 -       @return : size read from 'rBuffer',
141064 -                         or an errorCode, which can be tested using FSE_isError().
141065 -                         maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
141066 -FSE_PUBLIC_API size_t FSE_readNCount(short *normalizedCounter, unsigned *maxSymbolValuePtr, unsigned *tableLogPtr, const void *rBuffer, size_t rBuffSize);
141068 -/*! Constructor and Destructor of FSE_DTable.
141069 -       Note that its size depends on 'tableLog' */
141070 -typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
141072 -/*! FSE_buildDTable():
141073 -       Builds 'dt', which must be already allocated, using FSE_createDTable().
141074 -       return : 0, or an errorCode, which can be tested using FSE_isError() */
141075 -FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable *dt, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize);
141077 -/*! FSE_decompress_usingDTable():
141078 -       Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
141079 -       into `dst` which must be already allocated.
141080 -       @return : size of regenerated data (necessarily <= `dstCapacity`),
141081 -                         or an errorCode, which can be tested using FSE_isError() */
141082 -FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt);
141084 -/*!
141085 -Tutorial :
141086 -----------
141087 -(Note : these functions only decompress FSE-compressed blocks.
141088 - If block is uncompressed, use memcpy() instead
141089 - If block is a single repeated byte, use memset() instead )
141091 -The first step is to obtain the normalized frequencies of symbols.
141092 -This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
141093 -'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
141094 -In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
141095 -or size the table to handle worst case situations (typically 256).
141096 -FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
141097 -The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
141098 -Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that.
141099 -If there is an error, the function will return an error code, which can be tested using FSE_isError().
141101 -The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
141102 -This is performed by the function FSE_buildDTable().
141103 -The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
141104 -If there is an error, the function will return an error code, which can be tested using FSE_isError().
141106 -`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().
141107 -`cSrcSize` must be strictly correct, otherwise decompression will fail.
141108 -FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
141109 -If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
141110 -*/
141112 -/* *** Dependency *** */
141113 -#include "bitstream.h"
141115 -/* *****************************************
141116 -*  Static allocation
141117 -*******************************************/
141118 -/* FSE buffer bounds */
141119 -#define FSE_NCOUNTBOUND 512
141120 -#define FSE_BLOCKBOUND(size) (size + (size >> 7))
141121 -#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
141123 -/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */
141124 -#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1 << (maxTableLog - 1)) + ((maxSymbolValue + 1) * 2))
141125 -#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1 << maxTableLog))
141127 -/* *****************************************
141128 -*  FSE advanced API
141129 -*******************************************/
141130 -/* FSE_count_wksp() :
141131 - * Same as FSE_count(), but using an externally provided scratch buffer.
141132 - * `workSpace` size must be table of >= `1024` unsigned
141133 - */
141134 -size_t FSE_count_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace);
141136 -/* FSE_countFast_wksp() :
141137 - * Same as FSE_countFast(), but using an externally provided scratch buffer.
141138 - * `workSpace` must be a table of minimum `1024` unsigned
141139 - */
141140 -size_t FSE_countFast_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize, unsigned *workSpace);
141142 -/*! FSE_count_simple
141143 - * Same as FSE_countFast(), but does not use any additional memory (not even on stack).
141144 - * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` (presuming it's also the size of `count`).
141145 - */
141146 -size_t FSE_count_simple(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize);
141148 -unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
141149 -/**< same as FSE_optimalTableLog(), which used `minus==2` */
141151 -size_t FSE_buildCTable_raw(FSE_CTable *ct, unsigned nbBits);
141152 -/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */
141154 -size_t FSE_buildCTable_rle(FSE_CTable *ct, unsigned char symbolValue);
141155 -/**< build a fake FSE_CTable, designed to compress always the same symbolValue */
141157 -/* FSE_buildCTable_wksp() :
141158 - * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
141159 - * `wkspSize` must be >= `(1<<tableLog)`.
141160 - */
141161 -size_t FSE_buildCTable_wksp(FSE_CTable *ct, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, size_t wkspSize);
141163 -size_t FSE_buildDTable_raw(FSE_DTable *dt, unsigned nbBits);
141164 -/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */
141166 -size_t FSE_buildDTable_rle(FSE_DTable *dt, unsigned char symbolValue);
141167 -/**< build a fake FSE_DTable, designed to always generate the same symbolValue */
141169 -size_t FSE_decompress_wksp(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, unsigned maxLog, void *workspace, size_t workspaceSize);
141170 -/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DTABLE_SIZE_U32(maxLog)` */
141172 -/* *****************************************
141173 -*  FSE symbol compression API
141174 -*******************************************/
141175 -/*!
141176 -   This API consists of small unitary functions, which highly benefit from being inlined.
141177 -   Hence their body are included in next section.
141178 -*/
141179 -typedef struct {
141180 -       ptrdiff_t value;
141181 -       const void *stateTable;
141182 -       const void *symbolTT;
141183 -       unsigned stateLog;
141184 -} FSE_CState_t;
141186 -static void FSE_initCState(FSE_CState_t *CStatePtr, const FSE_CTable *ct);
141188 -static void FSE_encodeSymbol(BIT_CStream_t *bitC, FSE_CState_t *CStatePtr, unsigned symbol);
141190 -static void FSE_flushCState(BIT_CStream_t *bitC, const FSE_CState_t *CStatePtr);
141192 -/**<
141193 -These functions are inner components of FSE_compress_usingCTable().
141194 -They allow the creation of custom streams, mixing multiple tables and bit sources.
141196 -A key property to keep in mind is that encoding and decoding are done **in reverse direction**.
141197 -So the first symbol you will encode is the last you will decode, like a LIFO stack.
141199 -You will need a few variables to track your CStream. They are :
141201 -FSE_CTable    ct;         // Provided by FSE_buildCTable()
141202 -BIT_CStream_t bitStream;  // bitStream tracking structure
141203 -FSE_CState_t  state;      // State tracking structure (can have several)
141206 -The first thing to do is to init bitStream and state.
141207 -       size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
141208 -       FSE_initCState(&state, ct);
141210 -Note that BIT_initCStream() can produce an error code, so its result should be tested, using FSE_isError();
141211 -You can then encode your input data, byte after byte.
141212 -FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.
141213 -Remember decoding will be done in reverse direction.
141214 -       FSE_encodeByte(&bitStream, &state, symbol);
141216 -At any time, you can also add any bit sequence.
141217 -Note : maximum allowed nbBits is 25, for compatibility with 32-bits decoders
141218 -       BIT_addBits(&bitStream, bitField, nbBits);
141220 -The above methods don't commit data to memory, they just store it into local register, for speed.
141221 -Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
141222 -Writing data to memory is a manual operation, performed by the flushBits function.
141223 -       BIT_flushBits(&bitStream);
141225 -Your last FSE encoding operation shall be to flush your last state value(s).
141226 -       FSE_flushState(&bitStream, &state);
141228 -Finally, you must close the bitStream.
141229 -The function returns the size of CStream in bytes.
141230 -If data couldn't fit into dstBuffer, it will return a 0 ( == not compressible)
141231 -If there is an error, it returns an errorCode (which can be tested using FSE_isError()).
141232 -       size_t size = BIT_closeCStream(&bitStream);
141233 -*/
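
For illustration only (not part of the patch): a minimal sketch of that encode flow with error handling abbreviated; fse_stream_encode_sketch is a hypothetical name. Note the backwards iteration, since decoding runs in reverse:

/* sketch: stream `len` symbols through a single FSE state */
static size_t fse_stream_encode_sketch(void *dst, size_t dstCapacity,
                                       const unsigned char *symbols, size_t len,
                                       const FSE_CTable *ct)
{
    BIT_CStream_t bitStream;
    FSE_CState_t state;
    size_t i;

    if (FSE_isError(BIT_initCStream(&bitStream, dst, dstCapacity)))
        return 0;   /* dst too small */
    FSE_initCState(&state, ct);
    for (i = len; i-- > 0;) {          /* first encoded == last decoded (LIFO) */
        FSE_encodeSymbol(&bitStream, &state, symbols[i]);
        BIT_flushBits(&bitStream);     /* commit the local register to memory */
    }
    FSE_flushCState(&bitStream, &state);
    return BIT_closeCStream(&bitStream);   /* 0 means "not compressible" */
}
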
141235 -/* *****************************************
141236 -*  FSE symbol decompression API
141237 -*******************************************/
141238 -typedef struct {
141239 -       size_t state;
141240 -       const void *table; /* precise table may vary, depending on U16 */
141241 -} FSE_DState_t;
141243 -static void FSE_initDState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD, const FSE_DTable *dt);
141245 -static unsigned char FSE_decodeSymbol(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD);
141247 -static unsigned FSE_endOfDState(const FSE_DState_t *DStatePtr);
141249 -/**<
141250 -Let's now decompose FSE_decompress_usingDTable() into its unitary components.
141251 -You will decode FSE-encoded symbols from the bitStream,
141252 -and also any other bitFields you put in, **in reverse order**.
141254 -You will need a few variables to track your bitStream. They are :
141256 -BIT_DStream_t DStream;    // Stream context
141257 -FSE_DState_t  DState;     // State context. Multiple ones are possible
141258 -FSE_DTable*   DTablePtr;  // Decoding table, provided by FSE_buildDTable()
141260 -The first thing to do is to init the bitStream.
141261 -       errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);
141263 -You should then retrieve your initial state(s)
141264 -(in reverse flushing order if you have several ones) :
141265 -       errorCode = FSE_initDState(&DState, &DStream, DTablePtr);
141267 -You can then decode your data, symbol after symbol.
141268 -For information the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.
141269 -Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).
141270 -       unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);
141272 -You can retrieve any bitfield you eventually stored into the bitStream (in reverse order)
141273 -Note : maximum allowed nbBits is 25, for 32-bits compatibility
141274 -       size_t bitField = BIT_readBits(&DStream, nbBits);
141276 -All above operations only read from local register (which size depends on size_t).
141277 -Refueling the register from memory is manually performed by the reload method.
141278 -       endSignal = FSE_reloadDStream(&DStream);
141280 -BIT_reloadDStream() result tells if there is still some more data to read from DStream.
141281 -BIT_DStream_unfinished : there is still some data left into the DStream.
141282 -BIT_DStream_endOfBuffer : Dstream reached end of buffer. Its container may no longer be completely filled.
141283 -BIT_DStream_completed : Dstream reached its exact end, corresponding in general to decompression completed.
141284 -BIT_DStream_tooFar : Dstream went too far. Decompression result is corrupted.
141286 -When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop,
141287 -to properly detect the exact end of stream.
141288 -After each decoded symbol, check if DStream is fully consumed using this simple test :
141289 -       BIT_reloadDStream(&DStream) >= BIT_DStream_completed
141291 -When it's done, verify decompression is fully completed, by checking both DStream and the relevant states.
141292 -Checking if DStream has reached its end is performed by :
141293 -       BIT_endOfDStream(&DStream);
141294 -Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible.
141295 -       FSE_endOfDState(&DState);
141296 -*/
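
For illustration only (not part of the patch): the matching decode loop as a minimal sketch, again with hypothetical naming and abbreviated error handling:

/* sketch: decode `len` symbols with a single FSE state */
static unsigned fse_stream_decode_sketch(unsigned char *dst, size_t len,
                                         const void *src, size_t srcSize,
                                         const FSE_DTable *dt)
{
    BIT_DStream_t DStream;
    FSE_DState_t DState;
    size_t i;

    if (FSE_isError(BIT_initDStream(&DStream, src, srcSize)))
        return 0;
    FSE_initDState(&DState, &DStream, dt);
    for (i = 0; i < len; i++) {
        dst[i] = FSE_decodeSymbol(&DState, &DStream);  /* reverse of encode order */
        BIT_reloadDStream(&DStream);   /* refill the local register from memory */
    }
    /* success only if both the bitstream and the state are fully consumed */
    return BIT_endOfDStream(&DStream) && FSE_endOfDState(&DState);
}
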
141298 -/* *****************************************
141299 -*  FSE unsafe API
141300 -*******************************************/
141301 -static unsigned char FSE_decodeSymbolFast(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD);
141302 -/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
141304 -/* *****************************************
141305 -*  Implementation of inlined functions
141306 -*******************************************/
141307 -typedef struct {
141308 -       int deltaFindState;
141309 -       U32 deltaNbBits;
141310 -} FSE_symbolCompressionTransform; /* total 8 bytes */
141312 -ZSTD_STATIC void FSE_initCState(FSE_CState_t *statePtr, const FSE_CTable *ct)
141314 -       const void *ptr = ct;
141315 -       const U16 *u16ptr = (const U16 *)ptr;
141316 -       const U32 tableLog = ZSTD_read16(ptr);
141317 -       statePtr->value = (ptrdiff_t)1 << tableLog;
141318 -       statePtr->stateTable = u16ptr + 2;
141319 -       statePtr->symbolTT = ((const U32 *)ct + 1 + (tableLog ? (1 << (tableLog - 1)) : 1));
141320 -       statePtr->stateLog = tableLog;
141323 -/*! FSE_initCState2() :
141324 -*   Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
141325 -*   uses the smallest state value possible, saving the cost of this symbol */
141326 -ZSTD_STATIC void FSE_initCState2(FSE_CState_t *statePtr, const FSE_CTable *ct, U32 symbol)
141328 -       FSE_initCState(statePtr, ct);
141329 -       {
141330 -               const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform *)(statePtr->symbolTT))[symbol];
141331 -               const U16 *stateTable = (const U16 *)(statePtr->stateTable);
141332 -               U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1 << 15)) >> 16);
141333 -               statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
141334 -               statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
141335 -       }
141338 -ZSTD_STATIC void FSE_encodeSymbol(BIT_CStream_t *bitC, FSE_CState_t *statePtr, U32 symbol)
141340 -       const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform *)(statePtr->symbolTT))[symbol];
141341 -       const U16 *const stateTable = (const U16 *)(statePtr->stateTable);
141342 -       U32 nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
141343 -       BIT_addBits(bitC, statePtr->value, nbBitsOut);
141344 -       statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
141347 -ZSTD_STATIC void FSE_flushCState(BIT_CStream_t *bitC, const FSE_CState_t *statePtr)
141349 -       BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
141350 -       BIT_flushBits(bitC);
141353 -/* ======    Decompression    ====== */
141355 -typedef struct {
141356 -       U16 tableLog;
141357 -       U16 fastMode;
141358 -} FSE_DTableHeader; /* sizeof U32 */
141360 -typedef struct {
141361 -       unsigned short newState;
141362 -       unsigned char symbol;
141363 -       unsigned char nbBits;
141364 -} FSE_decode_t; /* size == U32 */
141366 -ZSTD_STATIC void FSE_initDState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD, const FSE_DTable *dt)
141368 -       const void *ptr = dt;
141369 -       const FSE_DTableHeader *const DTableH = (const FSE_DTableHeader *)ptr;
141370 -       DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
141371 -       BIT_reloadDStream(bitD);
141372 -       DStatePtr->table = dt + 1;
141375 -ZSTD_STATIC BYTE FSE_peekSymbol(const FSE_DState_t *DStatePtr)
141377 -       FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
141378 -       return DInfo.symbol;
141381 -ZSTD_STATIC void FSE_updateState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD)
141383 -       FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
141384 -       U32 const nbBits = DInfo.nbBits;
141385 -       size_t const lowBits = BIT_readBits(bitD, nbBits);
141386 -       DStatePtr->state = DInfo.newState + lowBits;
141389 -ZSTD_STATIC BYTE FSE_decodeSymbol(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD)
141391 -       FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
141392 -       U32 const nbBits = DInfo.nbBits;
141393 -       BYTE const symbol = DInfo.symbol;
141394 -       size_t const lowBits = BIT_readBits(bitD, nbBits);
141396 -       DStatePtr->state = DInfo.newState + lowBits;
141397 -       return symbol;
141400 -/*! FSE_decodeSymbolFast() :
141401 -       unsafe, only works if no symbol has a probability > 50% */
141402 -ZSTD_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD)
141404 -       FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
141405 -       U32 const nbBits = DInfo.nbBits;
141406 -       BYTE const symbol = DInfo.symbol;
141407 -       size_t const lowBits = BIT_readBitsFast(bitD, nbBits);
141409 -       DStatePtr->state = DInfo.newState + lowBits;
141410 -       return symbol;
141413 -ZSTD_STATIC unsigned FSE_endOfDState(const FSE_DState_t *DStatePtr) { return DStatePtr->state == 0; }
141415 -/* **************************************************************
141416 -*  Tuning parameters
141417 -****************************************************************/
141418 -/*!MEMORY_USAGE :
141419 -*  Memory usage formula : N -> 2^N bytes (examples : 10 -> 1KB; 12 -> 4KB; 16 -> 64KB; 20 -> 1MB; etc.)
141420 -*  Increasing memory usage improves the compression ratio.
141421 -*  Reducing memory usage can improve speed, due to cache effects.
141422 -*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
141423 -#ifndef FSE_MAX_MEMORY_USAGE
141424 -#define FSE_MAX_MEMORY_USAGE 14
141425 -#endif
141426 -#ifndef FSE_DEFAULT_MEMORY_USAGE
141427 -#define FSE_DEFAULT_MEMORY_USAGE 13
141428 -#endif
141430 -/*!FSE_MAX_SYMBOL_VALUE :
141431 -*  Maximum allowed symbol value.
141432 -*  Required for proper stack allocation */
141433 -#ifndef FSE_MAX_SYMBOL_VALUE
141434 -#define FSE_MAX_SYMBOL_VALUE 255
141435 -#endif
141437 -/* **************************************************************
141438 -*  template functions type & suffix
141439 -****************************************************************/
141440 -#define FSE_FUNCTION_TYPE BYTE
141441 -#define FSE_FUNCTION_EXTENSION
141442 -#define FSE_DECODE_TYPE FSE_decode_t
141444 -/* ***************************************************************
141445 -*  Constants
141446 -*****************************************************************/
141447 -#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE - 2)
141448 -#define FSE_MAX_TABLESIZE (1U << FSE_MAX_TABLELOG)
141449 -#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE - 1)
141450 -#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE - 2)
141451 -#define FSE_MIN_TABLELOG 5
141453 -#define FSE_TABLELOG_ABSOLUTE_MAX 15
141454 -#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
141455 -#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
141456 -#endif
141458 -#define FSE_TABLESTEP(tableSize) ((tableSize >> 1) + (tableSize >> 3) + 3)
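With the defaults above, the derived constants work out as follows (worked arithmetic from the macros above):

	FSE_MAX_TABLELOG     = 14 - 2 = 12   ->  FSE_MAX_TABLESIZE = 1 << 12 = 4096 states
	FSE_DEFAULT_TABLELOG = 13 - 2 = 11
	FSE_TABLESTEP(4096)  = 2048 + 512 + 3 = 2563; the step is odd, hence coprime with
	                       the power-of-two table size, so the symbol-spreading walk in
	                       FSE_buildCTable_wksp()/FSE_buildDTable_wksp() visits every
	                       cell exactly once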
141460 -#endif /* FSE_H */
141461 diff --git a/lib/zstd/fse_compress.c b/lib/zstd/fse_compress.c
141462 deleted file mode 100644
141463 index ef3d1741d532..000000000000
141464 --- a/lib/zstd/fse_compress.c
141465 +++ /dev/null
141466 @@ -1,795 +0,0 @@
141468 - * FSE : Finite State Entropy encoder
141469 - * Copyright (C) 2013-2015, Yann Collet.
141471 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
141473 - * Redistribution and use in source and binary forms, with or without
141474 - * modification, are permitted provided that the following conditions are
141475 - * met:
141477 - *   * Redistributions of source code must retain the above copyright
141478 - * notice, this list of conditions and the following disclaimer.
141479 - *   * Redistributions in binary form must reproduce the above
141480 - * copyright notice, this list of conditions and the following disclaimer
141481 - * in the documentation and/or other materials provided with the
141482 - * distribution.
141484 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
141485 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
141486 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
141487 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
141488 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
141489 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
141490 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
141491 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
141492 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
141493 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
141494 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
141496 - * This program is free software; you can redistribute it and/or modify it under
141497 - * the terms of the GNU General Public License version 2 as published by the
141498 - * Free Software Foundation. This program is dual-licensed; you may select
141499 - * either version 2 of the GNU General Public License ("GPL") or BSD license
141500 - * ("BSD").
141502 - * You can contact the author at :
141503 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
141504 - */
141506 -/* **************************************************************
141507 -*  Compiler specifics
141508 -****************************************************************/
141509 -#define FORCE_INLINE static __always_inline
141511 -/* **************************************************************
141512 -*  Includes
141513 -****************************************************************/
141514 -#include "bitstream.h"
141515 -#include "fse.h"
141516 -#include <linux/compiler.h>
141517 -#include <linux/kernel.h>
141518 -#include <linux/math64.h>
141519 -#include <linux/string.h> /* memcpy, memset */
141521 -/* **************************************************************
141522 -*  Error Management
141523 -****************************************************************/
141524 -#define FSE_STATIC_ASSERT(c)                                   \
141525 -       {                                                      \
141526 -               enum { FSE_static_assert = 1 / (int)(!!(c)) }; \
141527 -       } /* use only *after* variable declarations */
141529 -/* **************************************************************
141530 -*  Templates
141531 -****************************************************************/
141533 -  designed to be included
141534 -  for type-specific functions (template emulation in C)
141535 -  The objective is to write these functions only once, for improved maintainability
141538 -/* safety checks */
141539 -#ifndef FSE_FUNCTION_EXTENSION
141540 -#error "FSE_FUNCTION_EXTENSION must be defined"
141541 -#endif
141542 -#ifndef FSE_FUNCTION_TYPE
141543 -#error "FSE_FUNCTION_TYPE must be defined"
141544 -#endif
141546 -/* Function names */
141547 -#define FSE_CAT(X, Y) X##Y
141548 -#define FSE_FUNCTION_NAME(X, Y) FSE_CAT(X, Y)
141549 -#define FSE_TYPE_NAME(X, Y) FSE_CAT(X, Y)
141551 -/* Function templates */
141553 -/* FSE_buildCTable_wksp() :
141554 - * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
141555 - * wkspSize should be sized for the worst-case situation, which is `(1 << max_tableLog) * sizeof(FSE_FUNCTION_TYPE)`
141556 - * workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
141557 - */
141558 -size_t FSE_buildCTable_wksp(FSE_CTable *ct, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize)
141560 -       U32 const tableSize = 1 << tableLog;
141561 -       U32 const tableMask = tableSize - 1;
141562 -       void *const ptr = ct;
141563 -       U16 *const tableU16 = ((U16 *)ptr) + 2;
141564 -       void *const FSCT = ((U32 *)ptr) + 1 /* header */ + (tableLog ? tableSize >> 1 : 1);
141565 -       FSE_symbolCompressionTransform *const symbolTT = (FSE_symbolCompressionTransform *)(FSCT);
141566 -       U32 const step = FSE_TABLESTEP(tableSize);
141567 -       U32 highThreshold = tableSize - 1;
141569 -       U32 *cumul;
141570 -       FSE_FUNCTION_TYPE *tableSymbol;
141571 -       size_t spaceUsed32 = 0;
141573 -       cumul = (U32 *)workspace + spaceUsed32;
141574 -       spaceUsed32 += FSE_MAX_SYMBOL_VALUE + 2;
141575 -       tableSymbol = (FSE_FUNCTION_TYPE *)((U32 *)workspace + spaceUsed32);
141576 -       spaceUsed32 += ALIGN(sizeof(FSE_FUNCTION_TYPE) * ((size_t)1 << tableLog), sizeof(U32)) >> 2;
141578 -       if ((spaceUsed32 << 2) > workspaceSize)
141579 -               return ERROR(tableLog_tooLarge);
141580 -       workspace = (U32 *)workspace + spaceUsed32;
141581 -       workspaceSize -= (spaceUsed32 << 2);
141583 -       /* CTable header */
141584 -       tableU16[-2] = (U16)tableLog;
141585 -       tableU16[-1] = (U16)maxSymbolValue;
141587 -       /* For explanations on how to distribute symbol values over the table :
141588 -       *  http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
141590 -       /* symbol start positions */
141591 -       {
141592 -               U32 u;
141593 -               cumul[0] = 0;
141594 -               for (u = 1; u <= maxSymbolValue + 1; u++) {
141595 -                       if (normalizedCounter[u - 1] == -1) { /* Low proba symbol */
141596 -                               cumul[u] = cumul[u - 1] + 1;
141597 -                               tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u - 1);
141598 -                       } else {
141599 -                               cumul[u] = cumul[u - 1] + normalizedCounter[u - 1];
141600 -                       }
141601 -               }
141602 -               cumul[maxSymbolValue + 1] = tableSize + 1;
141603 -       }
141605 -       /* Spread symbols */
141606 -       {
141607 -               U32 position = 0;
141608 -               U32 symbol;
141609 -               for (symbol = 0; symbol <= maxSymbolValue; symbol++) {
141610 -                       int nbOccurences;
141611 -                       for (nbOccurences = 0; nbOccurences < normalizedCounter[symbol]; nbOccurences++) {
141612 -                               tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
141613 -                               position = (position + step) & tableMask;
141614 -                               while (position > highThreshold)
141615 -                                       position = (position + step) & tableMask; /* Low proba area */
141616 -                       }
141617 -               }
141619 -               if (position != 0)
141620 -                       return ERROR(GENERIC); /* Must have gone through all positions */
141621 -       }
141623 -       /* Build table */
141624 -       {
141625 -               U32 u;
141626 -               for (u = 0; u < tableSize; u++) {
141627 -                       FSE_FUNCTION_TYPE s = tableSymbol[u];   /* note : static analyzer may not understand tableSymbol is properly initialized */
141628 -                       tableU16[cumul[s]++] = (U16)(tableSize + u); /* TableU16 : sorted by symbol order; gives next state value */
141629 -               }
141630 -       }
141632 -       /* Build Symbol Transformation Table */
141633 -       {
141634 -               unsigned total = 0;
141635 -               unsigned s;
141636 -               for (s = 0; s <= maxSymbolValue; s++) {
141637 -                       switch (normalizedCounter[s]) {
141638 -                       case 0: break;
141640 -                       case -1:
141641 -                       case 1:
141642 -                               symbolTT[s].deltaNbBits = (tableLog << 16) - (1 << tableLog);
141643 -                               symbolTT[s].deltaFindState = total - 1;
141644 -                               total++;
141645 -                               break;
141646 -                       default: {
141647 -                               U32 const maxBitsOut = tableLog - BIT_highbit32(normalizedCounter[s] - 1);
141648 -                               U32 const minStatePlus = normalizedCounter[s] << maxBitsOut;
141649 -                               symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
141650 -                               symbolTT[s].deltaFindState = total - normalizedCounter[s];
141651 -                               total += normalizedCounter[s];
141652 -                       }
141653 -                       }
141654 -               }
141655 -       }
141657 -       return 0;
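As a worked example of the workspace layout computed at the top of this function (illustrative numbers; FSE_MAX_SYMBOL_VALUE = 255 as defined in fse.h):

	tableLog = 12, FSE_FUNCTION_TYPE = BYTE:
	  cumul       : FSE_MAX_SYMBOL_VALUE + 2 = 257 U32          -> 1028 bytes
	  tableSymbol : (1 << 12) = 4096 BYTE, rounded up to U32    -> 1024 U32
	  spaceUsed32 = 257 + 1024 = 1281 U32, so workspaceSize must be at least 5124 bytes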
141660 -/*-**************************************************************
141661 -*  FSE NCount encoding-decoding
141662 -****************************************************************/
141663 -size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
141665 -       size_t const maxHeaderSize = (((maxSymbolValue + 1) * tableLog) >> 3) + 3;
141666 -       return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */
141669 -static size_t FSE_writeNCount_generic(void *header, size_t headerBufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
141670 -                                     unsigned writeIsSafe)
141672 -       BYTE *const ostart = (BYTE *)header;
141673 -       BYTE *out = ostart;
141674 -       BYTE *const oend = ostart + headerBufferSize;
141675 -       int nbBits;
141676 -       const int tableSize = 1 << tableLog;
141677 -       int remaining;
141678 -       int threshold;
141679 -       U32 bitStream;
141680 -       int bitCount;
141681 -       unsigned charnum = 0;
141682 -       int previous0 = 0;
141684 -       bitStream = 0;
141685 -       bitCount = 0;
141686 -       /* Table Size */
141687 -       bitStream += (tableLog - FSE_MIN_TABLELOG) << bitCount;
141688 -       bitCount += 4;
141690 -       /* Init */
141691 -       remaining = tableSize + 1; /* +1 for extra accuracy */
141692 -       threshold = tableSize;
141693 -       nbBits = tableLog + 1;
141695 -       while (remaining > 1) { /* stops at 1 */
141696 -               if (previous0) {
141697 -                       unsigned start = charnum;
141698 -                       while (!normalizedCounter[charnum])
141699 -                               charnum++;
141700 -                       while (charnum >= start + 24) {
141701 -                               start += 24;
141702 -                               bitStream += 0xFFFFU << bitCount;
141703 -                               if ((!writeIsSafe) && (out > oend - 2))
141704 -                                       return ERROR(dstSize_tooSmall); /* Buffer overflow */
141705 -                               out[0] = (BYTE)bitStream;
141706 -                               out[1] = (BYTE)(bitStream >> 8);
141707 -                               out += 2;
141708 -                               bitStream >>= 16;
141709 -                       }
141710 -                       while (charnum >= start + 3) {
141711 -                               start += 3;
141712 -                               bitStream += 3 << bitCount;
141713 -                               bitCount += 2;
141714 -                       }
141715 -                       bitStream += (charnum - start) << bitCount;
141716 -                       bitCount += 2;
141717 -                       if (bitCount > 16) {
141718 -                               if ((!writeIsSafe) && (out > oend - 2))
141719 -                                       return ERROR(dstSize_tooSmall); /* Buffer overflow */
141720 -                               out[0] = (BYTE)bitStream;
141721 -                               out[1] = (BYTE)(bitStream >> 8);
141722 -                               out += 2;
141723 -                               bitStream >>= 16;
141724 -                               bitCount -= 16;
141725 -                       }
141726 -               }
141727 -               {
141728 -                       int count = normalizedCounter[charnum++];
141729 -                       int const max = (2 * threshold - 1) - remaining;
141730 -                       remaining -= count < 0 ? -count : count;
141731 -                       count++; /* +1 for extra accuracy */
141732 -                       if (count >= threshold)
141733 -                               count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
141734 -                       bitStream += count << bitCount;
141735 -                       bitCount += nbBits;
141736 -                       bitCount -= (count < max);
141737 -                       previous0 = (count == 1);
141738 -                       if (remaining < 1)
141739 -                               return ERROR(GENERIC);
141740 -                       while (remaining < threshold)
141741 -                               nbBits--, threshold >>= 1;
141742 -               }
141743 -               if (bitCount > 16) {
141744 -                       if ((!writeIsSafe) && (out > oend - 2))
141745 -                               return ERROR(dstSize_tooSmall); /* Buffer overflow */
141746 -                       out[0] = (BYTE)bitStream;
141747 -                       out[1] = (BYTE)(bitStream >> 8);
141748 -                       out += 2;
141749 -                       bitStream >>= 16;
141750 -                       bitCount -= 16;
141751 -               }
141752 -       }
141754 -       /* flush remaining bitStream */
141755 -       if ((!writeIsSafe) && (out > oend - 2))
141756 -               return ERROR(dstSize_tooSmall); /* Buffer overflow */
141757 -       out[0] = (BYTE)bitStream;
141758 -       out[1] = (BYTE)(bitStream >> 8);
141759 -       out += (bitCount + 7) / 8;
141761 -       if (charnum > maxSymbolValue + 1)
141762 -               return ERROR(GENERIC);
141764 -       return (out - ostart);
141767 -size_t FSE_writeNCount(void *buffer, size_t bufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
141769 -       if (tableLog > FSE_MAX_TABLELOG)
141770 -               return ERROR(tableLog_tooLarge); /* Unsupported */
141771 -       if (tableLog < FSE_MIN_TABLELOG)
141772 -               return ERROR(GENERIC); /* Unsupported */
141774 -       if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
141775 -               return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
141777 -       return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1);
141780 -/*-**************************************************************
141781 -*  Counting histogram
141782 -****************************************************************/
141783 -/*! FSE_count_simple
141784 -	This function counts byte values within `src`, and stores the histogram into the table `count`.
141785 -       It doesn't use any additional memory.
141786 -	But this function is unsafe : it doesn't check that all values within `src` fit into `count`.
141787 -       For this reason, prefer using a table `count` with 256 elements.
141788 -       @return : count of most numerous element
141790 -size_t FSE_count_simple(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize)
141792 -       const BYTE *ip = (const BYTE *)src;
141793 -       const BYTE *const end = ip + srcSize;
141794 -       unsigned maxSymbolValue = *maxSymbolValuePtr;
141795 -       unsigned max = 0;
141797 -       memset(count, 0, (maxSymbolValue + 1) * sizeof(*count));
141798 -       if (srcSize == 0) {
141799 -               *maxSymbolValuePtr = 0;
141800 -               return 0;
141801 -       }
141803 -       while (ip < end)
141804 -               count[*ip++]++;
141806 -       while (!count[maxSymbolValue])
141807 -               maxSymbolValue--;
141808 -       *maxSymbolValuePtr = maxSymbolValue;
141810 -       {
141811 -               U32 s;
141812 -               for (s = 0; s <= maxSymbolValue; s++)
141813 -                       if (count[s] > max)
141814 -                               max = count[s];
141815 -       }
141817 -       return (size_t)max;
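A typical call, following the 256-element recommendation above (buf and len are illustrative names):

	unsigned count[256];
	unsigned maxSymbolValue = 255;
	size_t const maxCount = FSE_count_simple(count, &maxSymbolValue, buf, len);
	/* count[s] now holds the number of occurrences of byte s, and
	 * maxSymbolValue has been lowered to the largest byte value actually present */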
141820 -/* FSE_count_parallel_wksp() :
141821 - * Same as FSE_count_parallel(), but using an externally provided scratch buffer.
141822 - * `workSpace` size must be a minimum of `1024 * sizeof(unsigned)`
141823 -static size_t FSE_count_parallel_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned checkMax,
141824 -                                     unsigned *const workSpace)
141826 -       const BYTE *ip = (const BYTE *)source;
141827 -       const BYTE *const iend = ip + sourceSize;
141828 -       unsigned maxSymbolValue = *maxSymbolValuePtr;
141829 -       unsigned max = 0;
141830 -       U32 *const Counting1 = workSpace;
141831 -       U32 *const Counting2 = Counting1 + 256;
141832 -       U32 *const Counting3 = Counting2 + 256;
141833 -       U32 *const Counting4 = Counting3 + 256;
141835 -       memset(Counting1, 0, 4 * 256 * sizeof(unsigned));
141837 -       /* safety checks */
141838 -       if (!sourceSize) {
141839 -               memset(count, 0, maxSymbolValue + 1);
141840 -               *maxSymbolValuePtr = 0;
141841 -               return 0;
141842 -       }
141843 -       if (!maxSymbolValue)
141844 -               maxSymbolValue = 255; /* 0 == default */
141846 -       /* by stripes of 16 bytes */
141847 -       {
141848 -               U32 cached = ZSTD_read32(ip);
141849 -               ip += 4;
141850 -               while (ip < iend - 15) {
141851 -                       U32 c = cached;
141852 -                       cached = ZSTD_read32(ip);
141853 -                       ip += 4;
141854 -                       Counting1[(BYTE)c]++;
141855 -                       Counting2[(BYTE)(c >> 8)]++;
141856 -                       Counting3[(BYTE)(c >> 16)]++;
141857 -                       Counting4[c >> 24]++;
141858 -                       c = cached;
141859 -                       cached = ZSTD_read32(ip);
141860 -                       ip += 4;
141861 -                       Counting1[(BYTE)c]++;
141862 -                       Counting2[(BYTE)(c >> 8)]++;
141863 -                       Counting3[(BYTE)(c >> 16)]++;
141864 -                       Counting4[c >> 24]++;
141865 -                       c = cached;
141866 -                       cached = ZSTD_read32(ip);
141867 -                       ip += 4;
141868 -                       Counting1[(BYTE)c]++;
141869 -                       Counting2[(BYTE)(c >> 8)]++;
141870 -                       Counting3[(BYTE)(c >> 16)]++;
141871 -                       Counting4[c >> 24]++;
141872 -                       c = cached;
141873 -                       cached = ZSTD_read32(ip);
141874 -                       ip += 4;
141875 -                       Counting1[(BYTE)c]++;
141876 -                       Counting2[(BYTE)(c >> 8)]++;
141877 -                       Counting3[(BYTE)(c >> 16)]++;
141878 -                       Counting4[c >> 24]++;
141879 -               }
141880 -               ip -= 4;
141881 -       }
141883 -       /* finish last symbols */
141884 -       while (ip < iend)
141885 -               Counting1[*ip++]++;
141887 -       if (checkMax) { /* verify stats will fit into destination table */
141888 -               U32 s;
141889 -               for (s = 255; s > maxSymbolValue; s--) {
141890 -                       Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
141891 -                       if (Counting1[s])
141892 -                               return ERROR(maxSymbolValue_tooSmall);
141893 -               }
141894 -       }
141896 -       {
141897 -               U32 s;
141898 -               for (s = 0; s <= maxSymbolValue; s++) {
141899 -                       count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
141900 -                       if (count[s] > max)
141901 -                               max = count[s];
141902 -               }
141903 -       }
141905 -       while (!count[maxSymbolValue])
141906 -               maxSymbolValue--;
141907 -       *maxSymbolValuePtr = maxSymbolValue;
141908 -       return (size_t)max;
141911 -/* FSE_countFast_wksp() :
141912 - * Same as FSE_countFast(), but using an externally provided scratch buffer.
141913 - * `workSpace` must be a table of at least `1024` unsigned values
141914 -size_t FSE_countFast_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace)
141916 -       if (sourceSize < 1500)
141917 -               return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize);
141918 -       return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace);
141921 -/* FSE_count_wksp() :
141922 - * Same as FSE_count(), but using an externally provided scratch buffer.
141923 - * `workSpace` must be a table of at least `1024` unsigned values
141924 -size_t FSE_count_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace)
141926 -       if (*maxSymbolValuePtr < 255)
141927 -               return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace);
141928 -       *maxSymbolValuePtr = 255;
141929 -       return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace);
141932 -/*-**************************************************************
141933 -*  FSE Compression Code
141934 -****************************************************************/
141935 -/*! FSE_sizeof_CTable() :
141936 -       FSE_CTable is a variable size structure which contains :
141937 -       `U16 tableLog;`
141938 -       `U16 maxSymbolValue;`
141939 -       `U16 nextStateNumber[1 << tableLog];`                         // This size is variable
141940 -       `FSE_symbolCompressionTransform symbolTT[maxSymbolValue+1];`  // This size is variable
141941 -Allocation is manual (the C standard does not support variable-size structures).
141943 -size_t FSE_sizeof_CTable(unsigned maxSymbolValue, unsigned tableLog)
141945 -       if (tableLog > FSE_MAX_TABLELOG)
141946 -               return ERROR(tableLog_tooLarge);
141947 -       return FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue) * sizeof(U32);
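A worked sizing, assuming the FSE_CTABLE_SIZE_U32 definition from earlier in this header, 1 + (1 << (tableLog - 1)) + 2 * (maxSymbolValue + 1):

	tableLog = 12, maxSymbolValue = 255:
	  FSE_CTABLE_SIZE_U32 = 1 + 2048 + 512 = 2561 U32  ->  10244 bytes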
141950 -/* provides the minimum logSize to safely represent a distribution */
141951 -static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
141953 -       U32 minBitsSrc = BIT_highbit32((U32)(srcSize - 1)) + 1;
141954 -       U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
141955 -       U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
141956 -       return minBits;
141959 -unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
141961 -       U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
141962 -       U32 tableLog = maxTableLog;
141963 -       U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
141964 -       if (tableLog == 0)
141965 -               tableLog = FSE_DEFAULT_TABLELOG;
141966 -       if (maxBitsSrc < tableLog)
141967 -               tableLog = maxBitsSrc; /* Accuracy can be reduced */
141968 -       if (minBits > tableLog)
141969 -               tableLog = minBits; /* Need a minimum to safely represent all symbol values */
141970 -       if (tableLog < FSE_MIN_TABLELOG)
141971 -               tableLog = FSE_MIN_TABLELOG;
141972 -       if (tableLog > FSE_MAX_TABLELOG)
141973 -               tableLog = FSE_MAX_TABLELOG;
141974 -       return tableLog;
141977 -unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
141979 -       return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
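For example, worked through the clamps above (0 selects the default table log):

	FSE_optimalTableLog(0, 4096, 255):
	  maxBitsSrc = BIT_highbit32(4095) - 2 = 11 - 2 = 9
	  default    : tableLog = FSE_DEFAULT_TABLELOG = 11, then reduced to maxBitsSrc = 9
	  minBits    = min(BIT_highbit32(4095) + 1, BIT_highbit32(255) + 2) = min(12, 9) = 9
	  result     : 9, already within [FSE_MIN_TABLELOG, FSE_MAX_TABLELOG] = [5, 12]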
141982 -/* Secondary normalization method.
141983 -   To be used when primary method fails. */
141985 -static size_t FSE_normalizeM2(short *norm, U32 tableLog, const unsigned *count, size_t total, U32 maxSymbolValue)
141987 -       short const NOT_YET_ASSIGNED = -2;
141988 -       U32 s;
141989 -       U32 distributed = 0;
141990 -       U32 ToDistribute;
141992 -       /* Init */
141993 -       U32 const lowThreshold = (U32)(total >> tableLog);
141994 -       U32 lowOne = (U32)((total * 3) >> (tableLog + 1));
141996 -       for (s = 0; s <= maxSymbolValue; s++) {
141997 -               if (count[s] == 0) {
141998 -                       norm[s] = 0;
141999 -                       continue;
142000 -               }
142001 -               if (count[s] <= lowThreshold) {
142002 -                       norm[s] = -1;
142003 -                       distributed++;
142004 -                       total -= count[s];
142005 -                       continue;
142006 -               }
142007 -               if (count[s] <= lowOne) {
142008 -                       norm[s] = 1;
142009 -                       distributed++;
142010 -                       total -= count[s];
142011 -                       continue;
142012 -               }
142014 -               norm[s] = NOT_YET_ASSIGNED;
142015 -       }
142016 -       ToDistribute = (1 << tableLog) - distributed;
142018 -       if ((total / ToDistribute) > lowOne) {
142019 -               /* risk of rounding to zero */
142020 -               lowOne = (U32)((total * 3) / (ToDistribute * 2));
142021 -               for (s = 0; s <= maxSymbolValue; s++) {
142022 -                       if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
142023 -                               norm[s] = 1;
142024 -                               distributed++;
142025 -                               total -= count[s];
142026 -                               continue;
142027 -                       }
142028 -               }
142029 -               ToDistribute = (1 << tableLog) - distributed;
142030 -       }
142032 -       if (distributed == maxSymbolValue + 1) {
142033 -               /* all values are pretty poor;
142034 -                  probably incompressible data (should have already been detected);
142035 -                  find max, then give all remaining points to max */
142036 -               U32 maxV = 0, maxC = 0;
142037 -               for (s = 0; s <= maxSymbolValue; s++)
142038 -                       if (count[s] > maxC)
142039 -                               maxV = s, maxC = count[s];
142040 -               norm[maxV] += (short)ToDistribute;
142041 -               return 0;
142042 -       }
142044 -       if (total == 0) {
142045 -               /* all of the symbols were low enough for the lowOne or lowThreshold */
142046 -               for (s = 0; ToDistribute > 0; s = (s + 1) % (maxSymbolValue + 1))
142047 -                       if (norm[s] > 0)
142048 -                               ToDistribute--, norm[s]++;
142049 -               return 0;
142050 -       }
142052 -       {
142053 -               U64 const vStepLog = 62 - tableLog;
142054 -               U64 const mid = (1ULL << (vStepLog - 1)) - 1;
142055 -               U64 const rStep = div_u64((((U64)1 << vStepLog) * ToDistribute) + mid, (U32)total); /* scale on remaining */
142056 -               U64 tmpTotal = mid;
142057 -               for (s = 0; s <= maxSymbolValue; s++) {
142058 -                       if (norm[s] == NOT_YET_ASSIGNED) {
142059 -                               U64 const end = tmpTotal + (count[s] * rStep);
142060 -                               U32 const sStart = (U32)(tmpTotal >> vStepLog);
142061 -                               U32 const sEnd = (U32)(end >> vStepLog);
142062 -                               U32 const weight = sEnd - sStart;
142063 -                               if (weight < 1)
142064 -                                       return ERROR(GENERIC);
142065 -                               norm[s] = (short)weight;
142066 -                               tmpTotal = end;
142067 -                       }
142068 -               }
142069 -       }
142071 -       return 0;
142074 -size_t FSE_normalizeCount(short *normalizedCounter, unsigned tableLog, const unsigned *count, size_t total, unsigned maxSymbolValue)
142076 -       /* Sanity checks */
142077 -       if (tableLog == 0)
142078 -               tableLog = FSE_DEFAULT_TABLELOG;
142079 -       if (tableLog < FSE_MIN_TABLELOG)
142080 -               return ERROR(GENERIC); /* Unsupported size */
142081 -       if (tableLog > FSE_MAX_TABLELOG)
142082 -               return ERROR(tableLog_tooLarge); /* Unsupported size */
142083 -       if (tableLog < FSE_minTableLog(total, maxSymbolValue))
142084 -               return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */
142086 -       {
142087 -               U32 const rtbTable[] = {0, 473195, 504333, 520860, 550000, 700000, 750000, 830000};
142088 -               U64 const scale = 62 - tableLog;
142089 -               U64 const step = div_u64((U64)1 << 62, (U32)total); /* <== here, one division ! */
142090 -               U64 const vStep = 1ULL << (scale - 20);
142091 -               int stillToDistribute = 1 << tableLog;
142092 -               unsigned s;
142093 -               unsigned largest = 0;
142094 -               short largestP = 0;
142095 -               U32 lowThreshold = (U32)(total >> tableLog);
142097 -               for (s = 0; s <= maxSymbolValue; s++) {
142098 -                       if (count[s] == total)
142099 -                               return 0; /* rle special case */
142100 -                       if (count[s] == 0) {
142101 -                               normalizedCounter[s] = 0;
142102 -                               continue;
142103 -                       }
142104 -                       if (count[s] <= lowThreshold) {
142105 -                               normalizedCounter[s] = -1;
142106 -                               stillToDistribute--;
142107 -                       } else {
142108 -                               short proba = (short)((count[s] * step) >> scale);
142109 -                               if (proba < 8) {
142110 -                                       U64 restToBeat = vStep * rtbTable[proba];
142111 -                                       proba += (count[s] * step) - ((U64)proba << scale) > restToBeat;
142112 -                               }
142113 -                               if (proba > largestP)
142114 -                                       largestP = proba, largest = s;
142115 -                               normalizedCounter[s] = proba;
142116 -                               stillToDistribute -= proba;
142117 -                       }
142118 -               }
142119 -               if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
142120 -                       /* corner case, need another normalization method */
142121 -                       size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue);
142122 -                       if (FSE_isError(errorCode))
142123 -                               return errorCode;
142124 -               } else
142125 -                       normalizedCounter[largest] += (short)stillToDistribute;
142126 -       }
142128 -       return tableLog;
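To make the fixed-point rounding concrete, a small worked example (first-order arithmetic; the rtbTable adjustment leaves these particular values unchanged):

	counts = {60, 30, 10}, total = 100, tableLog = 5 (32 states):
	  lowThreshold = 100 >> 5 = 3, so no symbol is forced to proba -1
	  raw probas   = floor(count * 32 / 100) = {19, 9, 3}, summing to 31
	  the leftover 32 - 31 = 1 state goes to the largest symbol
	  normalizedCounter = {20, 9, 3}, summing to 1 << tableLog = 32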
142131 -/* fake FSE_CTable, for raw (uncompressed) input */
142132 -size_t FSE_buildCTable_raw(FSE_CTable *ct, unsigned nbBits)
142134 -       const unsigned tableSize = 1 << nbBits;
142135 -       const unsigned tableMask = tableSize - 1;
142136 -       const unsigned maxSymbolValue = tableMask;
142137 -       void *const ptr = ct;
142138 -       U16 *const tableU16 = ((U16 *)ptr) + 2;
142139 -       void *const FSCT = ((U32 *)ptr) + 1 /* header */ + (tableSize >> 1); /* assumption : tableLog >= 1 */
142140 -       FSE_symbolCompressionTransform *const symbolTT = (FSE_symbolCompressionTransform *)(FSCT);
142141 -       unsigned s;
142143 -       /* Sanity checks */
142144 -       if (nbBits < 1)
142145 -               return ERROR(GENERIC); /* min size */
142147 -       /* header */
142148 -       tableU16[-2] = (U16)nbBits;
142149 -       tableU16[-1] = (U16)maxSymbolValue;
142151 -       /* Build table */
142152 -       for (s = 0; s < tableSize; s++)
142153 -               tableU16[s] = (U16)(tableSize + s);
142155 -       /* Build Symbol Transformation Table */
142156 -       {
142157 -               const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
142158 -               for (s = 0; s <= maxSymbolValue; s++) {
142159 -                       symbolTT[s].deltaNbBits = deltaNbBits;
142160 -                       symbolTT[s].deltaFindState = s - 1;
142161 -               }
142162 -       }
142164 -       return 0;
142167 -/* fake FSE_CTable, for rle input (always same symbol) */
142168 -size_t FSE_buildCTable_rle(FSE_CTable *ct, BYTE symbolValue)
142170 -       void *ptr = ct;
142171 -       U16 *tableU16 = ((U16 *)ptr) + 2;
142172 -       void *FSCTptr = (U32 *)ptr + 2;
142173 -       FSE_symbolCompressionTransform *symbolTT = (FSE_symbolCompressionTransform *)FSCTptr;
142175 -       /* header */
142176 -       tableU16[-2] = (U16)0;
142177 -       tableU16[-1] = (U16)symbolValue;
142179 -       /* Build table */
142180 -       tableU16[0] = 0;
142181 -       tableU16[1] = 0; /* just in case */
142183 -       /* Build Symbol Transformation Table */
142184 -       symbolTT[symbolValue].deltaNbBits = 0;
142185 -       symbolTT[symbolValue].deltaFindState = 0;
142187 -       return 0;
142190 -static size_t FSE_compress_usingCTable_generic(void *dst, size_t dstSize, const void *src, size_t srcSize, const FSE_CTable *ct, const unsigned fast)
142192 -       const BYTE *const istart = (const BYTE *)src;
142193 -       const BYTE *const iend = istart + srcSize;
142194 -       const BYTE *ip = iend;
142196 -       BIT_CStream_t bitC;
142197 -       FSE_CState_t CState1, CState2;
142199 -       /* init */
142200 -       if (srcSize <= 2)
142201 -               return 0;
142202 -       {
142203 -               size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
142204 -               if (FSE_isError(initError))
142205 -                       return 0; /* not enough space available to write a bitstream */
142206 -       }
142208 -#define FSE_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
142210 -       if (srcSize & 1) {
142211 -               FSE_initCState2(&CState1, ct, *--ip);
142212 -               FSE_initCState2(&CState2, ct, *--ip);
142213 -               FSE_encodeSymbol(&bitC, &CState1, *--ip);
142214 -               FSE_FLUSHBITS(&bitC);
142215 -       } else {
142216 -               FSE_initCState2(&CState2, ct, *--ip);
142217 -               FSE_initCState2(&CState1, ct, *--ip);
142218 -       }
142220 -       /* join to mod 4 */
142221 -       srcSize -= 2;
142222 -       if ((sizeof(bitC.bitContainer) * 8 > FSE_MAX_TABLELOG * 4 + 7) && (srcSize & 2)) { /* test bit 2 */
142223 -               FSE_encodeSymbol(&bitC, &CState2, *--ip);
142224 -               FSE_encodeSymbol(&bitC, &CState1, *--ip);
142225 -               FSE_FLUSHBITS(&bitC);
142226 -       }
142228 -       /* 2 or 4 encoding per loop */
142229 -       while (ip > istart) {
142231 -               FSE_encodeSymbol(&bitC, &CState2, *--ip);
142233 -               if (sizeof(bitC.bitContainer) * 8 < FSE_MAX_TABLELOG * 2 + 7) /* this test must be static */
142234 -                       FSE_FLUSHBITS(&bitC);
142236 -               FSE_encodeSymbol(&bitC, &CState1, *--ip);
142238 -               if (sizeof(bitC.bitContainer) * 8 > FSE_MAX_TABLELOG * 4 + 7) { /* this test must be static */
142239 -                       FSE_encodeSymbol(&bitC, &CState2, *--ip);
142240 -                       FSE_encodeSymbol(&bitC, &CState1, *--ip);
142241 -               }
142243 -               FSE_FLUSHBITS(&bitC);
142244 -       }
142246 -       FSE_flushCState(&bitC, &CState2);
142247 -       FSE_flushCState(&bitC, &CState1);
142248 -       return BIT_closeCStream(&bitC);
142251 -size_t FSE_compress_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const FSE_CTable *ct)
142253 -       unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));
142255 -       if (fast)
142256 -               return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
142257 -       else
142258 -               return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
142261 -size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
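Taken together, the public entry points in this file chain into a complete compressor roughly as follows (a sketch only: src, srcSize, dst, dstCapacity, ct, wksp and wkspSize are illustrative names, ct is assumed sized via FSE_sizeof_CTable(), and error handling is abbreviated):

	unsigned count[256];
	unsigned maxSym = 255;
	short norm[256];
	unsigned tableLog;
	size_t hSize, cSize;

	FSE_count_simple(count, &maxSym, src, srcSize);
	tableLog = FSE_optimalTableLog(0, srcSize, maxSym); /* 0 selects the default */
	if (FSE_isError(FSE_normalizeCount(norm, tableLog, count, srcSize, maxSym)))
		return ERROR(GENERIC);
	hSize = FSE_writeNCount(dst, dstCapacity, norm, maxSym, tableLog);
	if (FSE_isError(hSize))
		return hSize;
	if (FSE_isError(FSE_buildCTable_wksp(ct, norm, maxSym, tableLog, wksp, wkspSize)))
		return ERROR(GENERIC);
	cSize = FSE_compress_usingCTable(dst + hSize, dstCapacity - hSize, src, srcSize, ct);
	/* the compressed block is the hSize-byte NCount header followed by the cSize-byte payload */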
142262 diff --git a/lib/zstd/fse_decompress.c b/lib/zstd/fse_decompress.c
142263 deleted file mode 100644
142264 index 0b353530fb3f..000000000000
142265 --- a/lib/zstd/fse_decompress.c
142266 +++ /dev/null
142267 @@ -1,325 +0,0 @@
142269 - * FSE : Finite State Entropy decoder
142270 - * Copyright (C) 2013-2015, Yann Collet.
142272 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
142274 - * Redistribution and use in source and binary forms, with or without
142275 - * modification, are permitted provided that the following conditions are
142276 - * met:
142278 - *   * Redistributions of source code must retain the above copyright
142279 - * notice, this list of conditions and the following disclaimer.
142280 - *   * Redistributions in binary form must reproduce the above
142281 - * copyright notice, this list of conditions and the following disclaimer
142282 - * in the documentation and/or other materials provided with the
142283 - * distribution.
142285 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
142286 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
142287 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
142288 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
142289 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
142290 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
142291 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
142292 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
142293 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
142294 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
142295 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
142297 - * This program is free software; you can redistribute it and/or modify it under
142298 - * the terms of the GNU General Public License version 2 as published by the
142299 - * Free Software Foundation. This program is dual-licensed; you may select
142300 - * either version 2 of the GNU General Public License ("GPL") or BSD license
142301 - * ("BSD").
142303 - * You can contact the author at :
142304 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
142305 - */
142307 -/* **************************************************************
142308 -*  Compiler specifics
142309 -****************************************************************/
142310 -#define FORCE_INLINE static __always_inline
142312 -/* **************************************************************
142313 -*  Includes
142314 -****************************************************************/
142315 -#include "bitstream.h"
142316 -#include "fse.h"
142317 -#include "zstd_internal.h"
142318 -#include <linux/compiler.h>
142319 -#include <linux/kernel.h>
142320 -#include <linux/string.h> /* memcpy, memset */
142322 -/* **************************************************************
142323 -*  Error Management
142324 -****************************************************************/
142325 -#define FSE_isError ERR_isError
142326 -#define FSE_STATIC_ASSERT(c)                                   \
142327 -       {                                                      \
142328 -               enum { FSE_static_assert = 1 / (int)(!!(c)) }; \
142329 -       } /* use only *after* variable declarations */
142331 -/* **************************************************************
142332 -*  Templates
142333 -****************************************************************/
142335 -  designed to be included
142336 -  for type-specific functions (template emulation in C)
142337 -  The objective is to write these functions only once, for improved maintainability
142340 -/* safety checks */
142341 -#ifndef FSE_FUNCTION_EXTENSION
142342 -#error "FSE_FUNCTION_EXTENSION must be defined"
142343 -#endif
142344 -#ifndef FSE_FUNCTION_TYPE
142345 -#error "FSE_FUNCTION_TYPE must be defined"
142346 -#endif
142348 -/* Function names */
142349 -#define FSE_CAT(X, Y) X##Y
142350 -#define FSE_FUNCTION_NAME(X, Y) FSE_CAT(X, Y)
142351 -#define FSE_TYPE_NAME(X, Y) FSE_CAT(X, Y)
142353 -/* Function templates */
142355 -size_t FSE_buildDTable_wksp(FSE_DTable *dt, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize)
142357 -       void *const tdPtr = dt + 1; /* because *dt is unsigned, 32-bits aligned on 32-bits */
142358 -       FSE_DECODE_TYPE *const tableDecode = (FSE_DECODE_TYPE *)(tdPtr);
142359 -       U16 *symbolNext = (U16 *)workspace;
142361 -       U32 const maxSV1 = maxSymbolValue + 1;
142362 -       U32 const tableSize = 1 << tableLog;
142363 -       U32 highThreshold = tableSize - 1;
142365 -       /* Sanity Checks */
142366 -       if (workspaceSize < sizeof(U16) * (FSE_MAX_SYMBOL_VALUE + 1))
142367 -               return ERROR(tableLog_tooLarge);
142368 -       if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE)
142369 -               return ERROR(maxSymbolValue_tooLarge);
142370 -       if (tableLog > FSE_MAX_TABLELOG)
142371 -               return ERROR(tableLog_tooLarge);
142373 -       /* Init, lay down lowprob symbols */
142374 -       {
142375 -               FSE_DTableHeader DTableH;
142376 -               DTableH.tableLog = (U16)tableLog;
142377 -               DTableH.fastMode = 1;
142378 -               {
142379 -                       S16 const largeLimit = (S16)(1 << (tableLog - 1));
142380 -                       U32 s;
142381 -                       for (s = 0; s < maxSV1; s++) {
142382 -                               if (normalizedCounter[s] == -1) {
142383 -                                       tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
142384 -                                       symbolNext[s] = 1;
142385 -                               } else {
142386 -                                       if (normalizedCounter[s] >= largeLimit)
142387 -                                               DTableH.fastMode = 0;
142388 -                                       symbolNext[s] = normalizedCounter[s];
142389 -                               }
142390 -                       }
142391 -               }
142392 -               memcpy(dt, &DTableH, sizeof(DTableH));
142393 -       }
142395 -       /* Spread symbols */
142396 -       {
142397 -               U32 const tableMask = tableSize - 1;
142398 -               U32 const step = FSE_TABLESTEP(tableSize);
142399 -               U32 s, position = 0;
142400 -               for (s = 0; s < maxSV1; s++) {
142401 -                       int i;
142402 -                       for (i = 0; i < normalizedCounter[s]; i++) {
142403 -                               tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
142404 -                               position = (position + step) & tableMask;
142405 -                               while (position > highThreshold)
142406 -                                       position = (position + step) & tableMask; /* lowprob area */
142407 -                       }
142408 -               }
142409 -               if (position != 0)
142410 -                       return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
142411 -       }
142413 -       /* Build Decoding table */
142414 -       {
142415 -               U32 u;
142416 -               for (u = 0; u < tableSize; u++) {
142417 -                       FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
142418 -                       U16 nextState = symbolNext[symbol]++;
142419 -                       tableDecode[u].nbBits = (BYTE)(tableLog - BIT_highbit32((U32)nextState));
142420 -                       tableDecode[u].newState = (U16)((nextState << tableDecode[u].nbBits) - tableSize);
142421 -               }
142422 -       }
142424 -       return 0;
142427 -/*-*******************************************************
142428 -*  Decompression (Byte symbols)
142429 -*********************************************************/
142430 -size_t FSE_buildDTable_rle(FSE_DTable *dt, BYTE symbolValue)
142432 -       void *ptr = dt;
142433 -       FSE_DTableHeader *const DTableH = (FSE_DTableHeader *)ptr;
142434 -       void *dPtr = dt + 1;
142435 -       FSE_decode_t *const cell = (FSE_decode_t *)dPtr;
142437 -       DTableH->tableLog = 0;
142438 -       DTableH->fastMode = 0;
142440 -       cell->newState = 0;
142441 -       cell->symbol = symbolValue;
142442 -       cell->nbBits = 0;
142444 -       return 0;
142447 -size_t FSE_buildDTable_raw(FSE_DTable *dt, unsigned nbBits)
142449 -       void *ptr = dt;
142450 -       FSE_DTableHeader *const DTableH = (FSE_DTableHeader *)ptr;
142451 -       void *dPtr = dt + 1;
142452 -       FSE_decode_t *const dinfo = (FSE_decode_t *)dPtr;
142453 -       const unsigned tableSize = 1 << nbBits;
142454 -       const unsigned tableMask = tableSize - 1;
142455 -       const unsigned maxSV1 = tableMask + 1;
142456 -       unsigned s;
142458 -       /* Sanity checks */
142459 -       if (nbBits < 1)
142460 -               return ERROR(GENERIC); /* min size */
142462 -       /* Build Decoding Table */
142463 -       DTableH->tableLog = (U16)nbBits;
142464 -       DTableH->fastMode = 1;
142465 -       for (s = 0; s < maxSV1; s++) {
142466 -               dinfo[s].newState = 0;
142467 -               dinfo[s].symbol = (BYTE)s;
142468 -               dinfo[s].nbBits = (BYTE)nbBits;
142469 -       }
142471 -       return 0;
142474 -FORCE_INLINE size_t FSE_decompress_usingDTable_generic(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt,
142475 -                                                      const unsigned fast)
142477 -       BYTE *const ostart = (BYTE *)dst;
142478 -       BYTE *op = ostart;
142479 -       BYTE *const omax = op + maxDstSize;
142480 -       BYTE *const olimit = omax - 3;
142482 -       BIT_DStream_t bitD;
142483 -       FSE_DState_t state1;
142484 -       FSE_DState_t state2;
142486 -       /* Init */
142487 -       CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));
142489 -       FSE_initDState(&state1, &bitD, dt);
142490 -       FSE_initDState(&state2, &bitD, dt);
142492 -#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
142494 -       /* 4 symbols per loop */
142495 -       for (; (BIT_reloadDStream(&bitD) == BIT_DStream_unfinished) & (op < olimit); op += 4) {
142496 -               op[0] = FSE_GETSYMBOL(&state1);
142498 -               if (FSE_MAX_TABLELOG * 2 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */
142499 -                       BIT_reloadDStream(&bitD);
142501 -               op[1] = FSE_GETSYMBOL(&state2);
142503 -               if (FSE_MAX_TABLELOG * 4 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */
142504 -               {
142505 -                       if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) {
142506 -                               op += 2;
142507 -                               break;
142508 -                       }
142509 -               }
142511 -               op[2] = FSE_GETSYMBOL(&state1);
142513 -               if (FSE_MAX_TABLELOG * 2 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */
142514 -                       BIT_reloadDStream(&bitD);
142516 -               op[3] = FSE_GETSYMBOL(&state2);
142517 -       }
142519 -       /* tail */
142520 -	/* note : BIT_reloadDStream(&bitD) >= BIT_DStream_endOfBuffer here; the loop ends exactly at BIT_DStream_completed */
142521 -       while (1) {
142522 -               if (op > (omax - 2))
142523 -                       return ERROR(dstSize_tooSmall);
142524 -               *op++ = FSE_GETSYMBOL(&state1);
142525 -               if (BIT_reloadDStream(&bitD) == BIT_DStream_overflow) {
142526 -                       *op++ = FSE_GETSYMBOL(&state2);
142527 -                       break;
142528 -               }
142530 -               if (op > (omax - 2))
142531 -                       return ERROR(dstSize_tooSmall);
142532 -               *op++ = FSE_GETSYMBOL(&state2);
142533 -               if (BIT_reloadDStream(&bitD) == BIT_DStream_overflow) {
142534 -                       *op++ = FSE_GETSYMBOL(&state1);
142535 -                       break;
142536 -               }
142537 -       }
142539 -       return op - ostart;
142542 -size_t FSE_decompress_usingDTable(void *dst, size_t originalSize, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt)
142544 -       const void *ptr = dt;
142545 -       const FSE_DTableHeader *DTableH = (const FSE_DTableHeader *)ptr;
142546 -       const U32 fastMode = DTableH->fastMode;
142548 -       /* select fast mode (static) */
142549 -       if (fastMode)
142550 -               return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
142551 -       return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
142554 -size_t FSE_decompress_wksp(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, unsigned maxLog, void *workspace, size_t workspaceSize)
142556 -       const BYTE *const istart = (const BYTE *)cSrc;
142557 -       const BYTE *ip = istart;
142558 -       unsigned tableLog;
142559 -       unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
142560 -       size_t NCountLength;
142562 -       FSE_DTable *dt;
142563 -       short *counting;
142564 -       size_t spaceUsed32 = 0;
142566 -       FSE_STATIC_ASSERT(sizeof(FSE_DTable) == sizeof(U32));
142568 -       dt = (FSE_DTable *)((U32 *)workspace + spaceUsed32);
142569 -       spaceUsed32 += FSE_DTABLE_SIZE_U32(maxLog);
142570 -       counting = (short *)((U32 *)workspace + spaceUsed32);
142571 -       spaceUsed32 += ALIGN(sizeof(short) * (FSE_MAX_SYMBOL_VALUE + 1), sizeof(U32)) >> 2;
142573 -       if ((spaceUsed32 << 2) > workspaceSize)
142574 -               return ERROR(tableLog_tooLarge);
142575 -       workspace = (U32 *)workspace + spaceUsed32;
142576 -       workspaceSize -= (spaceUsed32 << 2);
142578 -       /* normal FSE decoding mode */
142579 -       NCountLength = FSE_readNCount(counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
142580 -       if (FSE_isError(NCountLength))
142581 -               return NCountLength;
142582 -       // if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size; already checked by FSE_readNCount();
142583 -       // only remaining case : NCountLength == cSrcSize */
142584 -       if (tableLog > maxLog)
142585 -               return ERROR(tableLog_tooLarge);
142586 -       ip += NCountLength;
142587 -       cSrcSize -= NCountLength;
142589 -       CHECK_F(FSE_buildDTable_wksp(dt, counting, maxSymbolValue, tableLog, workspace, workspaceSize));
142591 -       return FSE_decompress_usingDTable(dst, dstCapacity, ip, cSrcSize, dt); /* always return, even if it is an error code */
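For orientation, a minimal caller sketch for FSE_decompress_wksp() under stated assumptions: FSE_DTABLE_SIZE_U32() comes from fse.h, the maxLog of 12 and the 512-U32 margin for the counting and build-table scratch carved out above are illustrative, and a real kernel caller would use a heap workspace rather than this stack array.

/* Hypothetical caller, illustrative only; not part of this patch. */
static size_t example_fse_decompress(void *dst, size_t dstCapacity,
				     const void *cSrc, size_t cSrcSize)
{
	unsigned const maxLog = 12; /* assumed bound on tableLog */
	/* DTable, counting buffer and build-table scratch, as carved above;
	 * the margin is an assumption, not a normative size. */
	U32 wksp[FSE_DTABLE_SIZE_U32(12) + 512];

	return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize,
				   maxLog, wksp, sizeof(wksp));
}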
142593 diff --git a/lib/zstd/huf.h b/lib/zstd/huf.h
142594 deleted file mode 100644
142595 index 2143da28d952..000000000000
142596 --- a/lib/zstd/huf.h
142597 +++ /dev/null
142598 @@ -1,212 +0,0 @@
142600 - * Huffman coder, part of New Generation Entropy library
142601 - * header file
142602 - * Copyright (C) 2013-2016, Yann Collet.
142604 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
142606 - * Redistribution and use in source and binary forms, with or without
142607 - * modification, are permitted provided that the following conditions are
142608 - * met:
142610 - *   * Redistributions of source code must retain the above copyright
142611 - * notice, this list of conditions and the following disclaimer.
142612 - *   * Redistributions in binary form must reproduce the above
142613 - * copyright notice, this list of conditions and the following disclaimer
142614 - * in the documentation and/or other materials provided with the
142615 - * distribution.
142617 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
142618 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
142619 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
142620 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
142621 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
142622 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
142623 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
142624 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
142625 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
142626 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
142627 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
142629 - * This program is free software; you can redistribute it and/or modify it under
142630 - * the terms of the GNU General Public License version 2 as published by the
142631 - * Free Software Foundation. This program is dual-licensed; you may select
142632 - * either version 2 of the GNU General Public License ("GPL") or BSD license
142633 - * ("BSD").
142635 - * You can contact the author at :
142636 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
142637 - */
142638 -#ifndef HUF_H_298734234
142639 -#define HUF_H_298734234
142641 -/* *** Dependencies *** */
142642 -#include <linux/types.h> /* size_t */
142644 -/* ***   Tool functions *** */
142645 -#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */
142646 -size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */
142648 -/* Error Management */
142649 -unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */
142651 -/* ***   Advanced function   *** */
142653 -/** HUF_compress4X_wksp() :
142654 -*   Same as HUF_compress2(), but uses externally allocated `workSpace`, which must be a table of >= 1024 unsigned */
142655 -size_t HUF_compress4X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
142656 -                          size_t wkspSize); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
142658 -/* *** Dependencies *** */
142659 -#include "mem.h" /* U32 */
142661 -/* *** Constants *** */
142662 -#define HUF_TABLELOG_MAX 12     /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
142663 -#define HUF_TABLELOG_DEFAULT 11 /* tableLog by default, when not specified */
142664 -#define HUF_SYMBOLVALUE_MAX 255
142666 -#define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_TABLELOG_MAX. Beyond that value, code does not work */
142667 -#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
142668 -#error "HUF_TABLELOG_MAX is too large !"
142669 -#endif
142671 -/* ****************************************
142672 -*  Static allocation
142673 -******************************************/
142674 -/* HUF buffer bounds */
142675 -#define HUF_CTABLEBOUND 129
142676 -#define HUF_BLOCKBOUND(size) (size + (size >> 8) + 8)                   /* only true if incompressible data is pre-filtered with the fast heuristic */
142677 -#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
142679 -/* static allocation of HUF's Compression Table */
142680 -#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
142681 -       U32 name##hb[maxSymbolValue + 1];              \
142682 -       void *name##hv = &(name##hb);                  \
142683 -       HUF_CElt *name = (HUF_CElt *)(name##hv) /* no final ; */
142685 -/* static allocation of HUF's DTable */
142686 -typedef U32 HUF_DTable;
142687 -#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1 << (maxTableLog)))
142688 -#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = {((U32)((maxTableLog)-1) * 0x01000001)}
142689 -#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = {((U32)(maxTableLog)*0x01000001)}
142691 -/* The workspace must have alignment at least 4 and be at least this large */
142692 -#define HUF_COMPRESS_WORKSPACE_SIZE (6 << 10)
142693 -#define HUF_COMPRESS_WORKSPACE_SIZE_U32 (HUF_COMPRESS_WORKSPACE_SIZE / sizeof(U32))
142695 -/* The workspace must have alignment at least 4 and be at least this large */
142696 -#define HUF_DECOMPRESS_WORKSPACE_SIZE (3 << 10)
142697 -#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
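These two size constants pair with the _wksp entry points declared just below; as a quick illustration, a hedged sketch of a one-shot decompression call (the wrapper name is hypothetical, error handling is elided, and the X4 table is used because it is large enough for either decoder):

/* Illustrative only. */
static size_t example_huf_decompress4X(void *dst, size_t dstSize,
				       const void *cSrc, size_t cSrcSize)
{
	HUF_CREATE_STATIC_DTABLEX4(dt, HUF_TABLELOG_MAX);
	U32 wksp[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];

	return HUF_decompress4X_DCtx_wksp(dt, dst, dstSize, cSrc, cSrcSize,
					  wksp, sizeof(wksp));
}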
142699 -/* ****************************************
142700 -*  Advanced decompression functions
142701 -******************************************/
142702 -size_t HUF_decompress4X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize); /**< decodes RLE and uncompressed */
142703 -size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
142704 -                               size_t workspaceSize);                                                         /**< considers RLE and uncompressed as errors */
142705 -size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
142706 -                                  size_t workspaceSize); /**< single-symbol decoder */
142707 -size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
142708 -                                  size_t workspaceSize); /**< double-symbols decoder */
142710 -/* ****************************************
142711 -*  HUF detailed API
142712 -******************************************/
142714 -HUF_compress() does the following:
142715 -1. count symbol occurrences from source[] into table count[] using FSE_count()
142716 -2. (optional) refine tableLog using HUF_optimalTableLog()
142717 -3. build Huffman table from count using HUF_buildCTable()
142718 -4. save Huffman table to memory buffer using HUF_writeCTable_wksp()
142719 -5. encode the data stream using HUF_compress4X_usingCTable()
142721 -The following API allows targeting specific sub-functions for advanced tasks.
142722 -For example, it's possible to compress several blocks using the same 'CTable',
142723 -or to save and regenerate 'CTable' using external methods; the full sequence is sketched below.
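A hedged sketch of that five-step sequence, assuming FSE_count_wksp() from fse.h for step 1; error checks are elided, the stack buffers are for illustration only, and the workspace reuse across steps is simplified:

/* Illustrative only; mirrors steps 1-5 above. */
static size_t example_huf_compress4X(void *dst, size_t dstSize,
				     const void *src, size_t srcSize,
				     void *wksp, size_t wkspSize)
{
	U32 count[HUF_SYMBOLVALUE_MAX + 1];
	unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
	unsigned huffLog = HUF_TABLELOG_DEFAULT;
	HUF_CREATE_STATIC_CTABLE(ctable, HUF_SYMBOLVALUE_MAX);
	size_t maxBits, hSize, cSize;

	FSE_count_wksp(count, &maxSymbolValue,
		       (const BYTE *)src, srcSize, (U32 *)wksp);      /* 1. count */
	huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); /* 2. refine */
	maxBits = HUF_buildCTable_wksp(ctable, count, maxSymbolValue,
				       huffLog, wksp, wkspSize);      /* 3. build */
	hSize = HUF_writeCTable_wksp(dst, dstSize, ctable, maxSymbolValue,
				     (unsigned)maxBits, wksp, wkspSize); /* 4. save */
	cSize = HUF_compress4X_usingCTable((BYTE *)dst + hSize, dstSize - hSize,
					   src, srcSize, ctable);     /* 5. encode */
	return hSize + cSize;
}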
142725 -/* FSE_count() : find it within "fse.h" */
142726 -unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
142727 -typedef struct HUF_CElt_s HUF_CElt; /* incomplete type */
142728 -size_t HUF_writeCTable_wksp(void *dst, size_t maxDstSize, const HUF_CElt *CTable, unsigned maxSymbolValue, unsigned huffLog, void *workspace, size_t workspaceSize);
142729 -size_t HUF_compress4X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable);
142731 -typedef enum {
142732 -       HUF_repeat_none,  /**< Cannot use the previous table */
142733 -       HUF_repeat_check, /**< Can use the previous table but it must be checked.
142734 -                            Note : The previous table must have been constructed by HUF_compress{1,4}X_repeat */
142735 -       HUF_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
142736 -} HUF_repeat;
142737 -/** HUF_compress4X_repeat() :
142738 -*   Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
142739 -*   If it uses hufTable it does not modify hufTable or repeat.
142740 -*   If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
142741 -*   If preferRepeat then the old table will always be used if valid. */
142742 -size_t HUF_compress4X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
142743 -                            size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat,
142744 -                            int preferRepeat); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
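The repeat flow carries one table and one HUF_repeat flag across consecutive blocks; a minimal sketch under assumed names (illustrative only, return values unchecked):

/* First call populates hufTable and updates rep; the second call may then
 * reuse the table when rep != HUF_repeat_none. */
static void example_repeat_flow(void *dst, size_t dstSize,
				const void *blk1, size_t n1,
				const void *blk2, size_t n2,
				void *wksp, size_t wkspSize)
{
	HUF_CREATE_STATIC_CTABLE(hufTable, HUF_SYMBOLVALUE_MAX);
	HUF_repeat rep = HUF_repeat_none;

	(void)HUF_compress4X_repeat(dst, dstSize, blk1, n1,
				    HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT,
				    wksp, wkspSize, hufTable, &rep,
				    0 /* preferRepeat */);
	(void)HUF_compress4X_repeat(dst, dstSize, blk2, n2,
				    HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT,
				    wksp, wkspSize, hufTable, &rep,
				    1 /* preferRepeat */);
}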
142746 -/** HUF_buildCTable_wksp() :
142747 - *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
142748 - *  `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned.
142749 - */
142750 -size_t HUF_buildCTable_wksp(HUF_CElt *tree, const U32 *count, U32 maxSymbolValue, U32 maxNbBits, void *workSpace, size_t wkspSize);
142752 -/*! HUF_readStats() :
142753 -       Read compact Huffman tree, saved by HUF_writeCTable().
142754 -       `huffWeight` is destination buffer.
142755 -       @return : size read from `src`, or an error code.
142756 -       Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */
142757 -size_t HUF_readStats_wksp(BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize,
142758 -                         void *workspace, size_t workspaceSize);
142760 -/** HUF_readCTable() :
142761 -*   Loading a CTable saved with HUF_writeCTable() */
142762 -size_t HUF_readCTable_wksp(HUF_CElt *CTable, unsigned maxSymbolValue, const void *src, size_t srcSize, void *workspace, size_t workspaceSize);
142765 -HUF_decompress() does the following:
142766 -1. select the decompression algorithm (X2, X4) based on pre-computed heuristics
142767 -2. build Huffman table from the saved description, using HUF_readDTableXn()
142768 -3. decode 1 or 4 segments in parallel using HUF_decompress{1,4}Xn_usingDTable(); the sequence is sketched below
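A hedged sketch of those three steps using the declarations that follow; in-tree callers go through HUF_decompress4X_DCtx_wksp() instead, and the X4-sized table is assumed here because it is large enough for either decoder:

/* Illustrative only. */
static size_t example_decompress_steps(void *dst, size_t dstSize,
				       const void *cSrc, size_t cSrcSize,
				       void *wksp, size_t wkspSize)
{
	HUF_CREATE_STATIC_DTABLEX4(dt, HUF_TABLELOG_MAX);
	U32 const algo = HUF_selectDecoder(dstSize, cSrcSize);  /* 1. select */
	size_t const hSize = algo
		? HUF_readDTableX4_wksp(dt, cSrc, cSrcSize, wksp, wkspSize)
		: HUF_readDTableX2_wksp(dt, cSrc, cSrcSize, wksp, wkspSize); /* 2. build */
	if (HUF_isError(hSize))
		return hSize;
	return HUF_decompress4X_usingDTable(dst, dstSize,       /* 3. decode */
					    (const BYTE *)cSrc + hSize,
					    cSrcSize - hSize, dt);
}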
142771 -/** HUF_selectDecoder() :
142772 -*   Tells which decoder is likely to decode faster,
142773 -*   based on a set of pre-determined metrics.
142774 -*   @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
142775 -*   Assumption : 0 < cSrcSize < dstSize <= 128 KB */
142776 -U32 HUF_selectDecoder(size_t dstSize, size_t cSrcSize);
142778 -size_t HUF_readDTableX2_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize);
142779 -size_t HUF_readDTableX4_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize);
142781 -size_t HUF_decompress4X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
142782 -size_t HUF_decompress4X2_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
142783 -size_t HUF_decompress4X4_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
142785 -/* single stream variants */
142787 -size_t HUF_compress1X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
142788 -                          size_t wkspSize); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
142789 -size_t HUF_compress1X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable);
142790 -/** HUF_compress1X_repeat() :
142791 -*   Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
142792 -*   If it uses hufTable it does not modify hufTable or repeat.
142793 -*   If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
142794 -*   If preferRepeat then the old table will always be used if valid. */
142795 -size_t HUF_compress1X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
142796 -                            size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat,
142797 -                            int preferRepeat); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
142799 -size_t HUF_decompress1X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize);
142800 -size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
142801 -                                  size_t workspaceSize); /**< single-symbol decoder */
142802 -size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
142803 -                                  size_t workspaceSize); /**< double-symbols decoder */
142805 -size_t HUF_decompress1X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize,
142806 -                                   const HUF_DTable *DTable); /**< automatic selection of single- or double-symbol decoder, based on DTable */
142807 -size_t HUF_decompress1X2_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
142808 -size_t HUF_decompress1X4_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
142810 -#endif /* HUF_H_298734234 */
142811 diff --git a/lib/zstd/huf_compress.c b/lib/zstd/huf_compress.c
142812 deleted file mode 100644
142813 index fd32838c185f..000000000000
142814 --- a/lib/zstd/huf_compress.c
142815 +++ /dev/null
142816 @@ -1,773 +0,0 @@
142818 - * Huffman encoder, part of New Generation Entropy library
142819 - * Copyright (C) 2013-2016, Yann Collet.
142821 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
142823 - * Redistribution and use in source and binary forms, with or without
142824 - * modification, are permitted provided that the following conditions are
142825 - * met:
142827 - *   * Redistributions of source code must retain the above copyright
142828 - * notice, this list of conditions and the following disclaimer.
142829 - *   * Redistributions in binary form must reproduce the above
142830 - * copyright notice, this list of conditions and the following disclaimer
142831 - * in the documentation and/or other materials provided with the
142832 - * distribution.
142834 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
142835 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
142836 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
142837 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
142838 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
142839 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
142840 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
142841 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
142842 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
142843 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
142844 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
142846 - * This program is free software; you can redistribute it and/or modify it under
142847 - * the terms of the GNU General Public License version 2 as published by the
142848 - * Free Software Foundation. This program is dual-licensed; you may select
142849 - * either version 2 of the GNU General Public License ("GPL") or BSD license
142850 - * ("BSD").
142852 - * You can contact the author at :
142853 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
142854 - */
142856 -/* **************************************************************
142857 -*  Includes
142858 -****************************************************************/
142859 -#include "bitstream.h"
142860 -#include "fse.h" /* header compression */
142861 -#include "huf.h"
142862 -#include <linux/kernel.h>
142863 -#include <linux/string.h> /* memcpy, memset */
142865 -/* **************************************************************
142866 -*  Error Management
142867 -****************************************************************/
142868 -#define HUF_STATIC_ASSERT(c)                                   \
142869 -       {                                                      \
142870 -               enum { HUF_static_assert = 1 / (int)(!!(c)) }; \
142871 -       } /* use only *after* variable declarations */
142872 -#define CHECK_V_F(e, f)     \
142873 -       size_t const e = f; \
142874 -       if (ERR_isError(e)) \
142875 -       return f
142876 -#define CHECK_F(f)                        \
142877 -       {                                 \
142878 -               CHECK_V_F(_var_err__, f); \
142879 -       }
142881 -/* **************************************************************
142882 -*  Utils
142883 -****************************************************************/
142884 -unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
142886 -       return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
142889 -/* *******************************************************
142890 -*  HUF : Huffman block compression
142891 -*********************************************************/
142892 -/* HUF_compressWeights() :
142893 - * Same as FSE_compress(), but dedicated to huff0's weights compression.
142894 - * The use case needs much less stack memory.
142895 - * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
142896 - */
142897 -#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
142898 -size_t HUF_compressWeights_wksp(void *dst, size_t dstSize, const void *weightTable, size_t wtSize, void *workspace, size_t workspaceSize)
142900 -       BYTE *const ostart = (BYTE *)dst;
142901 -       BYTE *op = ostart;
142902 -       BYTE *const oend = ostart + dstSize;
142904 -       U32 maxSymbolValue = HUF_TABLELOG_MAX;
142905 -       U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
142907 -       FSE_CTable *CTable;
142908 -       U32 *count;
142909 -       S16 *norm;
142910 -       size_t spaceUsed32 = 0;
142912 -       HUF_STATIC_ASSERT(sizeof(FSE_CTable) == sizeof(U32));
142914 -       CTable = (FSE_CTable *)((U32 *)workspace + spaceUsed32);
142915 -       spaceUsed32 += FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX);
142916 -       count = (U32 *)workspace + spaceUsed32;
142917 -       spaceUsed32 += HUF_TABLELOG_MAX + 1;
142918 -       norm = (S16 *)((U32 *)workspace + spaceUsed32);
142919 -       spaceUsed32 += ALIGN(sizeof(S16) * (HUF_TABLELOG_MAX + 1), sizeof(U32)) >> 2;
142921 -       if ((spaceUsed32 << 2) > workspaceSize)
142922 -               return ERROR(tableLog_tooLarge);
142923 -       workspace = (U32 *)workspace + spaceUsed32;
142924 -       workspaceSize -= (spaceUsed32 << 2);
142926 -       /* init conditions */
142927 -       if (wtSize <= 1)
142928 -               return 0; /* Not compressible */
142930 -       /* Scan input and build symbol stats */
142931 -       {
142932 -               CHECK_V_F(maxCount, FSE_count_simple(count, &maxSymbolValue, weightTable, wtSize));
142933 -               if (maxCount == wtSize)
142934 -                       return 1; /* only a single symbol in src : rle */
142935 -               if (maxCount == 1)
142936 -                       return 0; /* each symbol appears at most once => not compressible */
142937 -       }
142939 -       tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
142940 -       CHECK_F(FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue));
142942 -       /* Write table description header */
142943 -       {
142944 -               CHECK_V_F(hSize, FSE_writeNCount(op, oend - op, norm, maxSymbolValue, tableLog));
142945 -               op += hSize;
142946 -       }
142948 -       /* Compress */
142949 -       CHECK_F(FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, workspace, workspaceSize));
142950 -       {
142951 -               CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable));
142952 -               if (cSize == 0)
142953 -                       return 0; /* not enough space for compressed data */
142954 -               op += cSize;
142955 -       }
142957 -       return op - ostart;
142960 -struct HUF_CElt_s {
142961 -       U16 val;
142962 -       BYTE nbBits;
142963 -}; /* typedef'd to HUF_CElt within "huf.h" */
142965 -/*! HUF_writeCTable_wksp() :
142966 -       `CTable` : Huffman tree to save, using huf representation.
142967 -       @return : size of saved CTable */
142968 -size_t HUF_writeCTable_wksp(void *dst, size_t maxDstSize, const HUF_CElt *CTable, U32 maxSymbolValue, U32 huffLog, void *workspace, size_t workspaceSize)
142970 -       BYTE *op = (BYTE *)dst;
142971 -       U32 n;
142973 -       BYTE *bitsToWeight;
142974 -       BYTE *huffWeight;
142975 -       size_t spaceUsed32 = 0;
142977 -       bitsToWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
142978 -       spaceUsed32 += ALIGN(HUF_TABLELOG_MAX + 1, sizeof(U32)) >> 2;
142979 -       huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
142980 -       spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX, sizeof(U32)) >> 2;
142982 -       if ((spaceUsed32 << 2) > workspaceSize)
142983 -               return ERROR(tableLog_tooLarge);
142984 -       workspace = (U32 *)workspace + spaceUsed32;
142985 -       workspaceSize -= (spaceUsed32 << 2);
142987 -       /* check conditions */
142988 -       if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
142989 -               return ERROR(maxSymbolValue_tooLarge);
142991 -       /* convert to weight */
142992 -       bitsToWeight[0] = 0;
142993 -       for (n = 1; n < huffLog + 1; n++)
142994 -               bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
142995 -       for (n = 0; n < maxSymbolValue; n++)
142996 -               huffWeight[n] = bitsToWeight[CTable[n].nbBits];
142998 -       /* attempt weights compression by FSE */
142999 -       {
143000 -               CHECK_V_F(hSize, HUF_compressWeights_wksp(op + 1, maxDstSize - 1, huffWeight, maxSymbolValue, workspace, workspaceSize));
143001 -               if ((hSize > 1) & (hSize < maxSymbolValue / 2)) { /* FSE compressed */
143002 -                       op[0] = (BYTE)hSize;
143003 -                       return hSize + 1;
143004 -               }
143005 -       }
143007 -       /* write raw values as 4-bits (max : 15) */
143008 -       if (maxSymbolValue > (256 - 128))
143009 -               return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */
143010 -       if (((maxSymbolValue + 1) / 2) + 1 > maxDstSize)
143011 -               return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */
143012 -       op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue - 1));
143013 -       huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */
143014 -       for (n = 0; n < maxSymbolValue; n += 2)
143015 -               op[(n / 2) + 1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n + 1]);
143016 -       return ((maxSymbolValue + 1) / 2) + 1;
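A worked instance of the raw 4-bit fallback above, with assumed numbers: for maxSymbolValue == 6 the header byte is 128 + (6 - 1) = 133, the six weights are packed two per byte into op[1..3] as (huffWeight[n] << 4) + huffWeight[n + 1], and the function returns ((6 + 1) / 2) + 1 = 4 bytes written.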
143019 -size_t HUF_readCTable_wksp(HUF_CElt *CTable, U32 maxSymbolValue, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
143021 -       U32 *rankVal;
143022 -       BYTE *huffWeight;
143023 -       U32 tableLog = 0;
143024 -       U32 nbSymbols = 0;
143025 -       size_t readSize;
143026 -       size_t spaceUsed32 = 0;
143028 -       rankVal = (U32 *)workspace + spaceUsed32;
143029 -       spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1;
143030 -       huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
143031 -       spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
143033 -       if ((spaceUsed32 << 2) > workspaceSize)
143034 -               return ERROR(tableLog_tooLarge);
143035 -       workspace = (U32 *)workspace + spaceUsed32;
143036 -       workspaceSize -= (spaceUsed32 << 2);
143038 -       /* get symbol weights */
143039 -       readSize = HUF_readStats_wksp(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize);
143040 -       if (ERR_isError(readSize))
143041 -               return readSize;
143043 -       /* check result */
143044 -       if (tableLog > HUF_TABLELOG_MAX)
143045 -               return ERROR(tableLog_tooLarge);
143046 -       if (nbSymbols > maxSymbolValue + 1)
143047 -               return ERROR(maxSymbolValue_tooSmall);
143049 -       /* Prepare base value per rank */
143050 -       {
143051 -               U32 n, nextRankStart = 0;
143052 -               for (n = 1; n <= tableLog; n++) {
143053 -                       U32 curr = nextRankStart;
143054 -                       nextRankStart += (rankVal[n] << (n - 1));
143055 -                       rankVal[n] = curr;
143056 -               }
143057 -       }
143059 -       /* fill nbBits */
143060 -       {
143061 -               U32 n;
143062 -               for (n = 0; n < nbSymbols; n++) {
143063 -                       const U32 w = huffWeight[n];
143064 -                       CTable[n].nbBits = (BYTE)(tableLog + 1 - w);
143065 -               }
143066 -       }
143068 -       /* fill val */
143069 -       {
143070 -               U16 nbPerRank[HUF_TABLELOG_MAX + 2] = {0}; /* support w=0=>n=tableLog+1 */
143071 -               U16 valPerRank[HUF_TABLELOG_MAX + 2] = {0};
143072 -               {
143073 -                       U32 n;
143074 -                       for (n = 0; n < nbSymbols; n++)
143075 -                               nbPerRank[CTable[n].nbBits]++;
143076 -               }
143077 -               /* determine starting value per rank */
143078 -               valPerRank[tableLog + 1] = 0; /* for w==0 */
143079 -               {
143080 -                       U16 min = 0;
143081 -                       U32 n;
143082 -                       for (n = tableLog; n > 0; n--) { /* start at n=tablelog <-> w=1 */
143083 -                               valPerRank[n] = min;     /* get starting value within each rank */
143084 -                               min += nbPerRank[n];
143085 -                               min >>= 1;
143086 -                       }
143087 -               }
143088 -               /* assign value within rank, symbol order */
143089 -               {
143090 -                       U32 n;
143091 -                       for (n = 0; n <= maxSymbolValue; n++)
143092 -                               CTable[n].val = valPerRank[CTable[n].nbBits]++;
143093 -               }
143094 -       }
143096 -       return readSize;
143099 -typedef struct nodeElt_s {
143100 -       U32 count;
143101 -       U16 parent;
143102 -       BYTE byte;
143103 -       BYTE nbBits;
143104 -} nodeElt;
143106 -static U32 HUF_setMaxHeight(nodeElt *huffNode, U32 lastNonNull, U32 maxNbBits)
143108 -       const U32 largestBits = huffNode[lastNonNull].nbBits;
143109 -       if (largestBits <= maxNbBits)
143110 -               return largestBits; /* early exit : no elt > maxNbBits */
143112 -       /* there are several too large elements (at least 2) */
143113 -       {
143114 -               int totalCost = 0;
143115 -               const U32 baseCost = 1 << (largestBits - maxNbBits);
143116 -               U32 n = lastNonNull;
143118 -               while (huffNode[n].nbBits > maxNbBits) {
143119 -                       totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
143120 -                       huffNode[n].nbBits = (BYTE)maxNbBits;
143121 -                       n--;
143122 -               } /* n stops at huffNode[n].nbBits <= maxNbBits */
143123 -               while (huffNode[n].nbBits == maxNbBits)
143124 -                       n--; /* n ends at the index of the smallest symbol using < maxNbBits */
143126 -               /* renorm totalCost */
143127 -               totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */
143129 -               /* repay normalized cost */
143130 -               {
143131 -                       U32 const noSymbol = 0xF0F0F0F0;
143132 -                       U32 rankLast[HUF_TABLELOG_MAX + 2];
143133 -                       int pos;
143135 -                       /* Get pos of last (smallest) symbol per rank */
143136 -                       memset(rankLast, 0xF0, sizeof(rankLast));
143137 -                       {
143138 -                               U32 currNbBits = maxNbBits;
143139 -                               for (pos = n; pos >= 0; pos--) {
143140 -                                       if (huffNode[pos].nbBits >= currNbBits)
143141 -                                               continue;
143142 -                                       currNbBits = huffNode[pos].nbBits; /* < maxNbBits */
143143 -                                       rankLast[maxNbBits - currNbBits] = pos;
143144 -                               }
143145 -                       }
143147 -                       while (totalCost > 0) {
143148 -                               U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1;
143149 -                               for (; nBitsToDecrease > 1; nBitsToDecrease--) {
143150 -                                       U32 highPos = rankLast[nBitsToDecrease];
143151 -                                       U32 lowPos = rankLast[nBitsToDecrease - 1];
143152 -                                       if (highPos == noSymbol)
143153 -                                               continue;
143154 -                                       if (lowPos == noSymbol)
143155 -                                               break;
143156 -                                       {
143157 -                                               U32 const highTotal = huffNode[highPos].count;
143158 -                                               U32 const lowTotal = 2 * huffNode[lowPos].count;
143159 -                                               if (highTotal <= lowTotal)
143160 -                                                       break;
143161 -                                       }
143162 -                               }
143163 -                               /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */
143164 -                               /* HUF_TABLELOG_MAX test just to please gcc 5+; but it should not be necessary */
143165 -                               while ((nBitsToDecrease <= HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
143166 -                                       nBitsToDecrease++;
143167 -                               totalCost -= 1 << (nBitsToDecrease - 1);
143168 -                               if (rankLast[nBitsToDecrease - 1] == noSymbol)
143169 -                                       rankLast[nBitsToDecrease - 1] = rankLast[nBitsToDecrease]; /* this rank is no longer empty */
143170 -                               huffNode[rankLast[nBitsToDecrease]].nbBits++;
143171 -                               if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */
143172 -                                       rankLast[nBitsToDecrease] = noSymbol;
143173 -                               else {
143174 -                                       rankLast[nBitsToDecrease]--;
143175 -                                       if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits - nBitsToDecrease)
143176 -                                               rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */
143177 -                               }
143178 -                       } /* while (totalCost > 0) */
143180 -       while (totalCost < 0) {                /* Sometimes, cost correction overshoots */
143181 -                               if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0
143182 -                                                                 (using maxNbBits) */
143183 -                                       while (huffNode[n].nbBits == maxNbBits)
143184 -                                               n--;
143185 -                                       huffNode[n + 1].nbBits--;
143186 -                                       rankLast[1] = n + 1;
143187 -                                       totalCost++;
143188 -                                       continue;
143189 -                               }
143190 -                               huffNode[rankLast[1] + 1].nbBits--;
143191 -                               rankLast[1]++;
143192 -                               totalCost++;
143193 -                       }
143194 -               }
143195 -       } /* there are several too large elements (at least 2) */
143197 -       return maxNbBits;
143200 -typedef struct {
143201 -       U32 base;
143202 -       U32 curr;
143203 -} rankPos;
143205 -static void HUF_sort(nodeElt *huffNode, const U32 *count, U32 maxSymbolValue)
143207 -       rankPos rank[32];
143208 -       U32 n;
143210 -       memset(rank, 0, sizeof(rank));
143211 -       for (n = 0; n <= maxSymbolValue; n++) {
143212 -               U32 r = BIT_highbit32(count[n] + 1);
143213 -               rank[r].base++;
143214 -       }
143215 -       for (n = 30; n > 0; n--)
143216 -               rank[n - 1].base += rank[n].base;
143217 -       for (n = 0; n < 32; n++)
143218 -               rank[n].curr = rank[n].base;
143219 -       for (n = 0; n <= maxSymbolValue; n++) {
143220 -               U32 const c = count[n];
143221 -               U32 const r = BIT_highbit32(c + 1) + 1;
143222 -               U32 pos = rank[r].curr++;
143223 -               while ((pos > rank[r].base) && (c > huffNode[pos - 1].count))
143224 -                       huffNode[pos] = huffNode[pos - 1], pos--;
143225 -               huffNode[pos].count = c;
143226 -               huffNode[pos].byte = (BYTE)n;
143227 -       }
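A worked pass over HUF_sort() with assumed counts {5, 1, 0, 3}: BIT_highbit32(count + 1) yields bucket ranks {2, 1, 0, 2}, the descending suffix sums over rank[].base give larger-count buckets the earlier positions, and the insertion loop orders entries within each bucket, so huffNode comes out sorted by decreasing count: 5, 3, 1, 0.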
143230 -/** HUF_buildCTable_wksp() :
143231 - *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
143232 - *  `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned.
143233 - */
143234 -#define STARTNODE (HUF_SYMBOLVALUE_MAX + 1)
143235 -typedef nodeElt huffNodeTable[2 * HUF_SYMBOLVALUE_MAX + 1 + 1];
143236 -size_t HUF_buildCTable_wksp(HUF_CElt *tree, const U32 *count, U32 maxSymbolValue, U32 maxNbBits, void *workSpace, size_t wkspSize)
143238 -       nodeElt *const huffNode0 = (nodeElt *)workSpace;
143239 -       nodeElt *const huffNode = huffNode0 + 1;
143240 -       U32 n, nonNullRank;
143241 -       int lowS, lowN;
143242 -       U16 nodeNb = STARTNODE;
143243 -       U32 nodeRoot;
143245 -       /* safety checks */
143246 -       if (wkspSize < sizeof(huffNodeTable))
143247 -               return ERROR(GENERIC); /* workSpace is not large enough */
143248 -       if (maxNbBits == 0)
143249 -               maxNbBits = HUF_TABLELOG_DEFAULT;
143250 -       if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
143251 -               return ERROR(GENERIC);
143252 -       memset(huffNode0, 0, sizeof(huffNodeTable));
143254 -       /* sort, decreasing order */
143255 -       HUF_sort(huffNode, count, maxSymbolValue);
143257 -       /* init for parents */
143258 -       nonNullRank = maxSymbolValue;
143259 -       while (huffNode[nonNullRank].count == 0)
143260 -               nonNullRank--;
143261 -       lowS = nonNullRank;
143262 -       nodeRoot = nodeNb + lowS - 1;
143263 -       lowN = nodeNb;
143264 -       huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS - 1].count;
143265 -       huffNode[lowS].parent = huffNode[lowS - 1].parent = nodeNb;
143266 -       nodeNb++;
143267 -       lowS -= 2;
143268 -       for (n = nodeNb; n <= nodeRoot; n++)
143269 -               huffNode[n].count = (U32)(1U << 30);
143270 -       huffNode0[0].count = (U32)(1U << 31); /* fake entry, strong barrier */
143272 -       /* create parents */
143273 -       while (nodeNb <= nodeRoot) {
143274 -               U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
143275 -               U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
143276 -               huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
143277 -               huffNode[n1].parent = huffNode[n2].parent = nodeNb;
143278 -               nodeNb++;
143279 -       }
143281 -       /* distribute weights (unlimited tree height) */
143282 -       huffNode[nodeRoot].nbBits = 0;
143283 -       for (n = nodeRoot - 1; n >= STARTNODE; n--)
143284 -               huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1;
143285 -       for (n = 0; n <= nonNullRank; n++)
143286 -               huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1;
143288 -       /* enforce maxTableLog */
143289 -       maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits);
143291 -       /* fill result into tree (val, nbBits) */
143292 -       {
143293 -               U16 nbPerRank[HUF_TABLELOG_MAX + 1] = {0};
143294 -               U16 valPerRank[HUF_TABLELOG_MAX + 1] = {0};
143295 -               if (maxNbBits > HUF_TABLELOG_MAX)
143296 -                       return ERROR(GENERIC); /* check fit into table */
143297 -               for (n = 0; n <= nonNullRank; n++)
143298 -                       nbPerRank[huffNode[n].nbBits]++;
143299 -               /* determine starting value per rank */
143300 -               {
143301 -                       U16 min = 0;
143302 -                       for (n = maxNbBits; n > 0; n--) {
143303 -                               valPerRank[n] = min; /* get starting value within each rank */
143304 -                               min += nbPerRank[n];
143305 -                               min >>= 1;
143306 -                       }
143307 -               }
143308 -               for (n = 0; n <= maxSymbolValue; n++)
143309 -                       tree[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */
143310 -               for (n = 0; n <= maxSymbolValue; n++)
143311 -                       tree[n].val = valPerRank[tree[n].nbBits]++; /* assign value within rank, symbol order */
143312 -       }
143314 -       return maxNbBits;
143317 -static size_t HUF_estimateCompressedSize(HUF_CElt *CTable, const unsigned *count, unsigned maxSymbolValue)
143319 -       size_t nbBits = 0;
143320 -       int s;
143321 -       for (s = 0; s <= (int)maxSymbolValue; ++s) {
143322 -               nbBits += CTable[s].nbBits * count[s];
143323 -       }
143324 -       return nbBits >> 3;
143327 -static int HUF_validateCTable(const HUF_CElt *CTable, const unsigned *count, unsigned maxSymbolValue)
143329 -       int bad = 0;
143330 -       int s;
143331 -       for (s = 0; s <= (int)maxSymbolValue; ++s) {
143332 -               bad |= (count[s] != 0) & (CTable[s].nbBits == 0);
143333 -       }
143334 -       return !bad;
143337 -static void HUF_encodeSymbol(BIT_CStream_t *bitCPtr, U32 symbol, const HUF_CElt *CTable)
143339 -       BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
143342 -size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
143344 -#define HUF_FLUSHBITS(s)  BIT_flushBits(s)
143346 -#define HUF_FLUSHBITS_1(stream)                                            \
143347 -       if (sizeof((stream)->bitContainer) * 8 < HUF_TABLELOG_MAX * 2 + 7) \
143348 -       HUF_FLUSHBITS(stream)
143350 -#define HUF_FLUSHBITS_2(stream)                                            \
143351 -       if (sizeof((stream)->bitContainer) * 8 < HUF_TABLELOG_MAX * 4 + 7) \
143352 -       HUF_FLUSHBITS(stream)
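Why the two guarded variants exist: both conditions are compile-time constants. With HUF_TABLELOG_MAX == 12, HUF_FLUSHBITS_1 only flushes when the bit container is narrower than 2 * 12 + 7 = 31 bits and HUF_FLUSHBITS_2 when narrower than 4 * 12 + 7 = 55 bits, so on a 64-bit container both compile away and on a 32-bit container only HUF_FLUSHBITS_2 survives; the unconditional HUF_FLUSHBITS() in the loop below then drains the container every four symbols.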
143354 -size_t HUF_compress1X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable)
143356 -       const BYTE *ip = (const BYTE *)src;
143357 -       BYTE *const ostart = (BYTE *)dst;
143358 -       BYTE *const oend = ostart + dstSize;
143359 -       BYTE *op = ostart;
143360 -       size_t n;
143361 -       BIT_CStream_t bitC;
143363 -       /* init */
143364 -       if (dstSize < 8)
143365 -               return 0; /* not enough space to compress */
143366 -       {
143367 -               size_t const initErr = BIT_initCStream(&bitC, op, oend - op);
143368 -               if (HUF_isError(initErr))
143369 -                       return 0;
143370 -       }
143372 -       n = srcSize & ~3; /* n = srcSize rounded down to a multiple of 4 */
143373 -       switch (srcSize & 3) {
143374 -       case 3: HUF_encodeSymbol(&bitC, ip[n + 2], CTable); HUF_FLUSHBITS_2(&bitC);
143375 -               fallthrough;
143376 -       case 2: HUF_encodeSymbol(&bitC, ip[n + 1], CTable); HUF_FLUSHBITS_1(&bitC);
143377 -               fallthrough;
143378 -       case 1: HUF_encodeSymbol(&bitC, ip[n + 0], CTable); HUF_FLUSHBITS(&bitC);
143379 -               fallthrough;
143380 -       case 0:
143381 -       default:;
143382 -       }
143384 -       for (; n > 0; n -= 4) { /* note : n&3==0 at this stage */
143385 -               HUF_encodeSymbol(&bitC, ip[n - 1], CTable);
143386 -               HUF_FLUSHBITS_1(&bitC);
143387 -               HUF_encodeSymbol(&bitC, ip[n - 2], CTable);
143388 -               HUF_FLUSHBITS_2(&bitC);
143389 -               HUF_encodeSymbol(&bitC, ip[n - 3], CTable);
143390 -               HUF_FLUSHBITS_1(&bitC);
143391 -               HUF_encodeSymbol(&bitC, ip[n - 4], CTable);
143392 -               HUF_FLUSHBITS(&bitC);
143393 -       }
143395 -       return BIT_closeCStream(&bitC);
143398 -size_t HUF_compress4X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable)
143400 -       size_t const segmentSize = (srcSize + 3) / 4; /* first 3 segments */
143401 -       const BYTE *ip = (const BYTE *)src;
143402 -       const BYTE *const iend = ip + srcSize;
143403 -       BYTE *const ostart = (BYTE *)dst;
143404 -       BYTE *const oend = ostart + dstSize;
143405 -       BYTE *op = ostart;
143407 -       if (dstSize < 6 + 1 + 1 + 1 + 8)
143408 -               return 0; /* minimum space to compress successfully */
143409 -       if (srcSize < 12)
143410 -               return 0; /* no saving possible : too small input */
143411 -       op += 6;          /* jumpTable */
143413 -       {
143414 -               CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable));
143415 -               if (cSize == 0)
143416 -                       return 0;
143417 -               ZSTD_writeLE16(ostart, (U16)cSize);
143418 -               op += cSize;
143419 -       }
143421 -       ip += segmentSize;
143422 -       {
143423 -               CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable));
143424 -               if (cSize == 0)
143425 -                       return 0;
143426 -               ZSTD_writeLE16(ostart + 2, (U16)cSize);
143427 -               op += cSize;
143428 -       }
143430 -       ip += segmentSize;
143431 -       {
143432 -               CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable));
143433 -               if (cSize == 0)
143434 -                       return 0;
143435 -               ZSTD_writeLE16(ostart + 4, (U16)cSize);
143436 -               op += cSize;
143437 -       }
143439 -       ip += segmentSize;
143440 -       {
143441 -               CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, iend - ip, CTable));
143442 -               if (cSize == 0)
143443 -                       return 0;
143444 -               op += cSize;
143445 -       }
143447 -       return op - ostart;
143450 -static size_t HUF_compressCTable_internal(BYTE *const ostart, BYTE *op, BYTE *const oend, const void *src, size_t srcSize, unsigned singleStream,
143451 -                                         const HUF_CElt *CTable)
143453 -       size_t const cSize =
143454 -           singleStream ? HUF_compress1X_usingCTable(op, oend - op, src, srcSize, CTable) : HUF_compress4X_usingCTable(op, oend - op, src, srcSize, CTable);
143455 -       if (HUF_isError(cSize)) {
143456 -               return cSize;
143457 -       }
143458 -       if (cSize == 0) {
143459 -               return 0;
143460 -       } /* uncompressible */
143461 -       op += cSize;
143462 -       /* check compressibility */
143463 -       if ((size_t)(op - ostart) >= srcSize - 1) {
143464 -               return 0;
143465 -       }
143466 -       return op - ostart;
143469 -/* `workSpace` must be a table of at least 1024 unsigned */
143470 -static size_t HUF_compress_internal(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog,
143471 -                                   unsigned singleStream, void *workSpace, size_t wkspSize, HUF_CElt *oldHufTable, HUF_repeat *repeat, int preferRepeat)
143473 -       BYTE *const ostart = (BYTE *)dst;
143474 -       BYTE *const oend = ostart + dstSize;
143475 -       BYTE *op = ostart;
143477 -       U32 *count;
143478 -       size_t const countSize = sizeof(U32) * (HUF_SYMBOLVALUE_MAX + 1);
143479 -       HUF_CElt *CTable;
143480 -       size_t const CTableSize = sizeof(HUF_CElt) * (HUF_SYMBOLVALUE_MAX + 1);
143482 -       /* checks & inits */
143483 -       if (wkspSize < sizeof(huffNodeTable) + countSize + CTableSize)
143484 -               return ERROR(GENERIC);
143485 -       if (!srcSize)
143486 -               return 0; /* Uncompressed (note : 1 means rle, so first byte must be correct) */
143487 -       if (!dstSize)
143488 -               return 0; /* cannot fit within dst budget */
143489 -       if (srcSize > HUF_BLOCKSIZE_MAX)
143490 -               return ERROR(srcSize_wrong); /* curr block size limit */
143491 -       if (huffLog > HUF_TABLELOG_MAX)
143492 -               return ERROR(tableLog_tooLarge);
143493 -       if (!maxSymbolValue)
143494 -               maxSymbolValue = HUF_SYMBOLVALUE_MAX;
143495 -       if (!huffLog)
143496 -               huffLog = HUF_TABLELOG_DEFAULT;
143498 -       count = (U32 *)workSpace;
143499 -       workSpace = (BYTE *)workSpace + countSize;
143500 -       wkspSize -= countSize;
143501 -       CTable = (HUF_CElt *)workSpace;
143502 -       workSpace = (BYTE *)workSpace + CTableSize;
143503 -       wkspSize -= CTableSize;
143505 -       /* Heuristic : If we don't need to check the validity of the old table, use the old table for small inputs */
143506 -       if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
143507 -               return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
143508 -       }
143510 -       /* Scan input and build symbol stats */
143511 -       {
143512 -               CHECK_V_F(largest, FSE_count_wksp(count, &maxSymbolValue, (const BYTE *)src, srcSize, (U32 *)workSpace));
143513 -               if (largest == srcSize) {
143514 -                       *ostart = ((const BYTE *)src)[0];
143515 -                       return 1;
143516 -               } /* single symbol, rle */
143517 -               if (largest <= (srcSize >> 7) + 1)
143518 -                       return 0; /* Fast heuristic : not compressible enough */
143519 -       }
143521 -       /* Check validity of previous table */
143522 -       if (repeat && *repeat == HUF_repeat_check && !HUF_validateCTable(oldHufTable, count, maxSymbolValue)) {
143523 -               *repeat = HUF_repeat_none;
143524 -       }
143525 -       /* Heuristic : use existing table for small inputs */
143526 -       if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
143527 -               return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
143528 -       }
143530 -       /* Build Huffman Tree */
143531 -       huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
143532 -       {
143533 -               CHECK_V_F(maxBits, HUF_buildCTable_wksp(CTable, count, maxSymbolValue, huffLog, workSpace, wkspSize));
143534 -               huffLog = (U32)maxBits;
143535 -               /* Zero the unused symbols so we can check it for validity */
143536 -               memset(CTable + maxSymbolValue + 1, 0, CTableSize - (maxSymbolValue + 1) * sizeof(HUF_CElt));
143537 -       }
143539 -       /* Write table description header */
143540 -       {
143541 -               CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, CTable, maxSymbolValue, huffLog, workSpace, wkspSize));
143542 -               /* Check if using the previous table will be beneficial */
143543 -               if (repeat && *repeat != HUF_repeat_none) {
143544 -                       size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, count, maxSymbolValue);
143545 -                       size_t const newSize = HUF_estimateCompressedSize(CTable, count, maxSymbolValue);
143546 -                       if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
143547 -                               return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
143548 -                       }
143549 -               }
143550 -               /* Use the new table */
143551 -               if (hSize + 12ul >= srcSize) {
143552 -                       return 0;
143553 -               }
143554 -               op += hSize;
143555 -               if (repeat) {
143556 -                       *repeat = HUF_repeat_none;
143557 -               }
143558 -               if (oldHufTable) {
143559 -                       memcpy(oldHufTable, CTable, CTableSize);
143560 -               } /* Save the new table */
143561 -       }
143562 -       return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, CTable);
143565 -size_t HUF_compress1X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
143566 -                          size_t wkspSize)
143568 -       return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, NULL, NULL, 0);
143571 -size_t HUF_compress1X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
143572 -                            size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, int preferRepeat)
143574 -       return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, hufTable, repeat,
143575 -                                    preferRepeat);
143578 -size_t HUF_compress4X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
143579 -                          size_t wkspSize)
143581 -       return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, NULL, NULL, 0);
143584 -size_t HUF_compress4X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
143585 -                            size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, int preferRepeat)
143587 -       return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, hufTable, repeat,
143588 -                                    preferRepeat);
143590 diff --git a/lib/zstd/huf_decompress.c b/lib/zstd/huf_decompress.c
143591 deleted file mode 100644
143592 index 6526482047dc..000000000000
143593 --- a/lib/zstd/huf_decompress.c
143594 +++ /dev/null
143595 @@ -1,960 +0,0 @@
143597 - * Huffman decoder, part of New Generation Entropy library
143598 - * Copyright (C) 2013-2016, Yann Collet.
143600 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
143602 - * Redistribution and use in source and binary forms, with or without
143603 - * modification, are permitted provided that the following conditions are
143604 - * met:
143606 - *   * Redistributions of source code must retain the above copyright
143607 - * notice, this list of conditions and the following disclaimer.
143608 - *   * Redistributions in binary form must reproduce the above
143609 - * copyright notice, this list of conditions and the following disclaimer
143610 - * in the documentation and/or other materials provided with the
143611 - * distribution.
143613 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
143614 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
143615 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
143616 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
143617 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
143618 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
143619 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
143620 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
143621 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
143622 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
143623 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
143625 - * This program is free software; you can redistribute it and/or modify it under
143626 - * the terms of the GNU General Public License version 2 as published by the
143627 - * Free Software Foundation. This program is dual-licensed; you may select
143628 - * either version 2 of the GNU General Public License ("GPL") or BSD license
143629 - * ("BSD").
143631 - * You can contact the author at :
143632 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
143633 - */
143635 -/* **************************************************************
143636 -*  Compiler specifics
143637 -****************************************************************/
143638 -#define FORCE_INLINE static __always_inline
143640 -/* **************************************************************
143641 -*  Dependencies
143642 -****************************************************************/
143643 -#include "bitstream.h" /* BIT_* */
143644 -#include "fse.h"       /* header compression */
143645 -#include "huf.h"
143646 -#include <linux/compiler.h>
143647 -#include <linux/kernel.h>
143648 -#include <linux/string.h> /* memcpy, memset */
143650 -/* **************************************************************
143651 -*  Error Management
143652 -****************************************************************/
143653 -#define HUF_STATIC_ASSERT(c)                                   \
143654 -       {                                                      \
143655 -               enum { HUF_static_assert = 1 / (int)(!!(c)) }; \
143656 -       } /* use only *after* variable declarations */
143658 -/*-***************************/
143659 -/*  generic DTableDesc       */
143660 -/*-***************************/
143662 -typedef struct {
143663 -       BYTE maxTableLog;
143664 -       BYTE tableType;
143665 -       BYTE tableLog;
143666 -       BYTE reserved;
143667 -} DTableDesc;
143669 -static DTableDesc HUF_getDTableDesc(const HUF_DTable *table)
143671 -       DTableDesc dtd;
143672 -       memcpy(&dtd, table, sizeof(dtd));
143673 -       return dtd;
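
HUF_getDTableDesc() works because the first HUF_DTable cell doubles as a four-byte header; the memcpy round-trip, rather than a pointer cast, keeps the access free of strict-aliasing trouble. A self-contained sketch of the same trick (names local to the sketch):

    #include <stdint.h>
    #include <string.h>

    /* Four one-byte fields packed into the table's first 32-bit cell. */
    struct desc {
            uint8_t maxTableLog, tableType, tableLog, reserved;
    };

    static struct desc read_desc(const uint32_t *table)
    {
            struct desc d;
            memcpy(&d, table, sizeof(d)); /* sizeof(d) == sizeof(*table) == 4 */
            return d;
    }
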
143676 -/*-***************************/
143677 -/*  single-symbol decoding   */
143678 -/*-***************************/
143680 -typedef struct {
143681 -       BYTE byte;
143682 -       BYTE nbBits;
143683 -} HUF_DEltX2; /* single-symbol decoding */
143685 -size_t HUF_readDTableX2_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
143687 -       U32 tableLog = 0;
143688 -       U32 nbSymbols = 0;
143689 -       size_t iSize;
143690 -       void *const dtPtr = DTable + 1;
143691 -       HUF_DEltX2 *const dt = (HUF_DEltX2 *)dtPtr;
143693 -       U32 *rankVal;
143694 -       BYTE *huffWeight;
143695 -       size_t spaceUsed32 = 0;
143697 -       rankVal = (U32 *)workspace + spaceUsed32;
143698 -       spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1;
143699 -       huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
143700 -       spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
143702 -       if ((spaceUsed32 << 2) > workspaceSize)
143703 -               return ERROR(tableLog_tooLarge);
143704 -       workspace = (U32 *)workspace + spaceUsed32;
143705 -       workspaceSize -= (spaceUsed32 << 2);
143707 -       HUF_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
143708 -       /* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... */
143710 -       iSize = HUF_readStats_wksp(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize);
143711 -       if (HUF_isError(iSize))
143712 -               return iSize;
143714 -       /* Table header */
143715 -       {
143716 -               DTableDesc dtd = HUF_getDTableDesc(DTable);
143717 -               if (tableLog > (U32)(dtd.maxTableLog + 1))
143718 -                       return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */
143719 -               dtd.tableType = 0;
143720 -               dtd.tableLog = (BYTE)tableLog;
143721 -               memcpy(DTable, &dtd, sizeof(dtd));
143722 -       }
143724 -       /* Calculate starting value for each rank */
143725 -       {
143726 -               U32 n, nextRankStart = 0;
143727 -               for (n = 1; n < tableLog + 1; n++) {
143728 -                       U32 const curr = nextRankStart;
143729 -                       nextRankStart += (rankVal[n] << (n - 1));
143730 -                       rankVal[n] = curr;
143731 -               }
143732 -       }
143734 -       /* fill DTable */
143735 -       {
143736 -               U32 n;
143737 -               for (n = 0; n < nbSymbols; n++) {
143738 -                       U32 const w = huffWeight[n];
143739 -                       U32 const length = (1 << w) >> 1;
143740 -                       U32 u;
143741 -                       HUF_DEltX2 D;
143742 -                       D.byte = (BYTE)n;
143743 -                       D.nbBits = (BYTE)(tableLog + 1 - w);
143744 -                       for (u = rankVal[w]; u < rankVal[w] + length; u++)
143745 -                               dt[u] = D;
143746 -                       rankVal[w] += length;
143747 -               }
143748 -       }
143750 -       return iSize;
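
In the table build above, a weight-w symbol occupies (1 << w) >> 1 consecutive cells and decodes in tableLog + 1 - w bits, so the starting offset of each weight class is a shifted prefix sum over the weight counts. A toy run with made-up counts (two weight-1 symbols, one each of weights 2 and 3, tableLog = 3):

    #include <stdio.h>

    int main(void)
    {
            unsigned tableLog = 3;
            unsigned rankCount[4] = { 0, 2, 1, 1 }; /* symbols per weight (invented) */
            unsigned rankStart[4] = { 0 };
            unsigned n, next = 0;

            for (n = 1; n <= tableLog; n++) {
                    rankStart[n] = next;
                    next += rankCount[n] << (n - 1);
            }
            /* prints: w=1 start=0 nbBits=3, w=2 start=2 nbBits=2, w=3 start=4 nbBits=1 */
            for (n = 1; n <= tableLog; n++)
                    printf("w=%u start=%u nbBits=%u\n",
                           n, rankStart[n], tableLog + 1 - n);
            return next == (1u << tableLog) ? 0 : 1; /* table exactly filled */
    }
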
143753 -static BYTE HUF_decodeSymbolX2(BIT_DStream_t *Dstream, const HUF_DEltX2 *dt, const U32 dtLog)
143755 -       size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
143756 -       BYTE const c = dt[val].byte;
143757 -       BIT_skipBits(Dstream, dt[val].nbBits);
143758 -       return c;
143761 -#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
143763 -#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr)         \
143764 -       if (ZSTD_64bits() || (HUF_TABLELOG_MAX <= 12)) \
143765 -       HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
143767 -#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
143768 -       if (ZSTD_64bits())                     \
143769 -       HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
143771 -FORCE_INLINE size_t HUF_decodeStreamX2(BYTE *p, BIT_DStream_t *const bitDPtr, BYTE *const pEnd, const HUF_DEltX2 *const dt, const U32 dtLog)
143773 -       BYTE *const pStart = p;
143775 -       /* up to 4 symbols at a time */
143776 -       while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd - 4)) {
143777 -               HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
143778 -               HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
143779 -               HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
143780 -               HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
143781 -       }
143783 -       /* closer to the end */
143784 -       while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
143785 -               HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
143787 -       /* no more data to retrieve from bitstream, hence no need to reload */
143788 -       while (p < pEnd)
143789 -               HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
143791 -       return pEnd - pStart;
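
The _0/_1/_2 macro tiers above exist because a bitstream reload only guarantees roughly sizeof(bitContainer) * 8 - 7 usable bits before the next reload, so a 64-bit container affords four lookups of up to HUF_TABLELOG_MAX bits while a 32-bit one affords two (four again once the table log is capped at 12). A small sketch of that budget, assuming the usual 12-bit cap:

    #include <stddef.h>

    #define TABLELOG_MAX 12 /* assumed cap, mirroring HUF_TABLELOG_MAX */

    /* Symbols that can be decoded safely between bitstream reloads. */
    static unsigned symbols_per_reload(void)
    {
            unsigned container_bits = (unsigned)sizeof(size_t) * 8;
            return (container_bits - 7) / TABLELOG_MAX; /* 4 on 64-bit, 2 on 32-bit */
    }
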
143794 -static size_t HUF_decompress1X2_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
143796 -       BYTE *op = (BYTE *)dst;
143797 -       BYTE *const oend = op + dstSize;
143798 -       const void *dtPtr = DTable + 1;
143799 -       const HUF_DEltX2 *const dt = (const HUF_DEltX2 *)dtPtr;
143800 -       BIT_DStream_t bitD;
143801 -       DTableDesc const dtd = HUF_getDTableDesc(DTable);
143802 -       U32 const dtLog = dtd.tableLog;
143804 -       {
143805 -               size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
143806 -               if (HUF_isError(errorCode))
143807 -                       return errorCode;
143808 -       }
143810 -       HUF_decodeStreamX2(op, &bitD, oend, dt, dtLog);
143812 -       /* check */
143813 -       if (!BIT_endOfDStream(&bitD))
143814 -               return ERROR(corruption_detected);
143816 -       return dstSize;
143819 -size_t HUF_decompress1X2_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
143821 -       DTableDesc dtd = HUF_getDTableDesc(DTable);
143822 -       if (dtd.tableType != 0)
143823 -               return ERROR(GENERIC);
143824 -       return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
143827 -size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable *DCtx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
143829 -       const BYTE *ip = (const BYTE *)cSrc;
143831 -       size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workspace, workspaceSize);
143832 -       if (HUF_isError(hSize))
143833 -               return hSize;
143834 -       if (hSize >= cSrcSize)
143835 -               return ERROR(srcSize_wrong);
143836 -       ip += hSize;
143837 -       cSrcSize -= hSize;
143839 -       return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx);
143842 -static size_t HUF_decompress4X2_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
143844 -       /* Check */
143845 -       if (cSrcSize < 10)
143846 -               return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
143848 -       {
143849 -               const BYTE *const istart = (const BYTE *)cSrc;
143850 -               BYTE *const ostart = (BYTE *)dst;
143851 -               BYTE *const oend = ostart + dstSize;
143852 -               const void *const dtPtr = DTable + 1;
143853 -               const HUF_DEltX2 *const dt = (const HUF_DEltX2 *)dtPtr;
143855 -               /* Init */
143856 -               BIT_DStream_t bitD1;
143857 -               BIT_DStream_t bitD2;
143858 -               BIT_DStream_t bitD3;
143859 -               BIT_DStream_t bitD4;
143860 -               size_t const length1 = ZSTD_readLE16(istart);
143861 -               size_t const length2 = ZSTD_readLE16(istart + 2);
143862 -               size_t const length3 = ZSTD_readLE16(istart + 4);
143863 -               size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
143864 -               const BYTE *const istart1 = istart + 6; /* jumpTable */
143865 -               const BYTE *const istart2 = istart1 + length1;
143866 -               const BYTE *const istart3 = istart2 + length2;
143867 -               const BYTE *const istart4 = istart3 + length3;
143868 -               const size_t segmentSize = (dstSize + 3) / 4;
143869 -               BYTE *const opStart2 = ostart + segmentSize;
143870 -               BYTE *const opStart3 = opStart2 + segmentSize;
143871 -               BYTE *const opStart4 = opStart3 + segmentSize;
143872 -               BYTE *op1 = ostart;
143873 -               BYTE *op2 = opStart2;
143874 -               BYTE *op3 = opStart3;
143875 -               BYTE *op4 = opStart4;
143876 -               U32 endSignal;
143877 -               DTableDesc const dtd = HUF_getDTableDesc(DTable);
143878 -               U32 const dtLog = dtd.tableLog;
143880 -               if (length4 > cSrcSize)
143881 -                       return ERROR(corruption_detected); /* overflow */
143882 -               {
143883 -                       size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1);
143884 -                       if (HUF_isError(errorCode))
143885 -                               return errorCode;
143886 -               }
143887 -               {
143888 -                       size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2);
143889 -                       if (HUF_isError(errorCode))
143890 -                               return errorCode;
143891 -               }
143892 -               {
143893 -                       size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3);
143894 -                       if (HUF_isError(errorCode))
143895 -                               return errorCode;
143896 -               }
143897 -               {
143898 -                       size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4);
143899 -                       if (HUF_isError(errorCode))
143900 -                               return errorCode;
143901 -               }
143903 -               /* 16-32 symbols per loop (4-8 symbols per stream) */
143904 -               endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
143905 -               for (; (endSignal == BIT_DStream_unfinished) && (op4 < (oend - 7));) {
143906 -                       HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
143907 -                       HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
143908 -                       HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
143909 -                       HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
143910 -                       HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
143911 -                       HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
143912 -                       HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
143913 -                       HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
143914 -                       HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
143915 -                       HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
143916 -                       HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
143917 -                       HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
143918 -                       HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
143919 -                       HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
143920 -                       HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
143921 -                       HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
143922 -                       endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
143923 -               }
143925 -               /* check corruption */
143926 -               if (op1 > opStart2)
143927 -                       return ERROR(corruption_detected);
143928 -               if (op2 > opStart3)
143929 -                       return ERROR(corruption_detected);
143930 -               if (op3 > opStart4)
143931 -                       return ERROR(corruption_detected);
143932 -               /* note : op4 supposed already verified within main loop */
143934 -               /* finish bitStreams one by one */
143935 -               HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
143936 -               HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
143937 -               HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
143938 -               HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
143940 -               /* check */
143941 -               endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
143942 -               if (!endSignal)
143943 -                       return ERROR(corruption_detected);
143945 -               /* decoded size */
143946 -               return dstSize;
143947 -       }
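
The framing parsed at the top of this function is a six-byte jump table: three little-endian 16-bit stream lengths, with the fourth stream taking whatever remains after the header. A minimal stand-alone parse of the same layout (buffer contents are illustrative, names local to the sketch):

    #include <stddef.h>
    #include <stdint.h>

    static uint16_t read_le16(const uint8_t *p)
    {
            return (uint16_t)(p[0] | (p[1] << 8));
    }

    /* Returns 0 on success, -1 if the header claims more data than exists. */
    static int split_streams(const uint8_t *src, size_t srcSize,
                             const uint8_t *start[4], size_t len[4])
    {
            if (srcSize < 10) /* jump table + at least 1 byte per stream */
                    return -1;
            len[0] = read_le16(src);
            len[1] = read_le16(src + 2);
            len[2] = read_le16(src + 4);
            if (6 + len[0] + len[1] + len[2] > srcSize)
                    return -1;
            len[3] = srcSize - 6 - len[0] - len[1] - len[2];
            start[0] = src + 6;
            start[1] = start[0] + len[0];
            start[2] = start[1] + len[1];
            start[3] = start[2] + len[2];
            return 0;
    }
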
143950 -size_t HUF_decompress4X2_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
143952 -       DTableDesc dtd = HUF_getDTableDesc(DTable);
143953 -       if (dtd.tableType != 0)
143954 -               return ERROR(GENERIC);
143955 -       return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
143958 -size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
143960 -       const BYTE *ip = (const BYTE *)cSrc;
143962 -       size_t const hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, workspace, workspaceSize);
143963 -       if (HUF_isError(hSize))
143964 -               return hSize;
143965 -       if (hSize >= cSrcSize)
143966 -               return ERROR(srcSize_wrong);
143967 -       ip += hSize;
143968 -       cSrcSize -= hSize;
143970 -       return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx);
143973 -/* *************************/
143974 -/* double-symbols decoding */
143975 -/* *************************/
143976 -typedef struct {
143977 -       U16 sequence;
143978 -       BYTE nbBits;
143979 -       BYTE length;
143980 -} HUF_DEltX4; /* double-symbols decoding */
143982 -typedef struct {
143983 -       BYTE symbol;
143984 -       BYTE weight;
143985 -} sortedSymbol_t;
143987 -/* HUF_fillDTableX4Level2() :
143988 - * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
143989 -static void HUF_fillDTableX4Level2(HUF_DEltX4 *DTable, U32 sizeLog, const U32 consumed, const U32 *rankValOrigin, const int minWeight,
143990 -                                  const sortedSymbol_t *sortedSymbols, const U32 sortedListSize, U32 nbBitsBaseline, U16 baseSeq)
143992 -       HUF_DEltX4 DElt;
143993 -       U32 rankVal[HUF_TABLELOG_MAX + 1];
143995 -       /* get pre-calculated rankVal */
143996 -       memcpy(rankVal, rankValOrigin, sizeof(rankVal));
143998 -       /* fill skipped values */
143999 -       if (minWeight > 1) {
144000 -               U32 i, skipSize = rankVal[minWeight];
144001 -               ZSTD_writeLE16(&(DElt.sequence), baseSeq);
144002 -               DElt.nbBits = (BYTE)(consumed);
144003 -               DElt.length = 1;
144004 -               for (i = 0; i < skipSize; i++)
144005 -                       DTable[i] = DElt;
144006 -       }
144008 -       /* fill DTable */
144009 -       {
144010 -               U32 s;
144011 -               for (s = 0; s < sortedListSize; s++) { /* note : sortedSymbols already skipped */
144012 -                       const U32 symbol = sortedSymbols[s].symbol;
144013 -                       const U32 weight = sortedSymbols[s].weight;
144014 -                       const U32 nbBits = nbBitsBaseline - weight;
144015 -                       const U32 length = 1 << (sizeLog - nbBits);
144016 -                       const U32 start = rankVal[weight];
144017 -                       U32 i = start;
144018 -                       const U32 end = start + length;
144020 -                       ZSTD_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
144021 -                       DElt.nbBits = (BYTE)(nbBits + consumed);
144022 -                       DElt.length = 2;
144023 -                       do {
144024 -                               DTable[i++] = DElt;
144025 -                       } while (i < end); /* since length >= 1 */
144027 -                       rankVal[weight] += length;
144028 -               }
144029 -       }
144032 -typedef U32 rankVal_t[HUF_TABLELOG_MAX][HUF_TABLELOG_MAX + 1];
144033 -typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
144035 -static void HUF_fillDTableX4(HUF_DEltX4 *DTable, const U32 targetLog, const sortedSymbol_t *sortedList, const U32 sortedListSize, const U32 *rankStart,
144036 -                            rankVal_t rankValOrigin, const U32 maxWeight, const U32 nbBitsBaseline)
144038 -       U32 rankVal[HUF_TABLELOG_MAX + 1];
144039 -       const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
144040 -       const U32 minBits = nbBitsBaseline - maxWeight;
144041 -       U32 s;
144043 -       memcpy(rankVal, rankValOrigin, sizeof(rankVal));
144045 -       /* fill DTable */
144046 -       for (s = 0; s < sortedListSize; s++) {
144047 -               const U16 symbol = sortedList[s].symbol;
144048 -               const U32 weight = sortedList[s].weight;
144049 -               const U32 nbBits = nbBitsBaseline - weight;
144050 -               const U32 start = rankVal[weight];
144051 -               const U32 length = 1 << (targetLog - nbBits);
144053 -               if (targetLog - nbBits >= minBits) { /* enough room for a second symbol */
144054 -                       U32 sortedRank;
144055 -                       int minWeight = nbBits + scaleLog;
144056 -                       if (minWeight < 1)
144057 -                               minWeight = 1;
144058 -                       sortedRank = rankStart[minWeight];
144059 -                       HUF_fillDTableX4Level2(DTable + start, targetLog - nbBits, nbBits, rankValOrigin[nbBits], minWeight, sortedList + sortedRank,
144060 -                                              sortedListSize - sortedRank, nbBitsBaseline, symbol);
144061 -               } else {
144062 -                       HUF_DEltX4 DElt;
144063 -                       ZSTD_writeLE16(&(DElt.sequence), symbol);
144064 -                       DElt.nbBits = (BYTE)(nbBits);
144065 -                       DElt.length = 1;
144066 -                       {
144067 -                               U32 const end = start + length;
144068 -                               U32 u;
144069 -                               for (u = start; u < end; u++)
144070 -                                       DTable[u] = DElt;
144071 -                       }
144072 -               }
144073 -               rankVal[weight] += length;
144074 -       }
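
Each double-symbol cell built above packs one or two literal bytes into 'sequence' (stored little-endian, first symbol in the low byte), with 'length' recording how many are valid and 'nbBits' the bits both symbols consume together. A sketch of consuming such a cell, with names local to the sketch:

    #include <stdint.h>

    struct delt_x4 {
            uint16_t sequence; /* low byte = first symbol, high byte = second */
            uint8_t  nbBits;   /* total bits the cell consumes */
            uint8_t  length;   /* 1 or 2 decoded symbols */
    };

    static unsigned emit_cell(uint8_t *out, const struct delt_x4 *cell)
    {
            out[0] = (uint8_t)(cell->sequence & 0xFF);
            if (cell->length == 2)
                    out[1] = (uint8_t)(cell->sequence >> 8);
            return cell->length; /* caller advances out by this much */
    }
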
144077 -size_t HUF_readDTableX4_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
144079 -       U32 tableLog, maxW, sizeOfSort, nbSymbols;
144080 -       DTableDesc dtd = HUF_getDTableDesc(DTable);
144081 -       U32 const maxTableLog = dtd.maxTableLog;
144082 -       size_t iSize;
144083 -       void *dtPtr = DTable + 1; /* force compiler to avoid strict-aliasing */
144084 -       HUF_DEltX4 *const dt = (HUF_DEltX4 *)dtPtr;
144085 -       U32 *rankStart;
144087 -       rankValCol_t *rankVal;
144088 -       U32 *rankStats;
144089 -       U32 *rankStart0;
144090 -       sortedSymbol_t *sortedSymbol;
144091 -       BYTE *weightList;
144092 -       size_t spaceUsed32 = 0;
144094 -       HUF_STATIC_ASSERT((sizeof(rankValCol_t) & 3) == 0);
144096 -       rankVal = (rankValCol_t *)((U32 *)workspace + spaceUsed32);
144097 -       spaceUsed32 += (sizeof(rankValCol_t) * HUF_TABLELOG_MAX) >> 2;
144098 -       rankStats = (U32 *)workspace + spaceUsed32;
144099 -       spaceUsed32 += HUF_TABLELOG_MAX + 1;
144100 -       rankStart0 = (U32 *)workspace + spaceUsed32;
144101 -       spaceUsed32 += HUF_TABLELOG_MAX + 2;
144102 -       sortedSymbol = (sortedSymbol_t *)((U32 *)workspace + spaceUsed32);
144103 -       spaceUsed32 += ALIGN(sizeof(sortedSymbol_t) * (HUF_SYMBOLVALUE_MAX + 1), sizeof(U32)) >> 2;
144104 -       weightList = (BYTE *)((U32 *)workspace + spaceUsed32);
144105 -       spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
144107 -       if ((spaceUsed32 << 2) > workspaceSize)
144108 -               return ERROR(tableLog_tooLarge);
144109 -       workspace = (U32 *)workspace + spaceUsed32;
144110 -       workspaceSize -= (spaceUsed32 << 2);
144112 -       rankStart = rankStart0 + 1;
144113 -       memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1));
144115 -       HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */
144116 -       if (maxTableLog > HUF_TABLELOG_MAX)
144117 -               return ERROR(tableLog_tooLarge);
144118 -       /* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... */
144120 -       iSize = HUF_readStats_wksp(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize);
144121 -       if (HUF_isError(iSize))
144122 -               return iSize;
144124 -       /* check result */
144125 -       if (tableLog > maxTableLog)
144126 -               return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
144128 -       /* find maxWeight */
144129 -       for (maxW = tableLog; rankStats[maxW] == 0; maxW--) {
144130 -       } /* necessarily finds a solution before 0 */
144132 -       /* Get start index of each weight */
144133 -       {
144134 -               U32 w, nextRankStart = 0;
144135 -               for (w = 1; w < maxW + 1; w++) {
144136 -                       U32 curr = nextRankStart;
144137 -                       nextRankStart += rankStats[w];
144138 -                       rankStart[w] = curr;
144139 -               }
144140 -               rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
144141 -               sizeOfSort = nextRankStart;
144142 -       }
144144 -       /* sort symbols by weight */
144145 -       {
144146 -               U32 s;
144147 -               for (s = 0; s < nbSymbols; s++) {
144148 -                       U32 const w = weightList[s];
144149 -                       U32 const r = rankStart[w]++;
144150 -                       sortedSymbol[r].symbol = (BYTE)s;
144151 -                       sortedSymbol[r].weight = (BYTE)w;
144152 -               }
144153 -               rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
144154 -       }
144156 -       /* Build rankVal */
144157 -       {
144158 -               U32 *const rankVal0 = rankVal[0];
144159 -               {
144160 -                       int const rescale = (maxTableLog - tableLog) - 1; /* tableLog <= maxTableLog */
144161 -                       U32 nextRankVal = 0;
144162 -                       U32 w;
144163 -                       for (w = 1; w < maxW + 1; w++) {
144164 -                               U32 curr = nextRankVal;
144165 -                               nextRankVal += rankStats[w] << (w + rescale);
144166 -                               rankVal0[w] = curr;
144167 -                       }
144168 -               }
144169 -               {
144170 -                       U32 const minBits = tableLog + 1 - maxW;
144171 -                       U32 consumed;
144172 -                       for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
144173 -                               U32 *const rankValPtr = rankVal[consumed];
144174 -                               U32 w;
144175 -                               for (w = 1; w < maxW + 1; w++) {
144176 -                                       rankValPtr[w] = rankVal0[w] >> consumed;
144177 -                               }
144178 -                       }
144179 -               }
144180 -       }
144182 -       HUF_fillDTableX4(dt, maxTableLog, sortedSymbol, sizeOfSort, rankStart0, rankVal, maxW, tableLog + 1);
144184 -       dtd.tableLog = (BYTE)maxTableLog;
144185 -       dtd.tableType = 1;
144186 -       memcpy(DTable, &dtd, sizeof(dtd));
144187 -       return iSize;
144190 -static U32 HUF_decodeSymbolX4(void *op, BIT_DStream_t *DStream, const HUF_DEltX4 *dt, const U32 dtLog)
144192 -       size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
144193 -       memcpy(op, dt + val, 2);
144194 -       BIT_skipBits(DStream, dt[val].nbBits);
144195 -       return dt[val].length;
144198 -static U32 HUF_decodeLastSymbolX4(void *op, BIT_DStream_t *DStream, const HUF_DEltX4 *dt, const U32 dtLog)
144200 -       size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
144201 -       memcpy(op, dt + val, 1);
144202 -       if (dt[val].length == 1)
144203 -               BIT_skipBits(DStream, dt[val].nbBits);
144204 -       else {
144205 -               if (DStream->bitsConsumed < (sizeof(DStream->bitContainer) * 8)) {
144206 -                       BIT_skipBits(DStream, dt[val].nbBits);
144207 -                       if (DStream->bitsConsumed > (sizeof(DStream->bitContainer) * 8))
144208 -                               /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
144209 -                               DStream->bitsConsumed = (sizeof(DStream->bitContainer) * 8);
144210 -               }
144211 -       }
144212 -       return 1;
144215 -#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
144217 -#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr)         \
144218 -       if (ZSTD_64bits() || (HUF_TABLELOG_MAX <= 12)) \
144219 -       ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
144221 -#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
144222 -       if (ZSTD_64bits())                     \
144223 -       ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
144225 -FORCE_INLINE size_t HUF_decodeStreamX4(BYTE *p, BIT_DStream_t *bitDPtr, BYTE *const pEnd, const HUF_DEltX4 *const dt, const U32 dtLog)
144227 -       BYTE *const pStart = p;
144229 -       /* up to 8 symbols at a time */
144230 -       while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd - (sizeof(bitDPtr->bitContainer) - 1))) {
144231 -               HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
144232 -               HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
144233 -               HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
144234 -               HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
144235 -       }
144237 -       /* closer to end : up to 2 symbols at a time */
144238 -       while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd - 2))
144239 -               HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
144241 -       while (p <= pEnd - 2)
144242 -               HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
144244 -       if (p < pEnd)
144245 -               p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
144247 -       return p - pStart;
144250 -static size_t HUF_decompress1X4_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
144252 -       BIT_DStream_t bitD;
144254 -       /* Init */
144255 -       {
144256 -               size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
144257 -               if (HUF_isError(errorCode))
144258 -                       return errorCode;
144259 -       }
144261 -       /* decode */
144262 -       {
144263 -               BYTE *const ostart = (BYTE *)dst;
144264 -               BYTE *const oend = ostart + dstSize;
144265 -               const void *const dtPtr = DTable + 1; /* force compiler to not use strict-aliasing */
144266 -               const HUF_DEltX4 *const dt = (const HUF_DEltX4 *)dtPtr;
144267 -               DTableDesc const dtd = HUF_getDTableDesc(DTable);
144268 -               HUF_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog);
144269 -       }
144271 -       /* check */
144272 -       if (!BIT_endOfDStream(&bitD))
144273 -               return ERROR(corruption_detected);
144275 -       /* decoded size */
144276 -       return dstSize;
144279 -size_t HUF_decompress1X4_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
144281 -       DTableDesc dtd = HUF_getDTableDesc(DTable);
144282 -       if (dtd.tableType != 1)
144283 -               return ERROR(GENERIC);
144284 -       return HUF_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
144287 -size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable *DCtx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
144289 -       const BYTE *ip = (const BYTE *)cSrc;
144291 -       size_t const hSize = HUF_readDTableX4_wksp(DCtx, cSrc, cSrcSize, workspace, workspaceSize);
144292 -       if (HUF_isError(hSize))
144293 -               return hSize;
144294 -       if (hSize >= cSrcSize)
144295 -               return ERROR(srcSize_wrong);
144296 -       ip += hSize;
144297 -       cSrcSize -= hSize;
144299 -       return HUF_decompress1X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx);
144302 -static size_t HUF_decompress4X4_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
144304 -       if (cSrcSize < 10)
144305 -               return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
144307 -       {
144308 -               const BYTE *const istart = (const BYTE *)cSrc;
144309 -               BYTE *const ostart = (BYTE *)dst;
144310 -               BYTE *const oend = ostart + dstSize;
144311 -               const void *const dtPtr = DTable + 1;
144312 -               const HUF_DEltX4 *const dt = (const HUF_DEltX4 *)dtPtr;
144314 -               /* Init */
144315 -               BIT_DStream_t bitD1;
144316 -               BIT_DStream_t bitD2;
144317 -               BIT_DStream_t bitD3;
144318 -               BIT_DStream_t bitD4;
144319 -               size_t const length1 = ZSTD_readLE16(istart);
144320 -               size_t const length2 = ZSTD_readLE16(istart + 2);
144321 -               size_t const length3 = ZSTD_readLE16(istart + 4);
144322 -               size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
144323 -               const BYTE *const istart1 = istart + 6; /* jumpTable */
144324 -               const BYTE *const istart2 = istart1 + length1;
144325 -               const BYTE *const istart3 = istart2 + length2;
144326 -               const BYTE *const istart4 = istart3 + length3;
144327 -               size_t const segmentSize = (dstSize + 3) / 4;
144328 -               BYTE *const opStart2 = ostart + segmentSize;
144329 -               BYTE *const opStart3 = opStart2 + segmentSize;
144330 -               BYTE *const opStart4 = opStart3 + segmentSize;
144331 -               BYTE *op1 = ostart;
144332 -               BYTE *op2 = opStart2;
144333 -               BYTE *op3 = opStart3;
144334 -               BYTE *op4 = opStart4;
144335 -               U32 endSignal;
144336 -               DTableDesc const dtd = HUF_getDTableDesc(DTable);
144337 -               U32 const dtLog = dtd.tableLog;
144339 -               if (length4 > cSrcSize)
144340 -                       return ERROR(corruption_detected); /* overflow */
144341 -               {
144342 -                       size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1);
144343 -                       if (HUF_isError(errorCode))
144344 -                               return errorCode;
144345 -               }
144346 -               {
144347 -                       size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2);
144348 -                       if (HUF_isError(errorCode))
144349 -                               return errorCode;
144350 -               }
144351 -               {
144352 -                       size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3);
144353 -                       if (HUF_isError(errorCode))
144354 -                               return errorCode;
144355 -               }
144356 -               {
144357 -                       size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4);
144358 -                       if (HUF_isError(errorCode))
144359 -                               return errorCode;
144360 -               }
144362 -               /* 16-32 symbols per loop (4-8 symbols per stream) */
144363 -               endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
144364 -               for (; (endSignal == BIT_DStream_unfinished) & (op4 < (oend - (sizeof(bitD4.bitContainer) - 1)));) {
144365 -                       HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
144366 -                       HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
144367 -                       HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
144368 -                       HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
144369 -                       HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
144370 -                       HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
144371 -                       HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
144372 -                       HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
144373 -                       HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
144374 -                       HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
144375 -                       HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
144376 -                       HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
144377 -                       HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
144378 -                       HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
144379 -                       HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
144380 -                       HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
144382 -                       endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
144383 -               }
144385 -               /* check corruption */
144386 -               if (op1 > opStart2)
144387 -                       return ERROR(corruption_detected);
144388 -               if (op2 > opStart3)
144389 -                       return ERROR(corruption_detected);
144390 -               if (op3 > opStart4)
144391 -                       return ERROR(corruption_detected);
144392 -               /* note : op4 already verified within main loop */
144394 -               /* finish bitStreams one by one */
144395 -               HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
144396 -               HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
144397 -               HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
144398 -               HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog);
144400 -               /* check */
144401 -               {
144402 -                       U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
144403 -                       if (!endCheck)
144404 -                               return ERROR(corruption_detected);
144405 -               }
144407 -               /* decoded size */
144408 -               return dstSize;
144409 -       }
144412 -size_t HUF_decompress4X4_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
144414 -       DTableDesc dtd = HUF_getDTableDesc(DTable);
144415 -       if (dtd.tableType != 1)
144416 -               return ERROR(GENERIC);
144417 -       return HUF_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
144420 -size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
144422 -       const BYTE *ip = (const BYTE *)cSrc;
144424 -       size_t hSize = HUF_readDTableX4_wksp(dctx, cSrc, cSrcSize, workspace, workspaceSize);
144425 -       if (HUF_isError(hSize))
144426 -               return hSize;
144427 -       if (hSize >= cSrcSize)
144428 -               return ERROR(srcSize_wrong);
144429 -       ip += hSize;
144430 -       cSrcSize -= hSize;
144432 -       return HUF_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx);
144435 -/* ********************************/
144436 -/* Generic decompression selector */
144437 -/* ********************************/
144439 -size_t HUF_decompress1X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
144441 -       DTableDesc const dtd = HUF_getDTableDesc(DTable);
144442 -       return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable)
144443 -                            : HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
144446 -size_t HUF_decompress4X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
144448 -       DTableDesc const dtd = HUF_getDTableDesc(DTable);
144449 -       return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable)
144450 -                            : HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
144453 -typedef struct {
144454 -       U32 tableTime;
144455 -       U32 decode256Time;
144456 -} algo_time_t;
144457 -static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = {
144458 -    /* single, double, quad */
144459 -    {{0, 0}, {1, 1}, {2, 2}},               /* Q==0 : impossible */
144460 -    {{0, 0}, {1, 1}, {2, 2}},               /* Q==1 : impossible */
144461 -    {{38, 130}, {1313, 74}, {2151, 38}},     /* Q == 2 : 12-18% */
144462 -    {{448, 128}, {1353, 74}, {2238, 41}},    /* Q == 3 : 18-25% */
144463 -    {{556, 128}, {1353, 74}, {2238, 47}},    /* Q == 4 : 25-32% */
144464 -    {{714, 128}, {1418, 74}, {2436, 53}},    /* Q == 5 : 32-38% */
144465 -    {{883, 128}, {1437, 74}, {2464, 61}},    /* Q == 6 : 38-44% */
144466 -    {{897, 128}, {1515, 75}, {2622, 68}},    /* Q == 7 : 44-50% */
144467 -    {{926, 128}, {1613, 75}, {2730, 75}},    /* Q == 8 : 50-56% */
144468 -    {{947, 128}, {1729, 77}, {3359, 77}},    /* Q == 9 : 56-62% */
144469 -    {{1107, 128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
144470 -    {{1177, 128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
144471 -    {{1242, 128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
144472 -    {{1349, 128}, {2644, 106}, {5260, 106}}, /* Q ==13 : 81-87% */
144473 -    {{1455, 128}, {2422, 124}, {4174, 124}}, /* Q ==14 : 87-93% */
144474 -    {{722, 128}, {1891, 145}, {1936, 146}},  /* Q ==15 : 93-99% */
144477 -/** HUF_selectDecoder() :
144478 -*   Tells which decoder is likely to decode faster,
144479 -*   based on a set of pre-determined metrics.
144480 -*   @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
144481 -*   Assumption : 0 < cSrcSize < dstSize <= 128 KB */
144482 -U32 HUF_selectDecoder(size_t dstSize, size_t cSrcSize)
144484 -       /* decoder timing evaluation */
144485 -       U32 const Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */
144486 -       U32 const D256 = (U32)(dstSize >> 8);
144487 -       U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
144488 -       U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
144489 -       DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, for cache eviction */
144491 -       return DTime1 < DTime0;
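
To make the heuristic concrete, one worked instance with invented sizes: a 40 KiB block decompressing to 64 KiB gives Q = 40960 * 16 / 65536 = 10 and D256 = 256, so the single-symbol estimate is 1107 + 128 * 256 = 33875 against a double-symbol estimate of 2083 + 81 * 256 = 22819, which even after the one-eighth cache penalty (25671) still wins:

    #include <stdio.h>

    int main(void)
    {
            unsigned dstSize = 65536, cSrcSize = 40960; /* invented sizes */
            unsigned Q = (unsigned)((unsigned long)cSrcSize * 16 / dstSize); /* 10 */
            unsigned D256 = dstSize >> 8;                                    /* 256 */
            unsigned DTime0 = 1107 + 128 * D256; /* algoTime[10][0] -> 33875 */
            unsigned DTime1 = 2083 +  81 * D256; /* algoTime[10][1] -> 22819 */

            DTime1 += DTime1 >> 3;               /* cache penalty  -> 25671 */
            printf("Q=%u X2=%u X4=%u pick=%s\n", Q, DTime0, DTime1,
                   DTime1 < DTime0 ? "X4" : "X2");
            return 0;
    }
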
144494 -typedef size_t (*decompressionAlgo)(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize);
144496 -size_t HUF_decompress4X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
144498 -       /* validation checks */
144499 -       if (dstSize == 0)
144500 -               return ERROR(dstSize_tooSmall);
144501 -       if (cSrcSize > dstSize)
144502 -               return ERROR(corruption_detected); /* invalid */
144503 -       if (cSrcSize == dstSize) {
144504 -               memcpy(dst, cSrc, dstSize);
144505 -               return dstSize;
144506 -       } /* not compressed */
144507 -       if (cSrcSize == 1) {
144508 -               memset(dst, *(const BYTE *)cSrc, dstSize);
144509 -               return dstSize;
144510 -       } /* RLE */
144512 -       {
144513 -               U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
144514 -               return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize)
144515 -                             : HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize);
144516 -       }
144519 -size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
144521 -       /* validation checks */
144522 -       if (dstSize == 0)
144523 -               return ERROR(dstSize_tooSmall);
144524 -       if ((cSrcSize >= dstSize) || (cSrcSize <= 1))
144525 -               return ERROR(corruption_detected); /* invalid */
144527 -       {
144528 -               U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
144529 -               return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize)
144530 -                             : HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize);
144531 -       }
144534 -size_t HUF_decompress1X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
144536 -       /* validation checks */
144537 -       if (dstSize == 0)
144538 -               return ERROR(dstSize_tooSmall);
144539 -       if (cSrcSize > dstSize)
144540 -               return ERROR(corruption_detected); /* invalid */
144541 -       if (cSrcSize == dstSize) {
144542 -               memcpy(dst, cSrc, dstSize);
144543 -               return dstSize;
144544 -       } /* not compressed */
144545 -       if (cSrcSize == 1) {
144546 -               memset(dst, *(const BYTE *)cSrc, dstSize);
144547 -               return dstSize;
144548 -       } /* RLE */
144550 -       {
144551 -               U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
144552 -               return algoNb ? HUF_decompress1X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize)
144553 -                             : HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize);
144554 -       }
144556 diff --git a/lib/zstd/mem.h b/lib/zstd/mem.h
144557 deleted file mode 100644
144558 index 93d7a2c377fe..000000000000
144559 --- a/lib/zstd/mem.h
144560 +++ /dev/null
144561 @@ -1,151 +0,0 @@
144563 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
144564 - * All rights reserved.
144566 - * This source code is licensed under the BSD-style license found in the
144567 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
144568 - * An additional grant of patent rights can be found in the PATENTS file in the
144569 - * same directory.
144571 - * This program is free software; you can redistribute it and/or modify it under
144572 - * the terms of the GNU General Public License version 2 as published by the
144573 - * Free Software Foundation. This program is dual-licensed; you may select
144574 - * either version 2 of the GNU General Public License ("GPL") or BSD license
144575 - * ("BSD").
144576 - */
144578 -#ifndef MEM_H_MODULE
144579 -#define MEM_H_MODULE
144581 -/*-****************************************
144582 -*  Dependencies
144583 -******************************************/
144584 -#include <asm/unaligned.h>
144585 -#include <linux/string.h> /* memcpy */
144586 -#include <linux/types.h>  /* size_t, ptrdiff_t */
144588 -/*-****************************************
144589 -*  Compiler specifics
144590 -******************************************/
144591 -#define ZSTD_STATIC static inline
144593 -/*-**************************************************************
144594 -*  Basic Types
144595 -*****************************************************************/
144596 -typedef uint8_t BYTE;
144597 -typedef uint16_t U16;
144598 -typedef int16_t S16;
144599 -typedef uint32_t U32;
144600 -typedef int32_t S32;
144601 -typedef uint64_t U64;
144602 -typedef int64_t S64;
144603 -typedef ptrdiff_t iPtrDiff;
144604 -typedef uintptr_t uPtrDiff;
144606 -/*-**************************************************************
144607 -*  Memory I/O
144608 -*****************************************************************/
144609 -ZSTD_STATIC unsigned ZSTD_32bits(void) { return sizeof(size_t) == 4; }
144610 -ZSTD_STATIC unsigned ZSTD_64bits(void) { return sizeof(size_t) == 8; }
144612 -#if defined(__LITTLE_ENDIAN)
144613 -#define ZSTD_LITTLE_ENDIAN 1
144614 -#else
144615 -#define ZSTD_LITTLE_ENDIAN 0
144616 -#endif
144618 -ZSTD_STATIC unsigned ZSTD_isLittleEndian(void) { return ZSTD_LITTLE_ENDIAN; }
144620 -ZSTD_STATIC U16 ZSTD_read16(const void *memPtr) { return get_unaligned((const U16 *)memPtr); }
144622 -ZSTD_STATIC U32 ZSTD_read32(const void *memPtr) { return get_unaligned((const U32 *)memPtr); }
144624 -ZSTD_STATIC U64 ZSTD_read64(const void *memPtr) { return get_unaligned((const U64 *)memPtr); }
144626 -ZSTD_STATIC size_t ZSTD_readST(const void *memPtr) { return get_unaligned((const size_t *)memPtr); }
144628 -ZSTD_STATIC void ZSTD_write16(void *memPtr, U16 value) { put_unaligned(value, (U16 *)memPtr); }
144630 -ZSTD_STATIC void ZSTD_write32(void *memPtr, U32 value) { put_unaligned(value, (U32 *)memPtr); }
144632 -ZSTD_STATIC void ZSTD_write64(void *memPtr, U64 value) { put_unaligned(value, (U64 *)memPtr); }
144634 -/*=== Little endian r/w ===*/
144636 -ZSTD_STATIC U16 ZSTD_readLE16(const void *memPtr) { return get_unaligned_le16(memPtr); }
144638 -ZSTD_STATIC void ZSTD_writeLE16(void *memPtr, U16 val) { put_unaligned_le16(val, memPtr); }
144640 -ZSTD_STATIC U32 ZSTD_readLE24(const void *memPtr) { return ZSTD_readLE16(memPtr) + (((const BYTE *)memPtr)[2] << 16); }
144642 -ZSTD_STATIC void ZSTD_writeLE24(void *memPtr, U32 val)
144644 -       ZSTD_writeLE16(memPtr, (U16)val);
144645 -       ((BYTE *)memPtr)[2] = (BYTE)(val >> 16);
144648 -ZSTD_STATIC U32 ZSTD_readLE32(const void *memPtr) { return get_unaligned_le32(memPtr); }
144650 -ZSTD_STATIC void ZSTD_writeLE32(void *memPtr, U32 val32) { put_unaligned_le32(val32, memPtr); }
144652 -ZSTD_STATIC U64 ZSTD_readLE64(const void *memPtr) { return get_unaligned_le64(memPtr); }
144654 -ZSTD_STATIC void ZSTD_writeLE64(void *memPtr, U64 val64) { put_unaligned_le64(val64, memPtr); }
144656 -ZSTD_STATIC size_t ZSTD_readLEST(const void *memPtr)
144658 -       if (ZSTD_32bits())
144659 -               return (size_t)ZSTD_readLE32(memPtr);
144660 -       else
144661 -               return (size_t)ZSTD_readLE64(memPtr);
144664 -ZSTD_STATIC void ZSTD_writeLEST(void *memPtr, size_t val)
144666 -       if (ZSTD_32bits())
144667 -               ZSTD_writeLE32(memPtr, (U32)val);
144668 -       else
144669 -               ZSTD_writeLE64(memPtr, (U64)val);
144672 -/*=== Big endian r/w ===*/
144674 -ZSTD_STATIC U32 ZSTD_readBE32(const void *memPtr) { return get_unaligned_be32(memPtr); }
144676 -ZSTD_STATIC void ZSTD_writeBE32(void *memPtr, U32 val32) { put_unaligned_be32(val32, memPtr); }
144678 -ZSTD_STATIC U64 ZSTD_readBE64(const void *memPtr) { return get_unaligned_be64(memPtr); }
144680 -ZSTD_STATIC void ZSTD_writeBE64(void *memPtr, U64 val64) { put_unaligned_be64(val64, memPtr); }
144682 -ZSTD_STATIC size_t ZSTD_readBEST(const void *memPtr)
144684 -       if (ZSTD_32bits())
144685 -               return (size_t)ZSTD_readBE32(memPtr);
144686 -       else
144687 -               return (size_t)ZSTD_readBE64(memPtr);
144690 -ZSTD_STATIC void ZSTD_writeBEST(void *memPtr, size_t val)
144692 -       if (ZSTD_32bits())
144693 -               ZSTD_writeBE32(memPtr, (U32)val);
144694 -       else
144695 -               ZSTD_writeBE64(memPtr, (U64)val);
144698 -/* function safe only for comparisons */
144699 -ZSTD_STATIC U32 ZSTD_readMINMATCH(const void *memPtr, U32 length)
144701 -       switch (length) {
144702 -       default:
144703 -       case 4: return ZSTD_read32(memPtr);
144704 -       case 3:
144705 -               if (ZSTD_isLittleEndian())
144706 -                       return ZSTD_read32(memPtr) << 8;
144707 -               else
144708 -                       return ZSTD_read32(memPtr) >> 8;
144709 -       }
144712 -#endif /* MEM_H_MODULE */
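
Outside the kernel, the same little-endian accessors reduce to explicit byte assembly; the deleted header instead delegates to get_unaligned_le16() and friends so unaligned access stays safe on every architecture. A portable sketch of the 24-bit pair, which has no kernel helper and is composed by hand above as well:

    #include <stdint.h>

    static uint32_t read_le24(const uint8_t *p)
    {
            /* matches ZSTD_readLE24: 16-bit LE value plus third byte << 16 */
            return (uint32_t)p[0] | ((uint32_t)p[1] << 8) | ((uint32_t)p[2] << 16);
    }

    static void write_le24(uint8_t *p, uint32_t v)
    {
            p[0] = (uint8_t)v;
            p[1] = (uint8_t)(v >> 8);
            p[2] = (uint8_t)(v >> 16);
    }
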
144713 diff --git a/lib/zstd/zstd_common.c b/lib/zstd/zstd_common.c
144714 deleted file mode 100644
144715 index a282624ee155..000000000000
144716 --- a/lib/zstd/zstd_common.c
144717 +++ /dev/null
144718 @@ -1,75 +0,0 @@
144720 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
144721 - * All rights reserved.
144723 - * This source code is licensed under the BSD-style license found in the
144724 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
144725 - * An additional grant of patent rights can be found in the PATENTS file in the
144726 - * same directory.
144728 - * This program is free software; you can redistribute it and/or modify it under
144729 - * the terms of the GNU General Public License version 2 as published by the
144730 - * Free Software Foundation. This program is dual-licensed; you may select
144731 - * either version 2 of the GNU General Public License ("GPL") or BSD license
144732 - * ("BSD").
144733 - */
144735 -/*-*************************************
144736 -*  Dependencies
144737 -***************************************/
144738 -#include "error_private.h"
144739 -#include "zstd_internal.h" /* declaration of ZSTD_isError, ZSTD_getErrorName, ZSTD_getErrorCode, ZSTD_getErrorString, ZSTD_versionNumber */
144740 -#include <linux/kernel.h>
144742 -/*=**************************************************************
144743 -*  Custom allocator
144744 -****************************************************************/
144746 -#define stack_push(stack, size)                                 \
144747 -       ({                                                      \
144748 -               void *const ptr = ZSTD_PTR_ALIGN((stack)->ptr); \
144749 -               (stack)->ptr = (char *)ptr + (size);            \
144750 -               (stack)->ptr <= (stack)->end ? ptr : NULL;      \
144751 -       })
144753 -ZSTD_customMem ZSTD_initStack(void *workspace, size_t workspaceSize)
144755 -       ZSTD_customMem stackMem = {ZSTD_stackAlloc, ZSTD_stackFree, workspace};
144756 -       ZSTD_stack *stack = (ZSTD_stack *)workspace;
144757 -       /* Verify preconditions */
144758 -       if (!workspace || workspaceSize < sizeof(ZSTD_stack) || workspace != ZSTD_PTR_ALIGN(workspace)) {
144759 -               ZSTD_customMem error = {NULL, NULL, NULL};
144760 -               return error;
144761 -       }
144762 -       /* Initialize the stack */
144763 -       stack->ptr = workspace;
144764 -       stack->end = (char *)workspace + workspaceSize;
144765 -       stack_push(stack, sizeof(ZSTD_stack));
144766 -       return stackMem;
144769 -void *ZSTD_stackAllocAll(void *opaque, size_t *size)
144771 -       ZSTD_stack *stack = (ZSTD_stack *)opaque;
144772 -       *size = (BYTE const *)stack->end - (BYTE *)ZSTD_PTR_ALIGN(stack->ptr);
144773 -       return stack_push(stack, *size);
144776 -void *ZSTD_stackAlloc(void *opaque, size_t size)
144778 -       ZSTD_stack *stack = (ZSTD_stack *)opaque;
144779 -       return stack_push(stack, size);
144781 -void ZSTD_stackFree(void *opaque, void *address)
144783 -       (void)opaque;
144784 -       (void)address;
144787 -void *ZSTD_malloc(size_t size, ZSTD_customMem customMem) { return customMem.customAlloc(customMem.opaque, size); }
144789 -void ZSTD_free(void *ptr, ZSTD_customMem customMem)
144791 -       if (ptr != NULL)
144792 -               customMem.customFree(customMem.opaque, ptr);
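
stack_push() and ZSTD_initStack() above implement a bump allocator over a caller-supplied workspace: allocations only ever advance a pointer toward 'end', and ZSTD_stackFree() is deliberately a no-op. A simplified stand-alone version, with alignment reduced to sizeof(void *) for the sketch:

    #include <stddef.h>
    #include <stdint.h>

    struct bump {
            char *ptr;
            char *end;
    };

    static void *bump_alloc(struct bump *b, size_t size)
    {
            uintptr_t aligned = ((uintptr_t)b->ptr + sizeof(void *) - 1)
                                & ~(uintptr_t)(sizeof(void *) - 1);
            if (aligned + size > (uintptr_t)b->end)
                    return NULL;            /* workspace exhausted */
            b->ptr = (char *)(aligned + size);
            return (void *)aligned;         /* never freed individually */
    }
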
144794 diff --git a/lib/zstd/zstd_compress_module.c b/lib/zstd/zstd_compress_module.c
144795 new file mode 100644
144796 index 000000000000..37d08ff43e6e
144797 --- /dev/null
144798 +++ b/lib/zstd/zstd_compress_module.c
144799 @@ -0,0 +1,124 @@
144800 +// SPDX-License-Identifier: GPL-2.0-only
144802 + * Copyright (c) Facebook, Inc.
144803 + * All rights reserved.
144805 + * This source code is licensed under both the BSD-style license (found in the
144806 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
144807 + * in the COPYING file in the root directory of this source tree).
144808 + * You may select, at your option, one of the above-listed licenses.
144809 + */
144811 +#include <linux/kernel.h>
144812 +#include <linux/module.h>
144813 +#include <linux/string.h>
144814 +#include <linux/zstd.h>
144816 +#include "common/zstd_deps.h"
144817 +#include "common/zstd_internal.h"
144819 +int zstd_min_clevel(void)
144821 +       return ZSTD_minCLevel();
144823 +EXPORT_SYMBOL(zstd_min_clevel);
144825 +int zstd_max_clevel(void)
144827 +       return ZSTD_maxCLevel();
144829 +EXPORT_SYMBOL(zstd_max_clevel);
144831 +size_t zstd_compress_bound(size_t src_size)
144833 +       return ZSTD_compressBound(src_size);
144835 +EXPORT_SYMBOL(zstd_compress_bound);
144837 +zstd_parameters zstd_get_params(int level,
144838 +       unsigned long long estimated_src_size)
144840 +       return ZSTD_getParams(level, estimated_src_size, 0);
144842 +EXPORT_SYMBOL(zstd_get_params);
144844 +size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *cparams)
144846 +       return ZSTD_estimateCCtxSize_usingCParams(*cparams);
144848 +EXPORT_SYMBOL(zstd_cctx_workspace_bound);
144850 +zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size)
144852 +       if (workspace == NULL)
144853 +               return NULL;
144854 +       return ZSTD_initStaticCCtx(workspace, workspace_size);
144856 +EXPORT_SYMBOL(zstd_init_cctx);
144858 +size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
144859 +       const void *src, size_t src_size, const zstd_parameters *parameters)
144861 +       return ZSTD_compress_advanced(cctx, dst, dst_capacity, src, src_size, NULL, 0, *parameters);
144863 +EXPORT_SYMBOL(zstd_compress_cctx);
144865 +size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams)
144867 +       return ZSTD_estimateCStreamSize_usingCParams(*cparams);
144869 +EXPORT_SYMBOL(zstd_cstream_workspace_bound);
144871 +zstd_cstream *zstd_init_cstream(const zstd_parameters *parameters,
144872 +       unsigned long long pledged_src_size, void *workspace, size_t workspace_size)
144874 +       zstd_cstream *cstream;
144875 +       size_t ret;
144877 +       if (workspace == NULL)
144878 +               return NULL;
144880 +       cstream = ZSTD_initStaticCStream(workspace, workspace_size);
144881 +       if (cstream == NULL)
144882 +               return NULL;
144884 +       /* 0 means unknown in linux zstd API but means 0 in new zstd API */
144885 +       if (pledged_src_size == 0)
144886 +               pledged_src_size = ZSTD_CONTENTSIZE_UNKNOWN;
144888 +       ret = ZSTD_initCStream_advanced(cstream, NULL, 0, *parameters, pledged_src_size);
144889 +       if (ZSTD_isError(ret))
144890 +               return NULL;
144892 +       return cstream;
144894 +EXPORT_SYMBOL(zstd_init_cstream);
144896 +size_t zstd_reset_cstream(zstd_cstream *cstream,
144897 +       unsigned long long pledged_src_size)
144899 +       return ZSTD_resetCStream(cstream, pledged_src_size);
144901 +EXPORT_SYMBOL(zstd_reset_cstream);
144903 +size_t zstd_compress_stream(zstd_cstream *cstream, zstd_out_buffer *output,
144904 +       zstd_in_buffer *input)
144906 +       return ZSTD_compressStream(cstream, output, input);
144908 +EXPORT_SYMBOL(zstd_compress_stream);
144910 +size_t zstd_flush_stream(zstd_cstream *cstream, zstd_out_buffer *output)
144912 +       return ZSTD_flushStream(cstream, output);
144914 +EXPORT_SYMBOL(zstd_flush_stream);
144916 +size_t zstd_end_stream(zstd_cstream *cstream, zstd_out_buffer *output)
144918 +       return ZSTD_endStream(cstream, output);
144920 +EXPORT_SYMBOL(zstd_end_stream);
144922 +MODULE_LICENSE("Dual BSD/GPL");
144923 +MODULE_DESCRIPTION("Zstd Compressor");
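The new zstd_compress_module.c is a thin shim mapping the kernel's stable zstd_* names onto upstream ZSTD_* calls, with all state living in a caller-provided workspace. A minimal sketch of one-shot compression against this wrapper API; the level 3, the kvmalloc() usage, and the name example_compress are illustrative, not part of the patch:

	#include <linux/errno.h>
	#include <linux/mm.h>
	#include <linux/zstd.h>

	static int example_compress(const void *src, size_t src_len,
				    void *dst, size_t dst_cap, size_t *out_len)
	{
		/* size dst with zstd_compress_bound(src_len) to guarantee success */
		zstd_parameters params = zstd_get_params(3, src_len);
		size_t ws_size = zstd_cctx_workspace_bound(&params.cParams);
		void *ws = kvmalloc(ws_size, GFP_KERNEL);
		zstd_cctx *cctx;
		size_t ret;

		if (!ws)
			return -ENOMEM;
		cctx = zstd_init_cctx(ws, ws_size);	/* NULL if ws is unusable */
		if (!cctx) {
			kvfree(ws);
			return -EINVAL;
		}
		ret = zstd_compress_cctx(cctx, dst, dst_cap, src, src_len, &params);
		kvfree(ws);
		if (zstd_is_error(ret))
			return -EIO;
		*out_len = ret;
		return 0;
	}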
144924 diff --git a/lib/zstd/zstd_decompress_module.c b/lib/zstd/zstd_decompress_module.c
144925 new file mode 100644
144926 index 000000000000..15005cdb9eca
144927 --- /dev/null
144928 +++ b/lib/zstd/zstd_decompress_module.c
144929 @@ -0,0 +1,105 @@
144930 +// SPDX-License-Identifier: GPL-2.0-only
144932 + * Copyright (c) Facebook, Inc.
144933 + * All rights reserved.
144935 + * This source code is licensed under both the BSD-style license (found in the
144936 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
144937 + * in the COPYING file in the root directory of this source tree).
144938 + * You may select, at your option, one of the above-listed licenses.
144939 + */
144941 +#include <linux/kernel.h>
144942 +#include <linux/module.h>
144943 +#include <linux/string.h>
144944 +#include <linux/zstd.h>
144946 +#include "common/zstd_deps.h"
144948 +/* Common symbols. zstd_compress must depend on zstd_decompress. */
144950 +unsigned int zstd_is_error(size_t code)
144952 +       return ZSTD_isError(code);
144954 +EXPORT_SYMBOL(zstd_is_error);
144956 +zstd_error_code zstd_get_error_code(size_t code)
144958 +       return ZSTD_getErrorCode(code);
144960 +EXPORT_SYMBOL(zstd_get_error_code);
144962 +const char *zstd_get_error_name(size_t code)
144964 +       return ZSTD_getErrorName(code);
144966 +EXPORT_SYMBOL(zstd_get_error_name);
144968 +/* Decompression symbols. */
144970 +size_t zstd_dctx_workspace_bound(void)
144972 +       return ZSTD_estimateDCtxSize();
144974 +EXPORT_SYMBOL(zstd_dctx_workspace_bound);
144976 +zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size)
144978 +       if (workspace == NULL)
144979 +               return NULL;
144980 +       return ZSTD_initStaticDCtx(workspace, workspace_size);
144982 +EXPORT_SYMBOL(zstd_init_dctx);
144984 +size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity,
144985 +       const void *src, size_t src_size)
144987 +       return ZSTD_decompressDCtx(dctx, dst, dst_capacity, src, src_size);
144989 +EXPORT_SYMBOL(zstd_decompress_dctx);
144991 +size_t zstd_dstream_workspace_bound(size_t max_window_size)
144993 +       return ZSTD_estimateDStreamSize(max_window_size);
144995 +EXPORT_SYMBOL(zstd_dstream_workspace_bound);
144997 +zstd_dstream *zstd_init_dstream(size_t max_window_size, void *workspace,
144998 +       size_t workspace_size)
145000 +       if (workspace == NULL)
145001 +               return NULL;
145002 +       (void)max_window_size;
145003 +       return ZSTD_initStaticDStream(workspace, workspace_size);
145005 +EXPORT_SYMBOL(zstd_init_dstream);
145007 +size_t zstd_reset_dstream(zstd_dstream *dstream)
145009 +       return ZSTD_resetDStream(dstream);
145011 +EXPORT_SYMBOL(zstd_reset_dstream);
145013 +size_t zstd_decompress_stream(zstd_dstream *dstream, zstd_out_buffer *output,
145014 +       zstd_in_buffer *input)
145016 +       return ZSTD_decompressStream(dstream, output, input);
145018 +EXPORT_SYMBOL(zstd_decompress_stream);
145020 +size_t zstd_find_frame_compressed_size(const void *src, size_t src_size)
145022 +       return ZSTD_findFrameCompressedSize(src, src_size);
145024 +EXPORT_SYMBOL(zstd_find_frame_compressed_size);
145026 +size_t zstd_get_frame_header(zstd_frame_header *header, const void *src,
145027 +       size_t src_size)
145029 +       return ZSTD_getFrameHeader(header, src, src_size);
145031 +EXPORT_SYMBOL(zstd_get_frame_header);
145033 +MODULE_LICENSE("Dual BSD/GPL");
145034 +MODULE_DESCRIPTION("Zstd Decompressor");
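The decompression shim follows the same workspace pattern, and it also carries the shared error helpers (zstd_is_error and friends), which is why zstd_compress must depend on zstd_decompress. A matching sketch for one-shot decompression; kvmalloc() and the name example_decompress are again illustrative:

	#include <linux/errno.h>
	#include <linux/mm.h>
	#include <linux/zstd.h>

	static int example_decompress(const void *src, size_t src_len,
				      void *dst, size_t dst_cap, size_t *out_len)
	{
		size_t ws_size = zstd_dctx_workspace_bound();
		void *ws = kvmalloc(ws_size, GFP_KERNEL);
		zstd_dctx *dctx;
		size_t ret;

		if (!ws)
			return -ENOMEM;
		dctx = zstd_init_dctx(ws, ws_size);
		if (!dctx) {
			kvfree(ws);
			return -EINVAL;
		}
		/* dst_cap must be at least the frame's decompressed size */
		ret = zstd_decompress_dctx(dctx, dst, dst_cap, src, src_len);
		kvfree(ws);
		if (zstd_is_error(ret))
			return -EIO;
		*out_len = ret;
		return 0;
	}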
145035 diff --git a/lib/zstd/zstd_internal.h b/lib/zstd/zstd_internal.h
145036 deleted file mode 100644
145037 index dac753397f86..000000000000
145038 --- a/lib/zstd/zstd_internal.h
145039 +++ /dev/null
145040 @@ -1,273 +0,0 @@
145042 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
145043 - * All rights reserved.
145045 - * This source code is licensed under the BSD-style license found in the
145046 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
145047 - * An additional grant of patent rights can be found in the PATENTS file in the
145048 - * same directory.
145050 - * This program is free software; you can redistribute it and/or modify it under
145051 - * the terms of the GNU General Public License version 2 as published by the
145052 - * Free Software Foundation. This program is dual-licensed; you may select
145053 - * either version 2 of the GNU General Public License ("GPL") or BSD license
145054 - * ("BSD").
145055 - */
145057 -#ifndef ZSTD_CCOMMON_H_MODULE
145058 -#define ZSTD_CCOMMON_H_MODULE
145060 -/*-*******************************************************
145061 -*  Compiler specifics
145062 -*********************************************************/
145063 -#define FORCE_INLINE static __always_inline
145064 -#define FORCE_NOINLINE static noinline
145066 -/*-*************************************
145067 -*  Dependencies
145068 -***************************************/
145069 -#include "error_private.h"
145070 -#include "mem.h"
145071 -#include <linux/compiler.h>
145072 -#include <linux/kernel.h>
145073 -#include <linux/xxhash.h>
145074 -#include <linux/zstd.h>
145076 -/*-*************************************
145077 -*  shared macros
145078 -***************************************/
145079 -#define MIN(a, b) ((a) < (b) ? (a) : (b))
145080 -#define MAX(a, b) ((a) > (b) ? (a) : (b))
145081 -#define CHECK_F(f)                       \
145082 -       {                                \
145083 -               size_t const errcod = f; \
145084 -               if (ERR_isError(errcod)) \
145085 -                       return errcod;   \
145086 -       } /* check and Forward error code */
145087 -#define CHECK_E(f, e)                    \
145088 -       {                                \
145089 -               size_t const errcod = f; \
145090 -               if (ERR_isError(errcod)) \
145091 -                       return ERROR(e); \
145092 -       } /* check and send Error code */
145093 -#define ZSTD_STATIC_ASSERT(c)                                   \
145094 -       {                                                       \
145095 -               enum { ZSTD_static_assert = 1 / (int)(!!(c)) }; \
145096 -       }
145098 -/*-*************************************
145099 -*  Common constants
145100 -***************************************/
145101 -#define ZSTD_OPT_NUM (1 << 12)
145102 -#define ZSTD_DICT_MAGIC 0xEC30A437 /* v0.7+ */
145104 -#define ZSTD_REP_NUM 3               /* number of repcodes */
145105 -#define ZSTD_REP_CHECK (ZSTD_REP_NUM) /* number of repcodes to check by the optimal parser */
145106 -#define ZSTD_REP_MOVE (ZSTD_REP_NUM - 1)
145107 -#define ZSTD_REP_MOVE_OPT (ZSTD_REP_NUM)
145108 -static const U32 repStartValue[ZSTD_REP_NUM] = {1, 4, 8};
145110 -#define KB *(1 << 10)
145111 -#define MB *(1 << 20)
145112 -#define GB *(1U << 30)
145114 -#define BIT7 128
145115 -#define BIT6 64
145116 -#define BIT5 32
145117 -#define BIT4 16
145118 -#define BIT1 2
145119 -#define BIT0 1
145121 -#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
145122 -static const size_t ZSTD_fcs_fieldSize[4] = {0, 2, 4, 8};
145123 -static const size_t ZSTD_did_fieldSize[4] = {0, 1, 2, 4};
145125 -#define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */
145126 -static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
145127 -typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
145129 -#define MIN_SEQUENCES_SIZE 1                                                                     /* nbSeq==0 */
145130 -#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */
145132 -#define HufLog 12
145133 -typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;
145135 -#define LONGNBSEQ 0x7F00
145137 -#define MINMATCH 3
145138 -#define EQUAL_READ32 4
145140 -#define Litbits 8
145141 -#define MaxLit ((1 << Litbits) - 1)
145142 -#define MaxML 52
145143 -#define MaxLL 35
145144 -#define MaxOff 28
145145 -#define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */
145146 -#define MLFSELog 9
145147 -#define LLFSELog 9
145148 -#define OffFSELog 8
145150 -static const U32 LL_bits[MaxLL + 1] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
145151 -static const S16 LL_defaultNorm[MaxLL + 1] = {4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, -1, -1, -1, -1};
145152 -#define LL_DEFAULTNORMLOG 6 /* for static allocation */
145153 -static const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;
145155 -static const U32 ML_bits[MaxML + 1] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  0,  0,  0,  0,  0,  0, 0,
145156 -                                      0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
145157 -static const S16 ML_defaultNorm[MaxML + 1] = {1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,  1,  1,  1,  1,  1,  1, 1,
145158 -                                             1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1};
145159 -#define ML_DEFAULTNORMLOG 6 /* for static allocation */
145160 -static const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
145162 -static const S16 OF_defaultNorm[MaxOff + 1] = {1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1};
145163 -#define OF_DEFAULTNORMLOG 5 /* for static allocation */
145164 -static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
145166 -/*-*******************************************
145167 -*  Shared functions to include for inlining
145168 -*********************************************/
145169 -ZSTD_STATIC void ZSTD_copy8(void *dst, const void *src) {
145170 -       /*
145171 -        * zstd relies heavily on gcc being able to analyze and inline this
145172 -        * memcpy() call, since it is called in a tight loop. Preboot mode
145173 -        * is compiled in freestanding mode, which stops gcc from analyzing
145174 -        * memcpy(). Use __builtin_memcpy() to tell gcc to analyze this as a
145175 -        * regular memcpy().
145176 -        */
145177 -       __builtin_memcpy(dst, src, 8);
145179 -/*! ZSTD_wildcopy() :
145180 -*   custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
145181 -#define WILDCOPY_OVERLENGTH 8
145182 -ZSTD_STATIC void ZSTD_wildcopy(void *dst, const void *src, ptrdiff_t length)
145184 -       const BYTE* ip = (const BYTE*)src;
145185 -       BYTE* op = (BYTE*)dst;
145186 -       BYTE* const oend = op + length;
145187 -#if defined(GCC_VERSION) && GCC_VERSION >= 70000 && GCC_VERSION < 70200
145188 -       /*
145189 -        * Work around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81388.
145190 -        * Avoid the bad case where the loop only runs once by handling the
145191 -        * special case separately. This doesn't trigger the bug because it
145192 -        * doesn't involve pointer/integer overflow.
145193 -        */
145194 -       if (length <= 8)
145195 -               return ZSTD_copy8(dst, src);
145196 -#endif
145197 -       do {
145198 -               ZSTD_copy8(op, ip);
145199 -               op += 8;
145200 -               ip += 8;
145201 -       } while (op < oend);
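ZSTD_wildcopy above always copies in whole 8-byte steps, so it can write up to 7 bytes past dst + length (8 if length == 0); every destination buffer the library hands it must carry WILDCOPY_OVERLENGTH bytes of slack. A hedged illustration, with in and out as hypothetical buffers:

	BYTE in[32], out[13 + WILDCOPY_OVERLENGTH];
	ZSTD_wildcopy(out, in, 13);	/* two ZSTD_copy8 steps: writes 16 bytes, i.e. out[0..15] */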
145204 -/*-*******************************************
145205 -*  Private interfaces
145206 -*********************************************/
145207 -typedef struct ZSTD_stats_s ZSTD_stats_t;
145209 -typedef struct {
145210 -       U32 off;
145211 -       U32 len;
145212 -} ZSTD_match_t;
145214 -typedef struct {
145215 -       U32 price;
145216 -       U32 off;
145217 -       U32 mlen;
145218 -       U32 litlen;
145219 -       U32 rep[ZSTD_REP_NUM];
145220 -} ZSTD_optimal_t;
145222 -typedef struct seqDef_s {
145223 -       U32 offset;
145224 -       U16 litLength;
145225 -       U16 matchLength;
145226 -} seqDef;
145228 -typedef struct {
145229 -       seqDef *sequencesStart;
145230 -       seqDef *sequences;
145231 -       BYTE *litStart;
145232 -       BYTE *lit;
145233 -       BYTE *llCode;
145234 -       BYTE *mlCode;
145235 -       BYTE *ofCode;
145236 -       U32 longLengthID; /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
145237 -       U32 longLengthPos;
145238 -       /* opt */
145239 -       ZSTD_optimal_t *priceTable;
145240 -       ZSTD_match_t *matchTable;
145241 -       U32 *matchLengthFreq;
145242 -       U32 *litLengthFreq;
145243 -       U32 *litFreq;
145244 -       U32 *offCodeFreq;
145245 -       U32 matchLengthSum;
145246 -       U32 matchSum;
145247 -       U32 litLengthSum;
145248 -       U32 litSum;
145249 -       U32 offCodeSum;
145250 -       U32 log2matchLengthSum;
145251 -       U32 log2matchSum;
145252 -       U32 log2litLengthSum;
145253 -       U32 log2litSum;
145254 -       U32 log2offCodeSum;
145255 -       U32 factor;
145256 -       U32 staticPrices;
145257 -       U32 cachedPrice;
145258 -       U32 cachedLitLength;
145259 -       const BYTE *cachedLiterals;
145260 -} seqStore_t;
145262 -const seqStore_t *ZSTD_getSeqStore(const ZSTD_CCtx *ctx);
145263 -void ZSTD_seqToCodes(const seqStore_t *seqStorePtr);
145264 -int ZSTD_isSkipFrame(ZSTD_DCtx *dctx);
145266 -/*= Custom memory allocation functions */
145267 -typedef void *(*ZSTD_allocFunction)(void *opaque, size_t size);
145268 -typedef void (*ZSTD_freeFunction)(void *opaque, void *address);
145269 -typedef struct {
145270 -       ZSTD_allocFunction customAlloc;
145271 -       ZSTD_freeFunction customFree;
145272 -       void *opaque;
145273 -} ZSTD_customMem;
145275 -void *ZSTD_malloc(size_t size, ZSTD_customMem customMem);
145276 -void ZSTD_free(void *ptr, ZSTD_customMem customMem);
145278 -/*====== stack allocation  ======*/
145280 -typedef struct {
145281 -       void *ptr;
145282 -       const void *end;
145283 -} ZSTD_stack;
145285 -#define ZSTD_ALIGN(x) ALIGN(x, sizeof(size_t))
145286 -#define ZSTD_PTR_ALIGN(p) PTR_ALIGN(p, sizeof(size_t))
145288 -ZSTD_customMem ZSTD_initStack(void *workspace, size_t workspaceSize);
145290 -void *ZSTD_stackAllocAll(void *opaque, size_t *size);
145291 -void *ZSTD_stackAlloc(void *opaque, size_t size);
145292 -void ZSTD_stackFree(void *opaque, void *address);
145294 -/*======  common function  ======*/
145296 -ZSTD_STATIC U32 ZSTD_highbit32(U32 val) { return 31 - __builtin_clz(val); }
145298 -/* hidden functions */
145300 -/* ZSTD_invalidateRepCodes() :
145301 - * ensures next compression will not use repcodes from previous block.
145302 - * Note : only works with regular variant;
145303 - *        do not use with extDict variant ! */
145304 -void ZSTD_invalidateRepCodes(ZSTD_CCtx *cctx);
145306 -size_t ZSTD_freeCCtx(ZSTD_CCtx *cctx);
145307 -size_t ZSTD_freeDCtx(ZSTD_DCtx *dctx);
145308 -size_t ZSTD_freeCDict(ZSTD_CDict *cdict);
145309 -size_t ZSTD_freeDDict(ZSTD_DDict *cdict);
145310 -size_t ZSTD_freeCStream(ZSTD_CStream *zcs);
145311 -size_t ZSTD_freeDStream(ZSTD_DStream *zds);
145313 -#endif /* ZSTD_CCOMMON_H_MODULE */
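One note on the helper near the bottom of this deleted header: ZSTD_highbit32(val) is floor(log2(val)) and is undefined for val == 0, because __builtin_clz(0) is undefined; that is why the pricing code in zstd_opt.h (removed in the next hunk) always passes freq + 1. For example:

	/*
	 *   ZSTD_highbit32(1)          == 0
	 *   ZSTD_highbit32(5)          == 2
	 *   ZSTD_highbit32(0x80000000) == 31
	 */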
145314 diff --git a/lib/zstd/zstd_opt.h b/lib/zstd/zstd_opt.h
145315 deleted file mode 100644
145316 index 55e1b4cba808..000000000000
145317 --- a/lib/zstd/zstd_opt.h
145318 +++ /dev/null
145319 @@ -1,1014 +0,0 @@
145321 - * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
145322 - * All rights reserved.
145324 - * This source code is licensed under the BSD-style license found in the
145325 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
145326 - * An additional grant of patent rights can be found in the PATENTS file in the
145327 - * same directory.
145329 - * This program is free software; you can redistribute it and/or modify it under
145330 - * the terms of the GNU General Public License version 2 as published by the
145331 - * Free Software Foundation. This program is dual-licensed; you may select
145332 - * either version 2 of the GNU General Public License ("GPL") or BSD license
145333 - * ("BSD").
145334 - */
145336 -/* Note : this file is intended to be included within zstd_compress.c */
145338 -#ifndef ZSTD_OPT_H_91842398743
145339 -#define ZSTD_OPT_H_91842398743
145341 -#define ZSTD_LITFREQ_ADD 2
145342 -#define ZSTD_FREQ_DIV 4
145343 -#define ZSTD_MAX_PRICE (1 << 30)
145345 -/*-*************************************
145346 -*  Price functions for optimal parser
145347 -***************************************/
145348 -FORCE_INLINE void ZSTD_setLog2Prices(seqStore_t *ssPtr)
145350 -       ssPtr->log2matchLengthSum = ZSTD_highbit32(ssPtr->matchLengthSum + 1);
145351 -       ssPtr->log2litLengthSum = ZSTD_highbit32(ssPtr->litLengthSum + 1);
145352 -       ssPtr->log2litSum = ZSTD_highbit32(ssPtr->litSum + 1);
145353 -       ssPtr->log2offCodeSum = ZSTD_highbit32(ssPtr->offCodeSum + 1);
145354 -       ssPtr->factor = 1 + ((ssPtr->litSum >> 5) / ssPtr->litLengthSum) + ((ssPtr->litSum << 1) / (ssPtr->litSum + ssPtr->matchSum));
145357 -ZSTD_STATIC void ZSTD_rescaleFreqs(seqStore_t *ssPtr, const BYTE *src, size_t srcSize)
145359 -       unsigned u;
145361 -       ssPtr->cachedLiterals = NULL;
145362 -       ssPtr->cachedPrice = ssPtr->cachedLitLength = 0;
145363 -       ssPtr->staticPrices = 0;
145365 -       if (ssPtr->litLengthSum == 0) {
145366 -               if (srcSize <= 1024)
145367 -                       ssPtr->staticPrices = 1;
145369 -               for (u = 0; u <= MaxLit; u++)
145370 -                       ssPtr->litFreq[u] = 0;
145371 -               for (u = 0; u < srcSize; u++)
145372 -                       ssPtr->litFreq[src[u]]++;
145374 -               ssPtr->litSum = 0;
145375 -               ssPtr->litLengthSum = MaxLL + 1;
145376 -               ssPtr->matchLengthSum = MaxML + 1;
145377 -               ssPtr->offCodeSum = (MaxOff + 1);
145378 -               ssPtr->matchSum = (ZSTD_LITFREQ_ADD << Litbits);
145380 -               for (u = 0; u <= MaxLit; u++) {
145381 -                       ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u] >> ZSTD_FREQ_DIV);
145382 -                       ssPtr->litSum += ssPtr->litFreq[u];
145383 -               }
145384 -               for (u = 0; u <= MaxLL; u++)
145385 -                       ssPtr->litLengthFreq[u] = 1;
145386 -               for (u = 0; u <= MaxML; u++)
145387 -                       ssPtr->matchLengthFreq[u] = 1;
145388 -               for (u = 0; u <= MaxOff; u++)
145389 -                       ssPtr->offCodeFreq[u] = 1;
145390 -       } else {
145391 -               ssPtr->matchLengthSum = 0;
145392 -               ssPtr->litLengthSum = 0;
145393 -               ssPtr->offCodeSum = 0;
145394 -               ssPtr->matchSum = 0;
145395 -               ssPtr->litSum = 0;
145397 -               for (u = 0; u <= MaxLit; u++) {
145398 -                       ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u] >> (ZSTD_FREQ_DIV + 1));
145399 -                       ssPtr->litSum += ssPtr->litFreq[u];
145400 -               }
145401 -               for (u = 0; u <= MaxLL; u++) {
145402 -                       ssPtr->litLengthFreq[u] = 1 + (ssPtr->litLengthFreq[u] >> (ZSTD_FREQ_DIV + 1));
145403 -                       ssPtr->litLengthSum += ssPtr->litLengthFreq[u];
145404 -               }
145405 -               for (u = 0; u <= MaxML; u++) {
145406 -                       ssPtr->matchLengthFreq[u] = 1 + (ssPtr->matchLengthFreq[u] >> ZSTD_FREQ_DIV);
145407 -                       ssPtr->matchLengthSum += ssPtr->matchLengthFreq[u];
145408 -                       ssPtr->matchSum += ssPtr->matchLengthFreq[u] * (u + 3);
145409 -               }
145410 -               ssPtr->matchSum *= ZSTD_LITFREQ_ADD;
145411 -               for (u = 0; u <= MaxOff; u++) {
145412 -                       ssPtr->offCodeFreq[u] = 1 + (ssPtr->offCodeFreq[u] >> ZSTD_FREQ_DIV);
145413 -                       ssPtr->offCodeSum += ssPtr->offCodeFreq[u];
145414 -               }
145415 -       }
145417 -       ZSTD_setLog2Prices(ssPtr);
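For orientation on this deleted pricing code: ZSTD_rescaleFreqs decays every frequency table (freq becomes 1 + (freq >> ZSTD_FREQ_DIV)) so recent history dominates, and ZSTD_setLog2Prices caches log2 of each table's sum; a symbol's price is then roughly its Shannon cost in bits, log2(sum) - log2(freq). A worked instance with hypothetical numbers, using the same ZSTD_highbit32 rounding as the code:

	/*
	 * Suppose litSum == 1024 and litFreq['e'] == 63. Then
	 *   log2litSum            = ZSTD_highbit32(1024 + 1)    = 10
	 *   price per 'e' literal = 10 - ZSTD_highbit32(63 + 1) = 10 - 6 = 4 bits,
	 * versus the exact entropy -log2(63/1024), about 4.02 bits.
	 */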
145420 -FORCE_INLINE U32 ZSTD_getLiteralPrice(seqStore_t *ssPtr, U32 litLength, const BYTE *literals)
145422 -       U32 price, u;
145424 -       if (ssPtr->staticPrices)
145425 -               return ZSTD_highbit32((U32)litLength + 1) + (litLength * 6);
145427 -       if (litLength == 0)
145428 -               return ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[0] + 1);
145430 -       /* literals */
145431 -       if (ssPtr->cachedLiterals == literals) {
145432 -               U32 const additional = litLength - ssPtr->cachedLitLength;
145433 -               const BYTE *literals2 = ssPtr->cachedLiterals + ssPtr->cachedLitLength;
145434 -               price = ssPtr->cachedPrice + additional * ssPtr->log2litSum;
145435 -               for (u = 0; u < additional; u++)
145436 -                       price -= ZSTD_highbit32(ssPtr->litFreq[literals2[u]] + 1);
145437 -               ssPtr->cachedPrice = price;
145438 -               ssPtr->cachedLitLength = litLength;
145439 -       } else {
145440 -               price = litLength * ssPtr->log2litSum;
145441 -               for (u = 0; u < litLength; u++)
145442 -                       price -= ZSTD_highbit32(ssPtr->litFreq[literals[u]] + 1);
145444 -               if (litLength >= 12) {
145445 -                       ssPtr->cachedLiterals = literals;
145446 -                       ssPtr->cachedPrice = price;
145447 -                       ssPtr->cachedLitLength = litLength;
145448 -               }
145449 -       }
145451 -       /* literal Length */
145452 -       {
145453 -               const BYTE LL_deltaCode = 19;
145454 -               const BYTE llCode = (litLength > 63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
145455 -               price += LL_bits[llCode] + ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[llCode] + 1);
145456 -       }
145458 -       return price;
145461 -FORCE_INLINE U32 ZSTD_getPrice(seqStore_t *seqStorePtr, U32 litLength, const BYTE *literals, U32 offset, U32 matchLength, const int ultra)
145463 -       /* offset */
145464 -       U32 price;
145465 -       BYTE const offCode = (BYTE)ZSTD_highbit32(offset + 1);
145467 -       if (seqStorePtr->staticPrices)
145468 -               return ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + ZSTD_highbit32((U32)matchLength + 1) + 16 + offCode;
145470 -       price = offCode + seqStorePtr->log2offCodeSum - ZSTD_highbit32(seqStorePtr->offCodeFreq[offCode] + 1);
145471 -       if (!ultra && offCode >= 20)
145472 -               price += (offCode - 19) * 2;
145474 -       /* match Length */
145475 -       {
145476 -               const BYTE ML_deltaCode = 36;
145477 -               const BYTE mlCode = (matchLength > 127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
145478 -               price += ML_bits[mlCode] + seqStorePtr->log2matchLengthSum - ZSTD_highbit32(seqStorePtr->matchLengthFreq[mlCode] + 1);
145479 -       }
145481 -       return price + ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + seqStorePtr->factor;
145484 -ZSTD_STATIC void ZSTD_updatePrice(seqStore_t *seqStorePtr, U32 litLength, const BYTE *literals, U32 offset, U32 matchLength)
145486 -       U32 u;
145488 -       /* literals */
145489 -       seqStorePtr->litSum += litLength * ZSTD_LITFREQ_ADD;
145490 -       for (u = 0; u < litLength; u++)
145491 -               seqStorePtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
145493 -       /* literal Length */
145494 -       {
145495 -               const BYTE LL_deltaCode = 19;
145496 -               const BYTE llCode = (litLength > 63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
145497 -               seqStorePtr->litLengthFreq[llCode]++;
145498 -               seqStorePtr->litLengthSum++;
145499 -       }
145501 -       /* match offset */
145502 -       {
145503 -               BYTE const offCode = (BYTE)ZSTD_highbit32(offset + 1);
145504 -               seqStorePtr->offCodeSum++;
145505 -               seqStorePtr->offCodeFreq[offCode]++;
145506 -       }
145508 -       /* match Length */
145509 -       {
145510 -               const BYTE ML_deltaCode = 36;
145511 -               const BYTE mlCode = (matchLength > 127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
145512 -               seqStorePtr->matchLengthFreq[mlCode]++;
145513 -               seqStorePtr->matchLengthSum++;
145514 -       }
145516 -       ZSTD_setLog2Prices(seqStorePtr);
145519 -#define SET_PRICE(pos, mlen_, offset_, litlen_, price_)           \
145520 -       {                                                         \
145521 -               while (last_pos < pos) {                          \
145522 -                       opt[last_pos + 1].price = ZSTD_MAX_PRICE; \
145523 -                       last_pos++;                               \
145524 -               }                                                 \
145525 -               opt[pos].mlen = mlen_;                            \
145526 -               opt[pos].off = offset_;                           \
145527 -               opt[pos].litlen = litlen_;                        \
145528 -               opt[pos].price = price_;                          \
145529 -       }
145531 -/* Update hashTable3 up to ip (excluded)
145532 -   Assumption : always within prefix (i.e. not within extDict) */
145533 -FORCE_INLINE
145534 -U32 ZSTD_insertAndFindFirstIndexHash3(ZSTD_CCtx *zc, const BYTE *ip)
145536 -       U32 *const hashTable3 = zc->hashTable3;
145537 -       U32 const hashLog3 = zc->hashLog3;
145538 -       const BYTE *const base = zc->base;
145539 -       U32 idx = zc->nextToUpdate3;
145540 -       const U32 target = zc->nextToUpdate3 = (U32)(ip - base);
145541 -       const size_t hash3 = ZSTD_hash3Ptr(ip, hashLog3);
145543 -       while (idx < target) {
145544 -               hashTable3[ZSTD_hash3Ptr(base + idx, hashLog3)] = idx;
145545 -               idx++;
145546 -       }
145548 -       return hashTable3[hash3];
145551 -/*-*************************************
145552 -*  Binary Tree search
145553 -***************************************/
145554 -static U32 ZSTD_insertBtAndGetAllMatches(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, U32 nbCompares, const U32 mls, U32 extDict,
145555 -                                        ZSTD_match_t *matches, const U32 minMatchLen)
145557 -       const BYTE *const base = zc->base;
145558 -       const U32 curr = (U32)(ip - base);
145559 -       const U32 hashLog = zc->params.cParams.hashLog;
145560 -       const size_t h = ZSTD_hashPtr(ip, hashLog, mls);
145561 -       U32 *const hashTable = zc->hashTable;
145562 -       U32 matchIndex = hashTable[h];
145563 -       U32 *const bt = zc->chainTable;
145564 -       const U32 btLog = zc->params.cParams.chainLog - 1;
145565 -       const U32 btMask = (1U << btLog) - 1;
145566 -       size_t commonLengthSmaller = 0, commonLengthLarger = 0;
145567 -       const BYTE *const dictBase = zc->dictBase;
145568 -       const U32 dictLimit = zc->dictLimit;
145569 -       const BYTE *const dictEnd = dictBase + dictLimit;
145570 -       const BYTE *const prefixStart = base + dictLimit;
145571 -       const U32 btLow = btMask >= curr ? 0 : curr - btMask;
145572 -       const U32 windowLow = zc->lowLimit;
145573 -       U32 *smallerPtr = bt + 2 * (curr & btMask);
145574 -       U32 *largerPtr = bt + 2 * (curr & btMask) + 1;
145575 -       U32 matchEndIdx = curr + 8;
145576 -       U32 dummy32; /* to be nullified at the end */
145577 -       U32 mnum = 0;
145579 -       const U32 minMatch = (mls == 3) ? 3 : 4;
145580 -       size_t bestLength = minMatchLen - 1;
145582 -       if (minMatch == 3) { /* HC3 match finder */
145583 -               U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(zc, ip);
145584 -               if (matchIndex3 > windowLow && (curr - matchIndex3 < (1 << 18))) {
145585 -                       const BYTE *match;
145586 -                       size_t currMl = 0;
145587 -                       if ((!extDict) || matchIndex3 >= dictLimit) {
145588 -                               match = base + matchIndex3;
145589 -                               if (match[bestLength] == ip[bestLength])
145590 -                                       currMl = ZSTD_count(ip, match, iLimit);
145591 -                       } else {
145592 -                               match = dictBase + matchIndex3;
145593 -                               if (ZSTD_readMINMATCH(match, MINMATCH) ==
145594 -                                   ZSTD_readMINMATCH(ip, MINMATCH)) /* assumption : matchIndex3 <= dictLimit-4 (by table construction) */
145595 -                                       currMl = ZSTD_count_2segments(ip + MINMATCH, match + MINMATCH, iLimit, dictEnd, prefixStart) + MINMATCH;
145596 -                       }
145598 -                       /* save best solution */
145599 -                       if (currMl > bestLength) {
145600 -                               bestLength = currMl;
145601 -                               matches[mnum].off = ZSTD_REP_MOVE_OPT + curr - matchIndex3;
145602 -                               matches[mnum].len = (U32)currMl;
145603 -                               mnum++;
145604 -                               if (currMl > ZSTD_OPT_NUM)
145605 -                                       goto update;
145606 -                               if (ip + currMl == iLimit)
145607 -                                       goto update; /* best possible, and avoid read overflow*/
145608 -                       }
145609 -               }
145610 -       }
145612 -       hashTable[h] = curr; /* Update Hash Table */
145614 -       while (nbCompares-- && (matchIndex > windowLow)) {
145615 -               U32 *nextPtr = bt + 2 * (matchIndex & btMask);
145616 -               size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
145617 -               const BYTE *match;
145619 -               if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
145620 -                       match = base + matchIndex;
145621 -                       if (match[matchLength] == ip[matchLength]) {
145622 -                               matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iLimit) + 1;
145623 -                       }
145624 -               } else {
145625 -                       match = dictBase + matchIndex;
145626 -                       matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iLimit, dictEnd, prefixStart);
145627 -                       if (matchIndex + matchLength >= dictLimit)
145628 -                               match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
145629 -               }
145631 -               if (matchLength > bestLength) {
145632 -                       if (matchLength > matchEndIdx - matchIndex)
145633 -                               matchEndIdx = matchIndex + (U32)matchLength;
145634 -                       bestLength = matchLength;
145635 -                       matches[mnum].off = ZSTD_REP_MOVE_OPT + curr - matchIndex;
145636 -                       matches[mnum].len = (U32)matchLength;
145637 -                       mnum++;
145638 -                       if (matchLength > ZSTD_OPT_NUM)
145639 -                               break;
145640 -                       if (ip + matchLength == iLimit) /* equal : no way to know if inf or sup */
145641 -                               break;                  /* drop, to guarantee consistency (miss a little bit of compression) */
145642 -               }
145644 -               if (match[matchLength] < ip[matchLength]) {
145645 -                       /* match is smaller than curr */
145646 -                       *smallerPtr = matchIndex;         /* update smaller idx */
145647 -                       commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
145648 -                       if (matchIndex <= btLow) {
145649 -                               smallerPtr = &dummy32;
145650 -                               break;
145651 -                       }                         /* beyond tree size, stop the search */
145652 -                       smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */
145653 -                       matchIndex = nextPtr[1];  /* new matchIndex larger than previous (closer to curr) */
145654 -               } else {
145655 -                       /* match is larger than curr */
145656 -                       *largerPtr = matchIndex;
145657 -                       commonLengthLarger = matchLength;
145658 -                       if (matchIndex <= btLow) {
145659 -                               largerPtr = &dummy32;
145660 -                               break;
145661 -                       } /* beyond tree size, stop the search */
145662 -                       largerPtr = nextPtr;
145663 -                       matchIndex = nextPtr[0];
145664 -               }
145665 -       }
145667 -       *smallerPtr = *largerPtr = 0;
145669 -update:
145670 -       zc->nextToUpdate = (matchEndIdx > curr + 8) ? matchEndIdx - 8 : curr + 1;
145671 -       return mnum;
145674 -/** Tree updater, providing best match */
145675 -static U32 ZSTD_BtGetAllMatches(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, const U32 maxNbAttempts, const U32 mls, ZSTD_match_t *matches,
145676 -                               const U32 minMatchLen)
145678 -       if (ip < zc->base + zc->nextToUpdate)
145679 -               return 0; /* skipped area */
145680 -       ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
145681 -       return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 0, matches, minMatchLen);
145684 -static U32 ZSTD_BtGetAllMatches_selectMLS(ZSTD_CCtx *zc, /* Index table will be updated */
145685 -                                         const BYTE *ip, const BYTE *const iHighLimit, const U32 maxNbAttempts, const U32 matchLengthSearch,
145686 -                                         ZSTD_match_t *matches, const U32 minMatchLen)
145688 -       switch (matchLengthSearch) {
145689 -       case 3: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen);
145690 -       default:
145691 -       case 4: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
145692 -       case 5: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
145693 -       case 7:
145694 -       case 6: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
145695 -       }
145698 -/** Tree updater, providing best match */
145699 -static U32 ZSTD_BtGetAllMatches_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, const U32 maxNbAttempts, const U32 mls,
145700 -                                       ZSTD_match_t *matches, const U32 minMatchLen)
145702 -       if (ip < zc->base + zc->nextToUpdate)
145703 -               return 0; /* skipped area */
145704 -       ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
145705 -       return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 1, matches, minMatchLen);
145708 -static U32 ZSTD_BtGetAllMatches_selectMLS_extDict(ZSTD_CCtx *zc, /* Index table will be updated */
145709 -                                                 const BYTE *ip, const BYTE *const iHighLimit, const U32 maxNbAttempts, const U32 matchLengthSearch,
145710 -                                                 ZSTD_match_t *matches, const U32 minMatchLen)
145712 -       switch (matchLengthSearch) {
145713 -       case 3: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen);
145714 -       default:
145715 -       case 4: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
145716 -       case 5: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
145717 -       case 7:
145718 -       case 6: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
145719 -       }
145722 -/*-*******************************
145723 -*  Optimal parser
145724 -*********************************/
145725 -FORCE_INLINE
145726 -void ZSTD_compressBlock_opt_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const int ultra)
145728 -       seqStore_t *seqStorePtr = &(ctx->seqStore);
145729 -       const BYTE *const istart = (const BYTE *)src;
145730 -       const BYTE *ip = istart;
145731 -       const BYTE *anchor = istart;
145732 -       const BYTE *const iend = istart + srcSize;
145733 -       const BYTE *const ilimit = iend - 8;
145734 -       const BYTE *const base = ctx->base;
145735 -       const BYTE *const prefixStart = base + ctx->dictLimit;
145737 -       const U32 maxSearches = 1U << ctx->params.cParams.searchLog;
145738 -       const U32 sufficient_len = ctx->params.cParams.targetLength;
145739 -       const U32 mls = ctx->params.cParams.searchLength;
145740 -       const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 3 : 4;
145742 -       ZSTD_optimal_t *opt = seqStorePtr->priceTable;
145743 -       ZSTD_match_t *matches = seqStorePtr->matchTable;
145744 -       const BYTE *inr;
145745 -       U32 offset, rep[ZSTD_REP_NUM];
145747 -       /* init */
145748 -       ctx->nextToUpdate3 = ctx->nextToUpdate;
145749 -       ZSTD_rescaleFreqs(seqStorePtr, (const BYTE *)src, srcSize);
145750 -       ip += (ip == prefixStart);
145751 -       {
145752 -               U32 i;
145753 -               for (i = 0; i < ZSTD_REP_NUM; i++)
145754 -                       rep[i] = ctx->rep[i];
145755 -       }
145757 -       /* Match Loop */
145758 -       while (ip < ilimit) {
145759 -               U32 cur, match_num, last_pos, litlen, price;
145760 -               U32 u, mlen, best_mlen, best_off, litLength;
145761 -               memset(opt, 0, sizeof(ZSTD_optimal_t));
145762 -               last_pos = 0;
145763 -               litlen = (U32)(ip - anchor);
145765 -               /* check repCode */
145766 -               {
145767 -                       U32 i, last_i = ZSTD_REP_CHECK + (ip == anchor);
145768 -                       for (i = (ip == anchor); i < last_i; i++) {
145769 -                               const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
145770 -                               if ((repCur > 0) && (repCur < (S32)(ip - prefixStart)) &&
145771 -                                   (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repCur, minMatch))) {
145772 -                                       mlen = (U32)ZSTD_count(ip + minMatch, ip + minMatch - repCur, iend) + minMatch;
145773 -                                       if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
145774 -                                               best_mlen = mlen;
145775 -                                               best_off = i;
145776 -                                               cur = 0;
145777 -                                               last_pos = 1;
145778 -                                               goto _storeSequence;
145779 -                                       }
145780 -                                       best_off = i - (ip == anchor);
145781 -                                       do {
145782 -                                               price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
145783 -                                               if (mlen > last_pos || price < opt[mlen].price)
145784 -                                                       SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */
145785 -                                               mlen--;
145786 -                                       } while (mlen >= minMatch);
145787 -                               }
145788 -                       }
145789 -               }
145791 -               match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, ip, iend, maxSearches, mls, matches, minMatch);
145793 -               if (!last_pos && !match_num) {
145794 -                       ip++;
145795 -                       continue;
145796 -               }
145798 -               if (match_num && (matches[match_num - 1].len > sufficient_len || matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
145799 -                       best_mlen = matches[match_num - 1].len;
145800 -                       best_off = matches[match_num - 1].off;
145801 -                       cur = 0;
145802 -                       last_pos = 1;
145803 -                       goto _storeSequence;
145804 -               }
145806 -               /* set prices using matches at position = 0 */
145807 -               best_mlen = (last_pos) ? last_pos : minMatch;
145808 -               for (u = 0; u < match_num; u++) {
145809 -                       mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
145810 -                       best_mlen = matches[u].len;
145811 -                       while (mlen <= best_mlen) {
145812 -                               price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
145813 -                               if (mlen > last_pos || price < opt[mlen].price)
145814 -                                       SET_PRICE(mlen, mlen, matches[u].off, litlen, price); /* note : macro modifies last_pos */
145815 -                               mlen++;
145816 -                       }
145817 -               }
145819 -               if (last_pos < minMatch) {
145820 -                       ip++;
145821 -                       continue;
145822 -               }
145824 -               /* initialize opt[0] */
145825 -               {
145826 -                       U32 i;
145827 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
145828 -                               opt[0].rep[i] = rep[i];
145829 -               }
145830 -               opt[0].mlen = 1;
145831 -               opt[0].litlen = litlen;
145833 -               /* check further positions */
145834 -               for (cur = 1; cur <= last_pos; cur++) {
145835 -                       inr = ip + cur;
145837 -                       if (opt[cur - 1].mlen == 1) {
145838 -                               litlen = opt[cur - 1].litlen + 1;
145839 -                               if (cur > litlen) {
145840 -                                       price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - litlen);
145841 -                               } else
145842 -                                       price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor);
145843 -                       } else {
145844 -                               litlen = 1;
145845 -                               price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - 1);
145846 -                       }
145848 -                       if (cur > last_pos || price <= opt[cur].price)
145849 -                               SET_PRICE(cur, 1, 0, litlen, price);
145851 -                       if (cur == last_pos)
145852 -                               break;
145854 -                       if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */
145855 -                               continue;
145857 -                       mlen = opt[cur].mlen;
145858 -                       if (opt[cur].off > ZSTD_REP_MOVE_OPT) {
145859 -                               opt[cur].rep[2] = opt[cur - mlen].rep[1];
145860 -                               opt[cur].rep[1] = opt[cur - mlen].rep[0];
145861 -                               opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT;
145862 -                       } else {
145863 -                               opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur - mlen].rep[1] : opt[cur - mlen].rep[2];
145864 -                               opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur - mlen].rep[0] : opt[cur - mlen].rep[1];
145865 -                               opt[cur].rep[0] =
145866 -                                   ((opt[cur].off == ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur - mlen].rep[0] - 1) : (opt[cur - mlen].rep[opt[cur].off]);
145867 -                       }
145869 -                       best_mlen = minMatch;
145870 -                       {
145871 -                               U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
145872 -                               for (i = (opt[cur].mlen != 1); i < last_i; i++) { /* check rep */
145873 -                                       const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
145874 -                                       if ((repCur > 0) && (repCur < (S32)(inr - prefixStart)) &&
145875 -                                           (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(inr - repCur, minMatch))) {
145876 -                                               mlen = (U32)ZSTD_count(inr + minMatch, inr + minMatch - repCur, iend) + minMatch;
145878 -                                               if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) {
145879 -                                                       best_mlen = mlen;
145880 -                                                       best_off = i;
145881 -                                                       last_pos = cur + 1;
145882 -                                                       goto _storeSequence;
145883 -                                               }
145885 -                                               best_off = i - (opt[cur].mlen != 1);
145886 -                                               if (mlen > best_mlen)
145887 -                                                       best_mlen = mlen;
145889 -                                               do {
145890 -                                                       if (opt[cur].mlen == 1) {
145891 -                                                               litlen = opt[cur].litlen;
145892 -                                                               if (cur > litlen) {
145893 -                                                                       price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr - litlen,
145894 -                                                                                                                       best_off, mlen - MINMATCH, ultra);
145895 -                                                               } else
145896 -                                                                       price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
145897 -                                                       } else {
145898 -                                                               litlen = 0;
145899 -                                                               price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
145900 -                                                       }
145902 -                                                       if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
145903 -                                                               SET_PRICE(cur + mlen, mlen, i, litlen, price);
145904 -                                                       mlen--;
145905 -                                               } while (mlen >= minMatch);
145906 -                                       }
145907 -                               }
145908 -                       }
145910 -                       match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, inr, iend, maxSearches, mls, matches, best_mlen);
145912 -                       if (match_num > 0 && (matches[match_num - 1].len > sufficient_len || cur + matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
145913 -                               best_mlen = matches[match_num - 1].len;
145914 -                               best_off = matches[match_num - 1].off;
145915 -                               last_pos = cur + 1;
145916 -                               goto _storeSequence;
145917 -                       }
145919 -                       /* set prices using matches at position = cur */
145920 -                       for (u = 0; u < match_num; u++) {
145921 -                               mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
145922 -                               best_mlen = matches[u].len;
145924 -                               while (mlen <= best_mlen) {
145925 -                                       if (opt[cur].mlen == 1) {
145926 -                                               litlen = opt[cur].litlen;
145927 -                                               if (cur > litlen)
145928 -                                                       price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip + cur - litlen,
145929 -                                                                                                       matches[u].off - 1, mlen - MINMATCH, ultra);
145930 -                                               else
145931 -                                                       price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
145932 -                                       } else {
145933 -                                               litlen = 0;
145934 -                                               price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off - 1, mlen - MINMATCH, ultra);
145935 -                                       }
145937 -                                       if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
145938 -                                               SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price);
145940 -                                       mlen++;
145941 -                               }
145942 -                       }
145943 -               }
145945 -               best_mlen = opt[last_pos].mlen;
145946 -               best_off = opt[last_pos].off;
145947 -               cur = last_pos - best_mlen;
145949 -       /* store sequence */
145950 -_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
145951 -               opt[0].mlen = 1;
145953 -               while (1) {
145954 -                       mlen = opt[cur].mlen;
145955 -                       offset = opt[cur].off;
145956 -                       opt[cur].mlen = best_mlen;
145957 -                       opt[cur].off = best_off;
145958 -                       best_mlen = mlen;
145959 -                       best_off = offset;
145960 -                       if (mlen > cur)
145961 -                               break;
145962 -                       cur -= mlen;
145963 -               }
145965 -               for (u = 0; u <= last_pos;) {
145966 -                       u += opt[u].mlen;
145967 -               }
145969 -               for (cur = 0; cur < last_pos;) {
145970 -                       mlen = opt[cur].mlen;
145971 -                       if (mlen == 1) {
145972 -                               ip++;
145973 -                               cur++;
145974 -                               continue;
145975 -                       }
145976 -                       offset = opt[cur].off;
145977 -                       cur += mlen;
145978 -                       litLength = (U32)(ip - anchor);
145980 -                       if (offset > ZSTD_REP_MOVE_OPT) {
145981 -                               rep[2] = rep[1];
145982 -                               rep[1] = rep[0];
145983 -                               rep[0] = offset - ZSTD_REP_MOVE_OPT;
145984 -                               offset--;
145985 -                       } else {
145986 -                               if (offset != 0) {
145987 -                                       best_off = (offset == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]);
145988 -                                       if (offset != 1)
145989 -                                               rep[2] = rep[1];
145990 -                                       rep[1] = rep[0];
145991 -                                       rep[0] = best_off;
145992 -                               }
145993 -                               if (litLength == 0)
145994 -                                       offset--;
145995 -                       }
145997 -                       ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
145998 -                       ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
145999 -                       anchor = ip = ip + mlen;
146000 -               }
146001 -       } /* for (cur=0; cur < last_pos; ) */
146003 -       /* Save reps for next block */
146004 -       {
146005 -               int i;
146006 -               for (i = 0; i < ZSTD_REP_NUM; i++)
146007 -                       ctx->repToConfirm[i] = rep[i];
146008 -       }
146010 -       /* Last Literals */
146011 -       {
146012 -               size_t const lastLLSize = iend - anchor;
146013 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
146014 -               seqStorePtr->lit += lastLLSize;
146015 -       }
146018 -FORCE_INLINE
146019 -void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const int ultra)
146021 -       seqStore_t *seqStorePtr = &(ctx->seqStore);
146022 -       const BYTE *const istart = (const BYTE *)src;
146023 -       const BYTE *ip = istart;
146024 -       const BYTE *anchor = istart;
146025 -       const BYTE *const iend = istart + srcSize;
146026 -       const BYTE *const ilimit = iend - 8;
146027 -       const BYTE *const base = ctx->base;
146028 -       const U32 lowestIndex = ctx->lowLimit;
146029 -       const U32 dictLimit = ctx->dictLimit;
146030 -       const BYTE *const prefixStart = base + dictLimit;
146031 -       const BYTE *const dictBase = ctx->dictBase;
146032 -       const BYTE *const dictEnd = dictBase + dictLimit;
146034 -       const U32 maxSearches = 1U << ctx->params.cParams.searchLog;
146035 -       const U32 sufficient_len = ctx->params.cParams.targetLength;
146036 -       const U32 mls = ctx->params.cParams.searchLength;
146037 -       const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 3 : 4;
146039 -       ZSTD_optimal_t *opt = seqStorePtr->priceTable;
146040 -       ZSTD_match_t *matches = seqStorePtr->matchTable;
146041 -       const BYTE *inr;
146043 -       /* init */
146044 -       U32 offset, rep[ZSTD_REP_NUM];
146045 -       {
146046 -               U32 i;
146047 -               for (i = 0; i < ZSTD_REP_NUM; i++)
146048 -                       rep[i] = ctx->rep[i];
146049 -       }
146051 -       ctx->nextToUpdate3 = ctx->nextToUpdate;
146052 -       ZSTD_rescaleFreqs(seqStorePtr, (const BYTE *)src, srcSize);
146053 -       ip += (ip == prefixStart);
146055 -       /* Match Loop */
146056 -       while (ip < ilimit) {
146057 -               U32 cur, match_num, last_pos, litlen, price;
146058 -               U32 u, mlen, best_mlen, best_off, litLength;
146059 -               U32 curr = (U32)(ip - base);
146060 -               memset(opt, 0, sizeof(ZSTD_optimal_t));
146061 -               last_pos = 0;
146062 -               opt[0].litlen = (U32)(ip - anchor);
146064 -               /* check repCode */
146065 -               {
146066 -                       U32 i, last_i = ZSTD_REP_CHECK + (ip == anchor);
146067 -                       for (i = (ip == anchor); i < last_i; i++) {
146068 -                               const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
146069 -                               const U32 repIndex = (U32)(curr - repCur);
146070 -                               const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
146071 -                               const BYTE *const repMatch = repBase + repIndex;
146072 -                               if ((repCur > 0 && repCur <= (S32)curr) &&
146073 -                                   (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
146074 -                                   && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch))) {
146075 -                                       /* repcode detected we should take it */
146076 -                                       const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
146077 -                                       mlen = (U32)ZSTD_count_2segments(ip + minMatch, repMatch + minMatch, iend, repEnd, prefixStart) + minMatch;
146079 -                                       if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
146080 -                                               best_mlen = mlen;
146081 -                                               best_off = i;
146082 -                                               cur = 0;
146083 -                                               last_pos = 1;
146084 -                                               goto _storeSequence;
146085 -                                       }
146087 -                                       best_off = i - (ip == anchor);
146088 -                                       litlen = opt[0].litlen;
146089 -                                       do {
146090 -                                               price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
146091 -                                               if (mlen > last_pos || price < opt[mlen].price)
146092 -                                                       SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */
146093 -                                               mlen--;
146094 -                                       } while (mlen >= minMatch);
146095 -                               }
146096 -                       }
146097 -               }
146099 -               match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, ip, iend, maxSearches, mls, matches, minMatch); /* first search (depth 0) */
146101 -               if (!last_pos && !match_num) {
146102 -                       ip++;
146103 -                       continue;
146104 -               }
146106 -               {
146107 -                       U32 i;
146108 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
146109 -                               opt[0].rep[i] = rep[i];
146110 -               }
146111 -               opt[0].mlen = 1;
146113 -               if (match_num && (matches[match_num - 1].len > sufficient_len || matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
146114 -                       best_mlen = matches[match_num - 1].len;
146115 -                       best_off = matches[match_num - 1].off;
146116 -                       cur = 0;
146117 -                       last_pos = 1;
146118 -                       goto _storeSequence;
146119 -               }
146121 -               best_mlen = (last_pos) ? last_pos : minMatch;
146123 -               /* set prices using matches at position = 0 */
146124 -               for (u = 0; u < match_num; u++) {
146125 -                       mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
146126 -                       best_mlen = matches[u].len;
146127 -                       litlen = opt[0].litlen;
146128 -                       while (mlen <= best_mlen) {
146129 -                               price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
146130 -                               if (mlen > last_pos || price < opt[mlen].price)
146131 -                                       SET_PRICE(mlen, mlen, matches[u].off, litlen, price);
146132 -                               mlen++;
146133 -                       }
146134 -               }
146136 -               if (last_pos < minMatch) {
146137 -                       ip++;
146138 -                       continue;
146139 -               }
146141 -               /* check further positions */
146142 -               for (cur = 1; cur <= last_pos; cur++) {
146143 -                       inr = ip + cur;
146145 -                       if (opt[cur - 1].mlen == 1) {
146146 -                               litlen = opt[cur - 1].litlen + 1;
146147 -                               if (cur > litlen) {
146148 -                                       price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - litlen);
146149 -                               } else
146150 -                                       price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor);
146151 -                       } else {
146152 -                               litlen = 1;
146153 -                               price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - 1);
146154 -                       }
146156 -                       if (cur > last_pos || price <= opt[cur].price)
146157 -                               SET_PRICE(cur, 1, 0, litlen, price);
146159 -                       if (cur == last_pos)
146160 -                               break;
146162 -                       if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */
146163 -                               continue;
146165 -                       mlen = opt[cur].mlen;
146166 -                       if (opt[cur].off > ZSTD_REP_MOVE_OPT) {
146167 -                               opt[cur].rep[2] = opt[cur - mlen].rep[1];
146168 -                               opt[cur].rep[1] = opt[cur - mlen].rep[0];
146169 -                               opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT;
146170 -                       } else {
146171 -                               opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur - mlen].rep[1] : opt[cur - mlen].rep[2];
146172 -                               opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur - mlen].rep[0] : opt[cur - mlen].rep[1];
146173 -                               opt[cur].rep[0] =
146174 -                                   ((opt[cur].off == ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur - mlen].rep[0] - 1) : (opt[cur - mlen].rep[opt[cur].off]);
146175 -                       }
146177 -                       best_mlen = minMatch;
146178 -                       {
146179 -                               U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
146180 -                               for (i = (mlen != 1); i < last_i; i++) {
146181 -                                       const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
146182 -                                       const U32 repIndex = (U32)(curr + cur - repCur);
146183 -                                       const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
146184 -                                       const BYTE *const repMatch = repBase + repIndex;
146185 -                                       if ((repCur > 0 && repCur <= (S32)(curr + cur)) &&
146186 -                                           (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
146187 -                                           && (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch))) {
146188 -                                               /* repcode detected */
146189 -                                               const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
146190 -                                               mlen = (U32)ZSTD_count_2segments(inr + minMatch, repMatch + minMatch, iend, repEnd, prefixStart) + minMatch;
146192 -                                               if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) {
146193 -                                                       best_mlen = mlen;
146194 -                                                       best_off = i;
146195 -                                                       last_pos = cur + 1;
146196 -                                                       goto _storeSequence;
146197 -                                               }
146199 -                                               best_off = i - (opt[cur].mlen != 1);
146200 -                                               if (mlen > best_mlen)
146201 -                                                       best_mlen = mlen;
146203 -                                               do {
146204 -                                                       if (opt[cur].mlen == 1) {
146205 -                                                               litlen = opt[cur].litlen;
146206 -                                                               if (cur > litlen) {
146207 -                                                                       price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr - litlen,
146208 -                                                                                                                       best_off, mlen - MINMATCH, ultra);
146209 -                                                               } else
146210 -                                                                       price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
146211 -                                                       } else {
146212 -                                                               litlen = 0;
146213 -                                                               price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
146214 -                                                       }
146216 -                                                       if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
146217 -                                                               SET_PRICE(cur + mlen, mlen, i, litlen, price);
146218 -                                                       mlen--;
146219 -                                               } while (mlen >= minMatch);
146220 -                                       }
146221 -                               }
146222 -                       }
146224 -                       match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, inr, iend, maxSearches, mls, matches, minMatch);
146226 -                       if (match_num > 0 && (matches[match_num - 1].len > sufficient_len || cur + matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
146227 -                               best_mlen = matches[match_num - 1].len;
146228 -                               best_off = matches[match_num - 1].off;
146229 -                               last_pos = cur + 1;
146230 -                               goto _storeSequence;
146231 -                       }
146233 -                       /* set prices using matches at position = cur */
146234 -                       for (u = 0; u < match_num; u++) {
146235 -                               mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
146236 -                               best_mlen = matches[u].len;
146238 -                               while (mlen <= best_mlen) {
146239 -                                       if (opt[cur].mlen == 1) {
146240 -                                               litlen = opt[cur].litlen;
146241 -                                               if (cur > litlen)
146242 -                                                       price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip + cur - litlen,
146243 -                                                                                                       matches[u].off - 1, mlen - MINMATCH, ultra);
146244 -                                               else
146245 -                                                       price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
146246 -                                       } else {
146247 -                                               litlen = 0;
146248 -                                               price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off - 1, mlen - MINMATCH, ultra);
146249 -                                       }
146251 -                                       if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
146252 -                                               SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price);
146254 -                                       mlen++;
146255 -                               }
146256 -                       }
146257 -               } /* for (cur = 1; cur <= last_pos; cur++) */
146259 -               best_mlen = opt[last_pos].mlen;
146260 -               best_off = opt[last_pos].off;
146261 -               cur = last_pos - best_mlen;
146263 -       /* store sequence */
146264 -_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
146265 -               opt[0].mlen = 1;
146267 -               while (1) {
146268 -                       mlen = opt[cur].mlen;
146269 -                       offset = opt[cur].off;
146270 -                       opt[cur].mlen = best_mlen;
146271 -                       opt[cur].off = best_off;
146272 -                       best_mlen = mlen;
146273 -                       best_off = offset;
146274 -                       if (mlen > cur)
146275 -                               break;
146276 -                       cur -= mlen;
146277 -               }
146279 -               for (u = 0; u <= last_pos;) {
146280 -                       u += opt[u].mlen;
146281 -               }
146283 -               for (cur = 0; cur < last_pos;) {
146284 -                       mlen = opt[cur].mlen;
146285 -                       if (mlen == 1) {
146286 -                               ip++;
146287 -                               cur++;
146288 -                               continue;
146289 -                       }
146290 -                       offset = opt[cur].off;
146291 -                       cur += mlen;
146292 -                       litLength = (U32)(ip - anchor);
146294 -                       if (offset > ZSTD_REP_MOVE_OPT) {
146295 -                               rep[2] = rep[1];
146296 -                               rep[1] = rep[0];
146297 -                               rep[0] = offset - ZSTD_REP_MOVE_OPT;
146298 -                               offset--;
146299 -                       } else {
146300 -                               if (offset != 0) {
146301 -                                       best_off = (offset == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]);
146302 -                                       if (offset != 1)
146303 -                                               rep[2] = rep[1];
146304 -                                       rep[1] = rep[0];
146305 -                                       rep[0] = best_off;
146306 -                               }
146308 -                               if (litLength == 0)
146309 -                                       offset--;
146310 -                       }
146312 -                       ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
146313 -                       ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
146314 -                       anchor = ip = ip + mlen;
146315 -               }
146316 -       } /* for (cur=0; cur < last_pos; ) */
146318 -       /* Save reps for next block */
146319 -       {
146320 -               int i;
146321 -               for (i = 0; i < ZSTD_REP_NUM; i++)
146322 -                       ctx->repToConfirm[i] = rep[i];
146323 -       }
146325 -       /* Last Literals */
146326 -       {
146327 -               size_t lastLLSize = iend - anchor;
146328 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
146329 -               seqStorePtr->lit += lastLLSize;
146330 -       }
146333 -#endif /* ZSTD_OPT_H_91842398743 */
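The optimal parser deleted above maintains zstd's three-entry repeat-offset history, rep[0..2], every time a sequence is stored. Below is a standalone C sketch of just that update rule, mirroring the rep[] shifts in the removed store loop; REP_MOVE stands in for ZSTD_REP_MOVE_OPT (defined elsewhere in the removed header), and the litLength == 0 offset quirk is deliberately left out.

#include <stdio.h>

#define REP_NUM 3
/* Stand-in for ZSTD_REP_MOVE_OPT; offsets above it carry a new distance. */
#define REP_MOVE 3

static unsigned rep[REP_NUM] = { 1, 4, 8 };

/* The rep[] shift from the deleted store loop (litLength == 0 case omitted). */
static void update_reps(unsigned offset)
{
        if (offset > REP_MOVE) {
                /* Brand-new offset: push the history down one slot. */
                rep[2] = rep[1];
                rep[1] = rep[0];
                rep[0] = offset - REP_MOVE;
        } else if (offset != 0) {
                /* Repeat code: rotate the referenced entry to the front. */
                unsigned best = (offset == REP_MOVE) ? rep[0] - 1 : rep[offset];

                if (offset != 1)
                        rep[2] = rep[1];
                rep[1] = rep[0];
                rep[0] = best;
        }
}

int main(void)
{
        update_reps(12);        /* new match at distance 12 - REP_MOVE = 9 */
        update_reps(2);         /* reuse what is now history slot 2 */
        printf("rep = { %u, %u, %u }\n", rep[0], rep[1], rep[2]);
        return 0;
}

For the two calls shown this prints rep = { 4, 9, 1 }.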
146334 diff --git a/localversion b/localversion
146335 new file mode 100644
146336 index 000000000000..c21af2f75ee0
146337 --- /dev/null
146338 +++ b/localversion
146339 @@ -0,0 +1 @@
146340 +-xanmod1
146341 diff --git a/mm/Kconfig b/mm/Kconfig
146342 index 24c045b24b95..5650c2d3c9c2 100644
146343 --- a/mm/Kconfig
146344 +++ b/mm/Kconfig
146345 @@ -122,6 +122,41 @@ config SPARSEMEM_VMEMMAP
146346           pfn_to_page and page_to_pfn operations.  This is the most
146347           efficient option when sufficient kernel resources are available.
146349 +config CLEAN_LOW_KBYTES
146350 +       int "Default value for vm.clean_low_kbytes"
146351 +       depends on SYSCTL
146352 +       default "150000"
146353 +       help
146354 +         The vm.clean_low_kbytes sysctl knob provides *best-effort*
146355 +         protection of clean file pages. The clean file pages on the current
146356 +         node won't be reclaimed under memory pressure when their amount is
146357 +         below vm.clean_low_kbytes *unless* we threaten to OOM or have
146358 +         no free swap space or vm.swappiness=0.
146360 +         Protection of clean file pages may be used to prevent thrashing and
146361 +         reduce I/O under low-memory conditions.
146363 +         Setting it to a high value may result in an early eviction of anonymous
146364 +         pages into the swap space by attempting to hold the protected amount of
146365 +         clean file pages in memory.
146367 +config CLEAN_MIN_KBYTES
146368 +       int "Default value for vm.clean_min_kbytes"
146369 +       depends on SYSCTL
146370 +       default "0"
146371 +       help
146372 +         The vm.clean_min_kbytes sysctl knob provides *hard* protection
146373 +         of clean file pages. The clean file pages on the current node won't be
146374 +         reclaimed under memory pressure when their amount is below
146375 +         vm.clean_min_kbytes.
146377 +         Hard protection of clean file pages may be used to avoid high latency and
146378 +         prevent livelock in near-OOM conditions.
146380 +         Setting it to a high value may result in an early out-of-memory condition
146381 +         due to the inability to reclaim the protected amount of clean file pages
146382 +         when other types of pages cannot be reclaimed.
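A minimal userspace sketch of driving the two knobs above at runtime. It assumes the sysctls surface as /proc/sys/vm/clean_low_kbytes and /proc/sys/vm/clean_min_kbytes (paths inferred from the names; the hunk itself only sets the build-time defaults):

#include <stdio.h>

/* Write one value into a sysctl file; returns 0 on success. */
static int write_sysctl(const char *path, long kbytes)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fprintf(f, "%ld\n", kbytes);
        return fclose(f);
}

int main(void)
{
        /* Best-effort: try to keep ~150 MB of clean file pages resident. */
        write_sysctl("/proc/sys/vm/clean_low_kbytes", 150000);
        /* Hard floor off, matching the CLEAN_MIN_KBYTES default of 0. */
        write_sysctl("/proc/sys/vm/clean_min_kbytes", 0);
        return 0;
}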
146384  config HAVE_MEMBLOCK_PHYS_MAP
146385         bool
146387 @@ -872,4 +907,59 @@ config MAPPING_DIRTY_HELPERS
146388  config KMAP_LOCAL
146389         bool
146391 +config LRU_GEN
146392 +       bool "Multigenerational LRU"
146393 +       depends on MMU
146394 +       help
146395 +         A high performance LRU implementation for heavily overcommitted workloads
146396 +         that are not IO bound. See Documentation/vm/multigen_lru.rst for
146397 +         details.
146399 +         Warning: do not enable this option unless you plan to use it because
146400 +         it introduces a small per-process, per-memcg, and per-node memory
146401 +         overhead.
146403 +config NR_LRU_GENS
146404 +       int "Max number of generations"
146405 +       depends on LRU_GEN
146406 +       range 4 31
146407 +       default 7
146408 +       help
146409 +         This will use order_base_2(N+1) spare bits from page flags.
146411 +         Warning: do not use numbers larger than necessary because each
146412 +         generation introduces a small per-node and per-memcg memory overhead.
146414 +config TIERS_PER_GEN
146415 +       int "Number of tiers per generation"
146416 +       depends on LRU_GEN
146417 +       range 2 5
146418 +       default 4
146419 +       help
146420 +         This will use N-2 spare bits from page flags.
146422 +         Higher values generally offer better protection to active pages under
146423 +         heavy buffered I/O workloads.
146425 +config LRU_GEN_ENABLED
146426 +       bool "Turn on by default"
146427 +       depends on LRU_GEN
146428 +       help
146429 +         The default value of /sys/kernel/mm/lru_gen/enabled is 0. This option
146430 +         changes it to 1.
146432 +         Warning: the default value is the fast path. See
146433 +         Documentation/static-keys.txt for details.
146435 +config LRU_GEN_STATS
146436 +       bool "Full stats for debugging"
146437 +       depends on LRU_GEN
146438 +       help
146439 +         This option keeps full stats for each generation, which can be read
146440 +         from /sys/kernel/debug/lru_gen_full.
146442 +         Warning: do not enable this option unless you plan to use it because
146443 +         it introduces an additional small per-process, per-memcg, and
146444 +         per-node memory overhead.
146446  endmenu
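With the defaults above, the page-flag cost comes to order_base_2(7 + 1) = 3 bits for generations plus 4 - 2 = 2 bits for tiers. The LRU_GEN_ENABLED help also names a runtime switch; here is a small sketch of flipping it from userspace, assuming only the sysfs path quoted in that help text:

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/kernel/mm/lru_gen/enabled", "r+");
        char buf[8];

        if (!f) {
                perror("lru_gen");
                return 1;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("multigenerational LRU enabled: %s", buf);
        rewind(f);      /* reposition before switching from read to write */
        /* Same effect as building with CONFIG_LRU_GEN_ENABLED=y. */
        fputs("1", f);
        return fclose(f) ? 1 : 0;
}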
146447 diff --git a/mm/gup.c b/mm/gup.c
146448 index ef7d2da9f03f..4164a70160e3 100644
146449 --- a/mm/gup.c
146450 +++ b/mm/gup.c
146451 @@ -1535,10 +1535,6 @@ struct page *get_dump_page(unsigned long addr)
146452                                       FOLL_FORCE | FOLL_DUMP | FOLL_GET);
146453         if (locked)
146454                 mmap_read_unlock(mm);
146456 -       if (ret == 1 && is_page_poisoned(page))
146457 -               return NULL;
146459         return (ret == 1) ? page : NULL;
146461  #endif /* CONFIG_ELF_CORE */
146462 @@ -1551,54 +1547,60 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
146463                                         struct vm_area_struct **vmas,
146464                                         unsigned int gup_flags)
146466 -       unsigned long i;
146467 -       unsigned long step;
146468 -       bool drain_allow = true;
146469 -       bool migrate_allow = true;
146470 +       unsigned long i, isolation_error_count;
146471 +       bool drain_allow;
146472         LIST_HEAD(cma_page_list);
146473         long ret = nr_pages;
146474 +       struct page *prev_head, *head;
146475         struct migration_target_control mtc = {
146476                 .nid = NUMA_NO_NODE,
146477                 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN,
146478         };
146480  check_again:
146481 -       for (i = 0; i < nr_pages;) {
146483 -               struct page *head = compound_head(pages[i]);
146485 -               /*
146486 -                * gup may start from a tail page. Advance step by the left
146487 -                * part.
146488 -                */
146489 -               step = compound_nr(head) - (pages[i] - head);
146490 +       prev_head = NULL;
146491 +       isolation_error_count = 0;
146492 +       drain_allow = true;
146493 +       for (i = 0; i < nr_pages; i++) {
146494 +               head = compound_head(pages[i]);
146495 +               if (head == prev_head)
146496 +                       continue;
146497 +               prev_head = head;
146498                 /*
146499                  * If we get a page from the CMA zone, since we are going to
146500                  * be pinning these entries, we might as well move them out
146501                  * of the CMA zone if possible.
146502                  */
146503                 if (is_migrate_cma_page(head)) {
146504 -                       if (PageHuge(head))
146505 -                               isolate_huge_page(head, &cma_page_list);
146506 -                       else {
146507 +                       if (PageHuge(head)) {
146508 +                               if (!isolate_huge_page(head, &cma_page_list))
146509 +                                       isolation_error_count++;
146510 +                       } else {
146511                                 if (!PageLRU(head) && drain_allow) {
146512                                         lru_add_drain_all();
146513                                         drain_allow = false;
146514                                 }
146516 -                               if (!isolate_lru_page(head)) {
146517 -                                       list_add_tail(&head->lru, &cma_page_list);
146518 -                                       mod_node_page_state(page_pgdat(head),
146519 -                                                           NR_ISOLATED_ANON +
146520 -                                                           page_is_file_lru(head),
146521 -                                                           thp_nr_pages(head));
146522 +                               if (isolate_lru_page(head)) {
146523 +                                       isolation_error_count++;
146524 +                                       continue;
146525                                 }
146526 +                               list_add_tail(&head->lru, &cma_page_list);
146527 +                               mod_node_page_state(page_pgdat(head),
146528 +                                                   NR_ISOLATED_ANON +
146529 +                                                   page_is_file_lru(head),
146530 +                                                   thp_nr_pages(head));
146531                         }
146532                 }
146534 -               i += step;
146535         }
146537 +       /*
146538 +        * If the list is empty and there were no isolation errors, all pages
146539 +        * are already in the correct zone.
146540 +        */
146541 +       if (list_empty(&cma_page_list) && !isolation_error_count)
146542 +               return ret;
146544         if (!list_empty(&cma_page_list)) {
146545                 /*
146546                  * drop the above get_user_pages reference.
146547 @@ -1609,34 +1611,28 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
146548                         for (i = 0; i < nr_pages; i++)
146549                                 put_page(pages[i]);
146551 -               if (migrate_pages(&cma_page_list, alloc_migration_target, NULL,
146552 -                       (unsigned long)&mtc, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
146553 -                       /*
146554 -                        * some of the pages failed migration. Do get_user_pages
146555 -                        * without migration.
146556 -                        */
146557 -                       migrate_allow = false;
146559 +               ret = migrate_pages(&cma_page_list, alloc_migration_target,
146560 +                                   NULL, (unsigned long)&mtc, MIGRATE_SYNC,
146561 +                                   MR_CONTIG_RANGE);
146562 +               if (ret) {
146563                         if (!list_empty(&cma_page_list))
146564                                 putback_movable_pages(&cma_page_list);
146565 +                       return ret > 0 ? -ENOMEM : ret;
146566                 }
146567 -               /*
146568 -                * We did migrate all the pages, Try to get the page references
146569 -                * again migrating any new CMA pages which we failed to isolate
146570 -                * earlier.
146571 -                */
146572 -               ret = __get_user_pages_locked(mm, start, nr_pages,
146573 -                                                  pages, vmas, NULL,
146574 -                                                  gup_flags);
146576 -               if ((ret > 0) && migrate_allow) {
146577 -                       nr_pages = ret;
146578 -                       drain_allow = true;
146579 -                       goto check_again;
146580 -               }
146582 +               /* We unpinned pages before migration, pin them again */
146583 +               ret = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
146584 +                                             NULL, gup_flags);
146585 +               if (ret <= 0)
146586 +                       return ret;
146587 +               nr_pages = ret;
146588         }
146590 -       return ret;
146591 +       /*
146592 +        * Check again: the pages were unpinned, and we might also have had
146593 +        * isolation errors, leaving more pages that need to migrate.
146594 +        */
146595 +       goto check_again;
146597  #else
146598  static long check_and_migrate_cma_pages(struct mm_struct *mm,
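The rework above drops the one-shot migrate_allow bail-out: the function now jumps back to check_again after every migration round and only returns once a pass finds neither CMA pages nor isolation errors. A runnable userspace analogue of that retry-until-stable shape (the page array and migrate() are simulated; nothing here is kernel API):

#include <stdio.h>
#include <stdlib.h>

#define NR_PAGES 8

/* 1 = page sits in the simulated CMA zone, 0 = already migrated. */
static int in_cma[NR_PAGES] = { 0, 1, 0, 1, 1, 0, 0, 1 };

/* Pretend to migrate one page, failing now and then like isolate_lru_page(). */
static int migrate(int i)
{
        if (rand() % 4 == 0)
                return -1;      /* counted like isolation_error_count */
        in_cma[i] = 0;
        return 0;
}

int main(void)
{
        int passes = 0;

check_again:
        passes++;
        int pending = 0;

        for (int i = 0; i < NR_PAGES; i++) {
                if (!in_cma[i])
                        continue;
                pending++;
                migrate(i);     /* failures stay pending for the next pass */
        }
        if (pending)    /* the patch unpins, migrates, and rescans here */
                goto check_again;
        printf("all pages outside CMA after %d pass(es)\n", passes);
        return 0;
}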
146599 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
146600 index ae907a9c2050..2cf46270c84b 100644
146601 --- a/mm/huge_memory.c
146602 +++ b/mm/huge_memory.c
146603 @@ -637,7 +637,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
146604                 entry = mk_huge_pmd(page, vma->vm_page_prot);
146605                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
146606                 page_add_new_anon_rmap(page, vma, haddr, true);
146607 -               lru_cache_add_inactive_or_unevictable(page, vma);
146608 +               lru_cache_add_page_vma(page, vma, true);
146609                 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
146610                 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
146611                 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
146612 @@ -2418,7 +2418,8 @@ static void __split_huge_page_tail(struct page *head, int tail,
146613  #ifdef CONFIG_64BIT
146614                          (1L << PG_arch_2) |
146615  #endif
146616 -                        (1L << PG_dirty)));
146617 +                        (1L << PG_dirty) |
146618 +                        LRU_GEN_MASK | LRU_USAGE_MASK));
146620         /* ->mapping in first tail page is compound_mapcount */
146621         VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
146622 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
146623 index a86a58ef132d..96b722af092e 100644
146624 --- a/mm/hugetlb.c
146625 +++ b/mm/hugetlb.c
146626 @@ -743,13 +743,20 @@ void hugetlb_fix_reserve_counts(struct inode *inode)
146628         struct hugepage_subpool *spool = subpool_inode(inode);
146629         long rsv_adjust;
146630 +       bool reserved = false;
146632         rsv_adjust = hugepage_subpool_get_pages(spool, 1);
146633 -       if (rsv_adjust) {
146634 +       if (rsv_adjust > 0) {
146635                 struct hstate *h = hstate_inode(inode);
146637 -               hugetlb_acct_memory(h, 1);
146638 +               if (!hugetlb_acct_memory(h, 1))
146639 +                       reserved = true;
146640 +       } else if (!rsv_adjust) {
146641 +               reserved = true;
146642         }
146644 +       if (!reserved)
146645 +               pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
146649 @@ -3898,6 +3905,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
146650                                  * See Documentation/vm/mmu_notifier.rst
146651                                  */
146652                                 huge_ptep_set_wrprotect(src, addr, src_pte);
146653 +                               entry = huge_pte_wrprotect(entry);
146654                         }
146656                         page_dup_rmap(ptepage, true);
146657 diff --git a/mm/internal.h b/mm/internal.h
146658 index cb3c5e0a7799..1432feec62df 100644
146659 --- a/mm/internal.h
146660 +++ b/mm/internal.h
146661 @@ -97,26 +97,6 @@ static inline void set_page_refcounted(struct page *page)
146662         set_page_count(page, 1);
146666 - * When kernel touch the user page, the user page may be have been marked
146667 - * poison but still mapped in user space, if without this page, the kernel
146668 - * can guarantee the data integrity and operation success, the kernel is
146669 - * better to check the posion status and avoid touching it, be good not to
146670 - * panic, coredump for process fatal signal is a sample case matching this
146671 - * scenario. Or if kernel can't guarantee the data integrity, it's better
146672 - * not to call this function, let kernel touch the poison page and get to
146673 - * panic.
146674 - */
146675 -static inline bool is_page_poisoned(struct page *page)
146677 -       if (PageHWPoison(page))
146678 -               return true;
146679 -       else if (PageHuge(page) && PageHWPoison(compound_head(page)))
146680 -               return true;
146682 -       return false;
146685  extern unsigned long highest_memmap_pfn;
146688 diff --git a/mm/kfence/core.c b/mm/kfence/core.c
146689 index d53c91f881a4..f0be2c5038b5 100644
146690 --- a/mm/kfence/core.c
146691 +++ b/mm/kfence/core.c
146692 @@ -10,6 +10,7 @@
146693  #include <linux/atomic.h>
146694  #include <linux/bug.h>
146695  #include <linux/debugfs.h>
146696 +#include <linux/irq_work.h>
146697  #include <linux/kcsan-checks.h>
146698  #include <linux/kfence.h>
146699  #include <linux/kmemleak.h>
146700 @@ -586,6 +587,17 @@ late_initcall(kfence_debugfs_init);
146702  /* === Allocation Gate Timer ================================================ */
146704 +#ifdef CONFIG_KFENCE_STATIC_KEYS
146705 +/* Wait queue to wake up allocation-gate timer task. */
146706 +static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);
146708 +static void wake_up_kfence_timer(struct irq_work *work)
146709 +{
146710 +       wake_up(&allocation_wait);
146711 +}
146712 +static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
146713 +#endif
146716   * Set up delayed work, which will enable and disable the static key. We need to
146717   * use a work queue (rather than a simple timer), since enabling and disabling a
146718 @@ -603,25 +615,13 @@ static void toggle_allocation_gate(struct work_struct *work)
146719         if (!READ_ONCE(kfence_enabled))
146720                 return;
146722 -       /* Enable static key, and await allocation to happen. */
146723         atomic_set(&kfence_allocation_gate, 0);
146724  #ifdef CONFIG_KFENCE_STATIC_KEYS
146725 +       /* Enable static key, and await allocation to happen. */
146726         static_branch_enable(&kfence_allocation_key);
146727 -       /*
146728 -        * Await an allocation. Timeout after 1 second, in case the kernel stops
146729 -        * doing allocations, to avoid stalling this worker task for too long.
146730 -        */
146731 -       {
146732 -               unsigned long end_wait = jiffies + HZ;
146734 -               do {
146735 -                       set_current_state(TASK_UNINTERRUPTIBLE);
146736 -                       if (atomic_read(&kfence_allocation_gate) != 0)
146737 -                               break;
146738 -                       schedule_timeout(1);
146739 -               } while (time_before(jiffies, end_wait));
146740 -               __set_current_state(TASK_RUNNING);
146741 -       }
146743 +       wait_event_timeout(allocation_wait, atomic_read(&kfence_allocation_gate), HZ);
146745         /* Disable static key and reset timer. */
146746         static_branch_disable(&kfence_allocation_key);
146747  #endif
146748 @@ -728,6 +728,19 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
146749          */
146750         if (atomic_read(&kfence_allocation_gate) || atomic_inc_return(&kfence_allocation_gate) > 1)
146751                 return NULL;
146752 +#ifdef CONFIG_KFENCE_STATIC_KEYS
146753 +       /*
146754 +        * waitqueue_active() is fully ordered after the update of
146755 +        * kfence_allocation_gate per atomic_inc_return().
146756 +        */
146757 +       if (waitqueue_active(&allocation_wait)) {
146758 +               /*
146759 +                * Calling wake_up() here may deadlock when allocations happen
146760 +                * from within timer code. Use an irq_work to defer it.
146761 +                */
146762 +               irq_work_queue(&wake_up_kfence_timer_work);
146763 +       }
146764 +#endif
146766         if (!READ_ONCE(kfence_enabled))
146767                 return NULL;
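The kfence hunks above swap the schedule_timeout() polling loop for wait_event_timeout() on a waitqueue that __kfence_alloc() kicks through irq_work, keeping the wake-up out of timer context where a direct wake_up() could deadlock. A rough userspace analogue of that wait/wake pairing using POSIX threads (illustrative only; the irq_work deferral itself is not modeled):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t allocation_wait = PTHREAD_COND_INITIALIZER;
static int allocation_gate;     /* stands in for kfence_allocation_gate */

/* Analogue of wait_event_timeout(allocation_wait, gate != 0, HZ). */
static int wait_for_allocation(void)
{
        struct timespec ts;
        int seen;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += 1; /* roughly the HZ timeout in the patch */
        pthread_mutex_lock(&lock);
        while (!allocation_gate)
                if (pthread_cond_timedwait(&allocation_wait, &lock, &ts))
                        break;  /* timed out */
        seen = allocation_gate;
        pthread_mutex_unlock(&lock);
        return seen;
}

/* Analogue of the wake-up performed on behalf of __kfence_alloc(). */
static void *allocator(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        allocation_gate = 1;
        pthread_cond_signal(&allocation_wait);
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, allocator, NULL);
        printf("allocation observed: %d\n", wait_for_allocation());
        pthread_join(t, NULL);
        return 0;
}

Build with cc -pthread.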
146768 diff --git a/mm/khugepaged.c b/mm/khugepaged.c
146769 index a7d6cb912b05..fd14b1e3c6f1 100644
146770 --- a/mm/khugepaged.c
146771 +++ b/mm/khugepaged.c
146772 @@ -716,17 +716,17 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
146773                 if (pte_write(pteval))
146774                         writable = true;
146775         }
146776 -       if (likely(writable)) {
146777 -               if (likely(referenced)) {
146778 -                       result = SCAN_SUCCEED;
146779 -                       trace_mm_collapse_huge_page_isolate(page, none_or_zero,
146780 -                                                           referenced, writable, result);
146781 -                       return 1;
146782 -               }
146783 -       } else {
146785 +       if (unlikely(!writable)) {
146786                 result = SCAN_PAGE_RO;
146787 +       } else if (unlikely(!referenced)) {
146788 +               result = SCAN_LACK_REFERENCED_PAGE;
146789 +       } else {
146790 +               result = SCAN_SUCCEED;
146791 +               trace_mm_collapse_huge_page_isolate(page, none_or_zero,
146792 +                                                   referenced, writable, result);
146793 +               return 1;
146794         }
146796  out:
146797         release_pte_pages(pte, _pte, compound_pagelist);
146798         trace_mm_collapse_huge_page_isolate(page, none_or_zero,
146799 @@ -1199,7 +1199,7 @@ static void collapse_huge_page(struct mm_struct *mm,
146800         spin_lock(pmd_ptl);
146801         BUG_ON(!pmd_none(*pmd));
146802         page_add_new_anon_rmap(new_page, vma, address, true);
146803 -       lru_cache_add_inactive_or_unevictable(new_page, vma);
146804 +       lru_cache_add_page_vma(new_page, vma, true);
146805         pgtable_trans_huge_deposit(mm, pmd, pgtable);
146806         set_pmd_at(mm, address, pmd, _pmd);
146807         update_mmu_cache_pmd(vma, address, pmd);
146808 diff --git a/mm/ksm.c b/mm/ksm.c
146809 index 9694ee2c71de..b32391ccf6d5 100644
146810 --- a/mm/ksm.c
146811 +++ b/mm/ksm.c
146812 @@ -794,6 +794,7 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
146813                 stable_node->rmap_hlist_len--;
146815                 put_anon_vma(rmap_item->anon_vma);
146816 +               rmap_item->head = NULL;
146817                 rmap_item->address &= PAGE_MASK;
146819         } else if (rmap_item->address & UNSTABLE_FLAG) {
146820 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
146821 index e064ac0d850a..594f99eba9c0 100644
146822 --- a/mm/memcontrol.c
146823 +++ b/mm/memcontrol.c
146824 @@ -3181,9 +3181,17 @@ static void drain_obj_stock(struct memcg_stock_pcp *stock)
146825                 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
146827                 if (nr_pages) {
146828 +                       struct mem_cgroup *memcg;
146830                         rcu_read_lock();
146831 -                       __memcg_kmem_uncharge(obj_cgroup_memcg(old), nr_pages);
146832 +retry:
146833 +                       memcg = obj_cgroup_memcg(old);
146834 +                       if (unlikely(!css_tryget(&memcg->css)))
146835 +                               goto retry;
146836                         rcu_read_unlock();
146838 +                       __memcg_kmem_uncharge(memcg, nr_pages);
146839 +                       css_put(&memcg->css);
146840                 }
146842                 /*
146843 @@ -5206,6 +5214,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
146844                 free_mem_cgroup_per_node_info(memcg, node);
146845         free_percpu(memcg->vmstats_percpu);
146846         free_percpu(memcg->vmstats_local);
146847 +       lru_gen_free_mm_list(memcg);
146848         kfree(memcg);
146851 @@ -5258,6 +5267,9 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
146852                 if (alloc_mem_cgroup_per_node_info(memcg, node))
146853                         goto fail;
146855 +       if (lru_gen_alloc_mm_list(memcg))
146856 +               goto fail;
146858         if (memcg_wb_domain_init(memcg, GFP_KERNEL))
146859                 goto fail;
146861 @@ -6162,6 +6174,29 @@ static void mem_cgroup_move_task(void)
146863  #endif
146865 +#ifdef CONFIG_LRU_GEN
146866 +static void mem_cgroup_attach(struct cgroup_taskset *tset)
146867 +{
146868 +       struct cgroup_subsys_state *css;
146869 +       struct task_struct *task = NULL;
146871 +       cgroup_taskset_for_each_leader(task, css, tset)
146872 +               ;
146874 +       if (!task)
146875 +               return;
146877 +       task_lock(task);
146878 +       if (task->mm && task->mm->owner == task)
146879 +               lru_gen_migrate_mm(task->mm);
146880 +       task_unlock(task);
146881 +}
146882 +#else
146883 +static void mem_cgroup_attach(struct cgroup_taskset *tset)
146884 +{
146885 +}
146886 +#endif
146888  static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
146890         if (value == PAGE_COUNTER_MAX)
146891 @@ -6502,6 +6537,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
146892         .css_free = mem_cgroup_css_free,
146893         .css_reset = mem_cgroup_css_reset,
146894         .can_attach = mem_cgroup_can_attach,
146895 +       .attach = mem_cgroup_attach,
146896         .cancel_attach = mem_cgroup_cancel_attach,
146897         .post_attach = mem_cgroup_move_task,
146898         .dfl_cftypes = memory_files,
146899 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
146900 index 24210c9bd843..bd3945446d47 100644
146901 --- a/mm/memory-failure.c
146902 +++ b/mm/memory-failure.c
146903 @@ -1368,7 +1368,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
146904                  * communicated in siginfo, see kill_proc()
146905                  */
146906                 start = (page->index << PAGE_SHIFT) & ~(size - 1);
146907 -               unmap_mapping_range(page->mapping, start, start + size, 0);
146908 +               unmap_mapping_range(page->mapping, start, size, 0);
146909         }
146910         kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags);
146911         rc = 0;
146912 diff --git a/mm/memory.c b/mm/memory.c
146913 index 550405fc3b5e..a1332ba9c0da 100644
146914 --- a/mm/memory.c
146915 +++ b/mm/memory.c
146916 @@ -73,6 +73,7 @@
146917  #include <linux/perf_event.h>
146918  #include <linux/ptrace.h>
146919  #include <linux/vmalloc.h>
146920 +#include <linux/mm_inline.h>
146922  #include <trace/events/kmem.h>
146924 @@ -839,7 +840,7 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
146925         copy_user_highpage(new_page, page, addr, src_vma);
146926         __SetPageUptodate(new_page);
146927         page_add_new_anon_rmap(new_page, dst_vma, addr, false);
146928 -       lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
146929 +       lru_cache_add_page_vma(new_page, dst_vma, false);
146930         rss[mm_counter(new_page)]++;
146932         /* All done, just insert the new page copy in the child */
146933 @@ -1548,6 +1549,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
146934         mmu_notifier_invalidate_range_end(&range);
146935         tlb_finish_mmu(&tlb);
146937 +EXPORT_SYMBOL(zap_page_range);
146939  /**
146940   * zap_page_range_single - remove user pages in a given range
146941 @@ -2907,7 +2909,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
146942                  */
146943                 ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
146944                 page_add_new_anon_rmap(new_page, vma, vmf->address, false);
146945 -               lru_cache_add_inactive_or_unevictable(new_page, vma);
146946 +               lru_cache_add_page_vma(new_page, vma, true);
146947                 /*
146948                  * We call the notify macro here because, when using secondary
146949                  * mmu page tables (such as kvm shadow page tables), we want the
146950 @@ -3438,9 +3440,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
146951         /* ksm created a completely new copy */
146952         if (unlikely(page != swapcache && swapcache)) {
146953                 page_add_new_anon_rmap(page, vma, vmf->address, false);
146954 -               lru_cache_add_inactive_or_unevictable(page, vma);
146955 +               lru_cache_add_page_vma(page, vma, true);
146956         } else {
146957                 do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
146958 +               lru_gen_activation(page, vma);
146959         }
146961         swap_free(entry);
146962 @@ -3584,7 +3587,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
146964         inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
146965         page_add_new_anon_rmap(page, vma, vmf->address, false);
146966 -       lru_cache_add_inactive_or_unevictable(page, vma);
146967 +       lru_cache_add_page_vma(page, vma, true);
146968  setpte:
146969         set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
146971 @@ -3709,6 +3712,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
146973         add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
146974         page_add_file_rmap(page, true);
146975 +       lru_gen_activation(page, vma);
146976         /*
146977          * deposit and withdraw with pmd lock held
146978          */
146979 @@ -3752,10 +3756,11 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
146980         if (write && !(vma->vm_flags & VM_SHARED)) {
146981                 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
146982                 page_add_new_anon_rmap(page, vma, addr, false);
146983 -               lru_cache_add_inactive_or_unevictable(page, vma);
146984 +               lru_cache_add_page_vma(page, vma, true);
146985         } else {
146986                 inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
146987                 page_add_file_rmap(page, false);
146988 +               lru_gen_activation(page, vma);
146989         }
146990         set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
146992 diff --git a/mm/migrate.c b/mm/migrate.c
146993 index 62b81d5257aa..9a50fd026236 100644
146994 --- a/mm/migrate.c
146995 +++ b/mm/migrate.c
146996 @@ -2973,6 +2973,13 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
146998                         swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
146999                         entry = swp_entry_to_pte(swp_entry);
147000 +               } else {
147001 +                       /*
147002 +                        * For now we only support migrating to un-addressable
147003 +                        * device memory.
147004 +                        */
147005 +                       pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
147006 +                       goto abort;
147007                 }
147008         } else {
147009                 entry = mk_pte(page, vma->vm_page_prot);
147010 @@ -3004,7 +3011,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
147011         inc_mm_counter(mm, MM_ANONPAGES);
147012         page_add_new_anon_rmap(page, vma, addr, false);
147013         if (!is_zone_device_page(page))
147014 -               lru_cache_add_inactive_or_unevictable(page, vma);
147015 +               lru_cache_add_page_vma(page, vma, false);
147016         get_page(page);
147018         if (flush) {
147019 diff --git a/mm/mm_init.c b/mm/mm_init.c
147020 index 8e02e865cc65..6303ed7aa511 100644
147021 --- a/mm/mm_init.c
147022 +++ b/mm/mm_init.c
147023 @@ -71,27 +71,33 @@ void __init mminit_verify_pageflags_layout(void)
147024         width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
147025                 - LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH;
147026         mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
147027 -               "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Flags %d\n",
147028 +               "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d lru gen %d tier %d Flags %d\n",
147029                 SECTIONS_WIDTH,
147030                 NODES_WIDTH,
147031                 ZONES_WIDTH,
147032                 LAST_CPUPID_WIDTH,
147033                 KASAN_TAG_WIDTH,
147034 +               LRU_GEN_WIDTH,
147035 +               LRU_USAGE_WIDTH,
147036                 NR_PAGEFLAGS);
147037         mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
147038 -               "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
147039 +               "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d lru gen %d tier %d\n",
147040                 SECTIONS_SHIFT,
147041                 NODES_SHIFT,
147042                 ZONES_SHIFT,
147043                 LAST_CPUPID_SHIFT,
147044 -               KASAN_TAG_WIDTH);
147045 +               KASAN_TAG_WIDTH,
147046 +               LRU_GEN_WIDTH,
147047 +               LRU_USAGE_WIDTH);
147048         mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
147049 -               "Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
147050 +               "Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu lru gen %lu tier %lu\n",
147051                 (unsigned long)SECTIONS_PGSHIFT,
147052                 (unsigned long)NODES_PGSHIFT,
147053                 (unsigned long)ZONES_PGSHIFT,
147054                 (unsigned long)LAST_CPUPID_PGSHIFT,
147055 -               (unsigned long)KASAN_TAG_PGSHIFT);
147056 +               (unsigned long)KASAN_TAG_PGSHIFT,
147057 +               (unsigned long)LRU_GEN_PGOFF,
147058 +               (unsigned long)LRU_USAGE_PGOFF);
147059         mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
147060                 "Node/Zone ID: %lu -> %lu\n",
147061                 (unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
147062 diff --git a/mm/mmzone.c b/mm/mmzone.c
147063 index eb89d6e018e2..2ec0d7793424 100644
147064 --- a/mm/mmzone.c
147065 +++ b/mm/mmzone.c
147066 @@ -81,6 +81,8 @@ void lruvec_init(struct lruvec *lruvec)
147068         for_each_lru(lru)
147069                 INIT_LIST_HEAD(&lruvec->lists[lru]);
147071 +       lru_gen_init_lruvec(lruvec);
147074  #if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
147075 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
147076 index cfc72873961d..4bb3cdfc47f8 100644
147077 --- a/mm/page_alloc.c
147078 +++ b/mm/page_alloc.c
147079 @@ -764,32 +764,36 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
147080   */
147081  void init_mem_debugging_and_hardening(void)
147083 +       bool page_poisoning_requested = false;
147085 +#ifdef CONFIG_PAGE_POISONING
147086 +       /*
147087 +        * Page poisoning is debug page alloc for some arches. If
147088 +        * either of those options is enabled, enable poisoning.
147089 +        */
147090 +       if (page_poisoning_enabled() ||
147091 +            (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
147092 +             debug_pagealloc_enabled())) {
147093 +               static_branch_enable(&_page_poisoning_enabled);
147094 +               page_poisoning_requested = true;
147095 +       }
147096 +#endif
147098         if (_init_on_alloc_enabled_early) {
147099 -               if (page_poisoning_enabled())
147100 +               if (page_poisoning_requested)
147101                         pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
147102                                 "will take precedence over init_on_alloc\n");
147103                 else
147104                         static_branch_enable(&init_on_alloc);
147105         }
147106         if (_init_on_free_enabled_early) {
147107 -               if (page_poisoning_enabled())
147108 +               if (page_poisoning_requested)
147109                         pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
147110                                 "will take precedence over init_on_free\n");
147111                 else
147112                         static_branch_enable(&init_on_free);
147113         }
147115 -#ifdef CONFIG_PAGE_POISONING
147116 -       /*
147117 -        * Page poisoning is debug page alloc for some arches. If
147118 -        * either of those options are enabled, enable poisoning.
147119 -        */
147120 -       if (page_poisoning_enabled() ||
147121 -            (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
147122 -             debug_pagealloc_enabled()))
147123 -               static_branch_enable(&_page_poisoning_enabled);
147124 -#endif
147126  #ifdef CONFIG_DEBUG_PAGEALLOC
147127         if (!debug_pagealloc_enabled())
147128                 return;
147129 diff --git a/mm/rmap.c b/mm/rmap.c
147130 index b0fc27e77d6d..d600b282ced5 100644
147131 --- a/mm/rmap.c
147132 +++ b/mm/rmap.c
147133 @@ -72,6 +72,7 @@
147134  #include <linux/page_idle.h>
147135  #include <linux/memremap.h>
147136  #include <linux/userfaultfd_k.h>
147137 +#include <linux/mm_inline.h>
147139  #include <asm/tlbflush.h>
147141 @@ -792,6 +793,11 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
147142                 }
147144                 if (pvmw.pte) {
147145 +                       /* the multigenerational LRU exploits spatial locality */
147146 +                       if (lru_gen_enabled() && pte_young(*pvmw.pte)) {
147147 +                               lru_gen_scan_around(&pvmw);
147148 +                               referenced++;
147149 +                       }
147150                         if (ptep_clear_flush_young_notify(vma, address,
147151                                                 pvmw.pte)) {
147152                                 /*
147153 diff --git a/mm/shmem.c b/mm/shmem.c
147154 index b2db4ed0fbc7..9dd24a2f0b7a 100644
147155 --- a/mm/shmem.c
147156 +++ b/mm/shmem.c
147157 @@ -2258,25 +2258,11 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
147158  static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
147160         struct shmem_inode_info *info = SHMEM_I(file_inode(file));
147161 +       int ret;
147163 -       if (info->seals & F_SEAL_FUTURE_WRITE) {
147164 -               /*
147165 -                * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
147166 -                * "future write" seal active.
147167 -                */
147168 -               if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
147169 -                       return -EPERM;
147171 -               /*
147172 -                * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
147173 -                * MAP_SHARED and read-only, take care to not allow mprotect to
147174 -                * revert protections on such mappings. Do this only for shared
147175 -                * mappings. For private mappings, don't need to mask
147176 -                * VM_MAYWRITE as we still want them to be COW-writable.
147177 -                */
147178 -               if (vma->vm_flags & VM_SHARED)
147179 -                       vma->vm_flags &= ~(VM_MAYWRITE);
147180 -       }
147181 +       ret = seal_check_future_write(info->seals, vma);
147182 +       if (ret)
147183 +               return ret;
147185         /* arm64 - allow memory tagging on RAM-based files */
147186         vma->vm_flags |= VM_MTE_ALLOWED;
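shmem_mmap() now delegates the policy spelled out in the removed comment to a common seal_check_future_write() helper; the user-visible behaviour is unchanged. It can be probed from userspace on Linux 5.1+ with a libc that exposes memfd_create() and F_SEAL_FUTURE_WRITE:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = memfd_create("sealed", MFD_ALLOW_SEALING);
        void *p;

        if (fd < 0 || ftruncate(fd, 4096) < 0)
                return 1;
        if (fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE) < 0)
                return 1;
        /* Writable shared mappings must now be refused with EPERM... */
        p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        printf("writable MAP_SHARED:  %s\n",
               p == MAP_FAILED ? strerror(errno) : "allowed");
        /* ...while read-only shared mappings keep working. */
        p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
        printf("read-only MAP_SHARED: %s\n",
               p == MAP_FAILED ? strerror(errno) : "allowed");
        close(fd);
        return 0;
}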
147187 @@ -2375,8 +2361,18 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
147188         pgoff_t offset, max_off;
147190         ret = -ENOMEM;
147191 -       if (!shmem_inode_acct_block(inode, 1))
147192 +       if (!shmem_inode_acct_block(inode, 1)) {
147193 +               /*
147194 +                * We may have got a page, returned -ENOENT triggering a retry,
147195 +                * and now we find ourselves with -ENOMEM. Release the page, to
147196 +                * avoid a BUG_ON in our caller.
147197 +                */
147198 +               if (unlikely(*pagep)) {
147199 +                       put_page(*pagep);
147200 +                       *pagep = NULL;
147201 +               }
147202                 goto out;
147203 +       }
147205         if (!*pagep) {
147206                 page = shmem_alloc_page(gfp, info, pgoff);
147207 @@ -4233,6 +4229,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
147209         return 0;
147211 +EXPORT_SYMBOL_GPL(shmem_zero_setup);
147213  /**
147214   * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
147215 diff --git a/mm/sparse.c b/mm/sparse.c
147216 index 7bd23f9d6cef..33406ea2ecc4 100644
147217 --- a/mm/sparse.c
147218 +++ b/mm/sparse.c
147219 @@ -547,6 +547,7 @@ static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
147220                         pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
147221                                __func__, nid);
147222                         pnum_begin = pnum;
147223 +                       sparse_buffer_fini();
147224                         goto failed;
147225                 }
147226                 check_usemap_section_nr(nid, usage);
147227 diff --git a/mm/swap.c b/mm/swap.c
147228 index 31b844d4ed94..d6458ee1e9f8 100644
147229 --- a/mm/swap.c
147230 +++ b/mm/swap.c
147231 @@ -306,7 +306,7 @@ void lru_note_cost_page(struct page *page)
147233  static void __activate_page(struct page *page, struct lruvec *lruvec)
147235 -       if (!PageActive(page) && !PageUnevictable(page)) {
147236 +       if (!PageUnevictable(page) && !page_is_active(page, lruvec)) {
147237                 int nr_pages = thp_nr_pages(page);
147239                 del_page_from_lru_list(page, lruvec);
147240 @@ -334,10 +334,10 @@ static bool need_activate_page_drain(int cpu)
147241         return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
147244 -static void activate_page(struct page *page)
147245 +static void activate_page_on_lru(struct page *page)
147247         page = compound_head(page);
147248 -       if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
147249 +       if (PageLRU(page) && !PageUnevictable(page) && !page_is_active(page, NULL)) {
147250                 struct pagevec *pvec;
147252                 local_lock(&lru_pvecs.lock);
147253 @@ -354,7 +354,7 @@ static inline void activate_page_drain(int cpu)
147257 -static void activate_page(struct page *page)
147258 +static void activate_page_on_lru(struct page *page)
147260         struct lruvec *lruvec;
147262 @@ -368,11 +368,22 @@ static void activate_page(struct page *page)
147264  #endif
147266 -static void __lru_cache_activate_page(struct page *page)
147267 +/*
147268 + * If the page is on the LRU, queue it for activation via
147269 + * lru_pvecs.activate_page. Otherwise, assume the page is on a
147270 + * pagevec, mark it active and it'll be moved to the active
147271 + * LRU on the next drain.
147272 + */
147273 +void activate_page(struct page *page)
147275         struct pagevec *pvec;
147276         int i;
147278 +       if (PageLRU(page)) {
147279 +               activate_page_on_lru(page);
147280 +               return;
147281 +       }
147283         local_lock(&lru_pvecs.lock);
147284         pvec = this_cpu_ptr(&lru_pvecs.lru_add);
147286 @@ -420,17 +431,8 @@ void mark_page_accessed(struct page *page)
147287                  * this list is never rotated or maintained, so marking an
147288                  * evictable page accessed has no effect.
147289                  */
147290 -       } else if (!PageActive(page)) {
147291 -               /*
147292 -                * If the page is on the LRU, queue it for activation via
147293 -                * lru_pvecs.activate_page. Otherwise, assume the page is on a
147294 -                * pagevec, mark it active and it'll be moved to the active
147295 -                * LRU on the next drain.
147296 -                */
147297 -               if (PageLRU(page))
147298 -                       activate_page(page);
147299 -               else
147300 -                       __lru_cache_activate_page(page);
147301 +       } else if (!page_inc_usage(page)) {
147302 +               activate_page(page);
147303                 ClearPageReferenced(page);
147304                 workingset_activation(page);
147305         }
147306 @@ -465,15 +467,14 @@ void lru_cache_add(struct page *page)
147307  EXPORT_SYMBOL(lru_cache_add);
147309  /**
147310 - * lru_cache_add_inactive_or_unevictable
147311 + * lru_cache_add_page_vma
147312   * @page:  the page to be added to LRU
147313   * @vma:   vma in which page is mapped for determining reclaimability
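+ * @faulting: true if the page is being faulted in; the multigenerational
+ *            lru uses this as a hint to start such pages out active (see
+ *            the SetPageActive() call below)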
147314   *
147315 - * Place @page on the inactive or unevictable LRU list, depending on its
147316 - * evictability.
147317 + * Place @page on an LRU list, depending on its evictability.
147318   */
147319 -void lru_cache_add_inactive_or_unevictable(struct page *page,
147320 -                                        struct vm_area_struct *vma)
147321 +void lru_cache_add_page_vma(struct page *page, struct vm_area_struct *vma,
147322 +                           bool faulting)
147324         bool unevictable;
147326 @@ -490,6 +491,11 @@ void lru_cache_add_inactive_or_unevictable(struct page *page,
147327                 __mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
147328                 count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
147329         }
147331 +       /* tell the multigenerational lru that the page is being faulted in */
147332 +       if (lru_gen_enabled() && !unevictable && faulting)
147333 +               SetPageActive(page);
147335         lru_cache_add(page);
147338 @@ -516,7 +522,7 @@ void lru_cache_add_inactive_or_unevictable(struct page *page,
147339   */
147340  static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
147342 -       bool active = PageActive(page);
147343 +       bool active = page_is_active(page, lruvec);
147344         int nr_pages = thp_nr_pages(page);
147346         if (PageUnevictable(page))
147347 @@ -556,7 +562,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
147349  static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
147351 -       if (PageActive(page) && !PageUnevictable(page)) {
147352 +       if (!PageUnevictable(page) && page_is_active(page, lruvec)) {
147353                 int nr_pages = thp_nr_pages(page);
147355                 del_page_from_lru_list(page, lruvec);
147356 @@ -670,7 +676,7 @@ void deactivate_file_page(struct page *page)
147357   */
147358  void deactivate_page(struct page *page)
147360 -       if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
147361 +       if (PageLRU(page) && !PageUnevictable(page) && page_is_active(page, NULL)) {
147362                 struct pagevec *pvec;
147364                 local_lock(&lru_pvecs.lock);
147365 diff --git a/mm/swapfile.c b/mm/swapfile.c
147366 index 084a5b9a18e5..ab3b5ca404fd 100644
147367 --- a/mm/swapfile.c
147368 +++ b/mm/swapfile.c
147369 @@ -1936,7 +1936,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
147370                 page_add_anon_rmap(page, vma, addr, false);
147371         } else { /* ksm created a completely new copy */
147372                 page_add_new_anon_rmap(page, vma, addr, false);
147373 -               lru_cache_add_inactive_or_unevictable(page, vma);
147374 +               lru_cache_add_page_vma(page, vma, false);
147375         }
147376         swap_free(entry);
147377  out:
147378 @@ -2702,6 +2702,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
147379         err = 0;
147380         atomic_inc(&proc_poll_event);
147381         wake_up_interruptible(&proc_poll_wait);
147382 +       /* stop tracking anon if the multigenerational lru is enabled */
147383 +       lru_gen_set_state(false, false, true);
147385  out_dput:
147386         filp_close(victim, NULL);
147387 @@ -3348,6 +3350,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
147388         mutex_unlock(&swapon_mutex);
147389         atomic_inc(&proc_poll_event);
147390         wake_up_interruptible(&proc_poll_wait);
147391 +       /* start tracking anon if the multigenerational lru is enabled */
147392 +       lru_gen_set_state(true, false, true);
147394         error = 0;
147395         goto out;
147396 diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
147397 index 9a3d451402d7..d7382fd886cc 100644
147398 --- a/mm/userfaultfd.c
147399 +++ b/mm/userfaultfd.c
147400 @@ -123,7 +123,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
147402         inc_mm_counter(dst_mm, MM_ANONPAGES);
147403         page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
147404 -       lru_cache_add_inactive_or_unevictable(page, dst_vma);
147405 +       lru_cache_add_page_vma(page, dst_vma, true);
147407         set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
147409 @@ -362,38 +362,38 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
147410                  * If a reservation for the page existed in the reservation
147411                  * map of a private mapping, the map was modified to indicate
147412                  * the reservation was consumed when the page was allocated.
147413 -                * We clear the PagePrivate flag now so that the global
147414 +                * We clear the HPageRestoreReserve flag now so that the global
147415                  * reserve count will not be incremented in free_huge_page.
147416                  * The reservation map will still indicate the reservation
147417                  * was consumed and possibly prevent later page allocation.
147418                  * This is better than leaking a global reservation.  If no
147419 -                * reservation existed, it is still safe to clear PagePrivate
147420 -                * as no adjustments to reservation counts were made during
147421 -                * allocation.
147422 +                * reservation existed, it is still safe to clear
147423 +                * HPageRestoreReserve as no adjustments to reservation counts
147424 +                * were made during allocation.
147425                  *
147426                  * The reservation map for shared mappings indicates which
147427                  * pages have reservations.  When a huge page is allocated
147428                  * for an address with a reservation, no change is made to
147429 -                * the reserve map.  In this case PagePrivate will be set
147430 -                * to indicate that the global reservation count should be
147431 +                * the reserve map.  In this case HPageRestoreReserve will be
147432 +                * set to indicate that the global reservation count should be
147433                  * incremented when the page is freed.  This is the desired
147434                  * behavior.  However, when a huge page is allocated for an
147435                  * address without a reservation a reservation entry is added
147436 -                * to the reservation map, and PagePrivate will not be set.
147437 -                * When the page is freed, the global reserve count will NOT
147438 -                * be incremented and it will appear as though we have leaked
147439 -                * reserved page.  In this case, set PagePrivate so that the
147440 -                * global reserve count will be incremented to match the
147441 -                * reservation map entry which was created.
147442 +                * to the reservation map, and HPageRestoreReserve will not be
147443 +                * set. When the page is freed, the global reserve count will
147444 +                * NOT be incremented and it will appear as though we have
147445 +                * leaked a reserved page.  In this case, set HPageRestoreReserve
147446 +                * so that the global reserve count will be incremented to
147447 +                * match the reservation map entry which was created.
147448                  *
147449                  * Note that vm_alloc_shared is based on the flags of the vma
147450                  * for which the page was originally allocated.  dst_vma could
147451                  * be different or NULL on error.
147452                  */
147453                 if (vm_alloc_shared)
147454 -                       SetPagePrivate(page);
147455 +                       SetHPageRestoreReserve(page);
147456                 else
147457 -                       ClearPagePrivate(page);
147458 +                       ClearHPageRestoreReserve(page);
147459                 put_page(page);
147460         }
147461         BUG_ON(copied < 0);
147462 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
147463 index 4f5f8c907897..64ab133ee816 100644
147464 --- a/mm/vmalloc.c
147465 +++ b/mm/vmalloc.c
147466 @@ -316,6 +316,7 @@ int map_kernel_range_noflush(unsigned long addr, unsigned long size,
147468         return 0;
147470 +EXPORT_SYMBOL(map_kernel_range_noflush);
147472  int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
147473                 struct page **pages)
147474 @@ -2131,6 +2132,7 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
147475                                   NUMA_NO_NODE, GFP_KERNEL,
147476                                   __builtin_return_address(0));
147478 +EXPORT_SYMBOL(get_vm_area);
147480  struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
147481                                 const void *caller)
147482 diff --git a/mm/vmscan.c b/mm/vmscan.c
147483 index 562e87cbd7a1..4a34cc622681 100644
147484 --- a/mm/vmscan.c
147485 +++ b/mm/vmscan.c
147486 @@ -49,6 +49,11 @@
147487  #include <linux/printk.h>
147488  #include <linux/dax.h>
147489  #include <linux/psi.h>
147490 +#include <linux/memory.h>
147491 +#include <linux/pagewalk.h>
147492 +#include <linux/shmem_fs.h>
147493 +#include <linux/ctype.h>
147494 +#include <linux/debugfs.h>
147496  #include <asm/tlbflush.h>
147497  #include <asm/div64.h>
147498 @@ -118,6 +123,19 @@ struct scan_control {
147499         /* The file pages on the current node are dangerously low */
147500         unsigned int file_is_tiny:1;
147502 +       /*
147503 +        * The clean file pages on the current node won't be reclaimed when
147504 +        * their amount is below vm.clean_low_kbytes *unless* we threaten
147505 +        * to OOM or have no free swap space or vm.swappiness=0.
147506 +        */
147507 +       unsigned int clean_below_low:1;
147509 +       /*
147510 +        * The clean file pages on the current node won't be reclaimed when
147511 +        * their amount is below vm.clean_min_kbytes.
147512 +        */
147513 +       unsigned int clean_below_min:1;
147515         /* Allocation order */
147516         s8 order;
147518 @@ -164,10 +182,21 @@ struct scan_control {
147519  #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
147520  #endif
147522 +#if CONFIG_CLEAN_LOW_KBYTES < 0
147523 +#error "CONFIG_CLEAN_LOW_KBYTES must be >= 0"
147524 +#endif
147526 +#if CONFIG_CLEAN_MIN_KBYTES < 0
147527 +#error "CONFIG_CLEAN_MIN_KBYTES must be >= 0"
147528 +#endif
147530 +unsigned long sysctl_clean_low_kbytes __read_mostly = CONFIG_CLEAN_LOW_KBYTES;
147531 +unsigned long sysctl_clean_min_kbytes __read_mostly = CONFIG_CLEAN_MIN_KBYTES;
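+/*
+ * Both knobs are meant to be tuned at runtime, e.g. (illustrative values
+ * only, not recommended defaults):
+ *
+ *   sysctl -w vm.clean_low_kbytes=153600   # best-effort floor, ~150 MB
+ *   sysctl -w vm.clean_min_kbytes=51200    # hard floor, ~50 MB
+ *
+ * They are compared against the amount of clean file pages on each node;
+ * see the clean_below_low/clean_below_min logic in prepare_scan_count().
+ */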
147534   * From 0 .. 200.  Higher means more swappy.
147535   */
147536 -int vm_swappiness = 60;
147537 +int vm_swappiness = 30;
147539  static void set_task_reclaim_state(struct task_struct *task,
147540                                    struct reclaim_state *rs)
147541 @@ -897,9 +926,11 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
147543         if (PageSwapCache(page)) {
147544                 swp_entry_t swap = { .val = page_private(page) };
147545 -               mem_cgroup_swapout(page, swap);
147547 +               /* get a shadow entry before page_memcg() is cleared */
147548                 if (reclaimed && !mapping_exiting(mapping))
147549                         shadow = workingset_eviction(page, target_memcg);
147550 +               mem_cgroup_swapout(page, swap);
147551                 __delete_from_swap_cache(page, swap, shadow);
147552                 xa_unlock_irqrestore(&mapping->i_pages, flags);
147553                 put_swap_page(page, swap);
147554 @@ -1110,6 +1141,10 @@ static unsigned int shrink_page_list(struct list_head *page_list,
147555                 if (!sc->may_unmap && page_mapped(page))
147556                         goto keep_locked;
147558 +               /* in case the page was found accessed by lru_gen_scan_around() */
147559 +               if (lru_gen_enabled() && !ignore_references && PageReferenced(page))
147560 +                       goto keep_locked;
147562                 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
147563                         (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
147565 @@ -2224,6 +2259,135 @@ enum scan_balance {
147566         SCAN_FILE,
147569 +static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc)
147571 +       unsigned long file;
147572 +       struct lruvec *target_lruvec;
147574 +       /* the multigenerational lru doesn't use these counters */
147575 +       if (lru_gen_enabled())
147576 +               return;
147578 +       target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
147580 +       /*
147581 +        * Determine the scan balance between anon and file LRUs.
147582 +        */
147583 +       spin_lock_irq(&target_lruvec->lru_lock);
147584 +       sc->anon_cost = target_lruvec->anon_cost;
147585 +       sc->file_cost = target_lruvec->file_cost;
147586 +       spin_unlock_irq(&target_lruvec->lru_lock);
147588 +       /*
147589 +        * Target desirable inactive:active list ratios for the anon
147590 +        * and file LRU lists.
147591 +        */
147592 +       if (!sc->force_deactivate) {
147593 +               unsigned long refaults;
147595 +               refaults = lruvec_page_state(target_lruvec,
147596 +                               WORKINGSET_ACTIVATE_ANON);
147597 +               if (refaults != target_lruvec->refaults[0] ||
147598 +                       inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
147599 +                       sc->may_deactivate |= DEACTIVATE_ANON;
147600 +               else
147601 +                       sc->may_deactivate &= ~DEACTIVATE_ANON;
147603 +               /*
147604 +                * When refaults are being observed, it means a new
147605 +                * workingset is being established. Deactivate to get
147606 +                * rid of any stale active pages quickly.
147607 +                */
147608 +               refaults = lruvec_page_state(target_lruvec,
147609 +                               WORKINGSET_ACTIVATE_FILE);
147610 +               if (refaults != target_lruvec->refaults[1] ||
147611 +                   inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
147612 +                       sc->may_deactivate |= DEACTIVATE_FILE;
147613 +               else
147614 +                       sc->may_deactivate &= ~DEACTIVATE_FILE;
147615 +       } else
147616 +               sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
147618 +       /*
147619 +        * If we have plenty of inactive file pages that aren't
147620 +        * thrashing, try to reclaim those first before touching
147621 +        * anonymous pages.
147622 +        */
147623 +       file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
147624 +       if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
147625 +               sc->cache_trim_mode = 1;
147626 +       else
147627 +               sc->cache_trim_mode = 0;
147629 +       /*
147630 +        * Prevent the reclaimer from falling into the cache trap: as
147631 +        * cache pages start out inactive, every cache fault will tip
147632 +        * the scan balance towards the file LRU.  And as the file LRU
147633 +        * shrinks, so does the window for rotation from references.
147634 +        * This means we have a runaway feedback loop where a tiny
147635 +        * thrashing file LRU becomes infinitely more attractive than
147636 +        * anon pages.  Try to detect this based on file LRU size.
147637 +        */
147638 +       if (!cgroup_reclaim(sc)) {
147639 +               unsigned long total_high_wmark = 0;
147640 +               unsigned long free, anon;
147641 +               int z;
147643 +               free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
147644 +               file = node_page_state(pgdat, NR_ACTIVE_FILE) +
147645 +                          node_page_state(pgdat, NR_INACTIVE_FILE);
147647 +               for (z = 0; z < MAX_NR_ZONES; z++) {
147648 +                       struct zone *zone = &pgdat->node_zones[z];
147650 +                       if (!managed_zone(zone))
147651 +                               continue;
147653 +                       total_high_wmark += high_wmark_pages(zone);
147654 +               }
147656 +               /*
147657 +                * Consider anon: if that's low too, this isn't a
147658 +                * runaway file reclaim problem, but rather just
147659 +                * extreme pressure. Reclaim as per usual then.
147660 +                */
147661 +               anon = node_page_state(pgdat, NR_INACTIVE_ANON);
147663 +               sc->file_is_tiny =
147664 +                       file + free <= total_high_wmark &&
147665 +                       !(sc->may_deactivate & DEACTIVATE_ANON) &&
147666 +                       anon >> sc->priority;
147668 +               /*
147669 +                * Check the number of clean file pages to protect them from
147670 +                * reclaiming if their amount is below the specified thresholds.
147671 +                */
147672 +               if (sysctl_clean_low_kbytes || sysctl_clean_min_kbytes) {
147673 +                       unsigned long reclaimable_file, dirty, clean;
147675 +                       reclaimable_file =
147676 +                               node_page_state(pgdat, NR_ACTIVE_FILE) +
147677 +                               node_page_state(pgdat, NR_INACTIVE_FILE) +
147678 +                               node_page_state(pgdat, NR_ISOLATED_FILE);
147679 +                       dirty = node_page_state(pgdat, NR_FILE_DIRTY);
147680 +                       /*
147681 +                        * The node_page_state() sums can go out of sync since
147682 +                        * the values are not all read at once.
147683 +                        */
147684 +                       if (likely(reclaimable_file > dirty))
147685 +                               clean = (reclaimable_file - dirty) << (PAGE_SHIFT - 10);
147686 +                       else
147687 +                               clean = 0;
147689 +                       sc->clean_below_low = clean < sysctl_clean_low_kbytes;
147690 +                       sc->clean_below_min = clean < sysctl_clean_min_kbytes;
147691 +               } else {
147692 +                       sc->clean_below_low = false;
147693 +                       sc->clean_below_min = false;
147694 +               }
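+               /*
+                * Note that "<< (PAGE_SHIFT - 10)" above converts a page count
+                * to kilobytes (with 4 KB pages, PAGE_SHIFT - 10 == 2, i.e.
+                * pages * 4), making clean directly comparable to the
+                * *_kbytes thresholds.
+                */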
147695 +       }
147699   * Determine how aggressively the anon and file LRU lists should be
147700   * scanned.  The relative value of each set of LRU lists is determined
147701 @@ -2281,6 +2445,16 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
147702                 goto out;
147703         }
147705 +       /*
147706 +        * Force-scan anon if the amount of clean file pages is under
147707 +        * vm.clean_min_kbytes or vm.clean_low_kbytes (unless swappiness is
147708 +        * zero, in which case swapping is disabled anyway).
147709 +        */
147710 +       if ((sc->clean_below_low || sc->clean_below_min) && swappiness) {
147711 +               scan_balance = SCAN_ANON;
147712 +               goto out;
147713 +       }
147715         /*
147716          * If there is enough inactive page cache, we do not reclaim
147717          * anything from the anonymous working set right now.
147718 @@ -2417,10 +2591,30 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
147719                         BUG();
147720                 }
147722 +               /*
147723 +                * Don't reclaim clean file pages when their amount is below
147724 +                * vm.clean_min_kbytes.
147725 +                */
147726 +               if (file && sc->clean_below_min)
147727 +                       scan = 0;
147729                 nr[lru] = scan;
147730         }
147733 +#ifdef CONFIG_LRU_GEN
147734 +static void age_lru_gens(struct pglist_data *pgdat, struct scan_control *sc);
147735 +static void shrink_lru_gens(struct lruvec *lruvec, struct scan_control *sc);
147736 +#else
147737 +static void age_lru_gens(struct pglist_data *pgdat, struct scan_control *sc)
147741 +static void shrink_lru_gens(struct lruvec *lruvec, struct scan_control *sc)
147744 +#endif
147746  static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
147748         unsigned long nr[NR_LRU_LISTS];
147749 @@ -2432,6 +2626,11 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
147750         struct blk_plug plug;
147751         bool scan_adjusted;
147753 +       if (lru_gen_enabled()) {
147754 +               shrink_lru_gens(lruvec, sc);
147755 +               return;
147756 +       }
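+       /*
+        * Below is the classic active/inactive LRU path, which the
+        * multigenerational lru bypasses entirely via shrink_lru_gens() above.
+        */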
147758         get_scan_count(lruvec, sc, nr);
147760         /* Record the original scan target for proportional adjustments later */
147761 @@ -2669,7 +2868,6 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
147762         unsigned long nr_reclaimed, nr_scanned;
147763         struct lruvec *target_lruvec;
147764         bool reclaimable = false;
147765 -       unsigned long file;
147767         target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
147769 @@ -2679,93 +2877,7 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
147770         nr_reclaimed = sc->nr_reclaimed;
147771         nr_scanned = sc->nr_scanned;
147773 -       /*
147774 -        * Determine the scan balance between anon and file LRUs.
147775 -        */
147776 -       spin_lock_irq(&target_lruvec->lru_lock);
147777 -       sc->anon_cost = target_lruvec->anon_cost;
147778 -       sc->file_cost = target_lruvec->file_cost;
147779 -       spin_unlock_irq(&target_lruvec->lru_lock);
147781 -       /*
147782 -        * Target desirable inactive:active list ratios for the anon
147783 -        * and file LRU lists.
147784 -        */
147785 -       if (!sc->force_deactivate) {
147786 -               unsigned long refaults;
147788 -               refaults = lruvec_page_state(target_lruvec,
147789 -                               WORKINGSET_ACTIVATE_ANON);
147790 -               if (refaults != target_lruvec->refaults[0] ||
147791 -                       inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
147792 -                       sc->may_deactivate |= DEACTIVATE_ANON;
147793 -               else
147794 -                       sc->may_deactivate &= ~DEACTIVATE_ANON;
147796 -               /*
147797 -                * When refaults are being observed, it means a new
147798 -                * workingset is being established. Deactivate to get
147799 -                * rid of any stale active pages quickly.
147800 -                */
147801 -               refaults = lruvec_page_state(target_lruvec,
147802 -                               WORKINGSET_ACTIVATE_FILE);
147803 -               if (refaults != target_lruvec->refaults[1] ||
147804 -                   inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
147805 -                       sc->may_deactivate |= DEACTIVATE_FILE;
147806 -               else
147807 -                       sc->may_deactivate &= ~DEACTIVATE_FILE;
147808 -       } else
147809 -               sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
147811 -       /*
147812 -        * If we have plenty of inactive file pages that aren't
147813 -        * thrashing, try to reclaim those first before touching
147814 -        * anonymous pages.
147815 -        */
147816 -       file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
147817 -       if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
147818 -               sc->cache_trim_mode = 1;
147819 -       else
147820 -               sc->cache_trim_mode = 0;
147822 -       /*
147823 -        * Prevent the reclaimer from falling into the cache trap: as
147824 -        * cache pages start out inactive, every cache fault will tip
147825 -        * the scan balance towards the file LRU.  And as the file LRU
147826 -        * shrinks, so does the window for rotation from references.
147827 -        * This means we have a runaway feedback loop where a tiny
147828 -        * thrashing file LRU becomes infinitely more attractive than
147829 -        * anon pages.  Try to detect this based on file LRU size.
147830 -        */
147831 -       if (!cgroup_reclaim(sc)) {
147832 -               unsigned long total_high_wmark = 0;
147833 -               unsigned long free, anon;
147834 -               int z;
147836 -               free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
147837 -               file = node_page_state(pgdat, NR_ACTIVE_FILE) +
147838 -                          node_page_state(pgdat, NR_INACTIVE_FILE);
147840 -               for (z = 0; z < MAX_NR_ZONES; z++) {
147841 -                       struct zone *zone = &pgdat->node_zones[z];
147842 -                       if (!managed_zone(zone))
147843 -                               continue;
147845 -                       total_high_wmark += high_wmark_pages(zone);
147846 -               }
147848 -               /*
147849 -                * Consider anon: if that's low too, this isn't a
147850 -                * runaway file reclaim problem, but rather just
147851 -                * extreme pressure. Reclaim as per usual then.
147852 -                */
147853 -               anon = node_page_state(pgdat, NR_INACTIVE_ANON);
147855 -               sc->file_is_tiny =
147856 -                       file + free <= total_high_wmark &&
147857 -                       !(sc->may_deactivate & DEACTIVATE_ANON) &&
147858 -                       anon >> sc->priority;
147859 -       }
147860 +       prepare_scan_count(pgdat, sc);
147862         shrink_node_memcgs(pgdat, sc);
147864 @@ -2985,6 +3097,10 @@ static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
147865         struct lruvec *target_lruvec;
147866         unsigned long refaults;
147868 +       /* the multigenerational lru doesn't use these counters */
147869 +       if (lru_gen_enabled())
147870 +               return;
147872         target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
147873         refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON);
147874         target_lruvec->refaults[0] = refaults;
147875 @@ -3359,6 +3475,11 @@ static void age_active_anon(struct pglist_data *pgdat,
147876         struct mem_cgroup *memcg;
147877         struct lruvec *lruvec;
147879 +       if (lru_gen_enabled()) {
147880 +               age_lru_gens(pgdat, sc);
147881 +               return;
147882 +       }
147884         if (!total_swap_pages)
147885                 return;
147887 @@ -4304,3 +4425,2365 @@ void check_move_unevictable_pages(struct pagevec *pvec)
147888         }
147890  EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
147892 +#ifdef CONFIG_LRU_GEN
147894 +/*
147895 + * After pages are faulted in, the aging must scan them twice before the
147896 + * eviction can evict them. The first scan clears the accessed bit set during
147897 + * the initial faults; the second scan makes sure they haven't been used
147898 + * since the first.
147899 + */
147900 +#define MIN_NR_GENS    2
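+/*
+ * In other words, each type carries at least two generations at all times;
+ * seq_is_valid() below asserts that get_nr_gens() stays within
+ * [MIN_NR_GENS, MAX_NR_GENS].
+ */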
147902 +#define MAX_BATCH_SIZE 8192
147904 +/******************************************************************************
147905 + *                          shorthand helpers
147906 + ******************************************************************************/
147908 +#define DEFINE_MAX_SEQ()                                               \
147909 +       unsigned long max_seq = READ_ONCE(lruvec->evictable.max_seq)
147911 +#define DEFINE_MIN_SEQ()                                               \
147912 +       unsigned long min_seq[ANON_AND_FILE] = {                        \
147913 +               READ_ONCE(lruvec->evictable.min_seq[0]),                \
147914 +               READ_ONCE(lruvec->evictable.min_seq[1]),                \
147915 +       }
147917 +#define for_each_type_zone(file, zone)                                 \
147918 +       for ((file) = 0; (file) < ANON_AND_FILE; (file)++)              \
147919 +               for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
147921 +#define for_each_gen_type_zone(gen, file, zone)                                \
147922 +       for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++)                   \
147923 +               for ((file) = 0; (file) < ANON_AND_FILE; (file)++)      \
147924 +                       for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
147926 +static int get_nr_gens(struct lruvec *lruvec, int file)
147928 +       return lruvec->evictable.max_seq - lruvec->evictable.min_seq[file] + 1;
147931 +static int min_nr_gens(unsigned long max_seq, unsigned long *min_seq, int swappiness)
147933 +       return max_seq - max(min_seq[!swappiness], min_seq[1]) + 1;
147936 +static int max_nr_gens(unsigned long max_seq, unsigned long *min_seq, int swappiness)
147938 +       return max_seq - min(min_seq[!swappiness], min_seq[1]) + 1;
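+/*
+ * The "!swappiness" index in the two helpers above relies on the
+ * anon = 0 / file = 1 convention: with swappiness == 0 both operands become
+ * min_seq[1], so only the file type is considered; with swappiness > 0,
+ * min_seq[0] (anon) participates as well.
+ */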
147941 +static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
147943 +       lockdep_assert_held(&lruvec->lru_lock);
147945 +       return get_nr_gens(lruvec, 0) >= MIN_NR_GENS &&
147946 +              get_nr_gens(lruvec, 0) <= MAX_NR_GENS &&
147947 +              get_nr_gens(lruvec, 1) >= MIN_NR_GENS &&
147948 +              get_nr_gens(lruvec, 1) <= MAX_NR_GENS;
147951 +/******************************************************************************
147952 + *                          refault feedback loop
147953 + ******************************************************************************/
147955 +/*
147956 + * A feedback loop modeled after the PID controller. Currently supports the
147957 + * proportional (P) and the integral (I) terms; the derivative (D) term can be
147958 + * added if necessary. The setpoint (SP) is the desired position; the process
147959 + * variable (PV) is the measured position. The error is the difference between
147960 + * the SP and the PV. A positive error results in a positive control output
147961 + * correction, which, in our case, is to allow eviction.
147963 + * The P term is the current refault rate refaulted/(evicted+activated), which
147964 + * has a weight of 1. The I term is the arithmetic mean of the last N refault
147965 + * rates, weighted by geometric series 1/2, 1/4, ..., 1/(1<<N).
147967 + * Our goal is to make sure upper tiers have refault rates similar to that of
147968 + * the base tier. That is, we try to be fair to all tiers by maintaining
147969 + * similar refault rates across them.
147970 + */
147971 +struct controller_pos {
147972 +       unsigned long refaulted;
147973 +       unsigned long total;
147974 +       int gain;
147977 +static void read_controller_pos(struct controller_pos *pos, struct lruvec *lruvec,
147978 +                               int file, int tier, int gain)
147980 +       struct lrugen *lrugen = &lruvec->evictable;
147981 +       int sid = sid_from_seq_or_gen(lrugen->min_seq[file]);
147983 +       pos->refaulted = lrugen->avg_refaulted[file][tier] +
147984 +                        atomic_long_read(&lrugen->refaulted[sid][file][tier]);
147985 +       pos->total = lrugen->avg_total[file][tier] +
147986 +                    atomic_long_read(&lrugen->evicted[sid][file][tier]);
147987 +       if (tier)
147988 +               pos->total += lrugen->activated[sid][file][tier - 1];
147989 +       pos->gain = gain;
147992 +static void reset_controller_pos(struct lruvec *lruvec, int gen, int file)
147994 +       int tier;
147995 +       int sid = sid_from_seq_or_gen(gen);
147996 +       struct lrugen *lrugen = &lruvec->evictable;
147997 +       bool carryover = gen == lru_gen_from_seq(lrugen->min_seq[file]);
147999 +       if (!carryover && NR_STAT_GENS == 1)
148000 +               return;
148002 +       for (tier = 0; tier < MAX_NR_TIERS; tier++) {
148003 +               if (carryover) {
148004 +                       unsigned long sum;
148006 +                       sum = lrugen->avg_refaulted[file][tier] +
148007 +                             atomic_long_read(&lrugen->refaulted[sid][file][tier]);
148008 +                       WRITE_ONCE(lrugen->avg_refaulted[file][tier], sum >> 1);
148010 +                       sum = lrugen->avg_total[file][tier] +
148011 +                             atomic_long_read(&lrugen->evicted[sid][file][tier]);
148012 +                       if (tier)
148013 +                               sum += lrugen->activated[sid][file][tier - 1];
148014 +                       WRITE_ONCE(lrugen->avg_total[file][tier], sum >> 1);
148016 +                       if (NR_STAT_GENS > 1)
148017 +                               continue;
148018 +               }
148020 +               atomic_long_set(&lrugen->refaulted[sid][file][tier], 0);
148021 +               atomic_long_set(&lrugen->evicted[sid][file][tier], 0);
148022 +               if (tier)
148023 +                       WRITE_ONCE(lrugen->activated[sid][file][tier - 1], 0);
148024 +       }
148027 +static bool positive_ctrl_err(struct controller_pos *sp, struct controller_pos *pv)
148029 +       /*
148030 +        * Allow eviction if the PV has a limited number of refaulted pages or a
148031 +        * lower refault rate than the SP.
148032 +        */
148033 +       return pv->refaulted < SWAP_CLUSTER_MAX ||
148034 +              pv->refaulted * max(sp->total, 1UL) * sp->gain <=
148035 +              sp->refaulted * max(pv->total, 1UL) * pv->gain;
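+/*
+ * With equal gains, the second condition above cross-multiplies to compare
+ * refault rates without integer division: eviction is allowed when
+ * pv->refaulted / pv->total <= sp->refaulted / sp->total. For example, given
+ * sp = { .refaulted = 100, .total = 1000 } (10%) and
+ * pv = { .refaulted = 200, .total = 4000 } (5%),
+ * 200 * 1000 * 1 <= 100 * 4000 * 1 holds, so the PV tier may be evicted.
+ * The first condition treats fewer than SWAP_CLUSTER_MAX refaulted pages as
+ * statistical noise.
+ */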
148038 +/******************************************************************************
148039 + *                          mm_struct list
148040 + ******************************************************************************/
148042 +enum {
148043 +       MM_SCHED_ACTIVE,        /* running processes */
148044 +       MM_SCHED_INACTIVE,      /* sleeping processes */
148045 +       MM_LOCK_CONTENTION,     /* lock contentions */
148046 +       MM_VMA_INTERVAL,        /* VMAs within the range of the current table */
148047 +       MM_LEAF_OTHER_NODE,     /* entries not from node under reclaim */
148048 +       MM_LEAF_OTHER_MEMCG,    /* entries not from memcg under reclaim */
148049 +       MM_LEAF_OLD,            /* old entries */
148050 +       MM_LEAF_YOUNG,          /* young entries */
148051 +       MM_LEAF_DIRTY,          /* dirty entries */
148052 +       MM_LEAF_HOLE,           /* non-present entries */
148053 +       MM_NONLEAF_OLD,         /* old non-leaf pmd entries */
148054 +       MM_NONLEAF_YOUNG,       /* young non-leaf pmd entries */
148055 +       NR_MM_STATS
148058 +/* mnemonic codes for the stats above */
148059 +#define MM_STAT_CODES          "aicvnmoydhlu"
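+/*
+ * One letter per counter above, in enum order: a(ctive), i(nactive),
+ * c(ontention), v(ma interval), other n(ode), other m(emcg), o(ld), y(oung),
+ * d(irty), h(ole), non-leaf o(l)d, non-leaf yo(u)ng.
+ */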
148061 +struct lru_gen_mm_list {
148062 +       /* the head of a global or per-memcg mm_struct list */
148063 +       struct list_head head;
148064 +       /* protects the list */
148065 +       spinlock_t lock;
148066 +       struct {
148067 +               /* set to max_seq after each round of walk */
148068 +               unsigned long cur_seq;
148069 +               /* the next mm on the list to walk */
148070 +               struct list_head *iter;
148071 +               /* to wait for the last worker to finish */
148072 +               struct wait_queue_head wait;
148073 +               /* the number of concurrent workers */
148074 +               int nr_workers;
148075 +               /* stats for debugging */
148076 +               unsigned long stats[NR_STAT_GENS][NR_MM_STATS];
148077 +       } nodes[0];
148080 +static struct lru_gen_mm_list *global_mm_list;
148082 +static struct lru_gen_mm_list *alloc_mm_list(void)
148084 +       int nid;
148085 +       struct lru_gen_mm_list *mm_list;
148087 +       mm_list = kzalloc(struct_size(mm_list, nodes, nr_node_ids), GFP_KERNEL);
148088 +       if (!mm_list)
148089 +               return NULL;
148091 +       INIT_LIST_HEAD(&mm_list->head);
148092 +       spin_lock_init(&mm_list->lock);
148094 +       for_each_node(nid) {
148095 +               mm_list->nodes[nid].cur_seq = MIN_NR_GENS;
148096 +               mm_list->nodes[nid].iter = &mm_list->head;
148097 +               init_waitqueue_head(&mm_list->nodes[nid].wait);
148098 +       }
148100 +       return mm_list;
148103 +static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
148105 +#ifdef CONFIG_MEMCG
148106 +       if (!mem_cgroup_disabled())
148107 +               return memcg ? memcg->mm_list : root_mem_cgroup->mm_list;
148108 +#endif
148109 +       VM_BUG_ON(memcg);
148111 +       return global_mm_list;
148114 +void lru_gen_init_mm(struct mm_struct *mm)
148116 +       int file;
148118 +       INIT_LIST_HEAD(&mm->lrugen.list);
148119 +#ifdef CONFIG_MEMCG
148120 +       mm->lrugen.memcg = NULL;
148121 +#endif
148122 +#ifndef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
148123 +       atomic_set(&mm->lrugen.nr_cpus, 0);
148124 +#endif
148125 +       for (file = 0; file < ANON_AND_FILE; file++)
148126 +               nodes_clear(mm->lrugen.nodes[file]);
148129 +void lru_gen_add_mm(struct mm_struct *mm)
148131 +       struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
148132 +       struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
148134 +       VM_BUG_ON_MM(!list_empty(&mm->lrugen.list), mm);
148135 +#ifdef CONFIG_MEMCG
148136 +       VM_BUG_ON_MM(mm->lrugen.memcg, mm);
148137 +       WRITE_ONCE(mm->lrugen.memcg, memcg);
148138 +#endif
148139 +       spin_lock(&mm_list->lock);
148140 +       list_add_tail(&mm->lrugen.list, &mm_list->head);
148141 +       spin_unlock(&mm_list->lock);
148144 +void lru_gen_del_mm(struct mm_struct *mm)
148146 +       int nid;
148147 +#ifdef CONFIG_MEMCG
148148 +       struct lru_gen_mm_list *mm_list = get_mm_list(mm->lrugen.memcg);
148149 +#else
148150 +       struct lru_gen_mm_list *mm_list = get_mm_list(NULL);
148151 +#endif
148153 +       spin_lock(&mm_list->lock);
148155 +       for_each_node(nid) {
148156 +               if (mm_list->nodes[nid].iter != &mm->lrugen.list)
148157 +                       continue;
148159 +               mm_list->nodes[nid].iter = mm_list->nodes[nid].iter->next;
148160 +               if (mm_list->nodes[nid].iter == &mm_list->head)
148161 +                       WRITE_ONCE(mm_list->nodes[nid].cur_seq,
148162 +                                  mm_list->nodes[nid].cur_seq + 1);
148163 +       }
148165 +       list_del_init(&mm->lrugen.list);
148167 +       spin_unlock(&mm_list->lock);
148169 +#ifdef CONFIG_MEMCG
148170 +       mem_cgroup_put(mm->lrugen.memcg);
148171 +       WRITE_ONCE(mm->lrugen.memcg, NULL);
148172 +#endif
148175 +#ifdef CONFIG_MEMCG
148176 +int lru_gen_alloc_mm_list(struct mem_cgroup *memcg)
148178 +       if (mem_cgroup_disabled())
148179 +               return 0;
148181 +       memcg->mm_list = alloc_mm_list();
148183 +       return memcg->mm_list ? 0 : -ENOMEM;
148186 +void lru_gen_free_mm_list(struct mem_cgroup *memcg)
148188 +       kfree(memcg->mm_list);
148189 +       memcg->mm_list = NULL;
148192 +void lru_gen_migrate_mm(struct mm_struct *mm)
148194 +       struct mem_cgroup *memcg;
148196 +       lockdep_assert_held(&mm->owner->alloc_lock);
148198 +       if (mem_cgroup_disabled())
148199 +               return;
148201 +       rcu_read_lock();
148202 +       memcg = mem_cgroup_from_task(mm->owner);
148203 +       rcu_read_unlock();
148204 +       if (memcg == mm->lrugen.memcg)
148205 +               return;
148207 +       VM_BUG_ON_MM(!mm->lrugen.memcg, mm);
148208 +       VM_BUG_ON_MM(list_empty(&mm->lrugen.list), mm);
148210 +       lru_gen_del_mm(mm);
148211 +       lru_gen_add_mm(mm);
148214 +static bool mm_has_migrated(struct mm_struct *mm, struct mem_cgroup *memcg)
148216 +       return READ_ONCE(mm->lrugen.memcg) != memcg;
148218 +#else
148219 +static bool mm_has_migrated(struct mm_struct *mm, struct mem_cgroup *memcg)
148221 +       return false;
148223 +#endif
148225 +struct mm_walk_args {
148226 +       struct mem_cgroup *memcg;
148227 +       unsigned long max_seq;
148228 +       unsigned long next_addr;
148229 +       unsigned long start_pfn;
148230 +       unsigned long end_pfn;
148231 +       int node_id;
148232 +       int batch_size;
148233 +       int mm_stats[NR_MM_STATS];
148234 +       int nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
148235 +       bool should_walk[ANON_AND_FILE];
148236 +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG)
148237 +       unsigned long bitmap[BITS_TO_LONGS(PTRS_PER_PMD)];
148238 +#endif
148241 +static void reset_mm_stats(struct lru_gen_mm_list *mm_list, bool last,
148242 +                          struct mm_walk_args *args)
148244 +       int i;
148245 +       int nid = args->node_id;
148246 +       int sid = sid_from_seq_or_gen(args->max_seq);
148248 +       lockdep_assert_held(&mm_list->lock);
148250 +       for (i = 0; i < NR_MM_STATS; i++) {
148251 +               WRITE_ONCE(mm_list->nodes[nid].stats[sid][i],
148252 +                          mm_list->nodes[nid].stats[sid][i] + args->mm_stats[i]);
148253 +               args->mm_stats[i] = 0;
148254 +       }
148256 +       if (!last || NR_STAT_GENS == 1)
148257 +               return;
148259 +       sid = sid_from_seq_or_gen(args->max_seq + 1);
148260 +       for (i = 0; i < NR_MM_STATS; i++)
148261 +               WRITE_ONCE(mm_list->nodes[nid].stats[sid][i], 0);
148264 +static bool should_skip_mm(struct mm_struct *mm, int nid, int swappiness)
148266 +       int file;
148267 +       unsigned long size = 0;
148269 +       if (mm_is_oom_victim(mm))
148270 +               return true;
148272 +       for (file = !swappiness; file < ANON_AND_FILE; file++) {
148273 +               if (lru_gen_mm_is_active(mm) || node_isset(nid, mm->lrugen.nodes[file]))
148274 +                       size += file ? get_mm_counter(mm, MM_FILEPAGES) :
148275 +                                      get_mm_counter(mm, MM_ANONPAGES) +
148276 +                                      get_mm_counter(mm, MM_SHMEMPAGES);
148277 +       }
148279 +       /* leave the legwork to the rmap if mapped pages are too sparse */
148280 +       if (size < max(SWAP_CLUSTER_MAX, mm_pgtables_bytes(mm) / PAGE_SIZE))
148281 +               return true;
148283 +       return !mmget_not_zero(mm);
148286 +/* To support multiple workers that concurrently walk the mm_struct list. */
148287 +static bool get_next_mm(struct mm_walk_args *args, int swappiness, struct mm_struct **iter)
148289 +       bool last = true;
148290 +       struct mm_struct *mm = NULL;
148291 +       int nid = args->node_id;
148292 +       struct lru_gen_mm_list *mm_list = get_mm_list(args->memcg);
148294 +       if (*iter)
148295 +               mmput_async(*iter);
148296 +       else if (args->max_seq <= READ_ONCE(mm_list->nodes[nid].cur_seq))
148297 +               return false;
148299 +       spin_lock(&mm_list->lock);
148301 +       VM_BUG_ON(args->max_seq > mm_list->nodes[nid].cur_seq + 1);
148302 +       VM_BUG_ON(*iter && args->max_seq < mm_list->nodes[nid].cur_seq);
148303 +       VM_BUG_ON(*iter && !mm_list->nodes[nid].nr_workers);
148305 +       if (args->max_seq <= mm_list->nodes[nid].cur_seq) {
148306 +               last = *iter;
148307 +               goto done;
148308 +       }
148310 +       if (mm_list->nodes[nid].iter == &mm_list->head) {
148311 +               VM_BUG_ON(*iter || mm_list->nodes[nid].nr_workers);
148312 +               mm_list->nodes[nid].iter = mm_list->nodes[nid].iter->next;
148313 +       }
148315 +       while (!mm && mm_list->nodes[nid].iter != &mm_list->head) {
148316 +               mm = list_entry(mm_list->nodes[nid].iter, struct mm_struct, lrugen.list);
148317 +               mm_list->nodes[nid].iter = mm_list->nodes[nid].iter->next;
148318 +               if (should_skip_mm(mm, nid, swappiness))
148319 +                       mm = NULL;
148321 +               args->mm_stats[mm ? MM_SCHED_ACTIVE : MM_SCHED_INACTIVE]++;
148322 +       }
148324 +       if (mm_list->nodes[nid].iter == &mm_list->head)
148325 +               WRITE_ONCE(mm_list->nodes[nid].cur_seq,
148326 +                          mm_list->nodes[nid].cur_seq + 1);
148327 +done:
148328 +       if (*iter && !mm)
148329 +               mm_list->nodes[nid].nr_workers--;
148330 +       if (!*iter && mm)
148331 +               mm_list->nodes[nid].nr_workers++;
148333 +       last = last && !mm_list->nodes[nid].nr_workers &&
148334 +              mm_list->nodes[nid].iter == &mm_list->head;
148336 +       reset_mm_stats(mm_list, last, args);
148338 +       spin_unlock(&mm_list->lock);
148340 +       *iter = mm;
148342 +       return last;
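+/*
+ * To summarize the protocol above: walkers claim mm_structs one at a time
+ * under mm_list->lock; nodes[nid].cur_seq advances once the iterator wraps
+ * back to the list head; and the return value tells the caller whether it
+ * was the last walker to finish this round (iterator back at the head and
+ * no workers left).
+ */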
148345 +/******************************************************************************
148346 + *                          the aging
148347 + ******************************************************************************/
148349 +static void update_batch_size(struct page *page, int old_gen, int new_gen,
148350 +                             struct mm_walk_args *args)
148352 +       int file = page_is_file_lru(page);
148353 +       int zone = page_zonenum(page);
148354 +       int delta = thp_nr_pages(page);
148356 +       VM_BUG_ON(old_gen >= MAX_NR_GENS);
148357 +       VM_BUG_ON(new_gen >= MAX_NR_GENS);
148359 +       args->batch_size++;
148361 +       args->nr_pages[old_gen][file][zone] -= delta;
148362 +       args->nr_pages[new_gen][file][zone] += delta;
148365 +static void reset_batch_size(struct lruvec *lruvec, struct mm_walk_args *args)
148367 +       int gen, file, zone;
148368 +       struct lrugen *lrugen = &lruvec->evictable;
148370 +       args->batch_size = 0;
148372 +       spin_lock_irq(&lruvec->lru_lock);
148374 +       for_each_gen_type_zone(gen, file, zone) {
148375 +               enum lru_list lru = LRU_FILE * file;
148376 +               int total = args->nr_pages[gen][file][zone];
148378 +               if (!total)
148379 +                       continue;
148381 +               args->nr_pages[gen][file][zone] = 0;
148382 +               WRITE_ONCE(lrugen->sizes[gen][file][zone],
148383 +                          lrugen->sizes[gen][file][zone] + total);
148385 +               if (lru_gen_is_active(lruvec, gen))
148386 +                       lru += LRU_ACTIVE;
148387 +               update_lru_size(lruvec, lru, zone, total);
148388 +       }
148390 +       spin_unlock_irq(&lruvec->lru_lock);
148393 +static int page_update_gen(struct page *page, int new_gen)
148395 +       int old_gen;
148396 +       unsigned long old_flags, new_flags;
148398 +       VM_BUG_ON(new_gen >= MAX_NR_GENS);
148400 +       do {
148401 +               old_flags = READ_ONCE(page->flags);
148403 +               old_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
148404 +               if (old_gen < 0)
148405 +                       new_flags = old_flags | BIT(PG_referenced);
148406 +               else
148407 +                       new_flags = (old_flags & ~(LRU_GEN_MASK | LRU_USAGE_MASK |
148408 +                                    LRU_TIER_FLAGS)) | ((new_gen + 1UL) << LRU_GEN_PGOFF);
148410 +               if (old_flags == new_flags)
148411 +                       break;
148412 +       } while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
148414 +       return old_gen;
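+/*
+ * page_update_gen() above stores gen + 1 in page->flags so that 0 means "not
+ * on any generation list" (hence the "- 1" and the old_gen < 0 check). The
+ * cmpxchg loop makes the update lock-free, retrying if another CPU modified
+ * page->flags concurrently.
+ */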
148417 +static int should_skip_vma(unsigned long start, unsigned long end, struct mm_walk *walk)
148419 +       struct address_space *mapping;
148420 +       struct vm_area_struct *vma = walk->vma;
148421 +       struct mm_walk_args *args = walk->private;
148423 +       if (!vma_is_accessible(vma) || is_vm_hugetlb_page(vma) ||
148424 +           (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)))
148425 +               return true;
148427 +       if (vma_is_anonymous(vma))
148428 +               return !args->should_walk[0];
148430 +       if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping))
148431 +               return true;
148433 +       mapping = vma->vm_file->f_mapping;
148434 +       if (!mapping->a_ops->writepage)
148435 +               return true;
148437 +       if (shmem_mapping(mapping))
148438 +               return !args->should_walk[0] ||
148439 +                      mapping_unevictable(vma->vm_file->f_mapping);
148441 +       return !args->should_walk[1] || mapping_unevictable(mapping);
148444 +/*
148445 + * Some userspace memory allocators create many single-page VMAs. So instead
148446 + * of returning to the PGD table for each such VMA, we finish at least an
148447 + * entire PMD table and therefore avoid many zigzags. This optimizes page
148448 + * table walks for workloads that have large numbers of tiny VMAs.
148449 + *
148450 + * We scan PMD tables in two passes. The first pass reaches into PTE tables
148451 + * and doesn't take the PMD lock. The second pass clears the accessed bit on
148452 + * PMD entries and needs to take the PMD lock. The second pass is only done
148453 + * on the PMD entries for which the first pass found the accessed bit set,
148454 + * and they must be:
148455 + *   1) leaf entries mapping huge pages from the node under reclaim
148456 + *   2) non-leaf entries whose leaf entries only map pages from the node under
148457 + *   reclaim, when CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG=y.
148458 + */
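+/*
+ * For example, when walk_pte_range() exhausts a VMA before the end of the
+ * current PMD range, it calls get_next_interval(walk, PMD_MASK, PAGE_SIZE,
+ * &start, &end) to jump to the next qualifying VMA that starts within the
+ * same PMD range (same bits under the mask), finishing the whole PTE table
+ * in one pass instead of bouncing back to the PGD level for every tiny VMA.
+ */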
148459 +static bool get_next_interval(struct mm_walk *walk, unsigned long mask, unsigned long size,
148460 +                             unsigned long *start, unsigned long *end)
148462 +       unsigned long next = round_up(*end, size);
148463 +       struct mm_walk_args *args = walk->private;
148465 +       VM_BUG_ON(mask & size);
148466 +       VM_BUG_ON(*start != *end);
148467 +       VM_BUG_ON(!(*end & ~mask));
148468 +       VM_BUG_ON((*end & mask) != (next & mask));
148470 +       while (walk->vma) {
148471 +               if (next >= walk->vma->vm_end) {
148472 +                       walk->vma = walk->vma->vm_next;
148473 +                       continue;
148474 +               }
148476 +               if ((next & mask) != (walk->vma->vm_start & mask))
148477 +                       return false;
148479 +               if (should_skip_vma(walk->vma->vm_start, walk->vma->vm_end, walk)) {
148480 +                       walk->vma = walk->vma->vm_next;
148481 +                       continue;
148482 +               }
148484 +               args->mm_stats[MM_VMA_INTERVAL]++;
148486 +               *start = max(next, walk->vma->vm_start);
148487 +               next = (next | ~mask) + 1;
148488 +               /* rounded-up boundaries can wrap to 0 */
148489 +               *end = next && next < walk->vma->vm_end ? next : walk->vma->vm_end;
148491 +               return true;
148492 +       }
148494 +       return false;
148497 +static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
148498 +                          struct mm_walk *walk)
148500 +       int i;
148501 +       pte_t *pte;
148502 +       spinlock_t *ptl;
148503 +       int remote = 0;
148504 +       struct mm_walk_args *args = walk->private;
148505 +       int old_gen, new_gen = lru_gen_from_seq(args->max_seq);
148507 +       VM_BUG_ON(pmd_leaf(*pmd));
148509 +       pte = pte_offset_map_lock(walk->mm, pmd, start & PMD_MASK, &ptl);
148510 +       arch_enter_lazy_mmu_mode();
148511 +restart:
148512 +       for (i = pte_index(start); start != end; i++, start += PAGE_SIZE) {
148513 +               struct page *page;
148514 +               unsigned long pfn = pte_pfn(pte[i]);
148516 +               if (!pte_present(pte[i]) || is_zero_pfn(pfn)) {
148517 +                       args->mm_stats[MM_LEAF_HOLE]++;
148518 +                       continue;
148519 +               }
148521 +               if (WARN_ON_ONCE(pte_devmap(pte[i]) || pte_special(pte[i])))
148522 +                       continue;
148524 +               if (!pte_young(pte[i])) {
148525 +                       args->mm_stats[MM_LEAF_OLD]++;
148526 +                       continue;
148527 +               }
148529 +               if (pfn < args->start_pfn || pfn >= args->end_pfn) {
148530 +                       remote++;
148531 +                       args->mm_stats[MM_LEAF_OTHER_NODE]++;
148532 +                       continue;
148533 +               }
148535 +               page = compound_head(pfn_to_page(pfn));
148536 +               if (page_to_nid(page) != args->node_id) {
148537 +                       remote++;
148538 +                       args->mm_stats[MM_LEAF_OTHER_NODE]++;
148539 +                       continue;
148540 +               }
148542 +               if (!ptep_test_and_clear_young(walk->vma, start, pte + i))
148543 +                       continue;
148545 +               if (pte_dirty(pte[i]) && !PageDirty(page) &&
148546 +                   !(PageAnon(page) && PageSwapBacked(page) && !PageSwapCache(page))) {
148547 +                       set_page_dirty(page);
148548 +                       args->mm_stats[MM_LEAF_DIRTY]++;
148549 +               }
148551 +               if (page_memcg_rcu(page) != args->memcg) {
148552 +                       args->mm_stats[MM_LEAF_OTHER_MEMCG]++;
148553 +                       continue;
148554 +               }
148556 +               old_gen = page_update_gen(page, new_gen);
148557 +               if (old_gen >= 0 && old_gen != new_gen)
148558 +                       update_batch_size(page, old_gen, new_gen, args);
148559 +               args->mm_stats[MM_LEAF_YOUNG]++;
148560 +       }
148562 +       if (i < PTRS_PER_PTE && get_next_interval(walk, PMD_MASK, PAGE_SIZE, &start, &end))
148563 +               goto restart;
148565 +       arch_leave_lazy_mmu_mode();
148566 +       pte_unmap_unlock(pte, ptl);
148568 +       return !remote;
148569 +}
148571 +static bool walk_pmd_range_unlocked(pud_t *pud, unsigned long start, unsigned long end,
148572 +                                   struct mm_walk *walk)
148573 +{
148574 +       int i;
148575 +       pmd_t *pmd;
148576 +       unsigned long next;
148577 +       int young = 0;
148578 +       struct mm_walk_args *args = walk->private;
148580 +       VM_BUG_ON(pud_leaf(*pud));
148582 +       pmd = pmd_offset(pud, start & PUD_MASK);
148583 +restart:
148584 +       for (i = pmd_index(start); start != end; i++, start = next) {
148585 +               pmd_t val = pmd_read_atomic(pmd + i);
148587 +               next = pmd_addr_end(start, end);
148589 +               barrier();
148590 +               if (!pmd_present(val) || is_huge_zero_pmd(val)) {
148591 +                       args->mm_stats[MM_LEAF_HOLE]++;
148592 +                       continue;
148593 +               }
148595 +               if (pmd_trans_huge(val)) {
148596 +                       unsigned long pfn = pmd_pfn(val);
148598 +                       if (!pmd_young(val)) {
148599 +                               args->mm_stats[MM_LEAF_OLD]++;
148600 +                               continue;
148601 +                       }
148603 +                       if (pfn < args->start_pfn || pfn >= args->end_pfn) {
148604 +                               args->mm_stats[MM_LEAF_OTHER_NODE]++;
148605 +                               continue;
148606 +                       }
148608 +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
148609 +                       young++;
148610 +                       __set_bit(i, args->bitmap);
148611 +#endif
148612 +                       continue;
148613 +               }
148615 +#ifdef CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG
148616 +               if (!pmd_young(val)) {
148617 +                       args->mm_stats[MM_NONLEAF_OLD]++;
148618 +                       continue;
148619 +               }
148620 +#endif
148622 +               if (walk_pte_range(&val, start, next, walk)) {
148623 +#ifdef CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG
148624 +                       young++;
148625 +                       __set_bit(i, args->bitmap);
148626 +#endif
148627 +               }
148628 +       }
148630 +       if (i < PTRS_PER_PMD && get_next_interval(walk, PUD_MASK, PMD_SIZE, &start, &end))
148631 +               goto restart;
148633 +       return young;
148634 +}
148636 +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG)
148637 +static void walk_pmd_range_locked(pud_t *pud, unsigned long start, unsigned long end,
148638 +                                 struct mm_walk *walk)
148639 +{
148640 +       int i;
148641 +       pmd_t *pmd;
148642 +       spinlock_t *ptl;
148643 +       struct mm_walk_args *args = walk->private;
148644 +       int old_gen, new_gen = lru_gen_from_seq(args->max_seq);
148646 +       VM_BUG_ON(pud_leaf(*pud));
148648 +       start &= PUD_MASK;
148649 +       pmd = pmd_offset(pud, start);
148650 +       ptl = pmd_lock(walk->mm, pmd);
148651 +       arch_enter_lazy_mmu_mode();
148653 +       for_each_set_bit(i, args->bitmap, PTRS_PER_PMD) {
148654 +               struct page *page;
148655 +               unsigned long pfn = pmd_pfn(pmd[i]);
148656 +               unsigned long addr = start + PMD_SIZE * i;
148658 +               if (!pmd_present(pmd[i]) || is_huge_zero_pmd(pmd[i])) {
148659 +                       args->mm_stats[MM_LEAF_HOLE]++;
148660 +                       continue;
148661 +               }
148663 +               if (WARN_ON_ONCE(pmd_devmap(pmd[i])))
148664 +                       continue;
148666 +               if (!pmd_young(pmd[i])) {
148667 +                       args->mm_stats[MM_LEAF_OLD]++;
148668 +                       continue;
148669 +               }
148671 +               if (!pmd_trans_huge(pmd[i])) {
148672 +#ifdef CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG
148673 +                       args->mm_stats[MM_NONLEAF_YOUNG]++;
148674 +                       pmdp_test_and_clear_young(walk->vma, addr, pmd + i);
148675 +#endif
148676 +                       continue;
148677 +               }
148679 +               if (pfn < args->start_pfn || pfn >= args->end_pfn) {
148680 +                       args->mm_stats[MM_LEAF_OTHER_NODE]++;
148681 +                       continue;
148682 +               }
148684 +               page = pfn_to_page(pfn);
148685 +               VM_BUG_ON_PAGE(PageTail(page), page);
148686 +               if (page_to_nid(page) != args->node_id) {
148687 +                       args->mm_stats[MM_LEAF_OTHER_NODE]++;
148688 +                       continue;
148689 +               }
148691 +               if (!pmdp_test_and_clear_young(walk->vma, addr, pmd + i))
148692 +                       continue;
148694 +               if (pmd_dirty(pmd[i]) && !PageDirty(page) &&
148695 +                   !(PageAnon(page) && PageSwapBacked(page) && !PageSwapCache(page))) {
148696 +                       set_page_dirty(page);
148697 +                       args->mm_stats[MM_LEAF_DIRTY]++;
148698 +               }
148700 +               if (page_memcg_rcu(page) != args->memcg) {
148701 +                       args->mm_stats[MM_LEAF_OTHER_MEMCG]++;
148702 +                       continue;
148703 +               }
148705 +               old_gen = page_update_gen(page, new_gen);
148706 +               if (old_gen >= 0 && old_gen != new_gen)
148707 +                       update_batch_size(page, old_gen, new_gen, args);
148708 +               args->mm_stats[MM_LEAF_YOUNG]++;
148709 +       }
148711 +       arch_leave_lazy_mmu_mode();
148712 +       spin_unlock(ptl);
148714 +       memset(args->bitmap, 0, sizeof(args->bitmap));
148715 +}
148716 +#else
148717 +static void walk_pmd_range_locked(pud_t *pud, unsigned long start, unsigned long end,
148718 +                                 struct mm_walk *walk)
148719 +{
148720 +}
148721 +#endif
148723 +static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end,
148724 +                         struct mm_walk *walk)
148725 +{
148726 +       int i;
148727 +       pud_t *pud;
148728 +       unsigned long next;
148729 +       struct mm_walk_args *args = walk->private;
148731 +       VM_BUG_ON(p4d_leaf(*p4d));
148733 +       pud = pud_offset(p4d, start & P4D_MASK);
148734 +restart:
148735 +       for (i = pud_index(start); start != end; i++, start = next) {
148736 +               pud_t val = READ_ONCE(pud[i]);
148738 +               next = pud_addr_end(start, end);
148740 +               if (!pud_present(val) || WARN_ON_ONCE(pud_leaf(val)))
148741 +                       continue;
148743 +               if (walk_pmd_range_unlocked(&val, start, next, walk))
148744 +                       walk_pmd_range_locked(&val, start, next, walk);
148746 +               if (args->batch_size >= MAX_BATCH_SIZE) {
148747 +                       end = (start | ~PUD_MASK) + 1;
148748 +                       goto done;
148749 +               }
148750 +       }
148752 +       if (i < PTRS_PER_PUD && get_next_interval(walk, P4D_MASK, PUD_SIZE, &start, &end))
148753 +               goto restart;
148755 +       end = round_up(end, P4D_SIZE);
148756 +done:
148757 +       /* rounded-up boundaries can wrap to 0 */
148758 +       args->next_addr = end && walk->vma ? max(end, walk->vma->vm_start) : 0;
148760 +       return -EAGAIN;
148761 +}
148763 +static void walk_mm(struct mm_walk_args *args, int swappiness, struct mm_struct *mm)
148764 +{
148765 +       static const struct mm_walk_ops mm_walk_ops = {
148766 +               .test_walk = should_skip_vma,
148767 +               .p4d_entry = walk_pud_range,
148768 +       };
148770 +       int err;
148771 +       int file;
148772 +       int nid = args->node_id;
148773 +       struct mem_cgroup *memcg = args->memcg;
148774 +       struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
148776 +       args->next_addr = FIRST_USER_ADDRESS;
148777 +       for (file = !swappiness; file < ANON_AND_FILE; file++)
148778 +               args->should_walk[file] = lru_gen_mm_is_active(mm) ||
148779 +                                         node_isset(nid, mm->lrugen.nodes[file]);
148781 +       do {
148782 +               unsigned long start = args->next_addr;
148783 +               unsigned long end = mm->highest_vm_end;
148785 +               err = -EBUSY;
148787 +               preempt_disable();
148788 +               rcu_read_lock();
148790 +#ifdef CONFIG_MEMCG
148791 +               if (memcg && atomic_read(&memcg->moving_account)) {
148792 +                       args->mm_stats[MM_LOCK_CONTENTION]++;
148793 +                       goto contended;
148794 +               }
148795 +#endif
148796 +               if (!mmap_read_trylock(mm)) {
148797 +                       args->mm_stats[MM_LOCK_CONTENTION]++;
148798 +                       goto contended;
148799 +               }
148801 +               err = walk_page_range(mm, start, end, &mm_walk_ops, args);
148803 +               mmap_read_unlock(mm);
148805 +               if (args->batch_size)
148806 +                       reset_batch_size(lruvec, args);
148807 +contended:
148808 +               rcu_read_unlock();
148809 +               preempt_enable();
148811 +               cond_resched();
148812 +       } while (err == -EAGAIN && args->next_addr &&
148813 +                !mm_is_oom_victim(mm) && !mm_has_migrated(mm, memcg));
148815 +       if (err == -EBUSY)
148816 +               return;
148818 +       for (file = !swappiness; file < ANON_AND_FILE; file++) {
148819 +               if (args->should_walk[file])
148820 +                       node_clear(nid, mm->lrugen.nodes[file]);
148821 +       }
148822 +}
148824 +static void page_inc_gen(struct page *page, struct lruvec *lruvec, bool front)
148825 +{
148826 +       int old_gen, new_gen;
148827 +       unsigned long old_flags, new_flags;
148828 +       int file = page_is_file_lru(page);
148829 +       int zone = page_zonenum(page);
148830 +       struct lrugen *lrugen = &lruvec->evictable;
148832 +       old_gen = lru_gen_from_seq(lrugen->min_seq[file]);
148834 +       do {
148835 +               old_flags = READ_ONCE(page->flags);
148836 +               new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
148837 +               VM_BUG_ON_PAGE(new_gen < 0, page);
148838 +               if (new_gen >= 0 && new_gen != old_gen)
148839 +                       goto sort;
148841 +               new_gen = (old_gen + 1) % MAX_NR_GENS;
148842 +               new_flags = (old_flags & ~(LRU_GEN_MASK | LRU_USAGE_MASK | LRU_TIER_FLAGS)) |
148843 +                           ((new_gen + 1UL) << LRU_GEN_PGOFF);
148844 +               /* mark the page for reclaim if it's pending writeback */
148845 +               if (front)
148846 +                       new_flags |= BIT(PG_reclaim);
148847 +       } while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
148849 +       lru_gen_update_size(page, lruvec, old_gen, new_gen);
148850 +sort:
148851 +       if (front)
148852 +               list_move(&page->lru, &lrugen->lists[new_gen][file][zone]);
148853 +       else
148854 +               list_move_tail(&page->lru, &lrugen->lists[new_gen][file][zone]);
148855 +}
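Editor's note: page_inc_gen() advances the generation stored in page->flags with a read/modify/cmpxchg retry loop instead of taking a lock. Here is a minimal userspace model of that pattern, assuming nothing beyond C11 atomics; the 2-bit GEN_MASK field is invented for illustration, whereas the kernel packs the real generation under LRU_GEN_MASK.

#include <stdatomic.h>
#include <stdio.h>

#define GEN_MASK 0x3UL  /* pretend bits 0-1 of "flags" hold the generation */

static _Atomic unsigned long flags;

static void inc_gen(void)
{
        unsigned long old_flags, new_flags;

        do {
                old_flags = atomic_load(&flags);
                new_flags = (old_flags & ~GEN_MASK) |
                            (((old_flags & GEN_MASK) + 1) & GEN_MASK);
                /* retry if another CPU changed flags in the meantime */
        } while (!atomic_compare_exchange_weak(&flags, &old_flags, new_flags));
}

int main(void)
{
        for (int i = 0; i < 5; i++)
                inc_gen();
        /* 5 increments modulo 4 generations */
        printf("gen = %lu\n", atomic_load(&flags) & GEN_MASK); /* 1 */
        return 0;
}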
148857 +static bool try_inc_min_seq(struct lruvec *lruvec, int file)
148858 +{
148859 +       int gen, zone;
148860 +       bool success = false;
148861 +       struct lrugen *lrugen = &lruvec->evictable;
148863 +       VM_BUG_ON(!seq_is_valid(lruvec));
148865 +       while (get_nr_gens(lruvec, file) > MIN_NR_GENS) {
148866 +               gen = lru_gen_from_seq(lrugen->min_seq[file]);
148868 +               for (zone = 0; zone < MAX_NR_ZONES; zone++) {
148869 +                       if (!list_empty(&lrugen->lists[gen][file][zone]))
148870 +                               return success;
148871 +               }
148873 +               reset_controller_pos(lruvec, gen, file);
148874 +               WRITE_ONCE(lrugen->min_seq[file], lrugen->min_seq[file] + 1);
148876 +               success = true;
148877 +       }
148879 +       return success;
148880 +}
148882 +static bool inc_min_seq(struct lruvec *lruvec, int file)
148883 +{
148884 +       int gen, zone;
148885 +       int batch_size = 0;
148886 +       struct lrugen *lrugen = &lruvec->evictable;
148888 +       VM_BUG_ON(!seq_is_valid(lruvec));
148890 +       if (get_nr_gens(lruvec, file) != MAX_NR_GENS)
148891 +               return true;
148893 +       gen = lru_gen_from_seq(lrugen->min_seq[file]);
148895 +       for (zone = 0; zone < MAX_NR_ZONES; zone++) {
148896 +               struct list_head *head = &lrugen->lists[gen][file][zone];
148898 +               while (!list_empty(head)) {
148899 +                       struct page *page = lru_to_page(head);
148901 +                       VM_BUG_ON_PAGE(PageTail(page), page);
148902 +                       VM_BUG_ON_PAGE(PageUnevictable(page), page);
148903 +                       VM_BUG_ON_PAGE(PageActive(page), page);
148904 +                       VM_BUG_ON_PAGE(page_is_file_lru(page) != file, page);
148905 +                       VM_BUG_ON_PAGE(page_zonenum(page) != zone, page);
148907 +                       prefetchw_prev_lru_page(page, head, flags);
148909 +                       page_inc_gen(page, lruvec, false);
148911 +                       if (++batch_size == MAX_BATCH_SIZE)
148912 +                               return false;
148913 +               }
148915 +               VM_BUG_ON(lrugen->sizes[gen][file][zone]);
148916 +       }
148918 +       reset_controller_pos(lruvec, gen, file);
148919 +       WRITE_ONCE(lrugen->min_seq[file], lrugen->min_seq[file] + 1);
148921 +       return true;
148922 +}
148924 +static void inc_max_seq(struct lruvec *lruvec)
148925 +{
148926 +       int gen, file, zone;
148927 +       struct lrugen *lrugen = &lruvec->evictable;
148929 +       spin_lock_irq(&lruvec->lru_lock);
148931 +       VM_BUG_ON(!seq_is_valid(lruvec));
148933 +       for (file = 0; file < ANON_AND_FILE; file++) {
148934 +               if (try_inc_min_seq(lruvec, file))
148935 +                       continue;
148937 +               while (!inc_min_seq(lruvec, file)) {
148938 +                       spin_unlock_irq(&lruvec->lru_lock);
148939 +                       cond_resched();
148940 +                       spin_lock_irq(&lruvec->lru_lock);
148941 +               }
148942 +       }
148944 +       gen = lru_gen_from_seq(lrugen->max_seq - 1);
148945 +       for_each_type_zone(file, zone) {
148946 +               enum lru_list lru = LRU_FILE * file;
148947 +               long total = lrugen->sizes[gen][file][zone];
148949 +               if (!total)
148950 +                       continue;
148952 +               WARN_ON_ONCE(total != (int)total);
148954 +               update_lru_size(lruvec, lru, zone, total);
148955 +               update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -total);
148956 +       }
148958 +       gen = lru_gen_from_seq(lrugen->max_seq + 1);
148959 +       for_each_type_zone(file, zone) {
148960 +               VM_BUG_ON(lrugen->sizes[gen][file][zone]);
148961 +               VM_BUG_ON(!list_empty(&lrugen->lists[gen][file][zone]));
148962 +       }
148964 +       for (file = 0; file < ANON_AND_FILE; file++)
148965 +               reset_controller_pos(lruvec, gen, file);
148967 +       WRITE_ONCE(lrugen->timestamps[gen], jiffies);
148968 +       /* make sure all preceding modifications appear first */
148969 +       smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1);
148971 +       spin_unlock_irq(&lruvec->lru_lock);
148972 +}
148974 +/* Main function used by foreground, background and user-triggered aging. */
148975 +static bool walk_mm_list(struct lruvec *lruvec, unsigned long max_seq,
148976 +                        struct scan_control *sc, int swappiness, struct mm_walk_args *args)
148977 +{
148978 +       bool last;
148979 +       bool alloc = !args;
148980 +       struct mm_struct *mm = NULL;
148981 +       struct lrugen *lrugen = &lruvec->evictable;
148982 +       struct pglist_data *pgdat = lruvec_pgdat(lruvec);
148983 +       int nid = pgdat->node_id;
148984 +       struct mem_cgroup *memcg = lruvec_memcg(lruvec);
148985 +       struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
148987 +       VM_BUG_ON(max_seq > READ_ONCE(lrugen->max_seq));
148989 +       /*
148990 +        * For each walk of the mm_struct list of a memcg, we decrement the
148991 +        * priority of its lrugen. For each walk of all memcgs in kswapd, we
148992 +        * increment the priority of every lrugen.
148993 +        *
148994 +        * So if this lrugen has a higher priority (smaller value), it means
148995 +        * other concurrent reclaimers have walked its mm list, and we skip it
148996 +        * for this priority in order to balance the pressure on all memcgs.
148997 +        */
148998 +       if (!mem_cgroup_disabled() && !cgroup_reclaim(sc) &&
148999 +           sc->priority > atomic_read(&lrugen->priority))
149000 +               return false;
149002 +       if (alloc) {
149003 +               args = kvzalloc_node(sizeof(*args), GFP_KERNEL, nid);
149004 +               if (!args)
149005 +                       return false;
149006 +       }
149008 +       args->memcg = memcg;
149009 +       args->max_seq = max_seq;
149010 +       args->start_pfn = pgdat->node_start_pfn;
149011 +       args->end_pfn = pgdat_end_pfn(pgdat);
149012 +       args->node_id = nid;
149014 +       do {
149015 +               last = get_next_mm(args, swappiness, &mm);
149016 +               if (mm)
149017 +                       walk_mm(args, swappiness, mm);
149019 +               cond_resched();
149020 +       } while (mm);
149022 +       if (alloc)
149023 +               kvfree(args);
149025 +       if (!last) {
149026 +               /* foreground aging prefers not to wait unless "necessary" */
149027 +               if (!current_is_kswapd() && sc->priority < DEF_PRIORITY - 2)
149028 +                       wait_event_killable(mm_list->nodes[nid].wait,
149029 +                                           max_seq < READ_ONCE(lrugen->max_seq));
149031 +               return max_seq < READ_ONCE(lrugen->max_seq);
149032 +       }
149034 +       VM_BUG_ON(max_seq != READ_ONCE(lrugen->max_seq));
149036 +       inc_max_seq(lruvec);
149038 +       if (!mem_cgroup_disabled())
149039 +               atomic_add_unless(&lrugen->priority, -1, 0);
149041 +       /* order against inc_max_seq() */
149042 +       smp_mb();
149043 +       /* either we see any waiters or they will see the updated max_seq */
149044 +       if (waitqueue_active(&mm_list->nodes[nid].wait))
149045 +               wake_up_all(&mm_list->nodes[nid].wait);
149047 +       wakeup_flusher_threads(WB_REASON_VMSCAN);
149049 +       return true;
149050 +}
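Editor's note: the early return near the top of walk_mm_list() follows from the bookkeeping its comment describes: age_lru_gens() raises every lrugen's priority once per kswapd round, and each completed mm-list walk lowers the walked lrugen's by one. A toy restatement of just that skip rule; should_walk() and the numbers are illustrative, not kernel code.

#include <stdio.h>

#define DEF_PRIORITY 12

/* mirrors "skip when sc->priority > lrugen->priority" */
static int should_walk(int sc_priority, int lrugen_priority)
{
        return sc_priority <= lrugen_priority;
}

int main(void)
{
        int lrugen_priority = DEF_PRIORITY;

        lrugen_priority--;      /* a concurrent reclaimer walked this memcg */

        printf("sc->priority=12 -> %s\n",
               should_walk(12, lrugen_priority) ? "walk" : "skip");
        printf("sc->priority=11 -> %s\n",
               should_walk(11, lrugen_priority) ? "walk" : "skip");
        return 0;
}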
149052 +void lru_gen_scan_around(struct page_vma_mapped_walk *pvmw)
149053 +{
149054 +       pte_t *pte;
149055 +       unsigned long start, end;
149056 +       int old_gen, new_gen;
149057 +       unsigned long flags;
149058 +       struct lruvec *lruvec;
149059 +       struct mem_cgroup *memcg;
149060 +       struct pglist_data *pgdat = page_pgdat(pvmw->page);
149062 +       lockdep_assert_held(pvmw->ptl);
149064 +       start = max(pvmw->address & PMD_MASK, pvmw->vma->vm_start);
149065 +       end = pmd_addr_end(pvmw->address, pvmw->vma->vm_end);
149066 +       pte = pvmw->pte - ((pvmw->address - start) >> PAGE_SHIFT);
149068 +       memcg = lock_page_memcg(pvmw->page);
149069 +       lruvec = lock_page_lruvec_irqsave(pvmw->page, &flags);
149071 +       new_gen = lru_gen_from_seq(lruvec->evictable.max_seq);
149073 +       for (; start != end; pte++, start += PAGE_SIZE) {
149074 +               struct page *page;
149075 +               unsigned long pfn = pte_pfn(*pte);
149077 +               if (!pte_present(*pte) || !pte_young(*pte) || is_zero_pfn(pfn))
149078 +                       continue;
149080 +               if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
149081 +                       continue;
149083 +               page = compound_head(pfn_to_page(pfn));
149084 +               if (page_to_nid(page) != pgdat->node_id)
149085 +                       continue;
149087 +               if (page_memcg_rcu(page) != memcg)
149088 +                       continue;
149089 +               /*
149090 +                * We may be holding many locks, so try to finish as fast as
149091 +                * possible and leave the accessed and dirty bits to the page
149092 +                * table walks.
149093 +                */
149094 +               old_gen = page_update_gen(page, new_gen);
149095 +               if (old_gen >= 0 && old_gen != new_gen)
149096 +                       lru_gen_update_size(page, lruvec, old_gen, new_gen);
149097 +       }
149099 +       unlock_page_lruvec_irqrestore(lruvec, flags);
149100 +       unlock_page_memcg(pvmw->page);
149101 +}
149103 +/******************************************************************************
149104 + *                          the eviction
149105 + ******************************************************************************/
149107 +static bool sort_page(struct page *page, struct lruvec *lruvec, int tier_to_isolate)
149108 +{
149109 +       bool success;
149110 +       int gen = page_lru_gen(page);
149111 +       int file = page_is_file_lru(page);
149112 +       int zone = page_zonenum(page);
149113 +       int tier = lru_tier_from_usage(page_tier_usage(page));
149114 +       struct lrugen *lrugen = &lruvec->evictable;
149116 +       VM_BUG_ON_PAGE(gen == -1, page);
149117 +       VM_BUG_ON_PAGE(tier_to_isolate < 0, page);
149119 +       /* a lazy-free page that has been written into? */
149120 +       if (file && PageDirty(page) && PageAnon(page)) {
149121 +               success = lru_gen_deletion(page, lruvec);
149122 +               VM_BUG_ON_PAGE(!success, page);
149123 +               SetPageSwapBacked(page);
149124 +               add_page_to_lru_list_tail(page, lruvec);
149125 +               return true;
149126 +       }
149128 +       /* page_update_gen() has updated the page? */
149129 +       if (gen != lru_gen_from_seq(lrugen->min_seq[file])) {
149130 +               list_move(&page->lru, &lrugen->lists[gen][file][zone]);
149131 +               return true;
149132 +       }
149134 +       /* activate the page if its tier has a higher refault rate */
149135 +       if (tier_to_isolate < tier) {
149136 +               int sid = sid_from_seq_or_gen(gen);
149138 +               page_inc_gen(page, lruvec, false);
149139 +               WRITE_ONCE(lrugen->activated[sid][file][tier - 1],
149140 +                          lrugen->activated[sid][file][tier - 1] + thp_nr_pages(page));
149141 +               inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file);
149142 +               return true;
149143 +       }
149145 +       /*
149146 +        * Such a page can't be evicted immediately: page_inc_gen() marks it
149147 +        * for reclaim, and writeback will hopefully write it soon if it's dirty.
149148 +        */
149149 +       if (PageLocked(page) || PageWriteback(page) || (file && PageDirty(page))) {
149150 +               page_inc_gen(page, lruvec, true);
149151 +               return true;
149152 +       }
149154 +       return false;
149155 +}
149157 +static bool should_skip_page(struct page *page, struct scan_control *sc)
149158 +{
149159 +       if (!sc->may_unmap && page_mapped(page))
149160 +               return true;
149162 +       if (!(sc->may_writepage && (sc->gfp_mask & __GFP_IO)) &&
149163 +           (PageDirty(page) || (PageAnon(page) && !PageSwapCache(page))))
149164 +               return true;
149166 +       if (!get_page_unless_zero(page))
149167 +               return true;
149169 +       if (!TestClearPageLRU(page)) {
149170 +               put_page(page);
149171 +               return true;
149172 +       }
149174 +       return false;
149175 +}
149177 +static void isolate_page(struct page *page, struct lruvec *lruvec)
149178 +{
149179 +       bool success;
149181 +       success = lru_gen_deletion(page, lruvec);
149182 +       VM_BUG_ON_PAGE(!success, page);
149184 +       if (PageActive(page)) {
149185 +               ClearPageActive(page);
149186 +               /* make sure shrink_page_list() rejects this page */
149187 +               SetPageReferenced(page);
149188 +               return;
149189 +       }
149191 +       /* make sure shrink_page_list() doesn't try to write this page */
149192 +       ClearPageReclaim(page);
149193 +       /* make sure shrink_page_list() doesn't reject this page */
149194 +       ClearPageReferenced(page);
149195 +}
149197 +static int scan_lru_gen_pages(struct lruvec *lruvec, struct scan_control *sc,
149198 +                             long *nr_to_scan, int file, int tier,
149199 +                             struct list_head *list)
149200 +{
149201 +       bool success;
149202 +       int gen, zone;
149203 +       enum vm_event_item item;
149204 +       int sorted = 0;
149205 +       int scanned = 0;
149206 +       int isolated = 0;
149207 +       int batch_size = 0;
149208 +       struct lrugen *lrugen = &lruvec->evictable;
149210 +       VM_BUG_ON(!list_empty(list));
149212 +       if (get_nr_gens(lruvec, file) == MIN_NR_GENS)
149213 +               return -ENOENT;
149215 +       gen = lru_gen_from_seq(lrugen->min_seq[file]);
149217 +       for (zone = sc->reclaim_idx; zone >= 0; zone--) {
149218 +               LIST_HEAD(moved);
149219 +               int skipped = 0;
149220 +               struct list_head *head = &lrugen->lists[gen][file][zone];
149222 +               while (!list_empty(head)) {
149223 +                       struct page *page = lru_to_page(head);
149224 +                       int delta = thp_nr_pages(page);
149226 +                       VM_BUG_ON_PAGE(PageTail(page), page);
149227 +                       VM_BUG_ON_PAGE(PageUnevictable(page), page);
149228 +                       VM_BUG_ON_PAGE(PageActive(page), page);
149229 +                       VM_BUG_ON_PAGE(page_is_file_lru(page) != file, page);
149230 +                       VM_BUG_ON_PAGE(page_zonenum(page) != zone, page);
149232 +                       prefetchw_prev_lru_page(page, head, flags);
149234 +                       scanned += delta;
149236 +                       if (sort_page(page, lruvec, tier))
149237 +                               sorted += delta;
149238 +                       else if (should_skip_page(page, sc)) {
149239 +                               list_move(&page->lru, &moved);
149240 +                               skipped += delta;
149241 +                       } else {
149242 +                               isolate_page(page, lruvec);
149243 +                               list_add(&page->lru, list);
149244 +                               isolated += delta;
149245 +                       }
149247 +                       if (scanned >= *nr_to_scan || isolated >= SWAP_CLUSTER_MAX ||
149248 +                           ++batch_size == MAX_BATCH_SIZE)
149249 +                               break;
149250 +               }
149252 +               list_splice(&moved, head);
149253 +               __count_zid_vm_events(PGSCAN_SKIP, zone, skipped);
149255 +               if (scanned >= *nr_to_scan || isolated >= SWAP_CLUSTER_MAX ||
149256 +                   batch_size == MAX_BATCH_SIZE)
149257 +                       break;
149258 +       }
149260 +       success = try_inc_min_seq(lruvec, file);
149262 +       item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
149263 +       if (!cgroup_reclaim(sc))
149264 +               __count_vm_events(item, scanned);
149265 +       __count_memcg_events(lruvec_memcg(lruvec), item, scanned);
149266 +       __count_vm_events(PGSCAN_ANON + file, scanned);
149268 +       *nr_to_scan -= scanned;
149270 +       if (*nr_to_scan <= 0 || success || isolated)
149271 +               return isolated;
149272 +       /*
149273 +        * We may have trouble finding eligible pages due to reclaim_idx,
149274 +        * may_unmap and may_writepage. The following check makes sure we won't
149275 +        * be stuck if we aren't making enough progress.
149276 +        */
149277 +       return batch_size == MAX_BATCH_SIZE && sorted >= SWAP_CLUSTER_MAX ? 0 : -ENOENT;
149278 +}
149280 +static int get_tier_to_isolate(struct lruvec *lruvec, int file)
149281 +{
149282 +       int tier;
149283 +       struct controller_pos sp, pv;
149285 +       /*
149286 +        * Ideally we don't want to evict upper tiers that have higher refault
149287 +        * rates. However, we need to leave some margin for the fluctuation in
149288 +        * refault rates. So we use a larger gain factor to make sure upper
149289 +        * tiers are indeed more active. We choose 2 because the lowest upper
149290 +        * tier would have twice the refault rate of the base tier, according
149291 +        * to their numbers of accesses.
149292 +        */
149293 +       read_controller_pos(&sp, lruvec, file, 0, 1);
149294 +       for (tier = 1; tier < MAX_NR_TIERS; tier++) {
149295 +               read_controller_pos(&pv, lruvec, file, tier, 2);
149296 +               if (!positive_ctrl_err(&sp, &pv))
149297 +                       break;
149298 +       }
149300 +       return tier - 1;
149301 +}
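Editor's note: a worked example of the tier cutoff, under the assumption that positive_ctrl_err() amounts to "the challenger's refault rate, divided by its gain, is no higher than the baseline's"; this positive_err() is a paraphrase for illustration, not the kernel's exact controller code. With gain 2, an upper tier stays evictable until it refaults more than twice as often as the base tier.

#include <stdio.h>

struct pos { long refaulted, total, gain; };

/* cross-multiplied form of rate_pv / gain_pv <= rate_sp / gain_sp */
static int positive_err(const struct pos *sp, const struct pos *pv)
{
        return pv->refaulted * sp->gain * sp->total <=
               sp->refaulted * pv->gain * pv->total;
}

int main(void)
{
        struct pos base = { 100, 1000, 1 };     /* 10% refault rate */
        struct pos tiers[] = {
                { 150, 1000, 2 },       /* 15% < 2 * 10%: still evictable */
                { 250, 1000, 2 },       /* 25% > 2 * 10%: protected */
        };
        int tier;

        for (tier = 1; tier <= 2; tier++)
                if (!positive_err(&base, &tiers[tier - 1]))
                        break;

        printf("tier_to_isolate = %d\n", tier - 1);     /* 1 */
        return 0;
}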
149303 +static int get_type_to_scan(struct lruvec *lruvec, int swappiness, int *tier_to_isolate)
149304 +{
149305 +       int file, tier;
149306 +       struct controller_pos sp, pv;
149307 +       int gain[ANON_AND_FILE] = { swappiness, 200 - swappiness };
149309 +       /*
149310 +        * Compare the refault rates between the base tiers of anon and file to
149311 +        * determine which type to evict. We also need to compare the refault rates
149312 +        * of the upper tiers of the selected type with that of the base tier to
149313 +        * determine which tier of the selected type to evict.
149314 +        */
149315 +       read_controller_pos(&sp, lruvec, 0, 0, gain[0]);
149316 +       read_controller_pos(&pv, lruvec, 1, 0, gain[1]);
149317 +       file = positive_ctrl_err(&sp, &pv);
149319 +       read_controller_pos(&sp, lruvec, !file, 0, gain[!file]);
149320 +       for (tier = 1; tier < MAX_NR_TIERS; tier++) {
149321 +               read_controller_pos(&pv, lruvec, file, tier, gain[file]);
149322 +               if (!positive_ctrl_err(&sp, &pv))
149323 +                       break;
149324 +       }
149326 +       *tier_to_isolate = tier - 1;
149328 +       return file;
149329 +}
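Editor's note: the same paraphrased comparison shows how the gain pair (swappiness, 200 - swappiness) tilts the anon-versus-file decision: at equal raw refault rates, a low swappiness selects file for eviction and a high one selects anon. Again a sketch, with positive_err() as assumed in the previous example.

#include <stdio.h>

struct pos { long refaulted, total, gain; };

static int positive_err(const struct pos *sp, const struct pos *pv)
{
        return pv->refaulted * sp->gain * sp->total <=
               sp->refaulted * pv->gain * pv->total;
}

int main(void)
{
        int swappiness[] = { 60, 180 };

        for (int i = 0; i < 2; i++) {
                /* both types refault at 10% in this example */
                struct pos anon = { 100, 1000, swappiness[i] };
                struct pos file = { 100, 1000, 200 - swappiness[i] };

                printf("swappiness %3d -> evict %s\n", swappiness[i],
                       positive_err(&anon, &file) ? "file" : "anon");
        }
        return 0;
}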
149331 +static int isolate_lru_gen_pages(struct lruvec *lruvec, struct scan_control *sc,
149332 +                                int swappiness, long *nr_to_scan, int *type_to_scan,
149333 +                                struct list_head *list)
149334 +{
149335 +       int i;
149336 +       int file;
149337 +       int isolated;
149338 +       int tier = -1;
149339 +       DEFINE_MAX_SEQ();
149340 +       DEFINE_MIN_SEQ();
149342 +       VM_BUG_ON(!seq_is_valid(lruvec));
149344 +       if (max_nr_gens(max_seq, min_seq, swappiness) == MIN_NR_GENS)
149345 +               return 0;
149346 +       /*
149347 +        * Try to select a type based on generations and swappiness, and if that
149348 +        * fails, fall back to get_type_to_scan(). When anon and file are both
149349 +        * available from the same generation, swappiness 200 is interpreted as
149350 +        * anon first and swappiness 1 is interpreted as file first.
149351 +        */
149352 +       file = !swappiness || min_seq[0] > min_seq[1] ||
149353 +              (min_seq[0] == min_seq[1] && swappiness != 200 &&
149354 +               (swappiness == 1 || get_type_to_scan(lruvec, swappiness, &tier)));
149356 +       if (tier == -1)
149357 +               tier = get_tier_to_isolate(lruvec, file);
149359 +       for (i = !swappiness; i < ANON_AND_FILE; i++) {
149360 +               isolated = scan_lru_gen_pages(lruvec, sc, nr_to_scan, file, tier, list);
149361 +               if (isolated >= 0)
149362 +                       break;
149364 +               file = !file;
149365 +               tier = get_tier_to_isolate(lruvec, file);
149366 +       }
149368 +       if (isolated < 0)
149369 +               isolated = *nr_to_scan = 0;
149371 +       *type_to_scan = file;
149373 +       return isolated;
149374 +}
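Editor's note: the type-selection expression above compresses several cases into one boolean. Here is a standalone truth table, with get_type_to_scan() stubbed to always answer "file" and all names invented for illustration; a smaller min_seq means an older generation.

#include <stdio.h>

static int stub_get_type_to_scan(void) { return 1; }    /* pretend: file */

static int pick_type(int swappiness, long min_seq_anon, long min_seq_file)
{
        return !swappiness || min_seq_anon > min_seq_file ||
               (min_seq_anon == min_seq_file && swappiness != 200 &&
                (swappiness == 1 || stub_get_type_to_scan()));
}

int main(void)
{
        printf("s=0,   equal gens -> %s\n", pick_type(0, 5, 5) ? "file" : "anon");
        printf("s=1,   equal gens -> %s\n", pick_type(1, 5, 5) ? "file" : "anon");
        printf("s=200, equal gens -> %s\n", pick_type(200, 5, 5) ? "file" : "anon");
        printf("s=60,  file older -> %s\n", pick_type(60, 6, 5) ? "file" : "anon");
        return 0;
}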
149376 +/* Main function used by foreground, background and user-triggered eviction. */
149377 +static bool evict_lru_gen_pages(struct lruvec *lruvec, struct scan_control *sc,
149378 +                               int swappiness, long *nr_to_scan)
149379 +{
149380 +       int file;
149381 +       int isolated;
149382 +       int reclaimed;
149383 +       LIST_HEAD(list);
149384 +       struct page *page;
149385 +       enum vm_event_item item;
149386 +       struct reclaim_stat stat;
149387 +       struct pglist_data *pgdat = lruvec_pgdat(lruvec);
149389 +       spin_lock_irq(&lruvec->lru_lock);
149391 +       isolated = isolate_lru_gen_pages(lruvec, sc, swappiness, nr_to_scan, &file, &list);
149392 +       VM_BUG_ON(list_empty(&list) == !!isolated);
149394 +       if (isolated)
149395 +               __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, isolated);
149397 +       spin_unlock_irq(&lruvec->lru_lock);
149399 +       if (!isolated)
149400 +               goto done;
149402 +       reclaimed = shrink_page_list(&list, pgdat, sc, &stat, false);
149403 +       /*
149404 +        * We need to prevent rejected pages from being added back to the same
149405 +        * lists they were isolated from. Otherwise we risk looping on them
149406 +        * forever. We use PageActive() or !PageReferenced() && PageWorkingset()
149407 +        * to tell lru_gen_addition() not to add them to the oldest generation.
149408 +        */
149409 +       list_for_each_entry(page, &list, lru) {
149410 +               if (PageMlocked(page))
149411 +                       continue;
149413 +               if (PageReferenced(page)) {
149414 +                       SetPageActive(page);
149415 +                       ClearPageReferenced(page);
149416 +               } else {
149417 +                       ClearPageActive(page);
149418 +                       SetPageWorkingset(page);
149419 +               }
149420 +       }
149422 +       spin_lock_irq(&lruvec->lru_lock);
149424 +       move_pages_to_lru(lruvec, &list);
149426 +       __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -isolated);
149428 +       item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
149429 +       if (!cgroup_reclaim(sc))
149430 +               __count_vm_events(item, reclaimed);
149431 +       __count_memcg_events(lruvec_memcg(lruvec), item, reclaimed);
149432 +       __count_vm_events(PGSTEAL_ANON + file, reclaimed);
149434 +       spin_unlock_irq(&lruvec->lru_lock);
149436 +       mem_cgroup_uncharge_list(&list);
149437 +       free_unref_page_list(&list);
149439 +       sc->nr_reclaimed += reclaimed;
149440 +done:
149441 +       return *nr_to_scan > 0 && sc->nr_reclaimed < sc->nr_to_reclaim;
149442 +}
149444 +/******************************************************************************
149445 + *                          page reclaim
149446 + ******************************************************************************/
149448 +static int get_swappiness(struct lruvec *lruvec)
149449 +{
149450 +       struct mem_cgroup *memcg = lruvec_memcg(lruvec);
149451 +       int swappiness = mem_cgroup_get_nr_swap_pages(memcg) >= (long)SWAP_CLUSTER_MAX ?
149452 +                        mem_cgroup_swappiness(memcg) : 0;
149454 +       VM_BUG_ON(swappiness > 200U);
149456 +       return swappiness;
149457 +}
149459 +static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc,
149460 +                                   int swappiness)
149461 +{
149462 +       int gen, file, zone;
149463 +       long nr_to_scan = 0;
149464 +       struct lrugen *lrugen = &lruvec->evictable;
149465 +       DEFINE_MAX_SEQ();
149466 +       DEFINE_MIN_SEQ();
149468 +       lru_add_drain();
149470 +       for (file = !swappiness; file < ANON_AND_FILE; file++) {
149471 +               unsigned long seq;
149473 +               for (seq = min_seq[file]; seq <= max_seq; seq++) {
149474 +                       gen = lru_gen_from_seq(seq);
149476 +                       for (zone = 0; zone <= sc->reclaim_idx; zone++)
149477 +                               nr_to_scan += READ_ONCE(lrugen->sizes[gen][file][zone]);
149478 +               }
149479 +       }
149481 +       nr_to_scan = max(nr_to_scan, 0L);
149482 +       nr_to_scan = round_up(nr_to_scan >> sc->priority, SWAP_CLUSTER_MAX);
149484 +       if (max_nr_gens(max_seq, min_seq, swappiness) > MIN_NR_GENS)
149485 +               return nr_to_scan;
149487 +       /* kswapd uses age_lru_gens() */
149488 +       if (current_is_kswapd())
149489 +               return 0;
149491 +       return walk_mm_list(lruvec, max_seq, sc, swappiness, NULL) ? nr_to_scan : 0;
149492 +}
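Editor's note: the scan budget works out to the eligible LRU size right-shifted by the reclaim priority, rounded up to SWAP_CLUSTER_MAX. A worked example with an invented size (SWAP_CLUSTER_MAX is 32 and DEF_PRIORITY is 12 on this kernel):

#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL
#define ROUND_UP(x, y) ((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
        unsigned long size = 1000000;   /* eligible pages on the lruvec */

        for (int priority = 12; priority >= 10; priority--)
                printf("priority %2d -> scan %4lu pages\n", priority,
                       ROUND_UP(size >> priority, SWAP_CLUSTER_MAX));
        /* prints 256, 512 and 992 respectively */
        return 0;
}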
149494 +static void shrink_lru_gens(struct lruvec *lruvec, struct scan_control *sc)
149495 +{
149496 +       struct blk_plug plug;
149497 +       unsigned long scanned = 0;
149498 +       struct mem_cgroup *memcg = lruvec_memcg(lruvec);
149500 +       blk_start_plug(&plug);
149502 +       while (true) {
149503 +               long nr_to_scan;
149504 +               int swappiness = sc->may_swap ? get_swappiness(lruvec) : 0;
149506 +               nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness) - scanned;
149507 +               if (nr_to_scan < (long)SWAP_CLUSTER_MAX)
149508 +                       break;
149510 +               scanned += nr_to_scan;
149512 +               if (!evict_lru_gen_pages(lruvec, sc, swappiness, &nr_to_scan))
149513 +                       break;
149515 +               scanned -= nr_to_scan;
149517 +               if (mem_cgroup_below_min(memcg) ||
149518 +                   (mem_cgroup_below_low(memcg) && !sc->memcg_low_reclaim))
149519 +                       break;
149521 +               cond_resched();
149522 +       }
149524 +       blk_finish_plug(&plug);
149525 +}
149527 +/******************************************************************************
149528 + *                          the background aging
149529 + ******************************************************************************/
149531 +static int lru_gen_spread = MIN_NR_GENS;
149533 +static void try_walk_mm_list(struct lruvec *lruvec, struct scan_control *sc)
149534 +{
149535 +       int gen, file, zone;
149536 +       long old_and_young[2] = {};
149537 +       struct mm_walk_args args = {};
149538 +       int spread = READ_ONCE(lru_gen_spread);
149539 +       int swappiness = get_swappiness(lruvec);
149540 +       struct lrugen *lrugen = &lruvec->evictable;
149541 +       DEFINE_MAX_SEQ();
149542 +       DEFINE_MIN_SEQ();
149544 +       lru_add_drain();
149546 +       for (file = !swappiness; file < ANON_AND_FILE; file++) {
149547 +               unsigned long seq;
149549 +               for (seq = min_seq[file]; seq <= max_seq; seq++) {
149550 +                       gen = lru_gen_from_seq(seq);
149552 +                       for (zone = 0; zone < MAX_NR_ZONES; zone++)
149553 +                               old_and_young[seq == max_seq] +=
149554 +                                       READ_ONCE(lrugen->sizes[gen][file][zone]);
149555 +               }
149556 +       }
149558 +       old_and_young[0] = max(old_and_young[0], 0L);
149559 +       old_and_young[1] = max(old_and_young[1], 0L);
149561 +       if (old_and_young[0] + old_and_young[1] < SWAP_CLUSTER_MAX)
149562 +               return;
149564 +       /* try to spread pages out across spread+1 generations */
149565 +       if (old_and_young[0] >= old_and_young[1] * spread &&
149566 +           min_nr_gens(max_seq, min_seq, swappiness) > max(spread, MIN_NR_GENS))
149567 +               return;
149569 +       walk_mm_list(lruvec, max_seq, sc, swappiness, &args);
149570 +}
149572 +static void age_lru_gens(struct pglist_data *pgdat, struct scan_control *sc)
149573 +{
149574 +       struct mem_cgroup *memcg;
149576 +       VM_BUG_ON(!current_is_kswapd());
149578 +       memcg = mem_cgroup_iter(NULL, NULL, NULL);
149579 +       do {
149580 +               struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
149581 +               struct lrugen *lrugen = &lruvec->evictable;
149583 +               if (!mem_cgroup_below_min(memcg) &&
149584 +                   (!mem_cgroup_below_low(memcg) || sc->memcg_low_reclaim))
149585 +                       try_walk_mm_list(lruvec, sc);
149587 +               if (!mem_cgroup_disabled())
149588 +                       atomic_add_unless(&lrugen->priority, 1, DEF_PRIORITY);
149590 +               cond_resched();
149591 +       } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
149592 +}
149594 +/******************************************************************************
149595 + *                          state change
149596 + ******************************************************************************/
149598 +#ifdef CONFIG_LRU_GEN_ENABLED
149599 +DEFINE_STATIC_KEY_TRUE(lru_gen_static_key);
149600 +#else
149601 +DEFINE_STATIC_KEY_FALSE(lru_gen_static_key);
149602 +#endif
149604 +static DEFINE_MUTEX(lru_gen_state_mutex);
149605 +static int lru_gen_nr_swapfiles __read_mostly;
149607 +static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
149608 +{
149609 +       int gen, file, zone;
149610 +       enum lru_list lru;
149611 +       struct lrugen *lrugen = &lruvec->evictable;
149613 +       for_each_evictable_lru(lru) {
149614 +               file = is_file_lru(lru);
149616 +               if (lrugen->enabled[file] && !list_empty(&lruvec->lists[lru]))
149617 +                       return false;
149618 +       }
149620 +       for_each_gen_type_zone(gen, file, zone) {
149621 +               if (!lrugen->enabled[file] && !list_empty(&lrugen->lists[gen][file][zone]))
149622 +                       return false;
149624 +               VM_WARN_ONCE(!lrugen->enabled[file] && lrugen->sizes[gen][file][zone],
149625 +                            "lru_gen: possible unbalanced number of pages");
149626 +       }
149628 +       return true;
149629 +}
149631 +static bool fill_lru_gen_lists(struct lruvec *lruvec)
149632 +{
149633 +       enum lru_list lru;
149634 +       int batch_size = 0;
149636 +       for_each_evictable_lru(lru) {
149637 +               int file = is_file_lru(lru);
149638 +               bool active = is_active_lru(lru);
149639 +               struct list_head *head = &lruvec->lists[lru];
149641 +               if (!lruvec->evictable.enabled[file])
149642 +                       continue;
149644 +               while (!list_empty(head)) {
149645 +                       bool success;
149646 +                       struct page *page = lru_to_page(head);
149648 +                       VM_BUG_ON_PAGE(PageTail(page), page);
149649 +                       VM_BUG_ON_PAGE(PageUnevictable(page), page);
149650 +                       VM_BUG_ON_PAGE(PageActive(page) != active, page);
149651 +                       VM_BUG_ON_PAGE(page_lru_gen(page) != -1, page);
149652 +                       VM_BUG_ON_PAGE(page_is_file_lru(page) != file, page);
149654 +                       prefetchw_prev_lru_page(page, head, flags);
149656 +                       del_page_from_lru_list(page, lruvec);
149657 +                       success = lru_gen_addition(page, lruvec, true);
149658 +                       VM_BUG_ON(!success);
149660 +                       if (++batch_size == MAX_BATCH_SIZE)
149661 +                               return false;
149662 +               }
149663 +       }
149665 +       return true;
149666 +}
149668 +static bool drain_lru_gen_lists(struct lruvec *lruvec)
149669 +{
149670 +       int gen, file, zone;
149671 +       int batch_size = 0;
149673 +       for_each_gen_type_zone(gen, file, zone) {
149674 +               struct list_head *head = &lruvec->evictable.lists[gen][file][zone];
149676 +               if (lruvec->evictable.enabled[file])
149677 +                       continue;
149679 +               while (!list_empty(head)) {
149680 +                       bool success;
149681 +                       struct page *page = lru_to_page(head);
149683 +                       VM_BUG_ON_PAGE(PageTail(page), page);
149684 +                       VM_BUG_ON_PAGE(PageUnevictable(page), page);
149685 +                       VM_BUG_ON_PAGE(PageActive(page), page);
149686 +                       VM_BUG_ON_PAGE(page_is_file_lru(page) != file, page);
149687 +                       VM_BUG_ON_PAGE(page_zonenum(page) != zone, page);
149689 +                       prefetchw_prev_lru_page(page, head, flags);
149691 +                       success = lru_gen_deletion(page, lruvec);
149692 +                       VM_BUG_ON(!success);
149693 +                       add_page_to_lru_list(page, lruvec);
149695 +                       if (++batch_size == MAX_BATCH_SIZE)
149696 +                               return false;
149697 +               }
149698 +       }
149700 +       return true;
149701 +}
149703 +/*
149704 + * For file page tracking, we enable/disable it according to the main switch.
149705 + * For anon page tracking, we only enable it when the main switch is on and
149706 + * there is at least one swapfile; we disable it when there are no swapfiles
149707 + * regardless of the value of the main switch. Otherwise, we will eventually
149708 + * reach the max size of the sliding window and have to call inc_min_seq(),
149709 + * which brings unnecessary overhead.
149710 + */
149711 +void lru_gen_set_state(bool enable, bool main, bool swap)
149712 +{
149713 +       struct mem_cgroup *memcg;
149715 +       mem_hotplug_begin();
149716 +       mutex_lock(&lru_gen_state_mutex);
149717 +       cgroup_lock();
149719 +       main = main && enable != lru_gen_enabled();
149720 +       swap = swap && !(enable ? lru_gen_nr_swapfiles++ : --lru_gen_nr_swapfiles);
149721 +       swap = swap && lru_gen_enabled();
149722 +       if (!main && !swap)
149723 +               goto unlock;
149725 +       if (main) {
149726 +               if (enable)
149727 +                       static_branch_enable(&lru_gen_static_key);
149728 +               else
149729 +                       static_branch_disable(&lru_gen_static_key);
149730 +       }
149732 +       memcg = mem_cgroup_iter(NULL, NULL, NULL);
149733 +       do {
149734 +               int nid;
149736 +               for_each_node_state(nid, N_MEMORY) {
149737 +                       struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
149738 +                       struct lrugen *lrugen = &lruvec->evictable;
149740 +                       spin_lock_irq(&lruvec->lru_lock);
149742 +                       VM_BUG_ON(!seq_is_valid(lruvec));
149743 +                       VM_BUG_ON(!state_is_valid(lruvec));
149745 +                       WRITE_ONCE(lrugen->enabled[0], lru_gen_enabled() && lru_gen_nr_swapfiles);
149746 +                       WRITE_ONCE(lrugen->enabled[1], lru_gen_enabled());
149748 +                       while (!(enable ? fill_lru_gen_lists(lruvec) :
149749 +                                         drain_lru_gen_lists(lruvec))) {
149750 +                               spin_unlock_irq(&lruvec->lru_lock);
149751 +                               cond_resched();
149752 +                               spin_lock_irq(&lruvec->lru_lock);
149753 +                       }
149755 +                       spin_unlock_irq(&lruvec->lru_lock);
149756 +               }
149758 +               cond_resched();
149759 +       } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
149760 +unlock:
149761 +       cgroup_unlock();
149762 +       mutex_unlock(&lru_gen_state_mutex);
149763 +       mem_hotplug_done();
149764 +}
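Editor's note: the enabled[] pair written for every lruvec reduces to a small truth table: file tracking follows the main switch alone, while anon tracking additionally requires at least one swapfile. A standalone restatement for illustration:

#include <stdio.h>

int main(void)
{
        for (int on = 0; on <= 1; on++)
                for (int swapfiles = 0; swapfiles <= 1; swapfiles++)
                        printf("main=%d swapfiles=%d -> anon=%d file=%d\n",
                               on, swapfiles, on && swapfiles, on);
        return 0;
}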
149766 +static int __meminit __maybe_unused lru_gen_online_mem(struct notifier_block *self,
149767 +                                                      unsigned long action, void *arg)
149768 +{
149769 +       struct mem_cgroup *memcg;
149770 +       struct memory_notify *mnb = arg;
149771 +       int nid = mnb->status_change_nid;
149773 +       if (action != MEM_GOING_ONLINE || nid == NUMA_NO_NODE)
149774 +               return NOTIFY_DONE;
149776 +       mutex_lock(&lru_gen_state_mutex);
149777 +       cgroup_lock();
149779 +       memcg = mem_cgroup_iter(NULL, NULL, NULL);
149780 +       do {
149781 +               struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
149782 +               struct lrugen *lrugen = &lruvec->evictable;
149784 +               VM_BUG_ON(!seq_is_valid(lruvec));
149785 +               VM_BUG_ON(!state_is_valid(lruvec));
149787 +               WRITE_ONCE(lrugen->enabled[0], lru_gen_enabled() && lru_gen_nr_swapfiles);
149788 +               WRITE_ONCE(lrugen->enabled[1], lru_gen_enabled());
149789 +       } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
149791 +       cgroup_unlock();
149792 +       mutex_unlock(&lru_gen_state_mutex);
149794 +       return NOTIFY_DONE;
149795 +}
149797 +/******************************************************************************
149798 + *                          sysfs interface
149799 + ******************************************************************************/
149801 +static ssize_t show_lru_gen_spread(struct kobject *kobj, struct kobj_attribute *attr,
149802 +                                  char *buf)
149803 +{
149804 +       return sprintf(buf, "%d\n", READ_ONCE(lru_gen_spread));
149805 +}
149807 +static ssize_t store_lru_gen_spread(struct kobject *kobj, struct kobj_attribute *attr,
149808 +                                   const char *buf, size_t len)
149809 +{
149810 +       int spread;
149812 +       if (kstrtoint(buf, 10, &spread) || spread >= MAX_NR_GENS)
149813 +               return -EINVAL;
149815 +       WRITE_ONCE(lru_gen_spread, spread);
149817 +       return len;
149818 +}
149820 +static struct kobj_attribute lru_gen_spread_attr = __ATTR(
149821 +       spread, 0644, show_lru_gen_spread, store_lru_gen_spread
149822 +);
149824 +static ssize_t show_lru_gen_enabled(struct kobject *kobj, struct kobj_attribute *attr,
149825 +                                   char *buf)
149826 +{
149827 +       return snprintf(buf, PAGE_SIZE, "%ld\n", lru_gen_enabled());
149828 +}
149830 +static ssize_t store_lru_gen_enabled(struct kobject *kobj, struct kobj_attribute *attr,
149831 +                                    const char *buf, size_t len)
149832 +{
149833 +       int enable;
149835 +       if (kstrtoint(buf, 10, &enable))
149836 +               return -EINVAL;
149838 +       lru_gen_set_state(enable, true, false);
149840 +       return len;
149841 +}
149843 +static struct kobj_attribute lru_gen_enabled_attr = __ATTR(
149844 +       enabled, 0644, show_lru_gen_enabled, store_lru_gen_enabled
149845 +);
149847 +static struct attribute *lru_gen_attrs[] = {
149848 +       &lru_gen_spread_attr.attr,
149849 +       &lru_gen_enabled_attr.attr,
149850 +       NULL
149851 +};
149853 +static struct attribute_group lru_gen_attr_group = {
149854 +       .name = "lru_gen",
149855 +       .attrs = lru_gen_attrs,
149856 +};
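Editor's note: assuming the group is registered against the mm kobject elsewhere in this patch, as in other multigenerational LRU postings, these two attributes would surface as /sys/kernel/mm/lru_gen/spread and /sys/kernel/mm/lru_gen/enabled, each read and written as a plain decimal integer.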
149858 +/******************************************************************************
149859 + *                          debugfs interface
149860 + ******************************************************************************/
149862 +static void *lru_gen_seq_start(struct seq_file *m, loff_t *pos)
149863 +{
149864 +       struct mem_cgroup *memcg;
149865 +       loff_t nr_to_skip = *pos;
149867 +       m->private = kzalloc(PATH_MAX, GFP_KERNEL);
149868 +       if (!m->private)
149869 +               return ERR_PTR(-ENOMEM);
149871 +       memcg = mem_cgroup_iter(NULL, NULL, NULL);
149872 +       do {
149873 +               int nid;
149875 +               for_each_node_state(nid, N_MEMORY) {
149876 +                       if (!nr_to_skip--)
149877 +                               return mem_cgroup_lruvec(memcg, NODE_DATA(nid));
149878 +               }
149879 +       } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
149881 +       return NULL;
149882 +}
149884 +static void lru_gen_seq_stop(struct seq_file *m, void *v)
149885 +{
149886 +       if (!IS_ERR_OR_NULL(v))
149887 +               mem_cgroup_iter_break(NULL, lruvec_memcg(v));
149889 +       kfree(m->private);
149890 +       m->private = NULL;
149891 +}
149893 +static void *lru_gen_seq_next(struct seq_file *m, void *v, loff_t *pos)
149894 +{
149895 +       int nid = lruvec_pgdat(v)->node_id;
149896 +       struct mem_cgroup *memcg = lruvec_memcg(v);
149898 +       ++*pos;
149900 +       nid = next_memory_node(nid);
149901 +       if (nid == MAX_NUMNODES) {
149902 +               memcg = mem_cgroup_iter(NULL, memcg, NULL);
149903 +               if (!memcg)
149904 +                       return NULL;
149906 +               nid = first_memory_node;
149907 +       }
149909 +       return mem_cgroup_lruvec(memcg, NODE_DATA(nid));
149910 +}
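Editor's note: the debugfs iterator advances a two-level cursor: the node within the current memcg, then the next memcg once the nodes are exhausted. A toy model of lru_gen_seq_next()'s advance rule, with small arrays standing in for the memcg and memory-node iterators:

#include <stdio.h>

#define NR_MEMCGS 2
#define NR_NODES  2

int main(void)
{
        int memcg = 0, nid = 0;

        while (memcg < NR_MEMCGS) {
                printf("show memcg %d node %d\n", memcg, nid);
                if (++nid == NR_NODES) {        /* like hitting MAX_NUMNODES */
                        nid = 0;                /* back to first_memory_node */
                        memcg++;                /* next memcg, or stop */
                }
        }
        return 0;
}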
149912 +static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec,
149913 +                                 unsigned long max_seq, unsigned long *min_seq,
149914 +                                 unsigned long seq)
149915 +{
149916 +       int i;
149917 +       int file, tier;
149918 +       int sid = sid_from_seq_or_gen(seq);
149919 +       struct lrugen *lrugen = &lruvec->evictable;
149920 +       int nid = lruvec_pgdat(lruvec)->node_id;
149921 +       struct mem_cgroup *memcg = lruvec_memcg(lruvec);
149922 +       struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
149924 +       for (tier = 0; tier < MAX_NR_TIERS; tier++) {
149925 +               seq_printf(m, "            %10d", tier);
149926 +               for (file = 0; file < ANON_AND_FILE; file++) {
149927 +                       unsigned long n[3] = {};
149929 +                       if (seq == max_seq) {
149930 +                               n[0] = READ_ONCE(lrugen->avg_refaulted[file][tier]);
149931 +                               n[1] = READ_ONCE(lrugen->avg_total[file][tier]);
149933 +                               seq_printf(m, " %10luR %10luT %10lu ", n[0], n[1], n[2]);
149934 +                       } else if (seq == min_seq[file] || NR_STAT_GENS > 1) {
149935 +                               n[0] = atomic_long_read(&lrugen->refaulted[sid][file][tier]);
149936 +                               n[1] = atomic_long_read(&lrugen->evicted[sid][file][tier]);
149937 +                               if (tier)
149938 +                                       n[2] = READ_ONCE(lrugen->activated[sid][file][tier - 1]);
149940 +                               seq_printf(m, " %10lur %10lue %10lua", n[0], n[1], n[2]);
149941 +                       } else
149942 +                               seq_puts(m, "          0           0           0 ");
149943 +               }
149944 +               seq_putc(m, '\n');
149945 +       }
149947 +       seq_puts(m, "                      ");
149948 +       for (i = 0; i < NR_MM_STATS; i++) {
149949 +               if (seq == max_seq && NR_STAT_GENS == 1)
149950 +                       seq_printf(m, " %10lu%c", READ_ONCE(mm_list->nodes[nid].stats[sid][i]),
149951 +                                  toupper(MM_STAT_CODES[i]));
149952 +               else if (seq != max_seq && NR_STAT_GENS > 1)
149953 +                       seq_printf(m, " %10lu%c", READ_ONCE(mm_list->nodes[nid].stats[sid][i]),
149954 +                                  MM_STAT_CODES[i]);
149955 +               else
149956 +                       seq_puts(m, "          0 ");
149957 +       }
149958 +       seq_putc(m, '\n');
149959 +}
149961 +static int lru_gen_seq_show(struct seq_file *m, void *v)
149962 +{
149963 +       unsigned long seq;
149964 +       bool full = !debugfs_real_fops(m->file)->write;
149965 +       struct lruvec *lruvec = v;
149966 +       struct lrugen *lrugen = &lruvec->evictable;
149967 +       int nid = lruvec_pgdat(lruvec)->node_id;
149968 +       struct mem_cgroup *memcg = lruvec_memcg(lruvec);
149969 +       DEFINE_MAX_SEQ();
149970 +       DEFINE_MIN_SEQ();
149972 +       if (nid == first_memory_node) {
149973 +#ifdef CONFIG_MEMCG
149974 +               if (memcg)
149975 +                       cgroup_path(memcg->css.cgroup, m->private, PATH_MAX);
149976 +#endif
149977 +               seq_printf(m, "memcg %5hu %s\n",
149978 +                          mem_cgroup_id(memcg), (char *)m->private);
149979 +       }
149981 +       seq_printf(m, " node %5d %10d\n", nid, atomic_read(&lrugen->priority));
149983 +       seq = full ? (max_seq < MAX_NR_GENS ? 0 : max_seq - MAX_NR_GENS + 1) :
149984 +                    min(min_seq[0], min_seq[1]);
149986 +       for (; seq <= max_seq; seq++) {
149987 +               int gen, file, zone;
149988 +               unsigned int msecs;
149990 +               gen = lru_gen_from_seq(seq);
149991 +               msecs = jiffies_to_msecs(jiffies - READ_ONCE(lrugen->timestamps[gen]));
149993 +               seq_printf(m, " %10lu %10u", seq, msecs);
149995 +               for (file = 0; file < ANON_AND_FILE; file++) {
149996 +                       long size = 0;
149998 +                       if (seq < min_seq[file]) {
149999 +                               seq_puts(m, "         -0 ");
150000 +                               continue;
150001 +                       }
150003 +                       for (zone = 0; zone < MAX_NR_ZONES; zone++)
150004 +                               size += READ_ONCE(lrugen->sizes[gen][file][zone]);
150006 +                       seq_printf(m, " %10lu ", max(size, 0L));
150007 +               }
150009 +               seq_putc(m, '\n');
150011 +               if (full)
150012 +                       lru_gen_seq_show_full(m, lruvec, max_seq, min_seq, seq);
150013 +       }
150015 +       return 0;
150018 +static const struct seq_operations lru_gen_seq_ops = {
150019 +       .start = lru_gen_seq_start,
150020 +       .stop = lru_gen_seq_stop,
150021 +       .next = lru_gen_seq_next,
150022 +       .show = lru_gen_seq_show,
150025 +static int advance_max_seq(struct lruvec *lruvec, unsigned long seq, int swappiness)
150027 +       struct mm_walk_args args = {};
150028 +       struct scan_control sc = {
150029 +               .target_mem_cgroup = lruvec_memcg(lruvec),
150030 +       };
150031 +       DEFINE_MAX_SEQ();
150033 +       if (seq == max_seq)
150034 +               walk_mm_list(lruvec, max_seq, &sc, swappiness, &args);
150036 +       return seq > max_seq ? -EINVAL : 0;
150039 +static int advance_min_seq(struct lruvec *lruvec, unsigned long seq, int swappiness,
150040 +                          unsigned long nr_to_reclaim)
150042 +       struct blk_plug plug;
150043 +       int err = -EINTR;
150044 +       long nr_to_scan = LONG_MAX;
150045 +       struct scan_control sc = {
150046 +               .nr_to_reclaim = nr_to_reclaim,
150047 +               .target_mem_cgroup = lruvec_memcg(lruvec),
150048 +               .may_writepage = 1,
150049 +               .may_unmap = 1,
150050 +               .may_swap = 1,
150051 +               .reclaim_idx = MAX_NR_ZONES - 1,
150052 +               .gfp_mask = GFP_KERNEL,
150053 +       };
150054 +       DEFINE_MAX_SEQ();
150056 +       if (seq >= max_seq - 1)
150057 +               return -EINVAL;
150059 +       blk_start_plug(&plug);
150061 +       while (!signal_pending(current)) {
150062 +               DEFINE_MIN_SEQ();
150064 +               if (seq < min(min_seq[!swappiness], min_seq[swappiness < 200]) ||
150065 +                   !evict_lru_gen_pages(lruvec, &sc, swappiness, &nr_to_scan)) {
150066 +                       err = 0;
150067 +                       break;
150068 +               }
150070 +               cond_resched();
150071 +       }
150073 +       blk_finish_plug(&plug);
150075 +       return err;
150078 +static int advance_seq(char cmd, int memcg_id, int nid, unsigned long seq,
150079 +                      int swappiness, unsigned long nr_to_reclaim)
150081 +       struct lruvec *lruvec;
150082 +       int err = -EINVAL;
150083 +       struct mem_cgroup *memcg = NULL;
150085 +       if (!mem_cgroup_disabled()) {
150086 +               rcu_read_lock();
150087 +               memcg = mem_cgroup_from_id(memcg_id);
150088 +#ifdef CONFIG_MEMCG
150089 +               if (memcg && !css_tryget(&memcg->css))
150090 +                       memcg = NULL;
150091 +#endif
150092 +               rcu_read_unlock();
150094 +               if (!memcg)
150095 +                       goto done;
150096 +       }
150097 +       if (memcg_id != mem_cgroup_id(memcg))
150098 +               goto done;
150100 +       if (nid < 0 || nid >= MAX_NUMNODES || !node_state(nid, N_MEMORY))
150101 +               goto done;
150103 +       lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
150105 +       if (swappiness == -1)
150106 +               swappiness = get_swappiness(lruvec);
150107 +       else if (swappiness > 200U)
150108 +               goto done;
150110 +       switch (cmd) {
150111 +       case '+':
150112 +               err = advance_max_seq(lruvec, seq, swappiness);
150113 +               break;
150114 +       case '-':
150115 +               err = advance_min_seq(lruvec, seq, swappiness, nr_to_reclaim);
150116 +               break;
150117 +       }
150118 +done:
150119 +       mem_cgroup_put(memcg);
150121 +       return err;
150124 +static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
150125 +                                size_t len, loff_t *pos)
150127 +       void *buf;
150128 +       char *cur, *next;
150129 +       int err = 0;
150131 +       buf = kvmalloc(len + 1, GFP_USER);
150132 +       if (!buf)
150133 +               return -ENOMEM;
150135 +       if (copy_from_user(buf, src, len)) {
150136 +               kvfree(buf);
150137 +               return -EFAULT;
150138 +       }
150140 +       next = buf;
150141 +       next[len] = '\0';
150143 +       while ((cur = strsep(&next, ",;\n"))) {
150144 +               int n;
150145 +               int end;
150146 +               char cmd;
150147 +               int memcg_id;
150148 +               int nid;
150149 +               unsigned long seq;
150150 +               int swappiness = -1;
150151 +               unsigned long nr_to_reclaim = -1;
150153 +               cur = skip_spaces(cur);
150154 +               if (!*cur)
150155 +                       continue;
150157 +               n = sscanf(cur, "%c %u %u %lu %n %u %n %lu %n", &cmd, &memcg_id, &nid,
150158 +                          &seq, &end, &swappiness, &end, &nr_to_reclaim, &end);
150159 +               if (n < 4 || cur[end]) {
150160 +                       err = -EINVAL;
150161 +                       break;
150162 +               }
150164 +               err = advance_seq(cmd, memcg_id, nid, seq, swappiness, nr_to_reclaim);
150165 +               if (err)
150166 +                       break;
150167 +       }
150169 +       kvfree(buf);
150171 +       return err ? : len;
150174 +static int lru_gen_seq_open(struct inode *inode, struct file *file)
150176 +       return seq_open(file, &lru_gen_seq_ops);
150179 +static const struct file_operations lru_gen_rw_fops = {
150180 +       .open = lru_gen_seq_open,
150181 +       .read = seq_read,
150182 +       .write = lru_gen_seq_write,
150183 +       .llseek = seq_lseek,
150184 +       .release = seq_release,
150187 +static const struct file_operations lru_gen_ro_fops = {
150188 +       .open = lru_gen_seq_open,
150189 +       .read = seq_read,
150190 +       .llseek = seq_lseek,
150191 +       .release = seq_release,
150194 +/******************************************************************************
150195 + *                          initialization
150196 + ******************************************************************************/
150198 +void lru_gen_init_lruvec(struct lruvec *lruvec)
150200 +       int i;
150201 +       int gen, file, zone;
150202 +       struct lrugen *lrugen = &lruvec->evictable;
150204 +       atomic_set(&lrugen->priority, DEF_PRIORITY);
150206 +       lrugen->max_seq = MIN_NR_GENS + 1;
150207 +       lrugen->enabled[0] = lru_gen_enabled() && lru_gen_nr_swapfiles;
150208 +       lrugen->enabled[1] = lru_gen_enabled();
150210 +       for (i = 0; i <= MIN_NR_GENS + 1; i++)
150211 +               lrugen->timestamps[i] = jiffies;
150213 +       for_each_gen_type_zone(gen, file, zone)
150214 +               INIT_LIST_HEAD(&lrugen->lists[gen][file][zone]);
150217 +static int __init init_lru_gen(void)
150219 +       BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS);
150220 +       BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS);
150221 +       BUILD_BUG_ON(sizeof(MM_STAT_CODES) != NR_MM_STATS + 1);
150222 +       BUILD_BUG_ON(PMD_SIZE / PAGE_SIZE != PTRS_PER_PTE);
150223 +       BUILD_BUG_ON(PUD_SIZE / PMD_SIZE != PTRS_PER_PMD);
150224 +       BUILD_BUG_ON(P4D_SIZE / PUD_SIZE != PTRS_PER_PUD);
150226 +       if (mem_cgroup_disabled()) {
150227 +               global_mm_list = alloc_mm_list();
150228 +               if (!global_mm_list) {
150229 +                       pr_err("lru_gen: failed to allocate global mm_struct list\n");
150230 +                       return -ENOMEM;
150231 +               }
150232 +       }
150234 +       if (hotplug_memory_notifier(lru_gen_online_mem, 0))
150235 +               pr_err("lru_gen: failed to subscribe hotplug notifications\n");
150237 +       if (sysfs_create_group(mm_kobj, &lru_gen_attr_group))
150238 +               pr_err("lru_gen: failed to create sysfs group\n");
150240 +       debugfs_create_file("lru_gen", 0644, NULL, NULL, &lru_gen_rw_fops);
150241 +       debugfs_create_file("lru_gen_full", 0444, NULL, NULL, &lru_gen_ro_fops);
150243 +       return 0;
150245 +/*
150246 + * We want to run as early as possible because some debug code, e.g.,

150247 + * dma_resv_lockdep(), calls mm_alloc() and mmput(). We only depend on mm_kobj,
150248 + * which is initialized one stage earlier.
150249 + */
150250 +arch_initcall(init_lru_gen);
150252 +#endif /* CONFIG_LRU_GEN */
150253 diff --git a/mm/workingset.c b/mm/workingset.c
150254 index cd39902c1062..df363f9419fc 100644
150255 --- a/mm/workingset.c
150256 +++ b/mm/workingset.c
150257 @@ -168,9 +168,9 @@
150258   * refault distance will immediately activate the refaulting page.
150259   */
150261 -#define EVICTION_SHIFT ((BITS_PER_LONG - BITS_PER_XA_VALUE) +  \
150262 -                        1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
150263 -#define EVICTION_MASK  (~0UL >> EVICTION_SHIFT)
150264 +#define EVICTION_SHIFT         (BITS_PER_XA_VALUE - MEM_CGROUP_ID_SHIFT - NODES_SHIFT)
150265 +#define EVICTION_MASK          (BIT(EVICTION_SHIFT) - 1)
150266 +#define WORKINGSET_WIDTH       1
150269   * Eviction timestamps need to be able to cover the full range of
150270 @@ -182,38 +182,139 @@
150271   */
150272  static unsigned int bucket_order __read_mostly;
150274 -static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
150275 -                        bool workingset)
150276 +static void *pack_shadow(int memcg_id, struct pglist_data *pgdat, unsigned long val)
150278 -       eviction >>= bucket_order;
150279 -       eviction &= EVICTION_MASK;
150280 -       eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
150281 -       eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
150282 -       eviction = (eviction << 1) | workingset;
150283 +       val = (val << MEM_CGROUP_ID_SHIFT) | memcg_id;
150284 +       val = (val << NODES_SHIFT) | pgdat->node_id;
150286 -       return xa_mk_value(eviction);
150287 +       return xa_mk_value(val);
150290 -static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
150291 -                         unsigned long *evictionp, bool *workingsetp)
150292 +static unsigned long unpack_shadow(void *shadow, int *memcg_id, struct pglist_data **pgdat)
150294 -       unsigned long entry = xa_to_value(shadow);
150295 -       int memcgid, nid;
150296 -       bool workingset;
150297 +       unsigned long val = xa_to_value(shadow);
150299 +       *pgdat = NODE_DATA(val & (BIT(NODES_SHIFT) - 1));
150300 +       val >>= NODES_SHIFT;
150301 +       *memcg_id = val & (BIT(MEM_CGROUP_ID_SHIFT) - 1);
150303 +       return val >> MEM_CGROUP_ID_SHIFT;
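A minimal, compilable sketch of the new shadow layout (shift values illustrative; the real ones are Kconfig-dependent, and unsigned long is assumed 64-bit as in the kernel code), showing that unpack_shadow() inverts pack_shadow():

        #include <assert.h>

        #define NODES_SHIFT             6       /* illustrative */
        #define MEM_CGROUP_ID_SHIFT     16      /* illustrative */

        static unsigned long pack(int memcg_id, int nid, unsigned long val)
        {
                val = (val << MEM_CGROUP_ID_SHIFT) | memcg_id;
                val = (val << NODES_SHIFT) | nid;
                return val;
        }

        static unsigned long unpack(unsigned long entry, int *memcg_id, int *nid)
        {
                *nid = entry & ((1UL << NODES_SHIFT) - 1);
                entry >>= NODES_SHIFT;
                *memcg_id = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
                return entry >> MEM_CGROUP_ID_SHIFT;
        }

        int main(void)
        {
                int memcg_id, nid;

                assert(unpack(pack(42, 3, 0x123), &memcg_id, &nid) == 0x123);
                assert(memcg_id == 42 && nid == 3);
                return 0;
        }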
150306 +#ifdef CONFIG_LRU_GEN
150308 +#if LRU_GEN_SHIFT + LRU_USAGE_SHIFT >= EVICTION_SHIFT
150309 +#error "Please try smaller NODES_SHIFT, NR_LRU_GENS and TIERS_PER_GEN configurations"
150310 +#endif
150312 +static void page_set_usage(struct page *page, int usage)
150314 +       unsigned long old_flags, new_flags;
150316 +       VM_BUG_ON(usage > BIT(LRU_USAGE_WIDTH));
150318 +       if (!usage)
150319 +               return;
150321 +       do {
150322 +               old_flags = READ_ONCE(page->flags);
150323 +               new_flags = (old_flags & ~LRU_USAGE_MASK) | LRU_TIER_FLAGS |
150324 +                           ((usage - 1UL) << LRU_USAGE_PGOFF);
150325 +               if (old_flags == new_flags)
150326 +                       break;
150327 +       } while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
150330 +/* Return a token to be stored in the shadow entry of a page being evicted. */
150331 +static void *lru_gen_eviction(struct page *page)
150333 +       int sid, tier;
150334 +       unsigned long token;
150335 +       unsigned long min_seq;
150336 +       struct lruvec *lruvec;
150337 +       struct lrugen *lrugen;
150338 +       int file = page_is_file_lru(page);
150339 +       int usage = page_tier_usage(page);
150340 +       struct mem_cgroup *memcg = page_memcg(page);
150341 +       struct pglist_data *pgdat = page_pgdat(page);
150343 +       if (!lru_gen_enabled())
150344 +               return NULL;
150346 +       lruvec = mem_cgroup_lruvec(memcg, pgdat);
150347 +       lrugen = &lruvec->evictable;
150348 +       min_seq = READ_ONCE(lrugen->min_seq[file]);
150349 +       token = (min_seq << LRU_USAGE_SHIFT) | usage;
150351 +       sid = sid_from_seq_or_gen(min_seq);
150352 +       tier = lru_tier_from_usage(usage);
150353 +       atomic_long_add(thp_nr_pages(page), &lrugen->evicted[sid][file][tier]);
150355 +       return pack_shadow(mem_cgroup_id(memcg), pgdat, token);
150358 +/* Account a refaulted page based on the token stored in its shadow entry. */
150359 +static bool lru_gen_refault(struct page *page, void *shadow)
150361 +       int sid, tier, usage;
150362 +       int memcg_id;
150363 +       unsigned long token;
150364 +       unsigned long min_seq;
150365 +       struct lruvec *lruvec;
150366 +       struct lrugen *lrugen;
150367 +       struct pglist_data *pgdat;
150368 +       struct mem_cgroup *memcg;
150369 +       int file = page_is_file_lru(page);
150371 +       if (!lru_gen_enabled())
150372 +               return false;
150374 +       token = unpack_shadow(shadow, &memcg_id, &pgdat);
150375 +       if (page_pgdat(page) != pgdat)
150376 +               return true;
150378 +       rcu_read_lock();
150379 +       memcg = page_memcg_rcu(page);
150380 +       if (mem_cgroup_id(memcg) != memcg_id)
150381 +               goto unlock;
150383 +       usage = token & (BIT(LRU_USAGE_SHIFT) - 1);
150384 +       token >>= LRU_USAGE_SHIFT;
150386 +       lruvec = mem_cgroup_lruvec(memcg, pgdat);
150387 +       lrugen = &lruvec->evictable;
150388 +       min_seq = READ_ONCE(lrugen->min_seq[file]);
150389 +       if (token != (min_seq & (EVICTION_MASK >> LRU_USAGE_SHIFT)))
150390 +               goto unlock;
150392 -       workingset = entry & 1;
150393 -       entry >>= 1;
150394 -       nid = entry & ((1UL << NODES_SHIFT) - 1);
150395 -       entry >>= NODES_SHIFT;
150396 -       memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
150397 -       entry >>= MEM_CGROUP_ID_SHIFT;
150399 -       *memcgidp = memcgid;
150400 -       *pgdat = NODE_DATA(nid);
150401 -       *evictionp = entry << bucket_order;
150402 -       *workingsetp = workingset;
150403 +       page_set_usage(page, usage);
150405 +       sid = sid_from_seq_or_gen(min_seq);
150406 +       tier = lru_tier_from_usage(usage);
150407 +       atomic_long_add(thp_nr_pages(page), &lrugen->refaulted[sid][file][tier]);
150408 +       inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);
150409 +       if (tier)
150410 +               inc_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file);
150411 +unlock:
150412 +       rcu_read_unlock();
150414 +       return true;
150417 +#else /* CONFIG_LRU_GEN */
150419 +static void *lru_gen_eviction(struct page *page)
150421 +       return NULL;
150424 +static bool lru_gen_refault(struct page *page, void *shadow)
150426 +       return false;
150429 +#endif /* CONFIG_LRU_GEN */
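A small sketch (widths illustrative, 64-bit unsigned long assumed) of the token round trip between lru_gen_eviction() and lru_gen_refault() above, including the staleness test: a refault only counts if the min_seq stored at eviction still matches the lruvec's current one.

        #include <assert.h>

        #define LRU_USAGE_SHIFT 2       /* illustrative */
        #define EVICTION_SHIFT  36      /* illustrative */
        #define EVICTION_MASK   ((1UL << EVICTION_SHIFT) - 1)

        int main(void)
        {
                unsigned long min_seq = 7, usage = 3;
                unsigned long token = (min_seq << LRU_USAGE_SHIFT) | usage;

                /* unpack, as lru_gen_refault() does */
                unsigned long u = token & ((1UL << LRU_USAGE_SHIFT) - 1);
                token >>= LRU_USAGE_SHIFT;

                assert(u == usage);
                /* shadow entry is fresh: stored seq matches current min_seq */
                assert(token == (min_seq & (EVICTION_MASK >> LRU_USAGE_SHIFT)));
                return 0;
        }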
150431  /**
150432   * workingset_age_nonresident - age non-resident entries as LRU ages
150433   * @lruvec: the lruvec that was aged
150434 @@ -256,18 +357,25 @@ void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
150435         unsigned long eviction;
150436         struct lruvec *lruvec;
150437         int memcgid;
150438 +       void *shadow;
150440         /* Page is fully exclusive and pins page's memory cgroup pointer */
150441         VM_BUG_ON_PAGE(PageLRU(page), page);
150442         VM_BUG_ON_PAGE(page_count(page), page);
150443         VM_BUG_ON_PAGE(!PageLocked(page), page);
150445 +       shadow = lru_gen_eviction(page);
150446 +       if (shadow)
150447 +               return shadow;
150449         lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
150450         /* XXX: target_memcg can be NULL, go through lruvec */
150451         memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
150452         eviction = atomic_long_read(&lruvec->nonresident_age);
150453 +       eviction >>= bucket_order;
150454 +       eviction = (eviction << WORKINGSET_WIDTH) | PageWorkingset(page);
150455         workingset_age_nonresident(lruvec, thp_nr_pages(page));
150456 -       return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
150457 +       return pack_shadow(memcgid, pgdat, eviction);
150460  /**
150461 @@ -294,7 +402,10 @@ void workingset_refault(struct page *page, void *shadow)
150462         bool workingset;
150463         int memcgid;
150465 -       unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);
150466 +       if (lru_gen_refault(page, shadow))
150467 +               return;
150469 +       eviction = unpack_shadow(shadow, &memcgid, &pgdat);
150471         rcu_read_lock();
150472         /*
150473 @@ -318,6 +429,8 @@ void workingset_refault(struct page *page, void *shadow)
150474                 goto out;
150475         eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
150476         refault = atomic_long_read(&eviction_lruvec->nonresident_age);
150477 +       workingset = eviction & (BIT(WORKINGSET_WIDTH) - 1);
150478 +       eviction = (eviction >> WORKINGSET_WIDTH) << bucket_order;
150480         /*
150481          * Calculate the refault distance
150482 @@ -335,7 +448,7 @@ void workingset_refault(struct page *page, void *shadow)
150483          * longest time, so the occasional inappropriate activation
150484          * leading to pressure on the active list is not a problem.
150485          */
150486 -       refault_distance = (refault - eviction) & EVICTION_MASK;
150487 +       refault_distance = (refault - eviction) & (EVICTION_MASK >> WORKINGSET_WIDTH);
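A compilable sketch of why this masked subtraction stays correct when the nonresident_age counter wraps (EVICTION_SHIFT value illustrative; the timestamp only occupies that many bits, so the distance is computed modulo that width):

        #include <assert.h>

        #define EVICTION_SHIFT  36      /* illustrative; config-dependent */
        #define EVICTION_MASK   ((1UL << EVICTION_SHIFT) - 1)

        int main(void)
        {
                unsigned long eviction = EVICTION_MASK - 5;     /* just before wrap */
                unsigned long refault  = 10;                    /* just after wrap */

                /* 6 ticks to the wrap point plus 10 after it */
                assert(((refault - eviction) & EVICTION_MASK) == 16);
                return 0;
        }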
150489         /*
150490          * The activation decision for this page is made at the level
150491 @@ -594,7 +707,7 @@ static int __init workingset_init(void)
150492         unsigned int max_order;
150493         int ret;
150495 -       BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
150496 +       BUILD_BUG_ON(EVICTION_SHIFT < WORKINGSET_WIDTH);
150497         /*
150498          * Calculate the eviction bucket size to cover the longest
150499          * actionable refault distance, which is currently half of
150500 @@ -602,7 +715,7 @@ static int __init workingset_init(void)
150501          * some more pages at runtime, so keep working with up to
150502          * double the initial memory by using totalram_pages as-is.
150503          */
150504 -       timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
150505 +       timestamp_bits = EVICTION_SHIFT - WORKINGSET_WIDTH;
150506         max_order = fls_long(totalram_pages() - 1);
150507         if (max_order > timestamp_bits)
150508                 bucket_order = max_order - timestamp_bits;
150509 diff --git a/net/bluetooth/ecdh_helper.h b/net/bluetooth/ecdh_helper.h
150510 index a6f8d03d4aaf..830723971cf8 100644
150511 --- a/net/bluetooth/ecdh_helper.h
150512 +++ b/net/bluetooth/ecdh_helper.h
150513 @@ -25,6 +25,6 @@
150515  int compute_ecdh_secret(struct crypto_kpp *tfm, const u8 pair_public_key[64],
150516                         u8 secret[32]);
150517 -int set_ecdh_privkey(struct crypto_kpp *tfm, const u8 *private_key);
150518 +int set_ecdh_privkey(struct crypto_kpp *tfm, const u8 private_key[32]);
150519  int generate_ecdh_public_key(struct crypto_kpp *tfm, u8 public_key[64]);
150520  int generate_ecdh_keys(struct crypto_kpp *tfm, u8 public_key[64]);
150521 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
150522 index 6ffa89e3ba0a..f72646690539 100644
150523 --- a/net/bluetooth/hci_conn.c
150524 +++ b/net/bluetooth/hci_conn.c
150525 @@ -1830,8 +1830,6 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
150527         u32 phys = 0;
150529 -       hci_dev_lock(conn->hdev);
150531         /* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
150532          * Table 6.2: Packets defined for synchronous, asynchronous, and
150533          * CSB logical transport types.
150534 @@ -1928,7 +1926,5 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
150535                 break;
150536         }
150538 -       hci_dev_unlock(conn->hdev);
150540         return phys;
150542 diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
150543 index 67668be3461e..82f4973a011d 100644
150544 --- a/net/bluetooth/hci_event.c
150545 +++ b/net/bluetooth/hci_event.c
150546 @@ -5005,6 +5005,7 @@ static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
150547                 return;
150549         hchan->handle = le16_to_cpu(ev->handle);
150550 +       hchan->amp = true;
150552         BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
150554 @@ -5037,7 +5038,7 @@ static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
150555         hci_dev_lock(hdev);
150557         hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
150558 -       if (!hchan)
150559 +       if (!hchan || !hchan->amp)
150560                 goto unlock;
150562         amp_destroy_logical_link(hchan, ev->reason);
150563 @@ -5911,7 +5912,7 @@ static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
150565         BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
150567 -       if (!ev->status)
150568 +       if (ev->status)
150569                 return;
150571         hci_dev_lock(hdev);
150572 diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
150573 index e55976db4403..805ce546b813 100644
150574 --- a/net/bluetooth/hci_request.c
150575 +++ b/net/bluetooth/hci_request.c
150576 @@ -272,12 +272,16 @@ int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
150578         int ret;
150580 -       if (!test_bit(HCI_UP, &hdev->flags))
150581 -               return -ENETDOWN;
150583         /* Serialize all requests */
150584         hci_req_sync_lock(hdev);
150585 -       ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
150586 +       /* check the state after obtaining the lock to protect the HCI_UP
150587 +        * against any races from hci_dev_do_close when the controller
150588 +        * gets removed.
150589 +        */
150590 +       if (test_bit(HCI_UP, &hdev->flags))
150591 +               ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
150592 +       else
150593 +               ret = -ENETDOWN;
150594         hci_req_sync_unlock(hdev);
150596         return ret;
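The comment above describes a classic check-then-use race; a minimal user-space analogue (illustrative only, not kernel code) of the corrected ordering, where the "up" flag is tested only while holding the same lock the close path takes:

        #include <pthread.h>
        #include <stdbool.h>
        #include <stdio.h>

        static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;
        static bool dev_up = true;

        static int sync_request(void)
        {
                int ret;

                pthread_mutex_lock(&req_lock);
                if (dev_up)
                        ret = 0;        /* __hci_req_sync() would run here */
                else
                        ret = -1;       /* stands in for -ENETDOWN */
                pthread_mutex_unlock(&req_lock);
                return ret;
        }

        int main(void)
        {
                printf("request: %d\n", sync_request());
                return 0;
        }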
150597 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
150598 index 72c2f5226d67..53ddbee459b9 100644
150599 --- a/net/bluetooth/l2cap_core.c
150600 +++ b/net/bluetooth/l2cap_core.c
150601 @@ -451,6 +451,8 @@ struct l2cap_chan *l2cap_chan_create(void)
150602         if (!chan)
150603                 return NULL;
150605 +       skb_queue_head_init(&chan->tx_q);
150606 +       skb_queue_head_init(&chan->srej_q);
150607         mutex_init(&chan->lock);
150609         /* Set default lock nesting level */
150610 @@ -516,7 +518,9 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan)
150611         chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
150612         chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
150613         chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
150615         chan->conf_state = 0;
150616 +       set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
150618         set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
150620 diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
150621 index f1b1edd0b697..c99d65ef13b1 100644
150622 --- a/net/bluetooth/l2cap_sock.c
150623 +++ b/net/bluetooth/l2cap_sock.c
150624 @@ -179,9 +179,17 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
150625         struct l2cap_chan *chan = l2cap_pi(sk)->chan;
150626         struct sockaddr_l2 la;
150627         int len, err = 0;
150628 +       bool zapped;
150630         BT_DBG("sk %p", sk);
150632 +       lock_sock(sk);
150633 +       zapped = sock_flag(sk, SOCK_ZAPPED);
150634 +       release_sock(sk);
150636 +       if (zapped)
150637 +               return -EINVAL;
150639         if (!addr || alen < offsetofend(struct sockaddr, sa_family) ||
150640             addr->sa_family != AF_BLUETOOTH)
150641                 return -EINVAL;
150642 diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
150643 index 74971b4bd457..939c6f77fecc 100644
150644 --- a/net/bluetooth/mgmt.c
150645 +++ b/net/bluetooth/mgmt.c
150646 @@ -7976,7 +7976,6 @@ static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
150647                 goto unlock;
150648         }
150650 -       hdev->cur_adv_instance = cp->instance;
150651         /* Submit request for advertising params if ext adv available */
150652         if (ext_adv_capable(hdev)) {
150653                 hci_req_init(&req, hdev);
150654 diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
150655 index b0c1ee110eff..e03cc284161c 100644
150656 --- a/net/bluetooth/smp.c
150657 +++ b/net/bluetooth/smp.c
150658 @@ -2732,6 +2732,15 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
150659         if (skb->len < sizeof(*key))
150660                 return SMP_INVALID_PARAMS;
150662 +       /* Check if remote and local public keys are the same and debug key is
150663 +        * not in use.
150664 +        */
150665 +       if (!test_bit(SMP_FLAG_DEBUG_KEY, &smp->flags) &&
150666 +           !crypto_memneq(key, smp->local_pk, 64)) {
150667 +               bt_dev_err(hdev, "Remote and local public keys are identical");
150668 +               return SMP_UNSPECIFIED;
150669 +       }
150671         memcpy(smp->remote_pk, key, 64);
150673         if (test_bit(SMP_FLAG_REMOTE_OOB, &smp->flags)) {
150674 diff --git a/net/bridge/br_arp_nd_proxy.c b/net/bridge/br_arp_nd_proxy.c
150675 index dfec65eca8a6..3db1def4437b 100644
150676 --- a/net/bridge/br_arp_nd_proxy.c
150677 +++ b/net/bridge/br_arp_nd_proxy.c
150678 @@ -160,7 +160,9 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br,
150679         if (br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
150680                 if (p && (p->flags & BR_NEIGH_SUPPRESS))
150681                         return;
150682 -               if (ipv4_is_zeronet(sip) || sip == tip) {
150683 +               if (parp->ar_op != htons(ARPOP_RREQUEST) &&
150684 +                   parp->ar_op != htons(ARPOP_RREPLY) &&
150685 +                   (ipv4_is_zeronet(sip) || sip == tip)) {
150686                         /* prevent flooding to neigh suppress ports */
150687                         BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
150688                         return;
150689 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
150690 index 9d265447d654..226bb05c3b42 100644
150691 --- a/net/bridge/br_multicast.c
150692 +++ b/net/bridge/br_multicast.c
150693 @@ -1593,7 +1593,8 @@ static void br_multicast_port_group_rexmit(struct timer_list *t)
150694         spin_unlock(&br->multicast_lock);
150697 -static void br_mc_disabled_update(struct net_device *dev, bool value)
150698 +static int br_mc_disabled_update(struct net_device *dev, bool value,
150699 +                                struct netlink_ext_ack *extack)
150701         struct switchdev_attr attr = {
150702                 .orig_dev = dev,
150703 @@ -1602,11 +1603,13 @@ static void br_mc_disabled_update(struct net_device *dev, bool value)
150704                 .u.mc_disabled = !value,
150705         };
150707 -       switchdev_port_attr_set(dev, &attr, NULL);
150708 +       return switchdev_port_attr_set(dev, &attr, extack);
150711  int br_multicast_add_port(struct net_bridge_port *port)
150713 +       int err;
150715         port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
150716         port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;
150718 @@ -1618,8 +1621,12 @@ int br_multicast_add_port(struct net_bridge_port *port)
150719         timer_setup(&port->ip6_own_query.timer,
150720                     br_ip6_multicast_port_query_expired, 0);
150721  #endif
150722 -       br_mc_disabled_update(port->dev,
150723 -                             br_opt_get(port->br, BROPT_MULTICAST_ENABLED));
150724 +       err = br_mc_disabled_update(port->dev,
150725 +                                   br_opt_get(port->br,
150726 +                                              BROPT_MULTICAST_ENABLED),
150727 +                                   NULL);
150728 +       if (err && err != -EOPNOTSUPP)
150729 +               return err;
150731         port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
150732         if (!port->mcast_stats)
150733 @@ -3152,25 +3159,14 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
150736  #if IS_ENABLED(CONFIG_IPV6)
150737 -static int br_ip6_multicast_mrd_rcv(struct net_bridge *br,
150738 -                                   struct net_bridge_port *port,
150739 -                                   struct sk_buff *skb)
150740 +static void br_ip6_multicast_mrd_rcv(struct net_bridge *br,
150741 +                                    struct net_bridge_port *port,
150742 +                                    struct sk_buff *skb)
150744 -       int ret;
150746 -       if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
150747 -               return -ENOMSG;
150749 -       ret = ipv6_mc_check_icmpv6(skb);
150750 -       if (ret < 0)
150751 -               return ret;
150753         if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
150754 -               return -ENOMSG;
150755 +               return;
150757         br_multicast_mark_router(br, port);
150759 -       return 0;
150762  static int br_multicast_ipv6_rcv(struct net_bridge *br,
150763 @@ -3184,18 +3180,12 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
150765         err = ipv6_mc_check_mld(skb);
150767 -       if (err == -ENOMSG) {
150768 +       if (err == -ENOMSG || err == -ENODATA) {
150769                 if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
150770                         BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
150772 -               if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) {
150773 -                       err = br_ip6_multicast_mrd_rcv(br, port, skb);
150775 -                       if (err < 0 && err != -ENOMSG) {
150776 -                               br_multicast_err_count(br, port, skb->protocol);
150777 -                               return err;
150778 -                       }
150779 -               }
150780 +               if (err == -ENODATA &&
150781 +                   ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
150782 +                       br_ip6_multicast_mrd_rcv(br, port, skb);
150784                 return 0;
150785         } else if (err < 0) {
150786 @@ -3560,16 +3550,23 @@ static void br_multicast_start_querier(struct net_bridge *br,
150787         rcu_read_unlock();
150790 -int br_multicast_toggle(struct net_bridge *br, unsigned long val)
150791 +int br_multicast_toggle(struct net_bridge *br, unsigned long val,
150792 +                       struct netlink_ext_ack *extack)
150794         struct net_bridge_port *port;
150795         bool change_snoopers = false;
150796 +       int err = 0;
150798         spin_lock_bh(&br->multicast_lock);
150799         if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
150800                 goto unlock;
150802 -       br_mc_disabled_update(br->dev, val);
150803 +       err = br_mc_disabled_update(br->dev, val, extack);
150804 +       if (err == -EOPNOTSUPP)
150805 +               err = 0;
150806 +       if (err)
150807 +               goto unlock;
150809         br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
150810         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
150811                 change_snoopers = true;
150812 @@ -3607,7 +3604,7 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
150813                         br_multicast_leave_snoopers(br);
150814         }
150816 -       return 0;
150817 +       return err;
150820  bool br_multicast_enabled(const struct net_device *dev)
150821 diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
150822 index f2b1343f8332..e4e6e991313e 100644
150823 --- a/net/bridge/br_netlink.c
150824 +++ b/net/bridge/br_netlink.c
150825 @@ -103,8 +103,9 @@ static size_t br_get_link_af_size_filtered(const struct net_device *dev,
150827         rcu_read_lock();
150828         if (netif_is_bridge_port(dev)) {
150829 -               p = br_port_get_rcu(dev);
150830 -               vg = nbp_vlan_group_rcu(p);
150831 +               p = br_port_get_check_rcu(dev);
150832 +               if (p)
150833 +                       vg = nbp_vlan_group_rcu(p);
150834         } else if (dev->priv_flags & IFF_EBRIDGE) {
150835                 br = netdev_priv(dev);
150836                 vg = br_vlan_group_rcu(br);
150837 @@ -1293,7 +1294,9 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
150838         if (data[IFLA_BR_MCAST_SNOOPING]) {
150839                 u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);
150841 -               br_multicast_toggle(br, mcast_snooping);
150842 +               err = br_multicast_toggle(br, mcast_snooping, extack);
150843 +               if (err)
150844 +                       return err;
150845         }
150847         if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
150848 diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
150849 index d7d167e10b70..af3430c2d6ea 100644
150850 --- a/net/bridge/br_private.h
150851 +++ b/net/bridge/br_private.h
150852 @@ -810,7 +810,8 @@ void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
150853                         struct sk_buff *skb, bool local_rcv, bool local_orig);
150854  int br_multicast_set_router(struct net_bridge *br, unsigned long val);
150855  int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val);
150856 -int br_multicast_toggle(struct net_bridge *br, unsigned long val);
150857 +int br_multicast_toggle(struct net_bridge *br, unsigned long val,
150858 +                       struct netlink_ext_ack *extack);
150859  int br_multicast_set_querier(struct net_bridge *br, unsigned long val);
150860  int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
150861  int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val);
150862 diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
150863 index 072e29840082..381467b691d5 100644
150864 --- a/net/bridge/br_sysfs_br.c
150865 +++ b/net/bridge/br_sysfs_br.c
150866 @@ -409,17 +409,11 @@ static ssize_t multicast_snooping_show(struct device *d,
150867         return sprintf(buf, "%d\n", br_opt_get(br, BROPT_MULTICAST_ENABLED));
150870 -static int toggle_multicast(struct net_bridge *br, unsigned long val,
150871 -                           struct netlink_ext_ack *extack)
150873 -       return br_multicast_toggle(br, val);
150876  static ssize_t multicast_snooping_store(struct device *d,
150877                                         struct device_attribute *attr,
150878                                         const char *buf, size_t len)
150880 -       return store_bridge_parm(d, buf, len, toggle_multicast);
150881 +       return store_bridge_parm(d, buf, len, br_multicast_toggle);
150883  static DEVICE_ATTR_RW(multicast_snooping);
150885 diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
150886 index ca44c327bace..79641c4afee9 100644
150887 --- a/net/ceph/auth_x.c
150888 +++ b/net/ceph/auth_x.c
150889 @@ -526,7 +526,7 @@ static int ceph_x_build_request(struct ceph_auth_client *ac,
150890                 if (ret < 0)
150891                         return ret;
150893 -               auth->struct_v = 2;  /* nautilus+ */
150894 +               auth->struct_v = 3;  /* nautilus+ */
150895                 auth->key = 0;
150896                 for (u = (u64 *)enc_buf; u + 1 <= (u64 *)(enc_buf + ret); u++)
150897                         auth->key ^= *(__le64 *)u;
150898 diff --git a/net/ceph/decode.c b/net/ceph/decode.c
150899 index b44f7651be04..bc109a1a4616 100644
150900 --- a/net/ceph/decode.c
150901 +++ b/net/ceph/decode.c
150902 @@ -4,6 +4,7 @@
150903  #include <linux/inet.h>
150905  #include <linux/ceph/decode.h>
150906 +#include <linux/ceph/messenger.h>  /* for ceph_pr_addr() */
150908  static int
150909  ceph_decode_entity_addr_versioned(void **p, void *end,
150910 @@ -110,6 +111,7 @@ int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
150911         }
150913         ceph_decode_32_safe(p, end, addr_cnt, e_inval);
150914 +       dout("%s addr_cnt %d\n", __func__, addr_cnt);
150916         found = false;
150917         for (i = 0; i < addr_cnt; i++) {
150918 @@ -117,6 +119,7 @@ int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
150919                 if (ret)
150920                         return ret;
150922 +               dout("%s i %d addr %s\n", __func__, i, ceph_pr_addr(&tmp_addr));
150923                 if (tmp_addr.type == my_type) {
150924                         if (found) {
150925                                 pr_err("another match of type %d in addrvec\n",
150926 @@ -128,13 +131,18 @@ int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
150927                         found = true;
150928                 }
150929         }
150930 -       if (!found && addr_cnt != 0) {
150931 -               pr_err("no match of type %d in addrvec\n",
150932 -                      le32_to_cpu(my_type));
150933 -               return -ENOENT;
150934 -       }
150936 -       return 0;
150937 +       if (found)
150938 +               return 0;
150940 +       if (!addr_cnt)
150941 +               return 0;  /* normal -- e.g. unused OSD id/slot */
150943 +       if (addr_cnt == 1 && !memchr_inv(&tmp_addr, 0, sizeof(tmp_addr)))
150944 +               return 0;  /* weird but effectively the same as !addr_cnt */
150946 +       pr_err("no match of type %d in addrvec\n", le32_to_cpu(my_type));
150947 +       return -ENOENT;
150949  e_inval:
150950         return -EINVAL;
150951 diff --git a/net/core/dev.c b/net/core/dev.c
150952 index 1f79b9aa9a3f..70829c568645 100644
150953 --- a/net/core/dev.c
150954 +++ b/net/core/dev.c
150955 @@ -4672,10 +4672,10 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
150956         void *orig_data, *orig_data_end, *hard_start;
150957         struct netdev_rx_queue *rxqueue;
150958         u32 metalen, act = XDP_DROP;
150959 +       bool orig_bcast, orig_host;
150960         u32 mac_len, frame_sz;
150961         __be16 orig_eth_type;
150962         struct ethhdr *eth;
150963 -       bool orig_bcast;
150964         int off;
150966         /* Reinjected packets coming from act_mirred or similar should
150967 @@ -4722,6 +4722,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
150968         orig_data_end = xdp->data_end;
150969         orig_data = xdp->data;
150970         eth = (struct ethhdr *)xdp->data;
150971 +       orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr);
150972         orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
150973         orig_eth_type = eth->h_proto;
150975 @@ -4749,8 +4750,11 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
150976         /* check if XDP changed eth hdr such SKB needs update */
150977         eth = (struct ethhdr *)xdp->data;
150978         if ((orig_eth_type != eth->h_proto) ||
150979 +           (orig_host != ether_addr_equal_64bits(eth->h_dest,
150980 +                                                 skb->dev->dev_addr)) ||
150981             (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
150982                 __skb_push(skb, ETH_HLEN);
150983 +               skb->pkt_type = PACKET_HOST;
150984                 skb->protocol = eth_type_trans(skb, skb->dev);
150985         }
150987 @@ -5914,7 +5918,7 @@ static struct list_head *gro_list_prepare(struct napi_struct *napi,
150988         return head;
150991 -static void skb_gro_reset_offset(struct sk_buff *skb)
150992 +static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
150994         const struct skb_shared_info *pinfo = skb_shinfo(skb);
150995         const skb_frag_t *frag0 = &pinfo->frags[0];
150996 @@ -5925,7 +5929,7 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
150998         if (!skb_headlen(skb) && pinfo->nr_frags &&
150999             !PageHighMem(skb_frag_page(frag0)) &&
151000 -           (!NET_IP_ALIGN || !(skb_frag_off(frag0) & 3))) {
151001 +           (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
151002                 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
151003                 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
151004                                                     skb_frag_size(frag0),
151005 @@ -6143,7 +6147,7 @@ gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
151006         skb_mark_napi_id(skb, napi);
151007         trace_napi_gro_receive_entry(skb);
151009 -       skb_gro_reset_offset(skb);
151010 +       skb_gro_reset_offset(skb, 0);
151012         ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
151013         trace_napi_gro_receive_exit(ret);
151014 @@ -6232,7 +6236,7 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
151015         napi->skb = NULL;
151017         skb_reset_mac_header(skb);
151018 -       skb_gro_reset_offset(skb);
151019 +       skb_gro_reset_offset(skb, hlen);
151021         if (unlikely(skb_gro_header_hard(skb, hlen))) {
151022                 eth = skb_gro_header_slow(skb, hlen, 0);
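Worked example of the new alignment test, derived from the code above: napi_frags_skb() now passes hlen (the Ethernet header length here, 14 bytes), so for a frame starting at frag offset 0 the old test (0 & 3 == 0) accepted the frag0 fast path even though the IP header would land at byte 14, which is not 4-byte aligned; the new test ((0 + 14) & 3 != 0) makes such skbs take the slow path on architectures where NET_IP_ALIGN matters.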
151023 diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
151024 index a96a4f5de0ce..3f36b04d86a0 100644
151025 --- a/net/core/flow_dissector.c
151026 +++ b/net/core/flow_dissector.c
151027 @@ -828,8 +828,10 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
151028                 key_addrs = skb_flow_dissector_target(flow_dissector,
151029                                                       FLOW_DISSECTOR_KEY_IPV6_ADDRS,
151030                                                       target_container);
151031 -               memcpy(&key_addrs->v6addrs, &flow_keys->ipv6_src,
151032 -                      sizeof(key_addrs->v6addrs));
151033 +               memcpy(&key_addrs->v6addrs.src, &flow_keys->ipv6_src,
151034 +                      sizeof(key_addrs->v6addrs.src));
151035 +               memcpy(&key_addrs->v6addrs.dst, &flow_keys->ipv6_dst,
151036 +                      sizeof(key_addrs->v6addrs.dst));
151037                 key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
151038         }
151040 diff --git a/net/core/page_pool.c b/net/core/page_pool.c
151041 index ad8b0707af04..f014fd8c19a6 100644
151042 --- a/net/core/page_pool.c
151043 +++ b/net/core/page_pool.c
151044 @@ -174,8 +174,10 @@ static void page_pool_dma_sync_for_device(struct page_pool *pool,
151045                                           struct page *page,
151046                                           unsigned int dma_sync_size)
151048 +       dma_addr_t dma_addr = page_pool_get_dma_addr(page);
151050         dma_sync_size = min(dma_sync_size, pool->p.max_len);
151051 -       dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
151052 +       dma_sync_single_range_for_device(pool->p.dev, dma_addr,
151053                                          pool->p.offset, dma_sync_size,
151054                                          pool->p.dma_dir);
151056 @@ -226,7 +228,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
151057                 put_page(page);
151058                 return NULL;
151059         }
151060 -       page->dma_addr = dma;
151061 +       page_pool_set_dma_addr(page, dma);
151063         if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
151064                 page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
151065 @@ -294,13 +296,13 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
151066                  */
151067                 goto skip_dma_unmap;
151069 -       dma = page->dma_addr;
151070 +       dma = page_pool_get_dma_addr(page);
151072 -       /* When page is unmapped, it cannot be returned our pool */
151073 +       /* When page is unmapped, it cannot be returned to our pool */
151074         dma_unmap_page_attrs(pool->p.dev, dma,
151075                              PAGE_SIZE << pool->p.order, pool->p.dma_dir,
151076                              DMA_ATTR_SKIP_CPU_SYNC);
151077 -       page->dma_addr = 0;
151078 +       page_pool_set_dma_addr(page, 0);
151079  skip_dma_unmap:
151080         /* This may be the last page returned, releasing the pool, so
151081          * it is not safe to reference pool afterwards.
151082 diff --git a/net/core/pktgen.c b/net/core/pktgen.c
151083 index 3fba429f1f57..9a3a9a6eb837 100644
151084 --- a/net/core/pktgen.c
151085 +++ b/net/core/pktgen.c
151086 @@ -1894,7 +1894,7 @@ static void pktgen_mark_device(const struct pktgen_net *pn, const char *ifname)
151087                 mutex_unlock(&pktgen_thread_lock);
151088                 pr_debug("%s: waiting for %s to disappear....\n",
151089                          __func__, ifname);
151090 -               schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try));
151091 +               schedule_msec_hrtimeout_interruptible(msec_per_try);
151092                 mutex_lock(&pktgen_thread_lock);
151094                 if (++i >= max_tries) {
151095 diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
151096 index 771688e1b0da..2603966da904 100644
151097 --- a/net/ethtool/ioctl.c
151098 +++ b/net/ethtool/ioctl.c
151099 @@ -489,7 +489,7 @@ store_link_ksettings_for_user(void __user *to,
151101         struct ethtool_link_usettings link_usettings;
151103 -       memcpy(&link_usettings.base, &from->base, sizeof(link_usettings));
151104 +       memcpy(&link_usettings, from, sizeof(link_usettings));
151105         bitmap_to_arr32(link_usettings.link_modes.supported,
151106                         from->link_modes.supported,
151107                         __ETHTOOL_LINK_MODE_MASK_NBITS);
151108 diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
151109 index 50d3c8896f91..25a55086d2b6 100644
151110 --- a/net/ethtool/netlink.c
151111 +++ b/net/ethtool/netlink.c
151112 @@ -384,7 +384,8 @@ static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev,
151113         int ret;
151115         ehdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
151116 -                          &ethtool_genl_family, 0, ctx->ops->reply_cmd);
151117 +                          &ethtool_genl_family, NLM_F_MULTI,
151118 +                          ctx->ops->reply_cmd);
151119         if (!ehdr)
151120                 return -EMSGSIZE;
151122 diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
151123 index b218e4594009..6852e9bccf5b 100644
151124 --- a/net/hsr/hsr_forward.c
151125 +++ b/net/hsr/hsr_forward.c
151126 @@ -520,6 +520,10 @@ static int fill_frame_info(struct hsr_frame_info *frame,
151127         struct ethhdr *ethhdr;
151128         __be16 proto;
151130 +       /* Check if skb contains hsr_ethhdr */
151131 +       if (skb->mac_len < sizeof(struct hsr_ethhdr))
151132 +               return -EINVAL;
151134         memset(frame, 0, sizeof(*frame));
151135         frame->is_supervision = is_supervision_frame(port->hsr, skb);
151136         frame->node_src = hsr_get_node(port, &hsr->node_db, skb,
151137 diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
151138 index 87983e70f03f..a833a7a67ce7 100644
151139 --- a/net/ipv4/Kconfig
151140 +++ b/net/ipv4/Kconfig
151141 @@ -669,6 +669,24 @@ config TCP_CONG_BBR
151142           AQM schemes that do not provide a delay signal. It requires the fq
151143           ("Fair Queue") pacing packet scheduler.
151145 +config TCP_CONG_BBR2
151146 +       tristate "BBR2 TCP"
151147 +       default n
151148 +       help
151150 +       BBR2 TCP congestion control is a model-based congestion control
151151 +       algorithm that aims to maximize network utilization, keep queues and
151152 +       retransmit rates low, and coexist with Reno/CUBIC in
151153 +       common scenarios. It builds an explicit model of the network path.  It
151154 +       tolerates a targeted degree of random packet loss and delay that are
151155 +       unrelated to congestion. It can operate over LAN, WAN, cellular, wifi,
151156 +       or cable modem links, and can use DCTCP-L4S-style ECN signals.  It can
151157 +       coexist with flows that use loss-based congestion control, and can
151158 +       operate with shallow buffers, deep buffers, bufferbloat, policers, or
151159 +       AQM schemes that do not provide a delay signal. It requires pacing,
151160 +       using either TCP internal pacing or the fq ("Fair Queue") pacing packet
151161 +       scheduler.
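For context, a hypothetical selection of the new algorithm (option names taken from this hunk, usage illustrative): building with CONFIG_TCP_CONG_BBR2=y and CONFIG_DEFAULT_BBR2=y resolves CONFIG_DEFAULT_TCP_CONG to "bbr2" below, and a kernel built with the option can also switch per host at runtime via sysctl net.ipv4.tcp_congestion_control=bbr2.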
151163  choice
151164         prompt "Default TCP congestion control"
151165         default DEFAULT_CUBIC
151166 @@ -706,6 +724,9 @@ choice
151167         config DEFAULT_BBR
151168                 bool "BBR" if TCP_CONG_BBR=y
151170 +       config DEFAULT_BBR2
151171 +               bool "BBR2" if TCP_CONG_BBR2=y
151173         config DEFAULT_RENO
151174                 bool "Reno"
151175  endchoice
151176 @@ -730,6 +751,7 @@ config DEFAULT_TCP_CONG
151177         default "dctcp" if DEFAULT_DCTCP
151178         default "cdg" if DEFAULT_CDG
151179         default "bbr" if DEFAULT_BBR
151180 +       default "bbr2" if DEFAULT_BBR2
151181         default "cubic"
151183  config TCP_MD5SIG
151184 diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
151185 index 5b77a46885b9..8c5779dba462 100644
151186 --- a/net/ipv4/Makefile
151187 +++ b/net/ipv4/Makefile
151188 @@ -46,6 +46,7 @@ obj-$(CONFIG_INET_TCP_DIAG) += tcp_diag.o
151189  obj-$(CONFIG_INET_UDP_DIAG) += udp_diag.o
151190  obj-$(CONFIG_INET_RAW_DIAG) += raw_diag.o
151191  obj-$(CONFIG_TCP_CONG_BBR) += tcp_bbr.o
151192 +obj-$(CONFIG_TCP_CONG_BBR2) += tcp_bbr2.o
151193  obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o
151194  obj-$(CONFIG_TCP_CONG_CDG) += tcp_cdg.o
151195  obj-$(CONFIG_TCP_CONG_CUBIC) += tcp_cubic.o
151196 diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c
151197 index d520e61649c8..22129c1c56a2 100644
151198 --- a/net/ipv4/bpf_tcp_ca.c
151199 +++ b/net/ipv4/bpf_tcp_ca.c
151200 @@ -16,7 +16,7 @@ static u32 optional_ops[] = {
151201         offsetof(struct tcp_congestion_ops, cwnd_event),
151202         offsetof(struct tcp_congestion_ops, in_ack_event),
151203         offsetof(struct tcp_congestion_ops, pkts_acked),
151204 -       offsetof(struct tcp_congestion_ops, min_tso_segs),
151205 +       offsetof(struct tcp_congestion_ops, tso_segs),
151206         offsetof(struct tcp_congestion_ops, sndbuf_expand),
151207         offsetof(struct tcp_congestion_ops, cong_control),
151209 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
151210 index bba150fdd265..d635b4f32d34 100644
151211 --- a/net/ipv4/route.c
151212 +++ b/net/ipv4/route.c
151213 @@ -66,6 +66,7 @@
151214  #include <linux/types.h>
151215  #include <linux/kernel.h>
151216  #include <linux/mm.h>
151217 +#include <linux/memblock.h>
151218  #include <linux/string.h>
151219  #include <linux/socket.h>
151220  #include <linux/sockios.h>
151221 @@ -478,8 +479,10 @@ static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
151222         __ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
151225 -#define IP_IDENTS_SZ 2048u
151227 +/* Hash tables of size 2048..262144 depending on RAM size.
151228 + * Each bucket uses 8 bytes.
151229 + */
151230 +static u32 ip_idents_mask __read_mostly;
151231  static atomic_t *ip_idents __read_mostly;
151232  static u32 *ip_tstamps __read_mostly;
151234 @@ -489,12 +492,16 @@ static u32 *ip_tstamps __read_mostly;
151235   */
151236  u32 ip_idents_reserve(u32 hash, int segs)
151238 -       u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
151239 -       atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
151240 -       u32 old = READ_ONCE(*p_tstamp);
151241 -       u32 now = (u32)jiffies;
151242 +       u32 bucket, old, now = (u32)jiffies;
151243 +       atomic_t *p_id;
151244 +       u32 *p_tstamp;
151245         u32 delta = 0;
151247 +       bucket = hash & ip_idents_mask;
151248 +       p_tstamp = ip_tstamps + bucket;
151249 +       p_id = ip_idents + bucket;
151250 +       old = READ_ONCE(*p_tstamp);
151252         if (old != now && cmpxchg(p_tstamp, old, now) == old)
151253                 delta = prandom_u32_max(now - old);
151255 @@ -3553,18 +3560,25 @@ struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
151257  int __init ip_rt_init(void)
151259 +       void *idents_hash;
151260         int cpu;
151262 -       ip_idents = kmalloc_array(IP_IDENTS_SZ, sizeof(*ip_idents),
151263 -                                 GFP_KERNEL);
151264 -       if (!ip_idents)
151265 -               panic("IP: failed to allocate ip_idents\n");
151266 +       /* For modern hosts, this will use 2 MB of memory */
151267 +       idents_hash = alloc_large_system_hash("IP idents",
151268 +                                             sizeof(*ip_idents) + sizeof(*ip_tstamps),
151269 +                                             0,
151270 +                                             16, /* one bucket per 64 KB */
151271 +                                             HASH_ZERO,
151272 +                                             NULL,
151273 +                                             &ip_idents_mask,
151274 +                                             2048,
151275 +                                             256*1024);
151277 +       ip_idents = idents_hash;
151279 -       prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
151280 +       prandom_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));
151282 -       ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
151283 -       if (!ip_tstamps)
151284 -               panic("IP: failed to allocate ip_tstamps\n");
151285 +       ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents);
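Worked sizing example from the bounds above: at 8 bytes per bucket (a 4-byte atomic_t ident plus a 4-byte timestamp), the old fixed IP_IDENTS_SZ of 2048 used 16 KB; with one bucket per 64 KB of RAM, a 16 GB host hits the 262144-bucket ceiling, i.e. the 2 MB mentioned in the comment, while machines under roughly 128 MB stay at the 2048-bucket floor.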
151287         for_each_possible_cpu(cpu) {
151288                 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
151289 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
151290 index de7cc8445ac0..521f310f2ac1 100644
151291 --- a/net/ipv4/tcp.c
151292 +++ b/net/ipv4/tcp.c
151293 @@ -3033,6 +3033,7 @@ int tcp_disconnect(struct sock *sk, int flags)
151294         tp->rx_opt.dsack = 0;
151295         tp->rx_opt.num_sacks = 0;
151296         tp->rcv_ooopack = 0;
151297 +       tp->fast_ack_mode = 0;
151300         /* Clean up fastopen related fields */
151301 diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
151302 index 6ea3dc2e4219..8ef512fefe25 100644
151303 --- a/net/ipv4/tcp_bbr.c
151304 +++ b/net/ipv4/tcp_bbr.c
151305 @@ -292,26 +292,40 @@ static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
151306                 sk->sk_pacing_rate = rate;
151309 -/* override sysctl_tcp_min_tso_segs */
151310  static u32 bbr_min_tso_segs(struct sock *sk)
151312         return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
151315 +/* Return the number of segments BBR would like in a TSO/GSO skb, given
151316 + * a particular max gso size as a constraint.
151317 + */
151318 +static u32 bbr_tso_segs_generic(struct sock *sk, unsigned int mss_now,
151319 +                               u32 gso_max_size)
151321 +       u32 segs;
151322 +       u64 bytes;
151324 +       /* Budget a TSO/GSO burst size allowance based on bw (pacing_rate). */
151325 +       bytes = sk->sk_pacing_rate >> sk->sk_pacing_shift;
151327 +       bytes = min_t(u32, bytes, gso_max_size - 1 - MAX_TCP_HEADER);
151328 +       segs = max_t(u32, bytes / mss_now, bbr_min_tso_segs(sk));
151329 +       return segs;
151332 +/* Custom tcp_tso_autosize() for BBR, used at transmit time to cap skb size. */
151333 +static u32 bbr_tso_segs(struct sock *sk, unsigned int mss_now)
151335 +       return bbr_tso_segs_generic(sk, mss_now, sk->sk_gso_max_size);
151338 +/* Like bbr_tso_segs(), using mss_cache, ignoring driver's sk_gso_max_size. */
151339  static u32 bbr_tso_segs_goal(struct sock *sk)
151341         struct tcp_sock *tp = tcp_sk(sk);
151342 -       u32 segs, bytes;
151344 -       /* Sort of tcp_tso_autosize() but ignoring
151345 -        * driver provided sk_gso_max_size.
151346 -        */
151347 -       bytes = min_t(unsigned long,
151348 -                     sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
151349 -                     GSO_MAX_SIZE - 1 - MAX_TCP_HEADER);
151350 -       segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));
151352 -       return min(segs, 0x7FU);
151353 +       return bbr_tso_segs_generic(sk, tp->mss_cache, GSO_MAX_SIZE);
151356  /* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
151357 @@ -1147,7 +1161,7 @@ static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
151358         .undo_cwnd      = bbr_undo_cwnd,
151359         .cwnd_event     = bbr_cwnd_event,
151360         .ssthresh       = bbr_ssthresh,
151361 -       .min_tso_segs   = bbr_min_tso_segs,
151362 +       .tso_segs       = bbr_tso_segs,
151363         .get_info       = bbr_get_info,
151364         .set_state      = bbr_set_state,
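/*
 * A standalone sketch of the TSO sizing rule the hunk above adds,
 * assuming stand-in values for constants the kernel supplies
 * (sk_pacing_shift = 10, GSO_MAX_SIZE = 65536, MAX_TCP_HEADER ~ 320);
 * a rough model, not the kernel's exact arithmetic.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t tso_segs(uint64_t pacing_Bps, uint32_t mss,
                         uint32_t gso_max, uint32_t min_segs)
{
        uint64_t bytes = pacing_Bps >> 10;      /* ~1 ms worth of data */
        uint64_t cap = gso_max - 1 - 320;       /* leave room for headers */

        if (bytes > cap)
                bytes = cap;
        return bytes / mss > min_segs ? (uint32_t)(bytes / mss) : min_segs;
}

int main(void)
{
        /* 100 Mbit/s pacing (12.5 MB/s), MSS 1448, floor of 2 segments */
        printf("%u segs\n", tso_segs(12500000, 1448, 65536, 2));
        return 0;
}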
151366 diff --git a/net/ipv4/tcp_bbr2.c b/net/ipv4/tcp_bbr2.c
151367 new file mode 100644
151368 index 000000000000..5510adc92bbb
151369 --- /dev/null
151370 +++ b/net/ipv4/tcp_bbr2.c
151371 @@ -0,0 +1,2671 @@
151372 +/* BBR (Bottleneck Bandwidth and RTT) congestion control, v2
151374 + * BBRv2 is a model-based congestion control algorithm that aims for low
151375 + * queues, low loss, and (bounded) Reno/CUBIC coexistence. To maintain a model
151376 + * of the network path, it uses measurements of bandwidth and RTT, as well as
151377 + * (if they occur) packet loss and/or DCTCP/L4S-style ECN signals.  Note that
151378 + * although it can use ECN or loss signals explicitly, it does not require
151379 + * either; it can bound its in-flight data based on its estimate of the BDP.
151381 + * The model has both higher and lower bounds for the operating range:
151382 + *   lo: bw_lo, inflight_lo: conservative short-term lower bound
151383 + *   hi: bw_hi, inflight_hi: robust long-term upper bound
151384 + * The bandwidth-probing time scale is (a) extended dynamically based on
151385 + * estimated BDP to improve coexistence with Reno/CUBIC; (b) bounded by
151386 + * an interactive wall-clock time-scale to be more scalable and responsive
151387 + * than Reno and CUBIC.
151389 + * Here is a state transition diagram for BBR:
151391 + *             |
151392 + *             V
151393 + *    +---> STARTUP  ----+
151394 + *    |        |         |
151395 + *    |        V         |
151396 + *    |      DRAIN   ----+
151397 + *    |        |         |
151398 + *    |        V         |
151399 + *    +---> PROBE_BW ----+
151400 + *    |      ^    |      |
151401 + *    |      |    |      |
151402 + *    |      +----+      |
151403 + *    |                  |
151404 + *    +---- PROBE_RTT <--+
151406 + * A BBR flow starts in STARTUP, and ramps up its sending rate quickly.
151407 + * When it estimates the pipe is full, it enters DRAIN to drain the queue.
151408 + * In steady state a BBR flow only uses PROBE_BW and PROBE_RTT.
151409 + * A long-lived BBR flow spends the vast majority of its time remaining
151410 + * (repeatedly) in PROBE_BW, fully probing and utilizing the pipe's bandwidth
151411 + * in a fair manner, with a small, bounded queue. *If* a flow has been
151412 + * continuously sending for the entire min_rtt window, and hasn't seen an RTT
151413 + * sample that matches or decreases its min_rtt estimate for 10 seconds, then
151414 + * it briefly enters PROBE_RTT to cut inflight to a minimum value to re-probe
151415 + * the path's two-way propagation delay (min_rtt). When exiting PROBE_RTT, if
151416 + * we estimated that we reached the full bw of the pipe then we enter PROBE_BW;
151417 + * otherwise we enter STARTUP to try to fill the pipe.
151419 + * BBR is described in detail in:
151420 + *   "BBR: Congestion-Based Congestion Control",
151421 + *   Neal Cardwell, Yuchung Cheng, C. Stephen Gunn, Soheil Hassas Yeganeh,
151422 + *   Van Jacobson. ACM Queue, Vol. 14 No. 5, September-October 2016.
151424 + * There is a public e-mail list for discussing BBR development and testing:
151425 + *   https://groups.google.com/forum/#!forum/bbr-dev
151427 + * NOTE: BBR might be used with the fq qdisc ("man tc-fq") with pacing enabled,
151428 + * otherwise the TCP stack falls back to internal pacing, using one
151429 + * high-resolution timer per TCP socket, which may use more resources.
151430 + */
151431 +#include <linux/module.h>
151432 +#include <net/tcp.h>
151433 +#include <linux/inet_diag.h>
151434 +#include <linux/inet.h>
151435 +#include <linux/random.h>
151437 +#include "tcp_dctcp.h"
151439 +/* Scale factor for rate in pkt/uSec unit to avoid truncation in bandwidth
151440 + * estimation. The rate unit ~= (1500 bytes / 1 usec / 2^24) ~= 715 bps.
151441 + * This handles bandwidths from 0.06pps (715bps) to 256Mpps (3Tbps) in a u32.
151442 + * Since the minimum window is >=4 packets, the lower bound isn't
151443 + * an issue. The upper bound isn't an issue with existing technologies.
151444 + */
151445 +#define BW_SCALE 24
151446 +#define BW_UNIT (1 << BW_SCALE)
151448 +#define BBR_SCALE 8    /* scaling factor for fractions in BBR (e.g. gains) */
151449 +#define BBR_UNIT (1 << BBR_SCALE)
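/*
 * A quick numeric check of the fixed-point units above (standalone
 * sketch, not kernel code): one bw unit is ~715 bps for 1500-byte
 * packets, and gains are fractions of BBR_UNIT (a 5/4 gain is 320/256).
 */
#include <stdio.h>

int main(void)
{
        double unit_bps = 1500.0 * 8 * 1e6 / (1 << 24); /* 1 pkt/usec >> 24 */

        printf("1 bw unit ~= %.0f bps\n", unit_bps);    /* ~715 */
        printf("5/4 gain   = %d/256\n", 256 * 5 / 4);   /* 320/256 */
        return 0;
}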
151451 +#define FLAG_DEBUG_VERBOSE     0x1     /* Verbose debugging messages */
151452 +#define FLAG_DEBUG_LOOPBACK    0x2     /* Do NOT skip loopback addr */
151454 +#define CYCLE_LEN              8       /* number of phases in a pacing gain cycle */
151456 +/* BBR has the following modes for deciding how fast to send: */
151457 +enum bbr_mode {
151458 +       BBR_STARTUP,    /* ramp up sending rate rapidly to fill pipe */
151459 +       BBR_DRAIN,      /* drain any queue created during startup */
151460 +       BBR_PROBE_BW,   /* discover, share bw: pace around estimated bw */
151461 +       BBR_PROBE_RTT,  /* cut inflight to min to probe min_rtt */
151464 +/* How does the incoming ACK stream relate to our bandwidth probing? */
151465 +enum bbr_ack_phase {
151466 +       BBR_ACKS_INIT,            /* not probing; not getting probe feedback */
151467 +       BBR_ACKS_REFILLING,       /* sending at est. bw to fill pipe */
151468 +       BBR_ACKS_PROBE_STARTING,  /* inflight rising to probe bw */
151469 +       BBR_ACKS_PROBE_FEEDBACK,  /* getting feedback from bw probing */
151470 +       BBR_ACKS_PROBE_STOPPING,  /* stopped probing; still getting feedback */
151473 +/* BBR congestion control block */
151474 +struct bbr {
151475 +       u32     min_rtt_us;             /* min RTT in min_rtt_win_sec window */
151476 +       u32     min_rtt_stamp;          /* timestamp of min_rtt_us */
151477 +       u32     probe_rtt_done_stamp;   /* end time for BBR_PROBE_RTT mode */
151478 +       u32     probe_rtt_min_us;       /* min RTT in bbr_probe_rtt_win_ms window */
151479 +       u32     probe_rtt_min_stamp;    /* timestamp of probe_rtt_min_us */
151480 +       u32     next_rtt_delivered; /* scb->tx.delivered at end of round */
151481 +       u32     prior_rcv_nxt;  /* tp->rcv_nxt when CE state last changed */
151482 +       u64     cycle_mstamp;        /* time of this cycle phase start */
151483 +       u32     mode:3,              /* current bbr_mode in state machine */
151484 +               prev_ca_state:3,     /* CA state on previous ACK */
151485 +               packet_conservation:1,  /* use packet conservation? */
151486 +               round_start:1,       /* start of packet-timed tx->ack round? */
151487 +               ce_state:1,          /* If most recent data has CE bit set */
151488 +               bw_probe_up_rounds:5,   /* cwnd-limited rounds in PROBE_UP */
151489 +               try_fast_path:1,        /* can we take fast path? */
151490 +               unused2:11,
151491 +               idle_restart:1,      /* restarting after idle? */
151492 +               probe_rtt_round_done:1,  /* a BBR_PROBE_RTT round at 4 pkts? */
151493 +               cycle_idx:3,    /* current index in pacing_gain cycle array */
151494 +               has_seen_rtt:1;      /* have we seen an RTT sample yet? */
151495 +       u32     pacing_gain:11, /* current gain for setting pacing rate */
151496 +               cwnd_gain:11,   /* current gain for setting cwnd */
151497 +               full_bw_reached:1,   /* reached full bw in Startup? */
151498 +               full_bw_cnt:2,  /* number of rounds without large bw gains */
151499 +               init_cwnd:7;    /* initial cwnd */
151500 +       u32     prior_cwnd;     /* prior cwnd upon entering loss recovery */
151501 +       u32     full_bw;        /* recent bw, to estimate if pipe is full */
151503 +       /* For tracking ACK aggregation: */
151504 +       u64     ack_epoch_mstamp;       /* start of ACK sampling epoch */
151505 +       u16     extra_acked[2];         /* max excess data ACKed in epoch */
151506 +       u32     ack_epoch_acked:20,     /* packets (S)ACKed in sampling epoch */
151507 +               extra_acked_win_rtts:5, /* age of extra_acked, in round trips */
151508 +               extra_acked_win_idx:1,  /* current index in extra_acked array */
151509 +       /* BBR v2 state: */
151510 +               unused1:2,
151511 +               startup_ecn_rounds:2,   /* consecutive hi ECN STARTUP rounds */
151512 +               loss_in_cycle:1,        /* packet loss in this cycle? */
151513 +               ecn_in_cycle:1;         /* ECN in this cycle? */
151514 +       u32     loss_round_delivered; /* scb->tx.delivered ending loss round */
151515 +       u32     undo_bw_lo;          /* bw_lo before latest losses */
151516 +       u32     undo_inflight_lo;    /* inflight_lo before latest losses */
151517 +       u32     undo_inflight_hi;    /* inflight_hi before latest losses */
151518 +       u32     bw_latest;       /* max delivered bw in last round trip */
151519 +       u32     bw_lo;           /* lower bound on sending bandwidth */
151520 +       u32     bw_hi[2];        /* upper bound of sending bandwidth range */
151521 +       u32     inflight_latest; /* max delivered data in last round trip */
151522 +       u32     inflight_lo;     /* lower bound of inflight data range */
151523 +       u32     inflight_hi;     /* upper bound of inflight data range */
151524 +       u32     bw_probe_up_cnt; /* packets delivered per inflight_hi incr */
151525 +       u32     bw_probe_up_acks;  /* packets (S)ACKed since inflight_hi incr */
151526 +       u32     probe_wait_us;   /* PROBE_DOWN until next clock-driven probe */
151527 +       u32     ecn_eligible:1, /* sender can use ECN (RTT, handshake)? */
151528 +               ecn_alpha:9,    /* EWMA delivered_ce/delivered; 0..256 */
151529 +               bw_probe_samples:1,    /* rate samples reflect bw probing? */
151530 +               prev_probe_too_high:1, /* did last PROBE_UP go too high? */
151531 +               stopped_risky_probe:1, /* last PROBE_UP stopped due to risk? */
151532 +               rounds_since_probe:8,  /* packet-timed rounds since probed bw */
151533 +               loss_round_start:1,    /* loss_round_delivered round trip? */
151534 +               loss_in_round:1,       /* loss marked in this round trip? */
151535 +               ecn_in_round:1,        /* ECN marked in this round trip? */
151536 +               ack_phase:3,           /* bbr_ack_phase: meaning of ACKs */
151537 +               loss_events_in_round:4,/* losses in STARTUP round */
151538 +               initialized:1;         /* has bbr_init() been called? */
151539 +       u32     alpha_last_delivered;    /* tp->delivered    at alpha update */
151540 +       u32     alpha_last_delivered_ce; /* tp->delivered_ce at alpha update */
151542 +       /* Params configurable using setsockopt. Refer to the corresponding
151543 +        * module param for a detailed description of each param.
151544 +        */
151545 +       struct bbr_params {
151546 +               u32     high_gain:11,           /* max allowed value: 2047 */
151547 +                       drain_gain:10,          /* max allowed value: 1023 */
151548 +                       cwnd_gain:11;           /* max allowed value: 2047 */
151549 +               u32     cwnd_min_target:4,      /* max allowed value: 15 */
151550 +                       min_rtt_win_sec:5,      /* max allowed value: 31 */
151551 +                       probe_rtt_mode_ms:9,    /* max allowed value: 511 */
151552 +                       full_bw_cnt:3,          /* max allowed value: 7 */
151553 +                       cwnd_tso_budget:1,      /* allowed values: {0, 1} */
151554 +                       unused3:6,
151555 +                       drain_to_target:1,      /* boolean */
151556 +                       precise_ece_ack:1,      /* boolean */
151557 +                       extra_acked_in_startup:1, /* allowed values: {0, 1} */
151558 +                       fast_path:1;            /* boolean */
151559 +               u32     full_bw_thresh:10,      /* max allowed value: 1023 */
151560 +                       startup_cwnd_gain:11,   /* max allowed value: 2047 */
151561 +                       bw_probe_pif_gain:9,    /* max allowed value: 511 */
151562 +                       usage_based_cwnd:1,     /* boolean */
151563 +                       unused2:1;
151564 +               u16     probe_rtt_win_ms:14,    /* max allowed value: 16383 */
151565 +                       refill_add_inc:2;       /* max allowed value: 3 */
151566 +               u16     extra_acked_gain:11,    /* max allowed value: 2047 */
151567 +                       extra_acked_win_rtts:5; /* max allowed value: 31 */
151568 +               u16     pacing_gain[CYCLE_LEN]; /* max allowed value: 1023 */
151569 +               /* Mostly BBR v2 parameters below here: */
151570 +               u32     ecn_alpha_gain:8,       /* max allowed value: 255 */
151571 +                       ecn_factor:8,           /* max allowed value: 255 */
151572 +                       ecn_thresh:8,           /* max allowed value: 255 */
151573 +                       beta:8;                 /* max allowed value: 255 */
151574 +               u32     ecn_max_rtt_us:19,      /* max allowed value: 524287 */
151575 +                       bw_probe_reno_gain:9,   /* max allowed value: 511 */
151576 +                       full_loss_cnt:4;        /* max allowed value: 15 */
151577 +               u32     probe_rtt_cwnd_gain:8,  /* max allowed value: 255 */
151578 +                       inflight_headroom:8,    /* max allowed value: 255 */
151579 +                       loss_thresh:8,          /* max allowed value: 255 */
151580 +                       bw_probe_max_rounds:8;  /* max allowed value: 255 */
151581 +               u32     bw_probe_rand_rounds:4, /* max allowed value: 15 */
151582 +                       bw_probe_base_us:26,    /* usecs: 0..2^26-1 (67 secs) */
151583 +                       full_ecn_cnt:2;         /* max allowed value: 3 */
151584 +               u32     bw_probe_rand_us:26,    /* usecs: 0..2^26-1 (67 secs) */
151585 +                       undo:1,                 /* boolean */
151586 +                       tso_rtt_shift:4,        /* max allowed value: 15 */
151587 +                       unused5:1;
151588 +               u32     ecn_reprobe_gain:9,     /* max allowed value: 511 */
151589 +                       unused1:14,
151590 +                       ecn_alpha_init:9;       /* max allowed value: 256 */
151591 +       } params;
151593 +       struct {
151594 +               u32     snd_isn; /* Initial sequence number */
151595 +               u32     rs_bw;   /* last valid rate sample bw */
151596 +               u32     target_cwnd; /* target cwnd, based on BDP */
151597 +               u8      undo:1,  /* Undo event happened but not yet logged */
151598 +                       unused:7;
151599 +               char    event;   /* single-letter event debug codes */
151600 +               u16     unused2;
151601 +       } debug;
151604 +struct bbr_context {
151605 +       u32 sample_bw;
151606 +       u32 target_cwnd;
151607 +       u32 log:1;
151610 +/* Window length of min_rtt filter (in sec). Max allowed value is 31 (0x1F) */
151611 +static u32 bbr_min_rtt_win_sec = 10;
151612 +/* Minimum time (in ms) spent at bbr_cwnd_min_target in BBR_PROBE_RTT mode.
151613 + * Max allowed value is 511 (0x1FF).
151614 + */
151615 +static u32 bbr_probe_rtt_mode_ms = 200;
151616 +/* Window length of probe_rtt_min_us filter (in ms), and consequently the
151617 + * typical interval between PROBE_RTT mode entries.
151618 + * Note that bbr_probe_rtt_win_ms must be <= bbr_min_rtt_win_sec * MSEC_PER_SEC
151619 + */
151620 +static u32 bbr_probe_rtt_win_ms = 5000;
151621 +/* Skip TSO below the following bandwidth (bits/sec): */
151622 +static int bbr_min_tso_rate = 1200000;
151624 +/* Use min_rtt to help adapt TSO burst size, with smaller min_rtt resulting
151625 + * in bigger TSO bursts. By default we cut the RTT-based allowance in half
151626 + * for every 2^9 usec (aka 512 us) of RTT, so that the RTT-based allowance
151627 + * is below 1500 bytes after 6 * ~500 usec = 3ms.
151628 + */
151629 +static u32 bbr_tso_rtt_shift = 9;  /* halve allowance per 2^9 usecs, 512us */
151631 +/* Select cwnd TSO budget approach:
151632 + *  0: padding
151633 + *  1: flooring
151634 + */
151635 +static uint bbr_cwnd_tso_budget = 1;
151637 +/* Pace at ~1% below estimated bw, on average, to reduce queue at bottleneck.
151638 + * In order to help drive the network toward lower queues and low latency while
151639 + * maintaining high utilization, the average pacing rate aims to be slightly
151640 + * lower than the estimated bandwidth. This is an important aspect of the
151641 + * design.
151642 + */
151643 +static const int bbr_pacing_margin_percent = 1;
151645 +/* We use a high_gain value of 2/ln(2) because it's the smallest pacing gain
151646 + * that will allow a smoothly increasing pacing rate that will double each RTT
151647 + * and send the same number of packets per RTT that an un-paced, slow-starting
151648 + * Reno or CUBIC flow would. Max allowed value is 2047 (0x7FF).
151649 + */
151650 +static int bbr_high_gain  = BBR_UNIT * 2885 / 1000 + 1;
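/*
 * A one-line check of the constant above (standalone sketch): 2/ln(2)
 * is ~2.885, and BBR_UNIT * 2885 / 1000 + 1 rounds the exact value up
 * to 739/256. Build with -lm.
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
        printf("high_gain = %d\n", 256 * 2885 / 1000 + 1);  /* 739 */
        printf("exact     = %.2f\n", 2.0 / log(2.0) * 256); /* ~738.66 */
        return 0;
}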
151651 +/* The gain for deriving startup cwnd. Max allowed value is 2047 (0x7FF). */
151652 +static int bbr_startup_cwnd_gain  = BBR_UNIT * 2885 / 1000 + 1;
151653 +/* The pacing gain of 1/high_gain in BBR_DRAIN is calculated to typically drain
151654 + * the queue created in BBR_STARTUP in a single round. Max allowed value
151655 + * is 1023 (0x3FF).
151656 + */
151657 +static int bbr_drain_gain = BBR_UNIT * 1000 / 2885;
151658 +/* The gain for deriving steady-state cwnd tolerates delayed/stretched ACKs.
151659 + * Max allowed value is 2047 (0x7FF).
151660 + */
151661 +static int bbr_cwnd_gain  = BBR_UNIT * 2;
151662 +/* The pacing_gain values for the PROBE_BW gain cycle, to discover/share bw.
151663 + * Max allowed value for each element is 1023 (0x3FF).
151664 + */
151665 +enum bbr_pacing_gain_phase {
151666 +       BBR_BW_PROBE_UP         = 0,  /* push up inflight to probe for bw/vol */
151667 +       BBR_BW_PROBE_DOWN       = 1,  /* drain excess inflight from the queue */
151668 +       BBR_BW_PROBE_CRUISE     = 2,  /* use pipe, w/ headroom in queue/pipe */
151669 +       BBR_BW_PROBE_REFILL     = 3,  /* v2: refill the pipe again to 100% */
151671 +static int bbr_pacing_gain[] = {
151672 +       BBR_UNIT * 5 / 4,       /* probe for more available bw */
151673 +       BBR_UNIT * 3 / 4,       /* drain queue and/or yield bw to other flows */
151674 +       BBR_UNIT, BBR_UNIT, BBR_UNIT,   /* cruise at 1.0*bw to utilize pipe, */
151675 +       BBR_UNIT, BBR_UNIT, BBR_UNIT    /* without creating excess queue... */
151678 +/* Try to keep at least this many packets in flight, if things go smoothly. For
151679 + * smooth functioning, a sliding window protocol ACKing every other packet
151680 + * needs at least 4 packets in flight. Max allowed value is 15 (0xF).
151681 + */
151682 +static u32 bbr_cwnd_min_target = 4;
151684 +/* Cwnd to BDP proportion in PROBE_RTT mode scaled by BBR_UNIT. Default: 50%.
151685 + * Use 0 to disable. Max allowed value is 255.
151686 + */
151687 +static u32 bbr_probe_rtt_cwnd_gain = BBR_UNIT * 1 / 2;
151689 +/* To estimate if BBR_STARTUP mode (i.e. high_gain) has filled pipe... */
151690 +/* If bw has increased significantly (1.25x), there may be more bw available.
151691 + * Max allowed value is 1023 (0x3FF).
151692 + */
151693 +static u32 bbr_full_bw_thresh = BBR_UNIT * 5 / 4;
151694 +/* But after 3 rounds w/o significant bw growth, estimate pipe is full.
151695 + * Max allowed value is 7 (0x7).
151696 + */
151697 +static u32 bbr_full_bw_cnt = 3;
151699 +static u32 bbr_flags;          /* Debugging related stuff */
151701 +/* Whether to debug using printk.
151702 + */
151703 +static bool bbr_debug_with_printk;
151705 +/* Whether to debug using ftrace event tcp:tcp_bbr_event.
151706 + * Ignored when bbr_debug_with_printk is set.
151707 + */
151708 +static bool bbr_debug_ftrace;
151710 +/* Experiment: each cycle, try to hold sub-unity gain until inflight <= BDP. */
151711 +static bool bbr_drain_to_target = true;                /* default: enabled */
151713 +/* Experiment: Flags to control BBR with ECN behavior.
151714 + */
151715 +static bool bbr_precise_ece_ack = true;                /* default: enabled */
151717 +/* The max rwin scaling shift factor is 14 (RFC 1323), so the max sane rwin is
151718 + * (2^(16+14) B)/(1024 B/packet) = 1M packets.
151719 + */
151720 +static u32 bbr_cwnd_warn_val   = 1U << 20;
151722 +static u16 bbr_debug_port_mask;
151724 +/* BBR module parameters. These are module parameters only in Google prod.
151725 + * Upstream these are intentionally not module parameters.
151726 + */
151727 +static int bbr_pacing_gain_size = CYCLE_LEN;
151729 +/* Gain factor for adding extra_acked to target cwnd: */
151730 +static int bbr_extra_acked_gain = 256;
151732 +/* Window length of extra_acked window. Max allowed val is 31. */
151733 +static u32 bbr_extra_acked_win_rtts = 5;
151735 +/* Max allowed val for ack_epoch_acked, after which sampling epoch is reset */
151736 +static u32 bbr_ack_epoch_acked_reset_thresh = 1U << 20;
151738 +/* Time period for clamping cwnd increment due to ack aggregation */
151739 +static u32 bbr_extra_acked_max_us = 100 * 1000;
151741 +/* Use extra_acked in startup?
151742 + * 0: disabled
151743 + * 1: use the latest extra_acked value from the last 1-2 RTTs of startup
151744 + */
151745 +static int bbr_extra_acked_in_startup = 1;             /* default: enabled */
151747 +/* Experiment: don't grow cwnd beyond twice of what we just probed. */
151748 +static bool bbr_usage_based_cwnd;              /* default: disabled */
151750 +/* For lab testing, researchers can enable BBRv2 ECN support with this flag,
151751 + * when they know that any ECN marks that the connections experience will be
151752 + * DCTCP/L4S-style ECN marks, rather than RFC3168 ECN marks.
151753 + * TODO(ncardwell): Production use of the BBRv2 ECN functionality depends on
151754 + * negotiation or configuration that is outside the scope of the BBRv2
151755 + * alpha release.
151756 + */
151757 +static bool bbr_ecn_enable = false;
151759 +module_param_named(min_tso_rate,      bbr_min_tso_rate,      int,    0644);
151760 +module_param_named(tso_rtt_shift,     bbr_tso_rtt_shift,     int,    0644);
151761 +module_param_named(high_gain,         bbr_high_gain,         int,    0644);
151762 +module_param_named(drain_gain,        bbr_drain_gain,        int,    0644);
151763 +module_param_named(startup_cwnd_gain, bbr_startup_cwnd_gain, int,    0644);
151764 +module_param_named(cwnd_gain,         bbr_cwnd_gain,         int,    0644);
151765 +module_param_array_named(pacing_gain, bbr_pacing_gain,       int,
151766 +                        &bbr_pacing_gain_size, 0644);
151767 +module_param_named(cwnd_min_target,   bbr_cwnd_min_target,   uint,   0644);
151768 +module_param_named(probe_rtt_cwnd_gain,
151769 +                  bbr_probe_rtt_cwnd_gain,                  uint,   0664);
151770 +module_param_named(cwnd_warn_val,     bbr_cwnd_warn_val,     uint,   0664);
151771 +module_param_named(debug_port_mask,   bbr_debug_port_mask,   ushort, 0644);
151772 +module_param_named(flags,             bbr_flags,             uint,   0644);
151773 +module_param_named(debug_ftrace,      bbr_debug_ftrace, bool,   0644);
151774 +module_param_named(debug_with_printk, bbr_debug_with_printk, bool,   0644);
151775 +module_param_named(min_rtt_win_sec,   bbr_min_rtt_win_sec,   uint,   0644);
151776 +module_param_named(probe_rtt_mode_ms, bbr_probe_rtt_mode_ms, uint,   0644);
151777 +module_param_named(probe_rtt_win_ms,  bbr_probe_rtt_win_ms,  uint,   0644);
151778 +module_param_named(full_bw_thresh,    bbr_full_bw_thresh,    uint,   0644);
151779 +module_param_named(full_bw_cnt,       bbr_full_bw_cnt,       uint,   0644);
151780 +module_param_named(cwnd_tso_budget,   bbr_cwnd_tso_budget,   uint,   0664);
151781 +module_param_named(extra_acked_gain,  bbr_extra_acked_gain,  int,    0664);
151782 +module_param_named(extra_acked_win_rtts,
151783 +                  bbr_extra_acked_win_rtts, uint,   0664);
151784 +module_param_named(extra_acked_max_us,
151785 +                  bbr_extra_acked_max_us, uint,   0664);
151786 +module_param_named(ack_epoch_acked_reset_thresh,
151787 +                  bbr_ack_epoch_acked_reset_thresh, uint,   0664);
151788 +module_param_named(drain_to_target,   bbr_drain_to_target,   bool,   0664);
151789 +module_param_named(precise_ece_ack,   bbr_precise_ece_ack,   bool,   0664);
151790 +module_param_named(extra_acked_in_startup,
151791 +                  bbr_extra_acked_in_startup, int, 0664);
151792 +module_param_named(usage_based_cwnd, bbr_usage_based_cwnd, bool,   0664);
151793 +module_param_named(ecn_enable,       bbr_ecn_enable,         bool,   0664);
151795 +static void bbr2_exit_probe_rtt(struct sock *sk);
151796 +static void bbr2_reset_congestion_signals(struct sock *sk);
151798 +static void bbr_check_probe_rtt_done(struct sock *sk);
151800 +/* Do we estimate that STARTUP filled the pipe? */
151801 +static bool bbr_full_bw_reached(const struct sock *sk)
151803 +       const struct bbr *bbr = inet_csk_ca(sk);
151805 +       return bbr->full_bw_reached;
151808 +/* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
151809 +static u32 bbr_max_bw(const struct sock *sk)
151811 +       struct bbr *bbr = inet_csk_ca(sk);
151813 +       return max(bbr->bw_hi[0], bbr->bw_hi[1]);
151816 +/* Return the estimated bandwidth of the path, in pkts/uS << BW_SCALE. */
151817 +static u32 bbr_bw(const struct sock *sk)
151819 +       struct bbr *bbr = inet_csk_ca(sk);
151821 +       return min(bbr_max_bw(sk), bbr->bw_lo);
151824 +/* Return maximum extra acked in past k-2k round trips,
151825 + * where k = bbr_extra_acked_win_rtts.
151826 + */
151827 +static u16 bbr_extra_acked(const struct sock *sk)
151829 +       struct bbr *bbr = inet_csk_ca(sk);
151831 +       return max(bbr->extra_acked[0], bbr->extra_acked[1]);
151834 +/* Return rate in bytes per second, optionally with a gain.
151835 + * The order here is chosen carefully to avoid overflow of u64. This should
151836 + * work for input rates of up to 2.9Tbit/sec and gain of 2.89x.
151837 + */
151838 +static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain,
151839 +                                 int margin)
151841 +       unsigned int mss = tcp_sk(sk)->mss_cache;
151843 +       rate *= mss;
151844 +       rate *= gain;
151845 +       rate >>= BBR_SCALE;
151846 +       rate *= USEC_PER_SEC / 100 * (100 - margin);
151847 +       rate >>= BW_SCALE;
151848 +       rate = max(rate, 1ULL);
151849 +       return rate;
151852 +static u64 bbr_bw_bytes_per_sec(struct sock *sk, u64 rate)
151854 +       return bbr_rate_bytes_per_sec(sk, rate, BBR_UNIT, 0);
151857 +static u64 bbr_rate_kbps(struct sock *sk, u64 rate)
151859 +       rate = bbr_bw_bytes_per_sec(sk, rate);
151860 +       rate *= 8;
151861 +       do_div(rate, 1000);
151862 +       return rate;
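/*
 * A standalone sketch of bbr_rate_bytes_per_sec()'s order of
 * operations, assuming mss = 1448: multiply before shifting so the
 * intermediate product stays inside a u64 for rates up to ~2.9 Tbps.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t rate_Bps(uint64_t bw, int gain, int margin, uint32_t mss)
{
        uint64_t rate = bw * mss;               /* pkts -> bytes (still scaled) */

        rate = (rate * gain) >> 8;              /* apply gain, drop BBR_SCALE */
        rate *= 1000000 / 100 * (100 - margin); /* per-usec -> per-sec, margin */
        return rate >> 24;                      /* drop BW_SCALE */
}

int main(void)
{
        uint64_t bw = 1ULL << 24;       /* 1 pkt/usec in BW_SCALE fixed point */

        /* unity gain (256/256) with the 1% pacing margin: ~1.433e9 B/s */
        printf("%llu bytes/sec\n",
               (unsigned long long)rate_Bps(bw, 256, 1, 1448));
        return 0;
}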
151865 +static u32 bbr_tso_segs_goal(struct sock *sk);
151866 +static void bbr_debug(struct sock *sk, u32 acked,
151867 +                     const struct rate_sample *rs, struct bbr_context *ctx)
151869 +       static const char ca_states[] = {
151870 +               [TCP_CA_Open]           = 'O',
151871 +               [TCP_CA_Disorder]       = 'D',
151872 +               [TCP_CA_CWR]            = 'C',
151873 +               [TCP_CA_Recovery]       = 'R',
151874 +               [TCP_CA_Loss]           = 'L',
151875 +       };
151876 +       static const char mode[] = {
151877 +               'G',  /* Growing   - BBR_STARTUP */
151878 +               'D',  /* Drain     - BBR_DRAIN */
151879 +               'W',  /* Window    - BBR_PROBE_BW */
151880 +               'M',  /* Min RTT   - BBR_PROBE_RTT */
151881 +       };
151882 +       static const char ack_phase[] = { /* bbr_ack_phase strings */
151883 +               'I',    /* BBR_ACKS_INIT           - 'Init' */
151884 +               'R',    /* BBR_ACKS_REFILLING      - 'Refilling' */
151885 +               'B',    /* BBR_ACKS_PROBE_STARTING - 'Before' */
151886 +               'F',    /* BBR_ACKS_PROBE_FEEDBACK - 'Feedback' */
151887 +               'A',    /* BBR_ACKS_PROBE_STOPPING - 'After' */
151888 +       };
151889 +       struct tcp_sock *tp = tcp_sk(sk);
151890 +       struct bbr *bbr = inet_csk_ca(sk);
151891 +       const u32 una = tp->snd_una - bbr->debug.snd_isn;
151892 +       const u32 fack = tcp_highest_sack_seq(tp);
151893 +       const u16 dport = ntohs(inet_sk(sk)->inet_dport);
151894 +       bool is_port_match = (bbr_debug_port_mask &&
151895 +                             ((dport & bbr_debug_port_mask) == 0));
151896 +       char debugmsg[320];
151898 +       if (sk->sk_state == TCP_SYN_SENT)
151899 +               return;  /* no bbr_init() yet if SYN retransmit -> CA_Loss */
151901 +       if (!tp->snd_cwnd || tp->snd_cwnd > bbr_cwnd_warn_val) {
151902 +               char addr[INET6_ADDRSTRLEN + 10] = { 0 };
151904 +               if (sk->sk_family == AF_INET)
151905 +                       snprintf(addr, sizeof(addr), "%pI4:%u",
151906 +                                &inet_sk(sk)->inet_daddr, dport);
151907 +               else if (sk->sk_family == AF_INET6)
151908 +                       snprintf(addr, sizeof(addr), "%pI6:%u",
151909 +                                &sk->sk_v6_daddr, dport);
151911 +               WARN_ONCE(1,
151912 +                       "BBR %s cwnd alert: %u "
151913 +                       "snd_una: %u ca: %d pacing_gain: %u cwnd_gain: %u "
151914 +                       "bw: %u rtt: %u min_rtt: %u "
151915 +                       "acked: %u tso_segs: %u "
151916 +                       "bw: %d %ld %d pif: %u\n",
151917 +                       addr, tp->snd_cwnd,
151918 +                       una, inet_csk(sk)->icsk_ca_state,
151919 +                       bbr->pacing_gain, bbr->cwnd_gain,
151920 +                       bbr_max_bw(sk), (tp->srtt_us >> 3), bbr->min_rtt_us,
151921 +                       acked, bbr_tso_segs_goal(sk),
151922 +                       rs->delivered, rs->interval_us, rs->is_retrans,
151923 +                       tcp_packets_in_flight(tp));
151924 +       }
151926 +       if (likely(!bbr_debug_with_printk && !bbr_debug_ftrace))
151927 +               return;
151929 +       if (!sock_flag(sk, SOCK_DBG) && !is_port_match)
151930 +               return;
151932 +       if (!ctx->log && !tp->app_limited && !(bbr_flags & FLAG_DEBUG_VERBOSE))
151933 +               return;
151935 +       if (ipv4_is_loopback(inet_sk(sk)->inet_daddr) &&
151936 +           !(bbr_flags & FLAG_DEBUG_LOOPBACK))
151937 +               return;
151939 +       snprintf(debugmsg, sizeof(debugmsg) - 1,
151940 +                "BBR %pI4:%-5u %5u,%03u:%-7u %c "
151941 +                "%c %2u br %2u cr %2d rtt %5ld d %2d i %5ld mrtt %d %cbw %llu "
151942 +                "bw %llu lb %llu ib %llu qb %llu "
151943 +                "a %u if %2u %c %c dl %u l %u al %u # %u t %u %c %c "
151944 +                "lr %d er %d ea %d bwl %lld il %d ih %d c %d "
151945 +                "v %d %c %u %c %s\n",
151946 +                &inet_sk(sk)->inet_daddr, dport,
151947 +                una / 1000, una % 1000, fack - tp->snd_una,
151948 +                ca_states[inet_csk(sk)->icsk_ca_state],
151949 +                bbr->debug.undo ? '@' : mode[bbr->mode],
151950 +                tp->snd_cwnd,
151951 +                bbr_extra_acked(sk),   /* br (legacy): extra_acked */
151952 +                rs->tx_in_flight,      /* cr (legacy): tx_inflight */
151953 +                rs->rtt_us,
151954 +                rs->delivered,
151955 +                rs->interval_us,
151956 +                bbr->min_rtt_us,
151957 +                rs->is_app_limited ? '_' : 'l',
151958 +                bbr_rate_kbps(sk, ctx->sample_bw), /* lbw: latest sample bw */
151959 +                bbr_rate_kbps(sk, bbr_max_bw(sk)), /* bw: max bw */
151960 +                0ULL,                              /* lb: [obsolete] */
151961 +                0ULL,                              /* ib: [obsolete] */
151962 +                (u64)sk->sk_pacing_rate * 8 / 1000,
151963 +                acked,
151964 +                tcp_packets_in_flight(tp),
151965 +                rs->is_ack_delayed ? 'd' : '.',
151966 +                bbr->round_start ? '*' : '.',
151967 +                tp->delivered, tp->lost,
151968 +                tp->app_limited,
151969 +                0,                                 /* #: [obsolete] */
151970 +                ctx->target_cwnd,
151971 +                tp->reord_seen ? 'r' : '.',  /* r: reordering seen? */
151972 +                ca_states[bbr->prev_ca_state],
151973 +                (rs->lost + rs->delivered) > 0 ?
151974 +                (1000 * rs->lost /
151975 +                 (rs->lost + rs->delivered)) : 0,    /* lr: loss rate x1000 */
151976 +                (rs->delivered) > 0 ?
151977 +                (1000 * rs->delivered_ce /
151978 +                 (rs->delivered)) : 0,               /* er: ECN rate x1000 */
151979 +                1000 * bbr->ecn_alpha >> BBR_SCALE,  /* ea: ECN alpha x1000 */
151980 +                bbr->bw_lo == ~0U ?
151981 +                  -1 : (s64)bbr_rate_kbps(sk, bbr->bw_lo), /* bwl */
151982 +                bbr->inflight_lo,      /* il */
151983 +                bbr->inflight_hi,      /* ih */
151984 +                bbr->bw_probe_up_cnt,  /* c */
151985 +                2,                     /* v: version */
151986 +                bbr->debug.event,
151987 +                bbr->cycle_idx,
151988 +                ack_phase[bbr->ack_phase],
151989 +                bbr->bw_probe_samples ? "Y" : "N");
151990 +       debugmsg[sizeof(debugmsg) - 1] = 0;
151992 +       /* printk takes a higher precedence. */
151993 +       if (bbr_debug_with_printk)
151994 +               printk(KERN_DEBUG "%s", debugmsg);
151996 +       if (unlikely(bbr->debug.undo))
151997 +               bbr->debug.undo = 0;
152000 +/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
152001 +static unsigned long bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
152003 +       u64 rate = bw;
152005 +       rate = bbr_rate_bytes_per_sec(sk, rate, gain,
152006 +                                     bbr_pacing_margin_percent);
152007 +       rate = min_t(u64, rate, sk->sk_max_pacing_rate);
152008 +       return rate;
152011 +/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
152012 +static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
152014 +       struct tcp_sock *tp = tcp_sk(sk);
152015 +       struct bbr *bbr = inet_csk_ca(sk);
152016 +       u64 bw;
152017 +       u32 rtt_us;
152019 +       if (tp->srtt_us) {              /* any RTT sample yet? */
152020 +               rtt_us = max(tp->srtt_us >> 3, 1U);
152021 +               bbr->has_seen_rtt = 1;
152022 +       } else {                         /* no RTT sample yet */
152023 +               rtt_us = USEC_PER_MSEC;  /* use nominal default RTT */
152024 +       }
152025 +       bw = (u64)tp->snd_cwnd * BW_UNIT;
152026 +       do_div(bw, rtt_us);
152027 +       sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr->params.high_gain);
152030 +/* Pace using current bw estimate and a gain factor. */
152031 +static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
152033 +       struct tcp_sock *tp = tcp_sk(sk);
152034 +       struct bbr *bbr = inet_csk_ca(sk);
152035 +       unsigned long rate = bbr_bw_to_pacing_rate(sk, bw, gain);
152037 +       if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
152038 +               bbr_init_pacing_rate_from_rtt(sk);
152039 +       if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
152040 +               sk->sk_pacing_rate = rate;
152043 +static u32 bbr_min_tso_segs(struct sock *sk)
152045 +       return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
152048 +/* Return the number of segments BBR would like in a TSO/GSO skb, given
152049 + * a particular max gso size as a constraint.
152050 + */
152051 +static u32 bbr_tso_segs_generic(struct sock *sk, unsigned int mss_now,
152052 +                               u32 gso_max_size)
152054 +       struct bbr *bbr = inet_csk_ca(sk);
152055 +       u32 segs, r;
152056 +       u64 bytes;
152058 +       /* Budget a TSO/GSO burst size allowance based on bw (pacing_rate). */
152059 +       bytes = sk->sk_pacing_rate >> sk->sk_pacing_shift;
152061 +       /* Budget a TSO/GSO burst size allowance based on min_rtt. For every
152062 +        * K = 2^tso_rtt_shift microseconds of min_rtt, halve the burst.
152063 +        * The min_rtt-based burst allowance is: 64 KBytes / 2^(min_rtt/K)
152064 +        */
152065 +       if (bbr->params.tso_rtt_shift) {
152066 +               r = bbr->min_rtt_us >> bbr->params.tso_rtt_shift;
152067 +               if (r < BITS_PER_TYPE(u32))   /* prevent undefined behavior */
152068 +                       bytes += GSO_MAX_SIZE >> r;
152069 +       }
152071 +       bytes = min_t(u32, bytes, gso_max_size - 1 - MAX_TCP_HEADER);
152072 +       segs = max_t(u32, bytes / mss_now, bbr_min_tso_segs(sk));
152073 +       return segs;
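/*
 * Worked instance of the min_rtt-based allowance above (standalone
 * sketch), assuming the default tso_rtt_shift = 9 and
 * GSO_MAX_SIZE = 65536: the 64 KB budget halves for every 512 usec
 * of min_rtt.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint32_t gso_max = 65536, shift = 9;
        uint32_t rtts_us[] = { 100, 512, 1024, 3000, 20000 };

        for (int i = 0; i < 5; i++) {
                uint32_t r = rtts_us[i] >> shift;
                uint32_t bytes = r < 32 ? gso_max >> r : 0;

                printf("min_rtt %5u us -> +%5u bytes\n", rtts_us[i], bytes);
        }
        return 0;
}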
152076 +/* Custom tcp_tso_autosize() for BBR, used at transmit time to cap skb size. */
152077 +static u32 bbr_tso_segs(struct sock *sk, unsigned int mss_now)
152079 +       return bbr_tso_segs_generic(sk, mss_now, sk->sk_gso_max_size);
152082 +/* Like bbr_tso_segs(), using mss_cache, ignoring driver's sk_gso_max_size. */
152083 +static u32 bbr_tso_segs_goal(struct sock *sk)
152085 +       struct tcp_sock *tp = tcp_sk(sk);
152087 +       return bbr_tso_segs_generic(sk, tp->mss_cache, GSO_MAX_SIZE);
152090 +/* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
152091 +static void bbr_save_cwnd(struct sock *sk)
152093 +       struct tcp_sock *tp = tcp_sk(sk);
152094 +       struct bbr *bbr = inet_csk_ca(sk);
152096 +       if (bbr->prev_ca_state < TCP_CA_Recovery && bbr->mode != BBR_PROBE_RTT)
152097 +               bbr->prior_cwnd = tp->snd_cwnd;  /* this cwnd is good enough */
152098 +       else  /* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */
152099 +               bbr->prior_cwnd = max(bbr->prior_cwnd, tp->snd_cwnd);
152102 +static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
152104 +       struct tcp_sock *tp = tcp_sk(sk);
152105 +       struct bbr *bbr = inet_csk_ca(sk);
152107 +       if (event == CA_EVENT_TX_START && tp->app_limited) {
152108 +               bbr->idle_restart = 1;
152109 +               bbr->ack_epoch_mstamp = tp->tcp_mstamp;
152110 +               bbr->ack_epoch_acked = 0;
152111 +               /* Avoid pointless buffer overflows: pace at est. bw if we don't
152112 +                * need more speed (we're restarting from idle and app-limited).
152113 +                */
152114 +               if (bbr->mode == BBR_PROBE_BW)
152115 +                       bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
152116 +               else if (bbr->mode == BBR_PROBE_RTT)
152117 +                       bbr_check_probe_rtt_done(sk);
152118 +       } else if ((event == CA_EVENT_ECN_IS_CE ||
152119 +                   event == CA_EVENT_ECN_NO_CE) &&
152120 +                   bbr_ecn_enable &&
152121 +                   bbr->params.precise_ece_ack) {
152122 +               u32 state = bbr->ce_state;
152123 +               dctcp_ece_ack_update(sk, event, &bbr->prior_rcv_nxt, &state);
152124 +               bbr->ce_state = state;
152125 +               if (tp->fast_ack_mode == 2 && event == CA_EVENT_ECN_IS_CE)
152126 +                       tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
152127 +       }
152130 +/* Calculate bdp based on min RTT and the estimated bottleneck bandwidth:
152132 + * bdp = ceil(bw * min_rtt * gain)
152134 + * The key factor, gain, controls the amount of queue. While a small gain
152135 + * builds a smaller queue, it becomes more vulnerable to noise in RTT
152136 + * measurements (e.g., delayed ACKs or other ACK compression effects). This
152137 + * noise may cause BBR to under-estimate the rate.
152138 + */
152139 +static u32 bbr_bdp(struct sock *sk, u32 bw, int gain)
152141 +       struct bbr *bbr = inet_csk_ca(sk);
152142 +       u32 bdp;
152143 +       u64 w;
152145 +       /* If we've never had a valid RTT sample, cap cwnd at the initial
152146 +        * default. This should only happen when the connection is not using TCP
152147 +        * timestamps and has retransmitted all of the SYN/SYNACK/data packets
152148 +        * ACKed so far. In this case, an RTO can cut cwnd to 1, in which
152149 +        * case we need to slow-start up toward something safe: initial cwnd.
152150 +        */
152151 +       if (unlikely(bbr->min_rtt_us == ~0U))    /* no valid RTT samples yet? */
152152 +               return bbr->init_cwnd;  /* be safe: cap at initial cwnd */
152154 +       w = (u64)bw * bbr->min_rtt_us;
152156 +       /* Apply a gain to the given value, remove the BW_SCALE shift, and
152157 +        * round the value up to avoid a negative feedback loop.
152158 +        */
152159 +       bdp = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;
152161 +       return bdp;
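/*
 * Worked instance of bdp = ceil(bw * min_rtt * gain) above, using the
 * same fixed-point scales (BW_SCALE = 24, BBR_SCALE = 8); a standalone
 * sketch with made-up inputs.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t bw = 10ULL << 24;      /* 10 pkts/usec in BW_SCALE units */
        uint32_t min_rtt_us = 10000;    /* 10 ms */
        int gain = 2 * 256;             /* cwnd gain of 2.0 in BBR_UNIT */
        uint64_t w = bw * min_rtt_us;
        uint32_t bdp = (((w * gain) >> 8) + (1 << 24) - 1) / (1 << 24);

        printf("bdp = %u pkts\n", bdp); /* 10 * 10000 * 2 = 200000 */
        return 0;
}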
152164 +/* To achieve full performance in high-speed paths, we budget enough cwnd to
152165 + * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
152166 + *   - one skb in sending host Qdisc,
152167 + *   - one skb in sending host TSO/GSO engine
152168 + *   - one skb being received by receiver host LRO/GRO/delayed-ACK engine
152169 + * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
152170 + * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
152171 + * which allows 2 outstanding 2-packet sequences, to try to keep pipe
152172 + * full even with ACK-every-other-packet delayed ACKs.
152173 + */
152174 +static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd)
152176 +       struct bbr *bbr = inet_csk_ca(sk);
152177 +       u32 tso_segs_goal;
152179 +       tso_segs_goal = 3 * bbr_tso_segs_goal(sk);
152181 +       /* Allow enough full-sized skbs in flight to utilize end systems. */
152182 +       if (bbr->params.cwnd_tso_budget == 1) {
152183 +               cwnd = max_t(u32, cwnd, tso_segs_goal);
152184 +               cwnd = max_t(u32, cwnd, bbr->params.cwnd_min_target);
152185 +       } else {
152186 +               cwnd += tso_segs_goal;
152187 +               cwnd = (cwnd + 1) & ~1U;
152188 +       }
152189 +       /* Ensure gain cycling gets inflight above BDP even for small BDPs. */
152190 +       if (bbr->mode == BBR_PROBE_BW && bbr->cycle_idx == BBR_BW_PROBE_UP)
152191 +               cwnd += 2;
152193 +       return cwnd;
152196 +/* Find inflight based on min RTT and the estimated bottleneck bandwidth. */
152197 +static u32 bbr_inflight(struct sock *sk, u32 bw, int gain)
152199 +       u32 inflight;
152201 +       inflight = bbr_bdp(sk, bw, gain);
152202 +       inflight = bbr_quantization_budget(sk, inflight);
152204 +       return inflight;
152207 +/* With pacing at lower layers, there's often less data "in the network" than
152208 + * "in flight". With TSQ and departure time pacing at lower layers (e.g. fq),
152209 + * we often have several skbs queued in the pacing layer with a pre-scheduled
152210 + * earliest departure time (EDT). BBR adapts its pacing rate based on the
152211 + * inflight level that it estimates has already been "baked in" by previous
152212 + * departure time decisions. We calculate a rough estimate of the number of our
152213 + * packets that might be in the network at the earliest departure time for the
152214 + * next skb scheduled:
152215 + *   in_network_at_edt = inflight_at_edt - (EDT - now) * bw
152216 + * If we're increasing inflight, then we want to know if the transmit of the
152217 + * EDT skb will push inflight above the target, so inflight_at_edt includes
152218 + * bbr_tso_segs_goal() from the skb departing at EDT. If decreasing inflight,
152219 + * then estimate if inflight will sink too low just before the EDT transmit.
152220 + */
152221 +static u32 bbr_packets_in_net_at_edt(struct sock *sk, u32 inflight_now)
152223 +       struct tcp_sock *tp = tcp_sk(sk);
152224 +       struct bbr *bbr = inet_csk_ca(sk);
152225 +       u64 now_ns, edt_ns, interval_us;
152226 +       u32 interval_delivered, inflight_at_edt;
152228 +       now_ns = tp->tcp_clock_cache;
152229 +       edt_ns = max(tp->tcp_wstamp_ns, now_ns);
152230 +       interval_us = div_u64(edt_ns - now_ns, NSEC_PER_USEC);
152231 +       interval_delivered = (u64)bbr_bw(sk) * interval_us >> BW_SCALE;
152232 +       inflight_at_edt = inflight_now;
152233 +       if (bbr->pacing_gain > BBR_UNIT)              /* increasing inflight */
152234 +               inflight_at_edt += bbr_tso_segs_goal(sk);  /* include EDT skb */
152235 +       if (interval_delivered >= inflight_at_edt)
152236 +               return 0;
152237 +       return inflight_at_edt - interval_delivered;
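/*
 * A standalone sketch of the EDT adjustment above: subtract what the
 * path should drain between now and the next earliest departure time
 * from the current inflight (made-up inputs).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t bw = 2ULL << 24;       /* 2 pkts/usec in BW_SCALE units */
        uint64_t gap_us = 50;           /* EDT - now */
        uint32_t inflight_now = 120;
        uint64_t drained = (bw * gap_us) >> 24; /* delivered before EDT */

        printf("in network at EDT: %u pkts\n",
               drained >= inflight_now ?
               0 : (uint32_t)(inflight_now - drained)); /* 120 - 100 = 20 */
        return 0;
}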
152240 +/* Find the cwnd increment based on estimate of ack aggregation */
152241 +static u32 bbr_ack_aggregation_cwnd(struct sock *sk)
152243 +       struct bbr *bbr = inet_csk_ca(sk);
152244 +       u32 max_aggr_cwnd, aggr_cwnd = 0;
152246 +       if (bbr->params.extra_acked_gain &&
152247 +           (bbr_full_bw_reached(sk) || bbr->params.extra_acked_in_startup)) {
152248 +               max_aggr_cwnd = ((u64)bbr_bw(sk) * bbr_extra_acked_max_us)
152249 +                               / BW_UNIT;
152250 +               aggr_cwnd = (bbr->params.extra_acked_gain * bbr_extra_acked(sk))
152251 +                            >> BBR_SCALE;
152252 +               aggr_cwnd = min(aggr_cwnd, max_aggr_cwnd);
152253 +       }
152255 +       return aggr_cwnd;
152258 +/* Returns the cwnd for PROBE_RTT mode. */
152259 +static u32 bbr_probe_rtt_cwnd(struct sock *sk)
152261 +       struct bbr *bbr = inet_csk_ca(sk);
152263 +       if (bbr->params.probe_rtt_cwnd_gain == 0)
152264 +               return bbr->params.cwnd_min_target;
152265 +       return max_t(u32, bbr->params.cwnd_min_target,
152266 +                    bbr_bdp(sk, bbr_bw(sk), bbr->params.probe_rtt_cwnd_gain));
152269 +/* Slow-start up toward target cwnd (if bw estimate is growing, or packet loss
152270 + * has drawn us down below target), or snap down to target if we're above it.
152271 + */
152272 +static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
152273 +                        u32 acked, u32 bw, int gain, u32 cwnd,
152274 +                        struct bbr_context *ctx)
152276 +       struct tcp_sock *tp = tcp_sk(sk);
152277 +       struct bbr *bbr = inet_csk_ca(sk);
152278 +       u32 target_cwnd = 0, prev_cwnd = tp->snd_cwnd, max_probe;
152280 +       if (!acked)
152281 +               goto done;  /* no packet fully ACKed; just apply caps */
152283 +       target_cwnd = bbr_bdp(sk, bw, gain);
152285 +       /* Increment the cwnd to account for excess ACKed data that seems
152286 +        * due to aggregation (of data and/or ACKs) visible in the ACK stream.
152287 +        */
152288 +       target_cwnd += bbr_ack_aggregation_cwnd(sk);
152289 +       target_cwnd = bbr_quantization_budget(sk, target_cwnd);
152291 +       /* If we're below target cwnd, slow start cwnd toward target cwnd. */
152292 +       bbr->debug.target_cwnd = target_cwnd;
152294 +       /* Update cwnd and enable fast path if cwnd reaches target_cwnd. */
152295 +       bbr->try_fast_path = 0;
152296 +       if (bbr_full_bw_reached(sk)) { /* only cut cwnd if we filled the pipe */
152297 +               cwnd += acked;
152298 +               if (cwnd >= target_cwnd) {
152299 +                       cwnd = target_cwnd;
152300 +                       bbr->try_fast_path = 1;
152301 +               }
152302 +       } else if (cwnd < target_cwnd || cwnd < 2 * bbr->init_cwnd) {
152303 +               cwnd += acked;
152304 +       } else {
152305 +               bbr->try_fast_path = 1;
152306 +       }
152308 +       /* When growing cwnd, don't grow beyond twice what we just probed. */
152309 +       if (bbr->params.usage_based_cwnd) {
152310 +               max_probe = max(2 * tp->max_packets_out, tp->snd_cwnd);
152311 +               cwnd = min(cwnd, max_probe);
152312 +       }
152314 +       cwnd = max_t(u32, cwnd, bbr->params.cwnd_min_target);
152315 +done:
152316 +       tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);   /* apply global cap */
152317 +       if (bbr->mode == BBR_PROBE_RTT)  /* drain queue, refresh min_rtt */
152318 +               tp->snd_cwnd = min_t(u32, tp->snd_cwnd, bbr_probe_rtt_cwnd(sk));
152320 +       ctx->target_cwnd = target_cwnd;
152321 +       ctx->log = (tp->snd_cwnd != prev_cwnd);
152324 +/* See if we have reached next round trip */
152325 +static void bbr_update_round_start(struct sock *sk,
152326 +               const struct rate_sample *rs, struct bbr_context *ctx)
152328 +       struct tcp_sock *tp = tcp_sk(sk);
152329 +       struct bbr *bbr = inet_csk_ca(sk);
152331 +       bbr->round_start = 0;
152333 +       /* See if we've reached the next RTT */
152334 +       if (rs->interval_us > 0 &&
152335 +           !before(rs->prior_delivered, bbr->next_rtt_delivered)) {
152336 +               bbr->next_rtt_delivered = tp->delivered;
152337 +               bbr->round_start = 1;
152338 +       }
152341 +/* Calculate the bandwidth based on how fast packets are delivered */
152342 +static void bbr_calculate_bw_sample(struct sock *sk,
152343 +                       const struct rate_sample *rs, struct bbr_context *ctx)
152345 +       struct bbr *bbr = inet_csk_ca(sk);
152346 +       u64 bw = 0;
152348 +       /* Divide delivered by the interval to find a (lower bound) bottleneck
152349 +        * bandwidth sample. Delivered is in packets and interval_us in uS and
152350 +        * ratio will be <<1 for most connections. So delivered is first scaled.
152351 +        * Round up to allow growth at low rates, even with integer division.
152352 +        */
152353 +       if (rs->interval_us > 0) {
152354 +               if (WARN_ONCE(rs->delivered < 0,
152355 +                             "negative delivered: %d interval_us: %ld\n",
152356 +                             rs->delivered, rs->interval_us))
152357 +                       return;
152359 +               bw = DIV_ROUND_UP_ULL((u64)rs->delivered * BW_UNIT, rs->interval_us);
152360 +       }
152362 +       ctx->sample_bw = bw;
152363 +       bbr->debug.rs_bw = bw;
152366 +/* Estimates the windowed max degree of ack aggregation.
152367 + * This is used to provision extra in-flight data to keep sending during
152368 + * inter-ACK silences.
152370 + * Degree of ack aggregation is estimated as extra data acked beyond expected.
152372 + * max_extra_acked = "maximum recent excess data ACKed beyond max_bw * interval"
152373 + * cwnd += max_extra_acked
152375 + * Max extra_acked is clamped by cwnd and bw * bbr_extra_acked_max_us (100 ms).
152376 + * Max filter is an approximate sliding window of 5-10 (packet-timed) round
152377 + * trips for the non-startup phase, and 1-2 round trips for startup.
152378 + */
152379 +static void bbr_update_ack_aggregation(struct sock *sk,
152380 +                                      const struct rate_sample *rs)
152382 +       u32 epoch_us, expected_acked, extra_acked;
152383 +       struct bbr *bbr = inet_csk_ca(sk);
152384 +       struct tcp_sock *tp = tcp_sk(sk);
152385 +       u32 extra_acked_win_rtts_thresh = bbr->params.extra_acked_win_rtts;
152387 +       if (!bbr->params.extra_acked_gain || rs->acked_sacked <= 0 ||
152388 +           rs->delivered < 0 || rs->interval_us <= 0)
152389 +               return;
152391 +       if (bbr->round_start) {
152392 +               bbr->extra_acked_win_rtts = min(0x1F,
152393 +                                               bbr->extra_acked_win_rtts + 1);
152394 +               if (bbr->params.extra_acked_in_startup &&
152395 +                   !bbr_full_bw_reached(sk))
152396 +                       extra_acked_win_rtts_thresh = 1;
152397 +               if (bbr->extra_acked_win_rtts >=
152398 +                   extra_acked_win_rtts_thresh) {
152399 +                       bbr->extra_acked_win_rtts = 0;
152400 +                       bbr->extra_acked_win_idx = bbr->extra_acked_win_idx ?
152401 +                                                  0 : 1;
152402 +                       bbr->extra_acked[bbr->extra_acked_win_idx] = 0;
152403 +               }
152404 +       }
152406 +       /* Compute how many packets we expected to be delivered over epoch. */
152407 +       epoch_us = tcp_stamp_us_delta(tp->delivered_mstamp,
152408 +                                     bbr->ack_epoch_mstamp);
152409 +       expected_acked = ((u64)bbr_bw(sk) * epoch_us) / BW_UNIT;
152411 +       /* Reset the aggregation epoch if the ACK rate is below the expected
152412 +        * rate, or if a significantly large number of ACKs have arrived since
152413 +        * the epoch started (the epoch is potentially quite old).
152414 +        */
152415 +       if (bbr->ack_epoch_acked <= expected_acked ||
152416 +           (bbr->ack_epoch_acked + rs->acked_sacked >=
152417 +            bbr_ack_epoch_acked_reset_thresh)) {
152418 +               bbr->ack_epoch_acked = 0;
152419 +               bbr->ack_epoch_mstamp = tp->delivered_mstamp;
152420 +               expected_acked = 0;
152421 +       }
152423 +       /* Compute excess data delivered, beyond what was expected. */
152424 +       bbr->ack_epoch_acked = min_t(u32, 0xFFFFF,
152425 +                                  bbr->ack_epoch_acked + rs->acked_sacked);
152426 +       extra_acked = bbr->ack_epoch_acked - expected_acked;
152427 +       extra_acked = min(extra_acked, tp->snd_cwnd);
152428 +       if (extra_acked > bbr->extra_acked[bbr->extra_acked_win_idx])
152429 +               bbr->extra_acked[bbr->extra_acked_win_idx] = extra_acked;
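/*
 * A standalone sketch of the extra_acked estimate above: data ACKed
 * beyond bw * elapsed is treated as aggregation and clamped to cwnd
 * (made-up inputs).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t bw = 1ULL << 24;       /* 1 pkt/usec in BW_SCALE units */
        uint32_t epoch_us = 1000;       /* 1 ms since the epoch started */
        uint32_t epoch_acked = 1500;    /* packets (S)ACKed in the epoch */
        uint32_t cwnd = 1000;
        uint32_t expected = (bw * epoch_us) >> 24;      /* 1000 expected */
        uint32_t extra = epoch_acked - expected;        /* 500 in excess */

        if (extra > cwnd)
                extra = cwnd;
        printf("extra_acked = %u pkts\n", extra);
        return 0;
}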
152432 +/* Estimate when the pipe is full, using the change in delivery rate: BBR
152433 + * estimates that STARTUP filled the pipe if the estimated bw hasn't changed by
152434 + * at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited
152435 + * rounds. Why 3 rounds: 1: rwin autotuning grows the rwin, 2: we fill the
152436 + * higher rwin, 3: we get higher delivery rate samples. Or transient
152437 + * cross-traffic or radio noise can go away. CUBIC Hystart shares a similar
152438 + * design goal, but uses delay and inter-ACK spacing instead of bandwidth.
152439 + */
152440 +static void bbr_check_full_bw_reached(struct sock *sk,
152441 +                                     const struct rate_sample *rs)
152443 +       struct bbr *bbr = inet_csk_ca(sk);
152444 +       u32 bw_thresh;
152446 +       if (bbr_full_bw_reached(sk) || !bbr->round_start || rs->is_app_limited)
152447 +               return;
152449 +       bw_thresh = (u64)bbr->full_bw * bbr->params.full_bw_thresh >> BBR_SCALE;
152450 +       if (bbr_max_bw(sk) >= bw_thresh) {
152451 +               bbr->full_bw = bbr_max_bw(sk);
152452 +               bbr->full_bw_cnt = 0;
152453 +               return;
152454 +       }
152455 +       ++bbr->full_bw_cnt;
152456 +       bbr->full_bw_reached = bbr->full_bw_cnt >= bbr->params.full_bw_cnt;
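/*
 * A standalone sketch of the "full pipe" test above: STARTUP is judged
 * to have filled the pipe once bw fails to grow by full_bw_thresh
 * (5/4, i.e. 320/256) for full_bw_cnt (3) consecutive rounds.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t samples[] = { 100, 124, 124, 124, 124 };       /* bw per round */
        uint32_t full_bw = 0, cnt = 0;

        for (int i = 0; i < 5 && cnt < 3; i++) {
                uint32_t thresh = (uint64_t)full_bw * 320 >> 8;

                if (samples[i] >= thresh) {
                        full_bw = samples[i];   /* still growing >= 25%/round */
                        cnt = 0;
                } else {
                        cnt++;                  /* a round without 25% growth */
                }
        }
        printf("pipe judged full after %u stalled rounds\n", cnt);      /* 3 */
        return 0;
}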
152459 +/* If pipe is probably full, drain the queue and then enter steady-state. */
152460 +static bool bbr_check_drain(struct sock *sk, const struct rate_sample *rs,
152461 +                           struct bbr_context *ctx)
152463 +       struct bbr *bbr = inet_csk_ca(sk);
152465 +       if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) {
152466 +               bbr->mode = BBR_DRAIN;  /* drain queue we created */
152467 +               tcp_sk(sk)->snd_ssthresh =
152468 +                               bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT);
152469 +               bbr2_reset_congestion_signals(sk);
152470 +       }       /* fall through to check if in-flight is already small: */
152471 +       if (bbr->mode == BBR_DRAIN &&
152472 +           bbr_packets_in_net_at_edt(sk, tcp_packets_in_flight(tcp_sk(sk))) <=
152473 +           bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT))
152474 +               return true;  /* exiting DRAIN now */
152475 +       return false;
152478 +static void bbr_check_probe_rtt_done(struct sock *sk)
152480 +       struct tcp_sock *tp = tcp_sk(sk);
152481 +       struct bbr *bbr = inet_csk_ca(sk);
152483 +       if (!(bbr->probe_rtt_done_stamp &&
152484 +             after(tcp_jiffies32, bbr->probe_rtt_done_stamp)))
152485 +               return;
152487 +       bbr->probe_rtt_min_stamp = tcp_jiffies32; /* schedule next PROBE_RTT */
152488 +       tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd);
152489 +       bbr2_exit_probe_rtt(sk);
152492 +/* The goal of PROBE_RTT mode is to have BBR flows cooperatively and
152493 + * periodically drain the bottleneck queue, to converge to measure the true
152494 + * min_rtt (unloaded propagation delay). This allows the flows to keep queues
152495 + * small (reducing queuing delay and packet loss) and achieve fairness among
152496 + * BBR flows.
152498 + * The min_rtt filter window is 10 seconds. When the min_rtt estimate expires,
152499 + * we enter PROBE_RTT mode and cap the cwnd at bbr_cwnd_min_target=4 packets.
152500 + * After at least bbr_probe_rtt_mode_ms=200ms and at least one packet-timed
152501 + * round trip elapsed with that flight size <= 4, we leave PROBE_RTT mode and
152502 + * re-enter the previous mode. BBR uses 200ms to approximately bound the
152503 + * performance penalty of PROBE_RTT's cwnd capping to roughly 2% (200ms/10s).
152505 + * Note that a flow need only pay the 2% penalty if it has been busy sending
152506 + * over the entire last 10 seconds. Interactive applications (e.g. Web, RPCs,
152507 + * video chunks) often have natural silences or low-rate periods within the 10
152508 + * seconds, low enough for long enough to drain the bottleneck queue. Our
152509 + * min_rtt filter picks up these min RTT measurements opportunistically. :-)
152510 + */
152511 +static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
152513 +       struct tcp_sock *tp = tcp_sk(sk);
152514 +       struct bbr *bbr = inet_csk_ca(sk);
152515 +       bool probe_rtt_expired, min_rtt_expired;
152516 +       u32 expire;
152518 +       /* Track min RTT in probe_rtt_win_ms to time next PROBE_RTT state. */
152519 +       expire = bbr->probe_rtt_min_stamp +
152520 +                msecs_to_jiffies(bbr->params.probe_rtt_win_ms);
152521 +       probe_rtt_expired = after(tcp_jiffies32, expire);
152522 +       if (rs->rtt_us >= 0 &&
152523 +           (rs->rtt_us <= bbr->probe_rtt_min_us ||
152524 +            (probe_rtt_expired && !rs->is_ack_delayed))) {
152525 +               bbr->probe_rtt_min_us = rs->rtt_us;
152526 +               bbr->probe_rtt_min_stamp = tcp_jiffies32;
152527 +       }
152528 +       /* Track min RTT seen in the min_rtt_win_sec filter window: */
152529 +       expire = bbr->min_rtt_stamp + bbr->params.min_rtt_win_sec * HZ;
152530 +       min_rtt_expired = after(tcp_jiffies32, expire);
152531 +       if (bbr->probe_rtt_min_us <= bbr->min_rtt_us ||
152532 +           min_rtt_expired) {
152533 +               bbr->min_rtt_us = bbr->probe_rtt_min_us;
152534 +               bbr->min_rtt_stamp = bbr->probe_rtt_min_stamp;
152535 +       }
152537 +       if (bbr->params.probe_rtt_mode_ms > 0 && probe_rtt_expired &&
152538 +           !bbr->idle_restart && bbr->mode != BBR_PROBE_RTT) {
152539 +               bbr->mode = BBR_PROBE_RTT;  /* dip, drain queue */
152540 +               bbr_save_cwnd(sk);  /* note cwnd so we can restore it */
152541 +               bbr->probe_rtt_done_stamp = 0;
152542 +               bbr->ack_phase = BBR_ACKS_PROBE_STOPPING;
152543 +               bbr->next_rtt_delivered = tp->delivered;
152544 +       }
152546 +       if (bbr->mode == BBR_PROBE_RTT) {
152547 +               /* Ignore low rate samples during this mode. */
152548 +               tp->app_limited =
152549 +                       (tp->delivered + tcp_packets_in_flight(tp)) ? : 1;
152550 +               /* Maintain min packets in flight for max(200 ms, 1 round). */
152551 +               if (!bbr->probe_rtt_done_stamp &&
152552 +                   tcp_packets_in_flight(tp) <= bbr_probe_rtt_cwnd(sk)) {
152553 +                       bbr->probe_rtt_done_stamp = tcp_jiffies32 +
152554 +                               msecs_to_jiffies(bbr->params.probe_rtt_mode_ms);
152555 +                       bbr->probe_rtt_round_done = 0;
152556 +                       bbr->next_rtt_delivered = tp->delivered;
152557 +               } else if (bbr->probe_rtt_done_stamp) {
152558 +                       if (bbr->round_start)
152559 +                               bbr->probe_rtt_round_done = 1;
152560 +                       if (bbr->probe_rtt_round_done)
152561 +                               bbr_check_probe_rtt_done(sk);
152562 +               }
152563 +       }
152564 +       /* Restart after idle ends only once we process a new S/ACK for data */
152565 +       if (rs->delivered > 0)
152566 +               bbr->idle_restart = 0;
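+/* Editorial sketch of the two-level min filter above (not part of the
+ * original patch): probe_rtt_min_us tracks the min RTT over the last
+ * probe_rtt_win_ms, and min_rtt_us adopts that value whenever it is <=
+ * the current min_rtt_us or the min_rtt_win_sec window expires. E.g. with
+ * samples of 40ms, 42ms and 39ms in the current window,
+ * probe_rtt_min_us = 39ms; if min_rtt_us was 41ms it is refreshed to 39ms
+ * immediately, without waiting for the 10 s window to expire.
+ */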
152569 +static void bbr_update_gains(struct sock *sk)
152571 +       struct bbr *bbr = inet_csk_ca(sk);
152573 +       switch (bbr->mode) {
152574 +       case BBR_STARTUP:
152575 +               bbr->pacing_gain = bbr->params.high_gain;
152576 +               bbr->cwnd_gain   = bbr->params.startup_cwnd_gain;
152577 +               break;
152578 +       case BBR_DRAIN:
152579 +               bbr->pacing_gain = bbr->params.drain_gain;  /* slow, to drain */
152580 +               bbr->cwnd_gain = bbr->params.startup_cwnd_gain;  /* keep cwnd */
152581 +               break;
152582 +       case BBR_PROBE_BW:
152583 +               bbr->pacing_gain = bbr->params.pacing_gain[bbr->cycle_idx];
152584 +               bbr->cwnd_gain = bbr->params.cwnd_gain;
152585 +               break;
152586 +       case BBR_PROBE_RTT:
152587 +               bbr->pacing_gain = BBR_UNIT;
152588 +               bbr->cwnd_gain = BBR_UNIT;
152589 +               break;
152590 +       default:
152591 +               WARN_ONCE(1, "BBR bad mode: %u\n", bbr->mode);
152592 +               break;
152593 +       }
152596 +static void bbr_init(struct sock *sk)
152598 +       struct tcp_sock *tp = tcp_sk(sk);
152599 +       struct bbr *bbr = inet_csk_ca(sk);
152600 +       int i;
152602 +       WARN_ON_ONCE(tp->snd_cwnd >= bbr_cwnd_warn_val);
152604 +       bbr->initialized = 1;
152605 +       bbr->params.high_gain = min(0x7FF, bbr_high_gain);
152606 +       bbr->params.drain_gain = min(0x3FF, bbr_drain_gain);
152607 +       bbr->params.startup_cwnd_gain = min(0x7FF, bbr_startup_cwnd_gain);
152608 +       bbr->params.cwnd_gain = min(0x7FF, bbr_cwnd_gain);
152609 +       bbr->params.cwnd_tso_budget = min(0x1U, bbr_cwnd_tso_budget);
152610 +       bbr->params.cwnd_min_target = min(0xFU, bbr_cwnd_min_target);
152611 +       bbr->params.min_rtt_win_sec = min(0x1FU, bbr_min_rtt_win_sec);
152612 +       bbr->params.probe_rtt_mode_ms = min(0x1FFU, bbr_probe_rtt_mode_ms);
152613 +       bbr->params.full_bw_cnt = min(0x7U, bbr_full_bw_cnt);
152614 +       bbr->params.full_bw_thresh = min(0x3FFU, bbr_full_bw_thresh);
152615 +       bbr->params.extra_acked_gain = min(0x7FF, bbr_extra_acked_gain);
152616 +       bbr->params.extra_acked_win_rtts = min(0x1FU, bbr_extra_acked_win_rtts);
152617 +       bbr->params.drain_to_target = bbr_drain_to_target ? 1 : 0;
152618 +       bbr->params.precise_ece_ack = bbr_precise_ece_ack ? 1 : 0;
152619 +       bbr->params.extra_acked_in_startup = bbr_extra_acked_in_startup ? 1 : 0;
152620 +       bbr->params.probe_rtt_cwnd_gain = min(0xFFU, bbr_probe_rtt_cwnd_gain);
152621 +       bbr->params.probe_rtt_win_ms =
152622 +               min(0x3FFFU,
152623 +                   min_t(u32, bbr_probe_rtt_win_ms,
152624 +                         bbr->params.min_rtt_win_sec * MSEC_PER_SEC));
152625 +       for (i = 0; i < CYCLE_LEN; i++)
152626 +               bbr->params.pacing_gain[i] = min(0x3FF, bbr_pacing_gain[i]);
152627 +       bbr->params.usage_based_cwnd = bbr_usage_based_cwnd ? 1 : 0;
152628 +       bbr->params.tso_rtt_shift =  min(0xFU, bbr_tso_rtt_shift);
152630 +       bbr->debug.snd_isn = tp->snd_una;
152631 +       bbr->debug.target_cwnd = 0;
152632 +       bbr->debug.undo = 0;
152634 +       bbr->init_cwnd = min(0x7FU, tp->snd_cwnd);
152635 +       bbr->prior_cwnd = tp->prior_cwnd;
152636 +       tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
152637 +       bbr->next_rtt_delivered = 0;
152638 +       bbr->prev_ca_state = TCP_CA_Open;
152639 +       bbr->packet_conservation = 0;
152641 +       bbr->probe_rtt_done_stamp = 0;
152642 +       bbr->probe_rtt_round_done = 0;
152643 +       bbr->probe_rtt_min_us = tcp_min_rtt(tp);
152644 +       bbr->probe_rtt_min_stamp = tcp_jiffies32;
152645 +       bbr->min_rtt_us = tcp_min_rtt(tp);
152646 +       bbr->min_rtt_stamp = tcp_jiffies32;
152648 +       bbr->has_seen_rtt = 0;
152649 +       bbr_init_pacing_rate_from_rtt(sk);
152651 +       bbr->round_start = 0;
152652 +       bbr->idle_restart = 0;
152653 +       bbr->full_bw_reached = 0;
152654 +       bbr->full_bw = 0;
152655 +       bbr->full_bw_cnt = 0;
152656 +       bbr->cycle_mstamp = 0;
152657 +       bbr->cycle_idx = 0;
152658 +       bbr->mode = BBR_STARTUP;
152659 +       bbr->debug.rs_bw = 0;
152661 +       bbr->ack_epoch_mstamp = tp->tcp_mstamp;
152662 +       bbr->ack_epoch_acked = 0;
152663 +       bbr->extra_acked_win_rtts = 0;
152664 +       bbr->extra_acked_win_idx = 0;
152665 +       bbr->extra_acked[0] = 0;
152666 +       bbr->extra_acked[1] = 0;
152668 +       bbr->ce_state = 0;
152669 +       bbr->prior_rcv_nxt = tp->rcv_nxt;
152670 +       bbr->try_fast_path = 0;
152672 +       cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
152675 +static u32 bbr_sndbuf_expand(struct sock *sk)
152677 +       /* Provision 3 * cwnd since BBR may slow-start even during recovery. */
152678 +       return 3;
152681 +/* __________________________________________________________________________
152683 + * Functions new to BBR v2 ("bbr") congestion control are below here.
152684 + * __________________________________________________________________________
152685 + */
152687 +/* Incorporate a new bw sample into the current window of our max filter. */
152688 +static void bbr2_take_bw_hi_sample(struct sock *sk, u32 bw)
152690 +       struct bbr *bbr = inet_csk_ca(sk);
152692 +       bbr->bw_hi[1] = max(bw, bbr->bw_hi[1]);
152695 +/* Keep max of last 1-2 cycles. Each PROBE_BW cycle, flip filter window. */
152696 +static void bbr2_advance_bw_hi_filter(struct sock *sk)
152698 +       struct bbr *bbr = inet_csk_ca(sk);
152700 +       if (!bbr->bw_hi[1])
152701 +               return;  /* no samples in this window; remember old window */
152702 +       bbr->bw_hi[0] = bbr->bw_hi[1];
152703 +       bbr->bw_hi[1] = 0;
152706 +/* How much do we want in flight? Our BDP, unless congestion cut cwnd. */
152707 +static u32 bbr2_target_inflight(struct sock *sk)
152709 +       u32 bdp = bbr_inflight(sk, bbr_bw(sk), BBR_UNIT);
152711 +       return min(bdp, tcp_sk(sk)->snd_cwnd);
152714 +static bool bbr2_is_probing_bandwidth(struct sock *sk)
152716 +       struct bbr *bbr = inet_csk_ca(sk);
152718 +       return (bbr->mode == BBR_STARTUP) ||
152719 +               (bbr->mode == BBR_PROBE_BW &&
152720 +                (bbr->cycle_idx == BBR_BW_PROBE_REFILL ||
152721 +                 bbr->cycle_idx == BBR_BW_PROBE_UP));
152724 +/* Has the given amount of time elapsed since we marked the phase start? */
152725 +static bool bbr2_has_elapsed_in_phase(const struct sock *sk, u32 interval_us)
152727 +       const struct tcp_sock *tp = tcp_sk(sk);
152728 +       const struct bbr *bbr = inet_csk_ca(sk);
152730 +       return tcp_stamp_us_delta(tp->tcp_mstamp,
152731 +                                 bbr->cycle_mstamp + interval_us) > 0;
152734 +static void bbr2_handle_queue_too_high_in_startup(struct sock *sk)
152736 +       struct bbr *bbr = inet_csk_ca(sk);
152738 +       bbr->full_bw_reached = 1;
152739 +       bbr->inflight_hi = bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT);
152742 +/* Exit STARTUP upon N consecutive rounds with ECN mark rate > ecn_thresh. */
152743 +static void bbr2_check_ecn_too_high_in_startup(struct sock *sk, u32 ce_ratio)
152745 +       struct bbr *bbr = inet_csk_ca(sk);
152747 +       if (bbr_full_bw_reached(sk) || !bbr->ecn_eligible ||
152748 +           !bbr->params.full_ecn_cnt || !bbr->params.ecn_thresh)
152749 +               return;
152751 +       if (ce_ratio >= bbr->params.ecn_thresh)
152752 +               bbr->startup_ecn_rounds++;
152753 +       else
152754 +               bbr->startup_ecn_rounds = 0;
152756 +       if (bbr->startup_ecn_rounds >= bbr->params.full_ecn_cnt) {
152757 +               bbr->debug.event = 'E';  /* ECN caused STARTUP exit */
152758 +               bbr2_handle_queue_too_high_in_startup(sk);
152759 +               return;
152760 +       }
152763 +static void bbr2_update_ecn_alpha(struct sock *sk)
152765 +       struct tcp_sock *tp = tcp_sk(sk);
152766 +       struct bbr *bbr = inet_csk_ca(sk);
152767 +       s32 delivered, delivered_ce;
152768 +       u64 alpha, ce_ratio;
152769 +       u32 gain;
152771 +       if (bbr->params.ecn_factor == 0)
152772 +               return;
152774 +       delivered = tp->delivered - bbr->alpha_last_delivered;
152775 +       delivered_ce = tp->delivered_ce - bbr->alpha_last_delivered_ce;
152777 +       if (delivered == 0 ||           /* avoid divide by zero */
152778 +           WARN_ON_ONCE(delivered < 0 || delivered_ce < 0))  /* backwards? */
152779 +               return;
152781 +       /* See if we should use ECN sender logic for this connection. */
152782 +       if (!bbr->ecn_eligible && bbr_ecn_enable &&
152783 +           (bbr->min_rtt_us <= bbr->params.ecn_max_rtt_us ||
152784 +            !bbr->params.ecn_max_rtt_us))
152785 +               bbr->ecn_eligible = 1;
152787 +       ce_ratio = (u64)delivered_ce << BBR_SCALE;
152788 +       do_div(ce_ratio, delivered);
152789 +       gain = bbr->params.ecn_alpha_gain;
152790 +       alpha = ((BBR_UNIT - gain) * bbr->ecn_alpha) >> BBR_SCALE;
152791 +       alpha += (gain * ce_ratio) >> BBR_SCALE;
152792 +       bbr->ecn_alpha = min_t(u32, alpha, BBR_UNIT);
152794 +       bbr->alpha_last_delivered = tp->delivered;
152795 +       bbr->alpha_last_delivered_ce = tp->delivered_ce;
152797 +       bbr2_check_ecn_too_high_in_startup(sk, ce_ratio);
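+/* Editorial worked example of the EWMA above (not part of the original
+ * patch; BBR_UNIT = 256, default ecn_alpha_gain = BBR_UNIT / 16 = 16 as
+ * declared at the end of this file): with gain g = 16,
+ *   alpha' = ((256 - 16) * alpha >> 8) + (16 * ce_ratio >> 8)
+ * so alpha moves roughly 1/16 of the way toward the round's CE ratio on
+ * each update. Starting from alpha = 0 with a steady ce_ratio = 128
+ * (50% marking), successive rounds give alpha = 8, 15, 22, ...
+ * converging toward 128.
+ */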
152800 +/* Each round trip of BBR_BW_PROBE_UP, double volume of probing data. */
152801 +static void bbr2_raise_inflight_hi_slope(struct sock *sk)
152803 +       struct tcp_sock *tp = tcp_sk(sk);
152804 +       struct bbr *bbr = inet_csk_ca(sk);
152805 +       u32 growth_this_round, cnt;
152807 +       /* Calculate "slope": packets S/Acked per inflight_hi increment. */
152808 +       growth_this_round = 1 << bbr->bw_probe_up_rounds;
152809 +       bbr->bw_probe_up_rounds = min(bbr->bw_probe_up_rounds + 1, 30);
152810 +       cnt = tp->snd_cwnd / growth_this_round;
152811 +       cnt = max(cnt, 1U);
152812 +       bbr->bw_probe_up_cnt = cnt;
152813 +       bbr->debug.event = 'G';  /* Grow inflight_hi slope */
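+/* Editorial worked example (not part of the original patch): with
+ * snd_cwnd = 64, successive PROBE_UP rounds give growth_this_round =
+ * 1, 2, 4, ..., so bw_probe_up_cnt = 64, 32, 16, ... ACKs per
+ * inflight_hi increment. Since roughly cwnd packets are ACKed per round,
+ * inflight_hi grows by about 1, 2, 4, ... packets per round: the probing
+ * volume doubles each round trip, as the comment above states.
+ */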
152816 +/* In BBR_BW_PROBE_UP, not seeing high loss/ECN/queue, so raise inflight_hi. */
152817 +static void bbr2_probe_inflight_hi_upward(struct sock *sk,
152818 +                                         const struct rate_sample *rs)
152820 +       struct tcp_sock *tp = tcp_sk(sk);
152821 +       struct bbr *bbr = inet_csk_ca(sk);
152822 +       u32 delta;
152824 +       if (!tp->is_cwnd_limited || tp->snd_cwnd < bbr->inflight_hi) {
152825 +               bbr->bw_probe_up_acks = 0;  /* don't accumulate unused credits */
152826 +               return;  /* not fully using inflight_hi, so don't grow it */
152827 +       }
152829 +       /* For each bw_probe_up_cnt packets ACKed, increase inflight_hi by 1. */
152830 +       bbr->bw_probe_up_acks += rs->acked_sacked;
152831 +       if (bbr->bw_probe_up_acks >=  bbr->bw_probe_up_cnt) {
152832 +               delta = bbr->bw_probe_up_acks / bbr->bw_probe_up_cnt;
152833 +               bbr->bw_probe_up_acks -= delta * bbr->bw_probe_up_cnt;
152834 +               bbr->inflight_hi += delta;
152835 +               bbr->debug.event = 'I';  /* Increment inflight_hi */
152836 +       }
152838 +       if (bbr->round_start)
152839 +               bbr2_raise_inflight_hi_slope(sk);
152842 +/* Does loss/ECN rate for this sample say inflight is "too high"?
152843 + * This is used by both the bbr2_check_loss_too_high_in_startup() function,
152844 + * which can be used in either v1 or v2, and the PROBE_UP phase of v2, which
152845 + * uses it to notice when loss/ECN rates suggest inflight is too high.
152846 + */
152847 +static bool bbr2_is_inflight_too_high(const struct sock *sk,
152848 +                                    const struct rate_sample *rs)
152850 +       const struct bbr *bbr = inet_csk_ca(sk);
152851 +       u32 loss_thresh, ecn_thresh;
152853 +       if (rs->lost > 0 && rs->tx_in_flight) {
152854 +               loss_thresh = (u64)rs->tx_in_flight * bbr->params.loss_thresh >>
152855 +                               BBR_SCALE;
152856 +               if (rs->lost > loss_thresh)
152857 +                       return true;
152858 +       }
152860 +       if (rs->delivered_ce > 0 && rs->delivered > 0 &&
152861 +           bbr->ecn_eligible && bbr->params.ecn_thresh) {
152862 +               ecn_thresh = (u64)rs->delivered * bbr->params.ecn_thresh >>
152863 +                               BBR_SCALE;
152864 +               if (rs->delivered_ce >= ecn_thresh)
152865 +                       return true;
152866 +       }
152868 +       return false;
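+/* Editorial worked example (not part of the original patch; BBR_UNIT =
+ * 256 and the defaults declared at the end of this file: loss_thresh =
+ * BBR_UNIT * 2 / 100 = 5, ecn_thresh = BBR_UNIT / 2): with
+ * rs->tx_in_flight = 100,
+ *   loss_thresh = 100 * 5 >> 8 = 1
+ * so rs->lost > 1, i.e. about 2% of inflight, declares inflight too
+ * high; likewise delivered_ce >= 50% of delivered trips the ECN branch.
+ */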
152871 +/* Calculate the tx_in_flight level that corresponded to excessive loss.
152872 + * We find "lost_prefix" segs of the skb where loss rate went too high,
152873 + * by solving for "lost_prefix" in the following equation:
152874 + *   lost                     /  inflight                     >= loss_thresh
152875 + *  (lost_prev + lost_prefix) / (inflight_prev + lost_prefix) >= loss_thresh
152876 + * Then we take that equation, convert it to fixed point, and
152877 + * round up to the nearest packet.
152878 + */
152879 +static u32 bbr2_inflight_hi_from_lost_skb(const struct sock *sk,
152880 +                                         const struct rate_sample *rs,
152881 +                                         const struct sk_buff *skb)
152883 +       const struct bbr *bbr = inet_csk_ca(sk);
152884 +       u32 loss_thresh  = bbr->params.loss_thresh;
152885 +       u32 pcount, divisor, inflight_hi;
152886 +       s32 inflight_prev, lost_prev;
152887 +       u64 loss_budget, lost_prefix;
152889 +       pcount = tcp_skb_pcount(skb);
152891 +       /* How much data was in flight before this skb? */
152892 +       inflight_prev = rs->tx_in_flight - pcount;
152893 +       if (WARN_ONCE(inflight_prev < 0,
152894 +                     "tx_in_flight: %u pcount: %u reneg: %u",
152895 +                     rs->tx_in_flight, pcount, tcp_sk(sk)->is_sack_reneg))
152896 +               return ~0U;
152898 +       /* How much inflight data was marked lost before this skb? */
152899 +       lost_prev = rs->lost - pcount;
152900 +       if (WARN_ON_ONCE(lost_prev < 0))
152901 +               return ~0U;
152903 +       /* At what prefix of this lost skb did the loss rate exceed loss_thresh? */
152904 +       loss_budget = (u64)inflight_prev * loss_thresh + BBR_UNIT - 1;
152905 +       loss_budget >>= BBR_SCALE;
152906 +       if (lost_prev >= loss_budget) {
152907 +               lost_prefix = 0;   /* previous losses crossed loss_thresh */
152908 +       } else {
152909 +               lost_prefix = loss_budget - lost_prev;
152910 +               lost_prefix <<= BBR_SCALE;
152911 +               divisor = BBR_UNIT - loss_thresh;
152912 +               if (WARN_ON_ONCE(!divisor))  /* loss_thresh is 8 bits */
152913 +                       return ~0U;
152914 +               do_div(lost_prefix, divisor);
152915 +       }
152917 +       inflight_hi = inflight_prev + lost_prefix;
152918 +       return inflight_hi;
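+/* Editorial worked example (not part of the original patch; loss_thresh
+ * = 5, i.e. ~2% in BBR_UNIT = 256 fixed point): suppose an skb with
+ * pcount = 5 is lost with rs->tx_in_flight = 100 and rs->lost = 5, so
+ * inflight_prev = 95 and lost_prev = 0. Then:
+ *   loss_budget = (95 * 5 + 255) >> 8 = 2
+ *   lost_prefix = ((2 - 0) << 8) / (256 - 5) = 2
+ *   inflight_hi = 95 + 2 = 97
+ * i.e. the loss rate is estimated to have crossed ~2% once about 97
+ * packets were in flight, and that is returned as the inflight_hi
+ * candidate.
+ */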
152921 +/* If loss/ECN rates during probing indicated we may have overfilled a
152922 + * buffer, return an operating point that tries to leave unutilized headroom in
152923 + * the path for other flows, for fairness convergence and lower RTTs and loss.
152924 + */
152925 +static u32 bbr2_inflight_with_headroom(const struct sock *sk)
152927 +       struct bbr *bbr = inet_csk_ca(sk);
152928 +       u32 headroom, headroom_fraction;
152930 +       if (bbr->inflight_hi == ~0U)
152931 +               return ~0U;
152933 +       headroom_fraction = bbr->params.inflight_headroom;
152934 +       headroom = ((u64)bbr->inflight_hi * headroom_fraction) >> BBR_SCALE;
152935 +       headroom = max(headroom, 1U);
152936 +       return max_t(s32, bbr->inflight_hi - headroom,
152937 +                    bbr->params.cwnd_min_target);
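+/* Editorial worked example (not part of the original patch; default
+ * inflight_headroom = BBR_UNIT * 15 / 100 = 38, declared at the end of
+ * this file): with inflight_hi = 100,
+ *   headroom = 100 * 38 >> 8 = 14
+ * so the cruising operating point is max(100 - 14, cwnd_min_target) =
+ * 86 packets, leaving roughly 15% of the estimated ceiling unused for
+ * other flows.
+ */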
152940 +/* Bound cwnd to a sensible level, based on our current probing state
152941 + * machine phase and model of a good inflight level (inflight_lo, inflight_hi).
152942 + */
152943 +static void bbr2_bound_cwnd_for_inflight_model(struct sock *sk)
152945 +       struct tcp_sock *tp = tcp_sk(sk);
152946 +       struct bbr *bbr = inet_csk_ca(sk);
152947 +       u32 cap;
152949 +       /* tcp_rcv_synsent_state_process() currently calls tcp_ack()
152950 +        * and thus cong_control() without first initializing us(!).
152951 +        */
152952 +       if (!bbr->initialized)
152953 +               return;
152955 +       cap = ~0U;
152956 +       if (bbr->mode == BBR_PROBE_BW &&
152957 +           bbr->cycle_idx != BBR_BW_PROBE_CRUISE) {
152958 +               /* Probe to see if more packets fit in the path. */
152959 +               cap = bbr->inflight_hi;
152960 +       } else {
152961 +               if (bbr->mode == BBR_PROBE_RTT ||
152962 +                   (bbr->mode == BBR_PROBE_BW &&
152963 +                    bbr->cycle_idx == BBR_BW_PROBE_CRUISE))
152964 +                       cap = bbr2_inflight_with_headroom(sk);
152965 +       }
152966 +       /* Adapt to any loss/ECN since our last bw probe. */
152967 +       cap = min(cap, bbr->inflight_lo);
152969 +       cap = max_t(u32, cap, bbr->params.cwnd_min_target);
152970 +       tp->snd_cwnd = min(cap, tp->snd_cwnd);
152973 +/* Estimate a short-term lower bound on the capacity available now, based
152974 + * on measurements of the current delivery process and recent history. When we
152975 + * are seeing loss/ECN at times when we are not probing bw, then conservatively
152976 + * move toward flow balance by multiplicatively cutting our short-term
152977 + * estimated safe rate and volume of data (bw_lo and inflight_lo). We use a
152978 + * multiplicative decrease in order to converge to a lower capacity in time
152979 + * logarithmic in the magnitude of the decrease.
152981 + * However, we do not cut our short-term estimates lower than the current rate
152982 + * and volume of delivered data from this round trip, since from the current
152983 + * delivery process we can estimate the measured capacity available now.
152985 + * Anything faster than that approach would knowingly risk high loss, which can
152986 + * cause low bw for Reno/CUBIC and high loss recovery latency for
152987 + * request/response flows using any congestion control.
152988 + */
152989 +static void bbr2_adapt_lower_bounds(struct sock *sk)
152991 +       struct tcp_sock *tp = tcp_sk(sk);
152992 +       struct bbr *bbr = inet_csk_ca(sk);
152993 +       u32 ecn_cut, ecn_inflight_lo, beta;
152995 +       /* We only use lower-bound estimates when not probing bw.
152996 +        * When probing we need to push inflight higher to probe bw.
152997 +        */
152998 +       if (bbr2_is_probing_bandwidth(sk))
152999 +               return;
153001 +       /* ECN response. */
153002 +       if (bbr->ecn_in_round && bbr->ecn_eligible && bbr->params.ecn_factor) {
153003 +               /* Reduce inflight to (1 - alpha*ecn_factor). */
153004 +               ecn_cut = (BBR_UNIT -
153005 +                          ((bbr->ecn_alpha * bbr->params.ecn_factor) >>
153006 +                           BBR_SCALE));
153007 +               if (bbr->inflight_lo == ~0U)
153008 +                       bbr->inflight_lo = tp->snd_cwnd;
153009 +               ecn_inflight_lo = (u64)bbr->inflight_lo * ecn_cut >> BBR_SCALE;
153010 +       } else {
153011 +               ecn_inflight_lo = ~0U;
153012 +       }
153014 +       /* Loss response. */
153015 +       if (bbr->loss_in_round) {
153016 +               /* Reduce bw and inflight to (1 - beta). */
153017 +               if (bbr->bw_lo == ~0U)
153018 +                       bbr->bw_lo = bbr_max_bw(sk);
153019 +               if (bbr->inflight_lo == ~0U)
153020 +                       bbr->inflight_lo = tp->snd_cwnd;
153021 +               beta = bbr->params.beta;
153022 +               bbr->bw_lo =
153023 +                       max_t(u32, bbr->bw_latest,
153024 +                             (u64)bbr->bw_lo *
153025 +                             (BBR_UNIT - beta) >> BBR_SCALE);
153026 +               bbr->inflight_lo =
153027 +                       max_t(u32, bbr->inflight_latest,
153028 +                             (u64)bbr->inflight_lo *
153029 +                             (BBR_UNIT - beta) >> BBR_SCALE);
153030 +       }
153032 +       /* Adjust to the lower of the levels implied by loss or ECN. */
153033 +       bbr->inflight_lo = min(bbr->inflight_lo, ecn_inflight_lo);
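+/* Editorial worked example of the loss response (not part of the
+ * original patch; default beta = BBR_UNIT * 30 / 100 = 76, declared at
+ * the end of this file): on a round containing loss,
+ *   bw_lo = max(bw_latest, bw_lo * (256 - 76) >> 8)
+ * i.e. bw_lo is cut to about 70% of its prior value, and inflight_lo is
+ * cut the same way, but neither drops below what the current round
+ * actually delivered (bw_latest / inflight_latest).
+ */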
153036 +/* Reset any short-term lower-bound adaptation to congestion, so that we can
153037 + * push our inflight up.
153038 + */
153039 +static void bbr2_reset_lower_bounds(struct sock *sk)
153041 +       struct bbr *bbr = inet_csk_ca(sk);
153043 +       bbr->bw_lo = ~0U;
153044 +       bbr->inflight_lo = ~0U;
153047 +/* After bw probing (STARTUP/PROBE_UP), reset signals before entering a state
153048 + * machine phase where we adapt our lower bound based on congestion signals.
153049 + */
153050 +static void bbr2_reset_congestion_signals(struct sock *sk)
153052 +       struct bbr *bbr = inet_csk_ca(sk);
153054 +       bbr->loss_in_round = 0;
153055 +       bbr->ecn_in_round = 0;
153056 +       bbr->loss_in_cycle = 0;
153057 +       bbr->ecn_in_cycle = 0;
153058 +       bbr->bw_latest = 0;
153059 +       bbr->inflight_latest = 0;
153062 +/* Update (most of) our congestion signals: track the recent rate and volume of
153063 + * delivered data, presence of loss, and EWMA degree of ECN marking.
153064 + */
153065 +static void bbr2_update_congestion_signals(
153066 +       struct sock *sk, const struct rate_sample *rs, struct bbr_context *ctx)
153068 +       struct tcp_sock *tp = tcp_sk(sk);
153069 +       struct bbr *bbr = inet_csk_ca(sk);
153070 +       u64 bw;
153072 +       bbr->loss_round_start = 0;
153073 +       if (rs->interval_us <= 0 || !rs->acked_sacked)
153074 +               return; /* Not a valid observation */
153075 +       bw = ctx->sample_bw;
153077 +       if (!rs->is_app_limited || bw >= bbr_max_bw(sk))
153078 +               bbr2_take_bw_hi_sample(sk, bw);
153080 +       bbr->loss_in_round |= (rs->losses > 0);
153082 +       /* Update rate and volume of delivered data from latest round trip: */
153083 +       bbr->bw_latest       = max_t(u32, bbr->bw_latest,       ctx->sample_bw);
153084 +       bbr->inflight_latest = max_t(u32, bbr->inflight_latest, rs->delivered);
153086 +       if (before(rs->prior_delivered, bbr->loss_round_delivered))
153087 +               return;         /* skip the per-round-trip updates */
153088 +       /* Now do per-round-trip updates. */
153089 +       bbr->loss_round_delivered = tp->delivered;  /* mark round trip */
153090 +       bbr->loss_round_start = 1;
153091 +       bbr2_adapt_lower_bounds(sk);
153093 +       /* Update windowed "latest" (single-round-trip) filters. */
153094 +       bbr->loss_in_round = 0;
153095 +       bbr->ecn_in_round  = 0;
153096 +       bbr->bw_latest = ctx->sample_bw;
153097 +       bbr->inflight_latest = rs->delivered;
153100 +/* Bandwidth probing can cause loss. To help coexistence with loss-based
153101 + * congestion control we spread out our probing in a Reno-conscious way. Due to
153102 + * the shape of the Reno sawtooth, the time required between loss epochs for an
153103 + * idealized Reno flow is a number of round trips that is the BDP of that
153104 + * flow. We count packet-timed round trips directly, since measured RTT can
153105 + * vary widely, and Reno is driven by packet-timed round trips.
153106 + */
153107 +static bool bbr2_is_reno_coexistence_probe_time(struct sock *sk)
153109 +       struct bbr *bbr = inet_csk_ca(sk);
153110 +       u32 inflight, rounds, reno_gain, reno_rounds;
153112 +       /* Random loss can shave some small percentage off of our inflight
153113 +        * in each round. To survive this, flows need robust periodic probes.
153114 +        */
153115 +       rounds = bbr->params.bw_probe_max_rounds;
153117 +       reno_gain = bbr->params.bw_probe_reno_gain;
153118 +       if (reno_gain) {
153119 +               inflight = bbr2_target_inflight(sk);
153120 +               reno_rounds = ((u64)inflight * reno_gain) >> BBR_SCALE;
153121 +               rounds = min(rounds, reno_rounds);
153122 +       }
153123 +       return bbr->rounds_since_probe >= rounds;
153126 +/* How long do we want to wait before probing for bandwidth (and risking
153127 + * loss)? We randomize the wait, for better mixing and fairness convergence.
153129 + * We bound the Reno-coexistence inter-bw-probe time to be 62-63 round trips.
153130 + * This is calculated to allow fairness with a 25Mbps, 30ms Reno flow
153131 + * (e.g. 4K video to a broadband user):
153132 + *   BDP = 25Mbps * .030sec /(1514bytes) = 61.9 packets
153134 + * We bound the BBR-native inter-bw-probe wall clock time to be:
153135 + *  (a) higher than 2 sec: to try to avoid causing loss for a long enough time
153136 + *      to allow Reno at 30ms to get 4K video bw, the inter-bw-probe time must
153137 + *      be at least: 25Mbps * .030sec / (1514bytes) * 0.030sec = 1.9secs
153138 + *  (b) lower than 3 sec: to ensure flows can start probing in a reasonable
153139 + *      amount of time to discover unutilized bw on human-scale interactive
153140 + *      time-scales (e.g. perhaps traffic from a web page download that we
153141 + *      were competing with is now complete).
153142 + */
153143 +static void bbr2_pick_probe_wait(struct sock *sk)
153145 +       struct bbr *bbr = inet_csk_ca(sk);
153147 +       /* Decide the random round-trip bound for wait until probe: */
153148 +       bbr->rounds_since_probe =
153149 +               prandom_u32_max(bbr->params.bw_probe_rand_rounds);
153150 +       /* Decide the random wall clock bound for wait until probe: */
153151 +       bbr->probe_wait_us = bbr->params.bw_probe_base_us +
153152 +                            prandom_u32_max(bbr->params.bw_probe_rand_us);
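+/* Editorial worked example (not part of the original patch; defaults
+ * declared at the end of this file: bw_probe_rand_rounds = 2,
+ * bw_probe_base_us = 2s, bw_probe_rand_us = 1s): each time we enter
+ * PROBE_DOWN, the round counter is re-seeded to 0 or 1 and the wall
+ * clock wait is drawn uniformly from [2s, 3s), so a flow re-probes after
+ * min(bbr2_target_inflight(), 63) packet-timed rounds or after 2-3 s,
+ * whichever comes first.
+ */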
153155 +static void bbr2_set_cycle_idx(struct sock *sk, int cycle_idx)
153157 +       struct bbr *bbr = inet_csk_ca(sk);
153159 +       bbr->cycle_idx = cycle_idx;
153160 +       /* New phase, so need to update cwnd and pacing rate. */
153161 +       bbr->try_fast_path = 0;
153164 +/* Send at estimated bw to fill the pipe, but not queue. We need this phase
153165 + * before PROBE_UP, because as soon as we send faster than the available bw
153166 + * we will start building a queue, and if the buffer is shallow we can cause
153167 + * loss. If we do not fill the pipe before we cause this loss, our bw_hi and
153168 + * inflight_hi estimates will underestimate.
153169 + */
153170 +static void bbr2_start_bw_probe_refill(struct sock *sk, u32 bw_probe_up_rounds)
153172 +       struct tcp_sock *tp = tcp_sk(sk);
153173 +       struct bbr *bbr = inet_csk_ca(sk);
153175 +       bbr2_reset_lower_bounds(sk);
153176 +       if (bbr->inflight_hi != ~0U)
153177 +               bbr->inflight_hi += bbr->params.refill_add_inc;
153178 +       bbr->bw_probe_up_rounds = bw_probe_up_rounds;
153179 +       bbr->bw_probe_up_acks = 0;
153180 +       bbr->stopped_risky_probe = 0;
153181 +       bbr->ack_phase = BBR_ACKS_REFILLING;
153182 +       bbr->next_rtt_delivered = tp->delivered;
153183 +       bbr2_set_cycle_idx(sk, BBR_BW_PROBE_REFILL);
153186 +/* Now probe max deliverable data rate and volume. */
153187 +static void bbr2_start_bw_probe_up(struct sock *sk)
153189 +       struct tcp_sock *tp = tcp_sk(sk);
153190 +       struct bbr *bbr = inet_csk_ca(sk);
153192 +       bbr->ack_phase = BBR_ACKS_PROBE_STARTING;
153193 +       bbr->next_rtt_delivered = tp->delivered;
153194 +       bbr->cycle_mstamp = tp->tcp_mstamp;
153195 +       bbr2_set_cycle_idx(sk, BBR_BW_PROBE_UP);
153196 +       bbr2_raise_inflight_hi_slope(sk);
153199 +/* Start a new PROBE_BW probing cycle of some wall clock length. Pick a wall
153200 + * clock time at which to probe beyond an inflight that we think to be
153201 + * safe. This will knowingly risk packet loss, so we want to do this rarely, to
153202 + * keep packet loss rates low. Also start a round-trip counter, to probe faster
153203 + * if we estimate a Reno flow at our BDP would probe faster.
153204 + */
153205 +static void bbr2_start_bw_probe_down(struct sock *sk)
153207 +       struct tcp_sock *tp = tcp_sk(sk);
153208 +       struct bbr *bbr = inet_csk_ca(sk);
153210 +       bbr2_reset_congestion_signals(sk);
153211 +       bbr->bw_probe_up_cnt = ~0U;     /* not growing inflight_hi any more */
153212 +       bbr2_pick_probe_wait(sk);
153213 +       bbr->cycle_mstamp = tp->tcp_mstamp;             /* start wall clock */
153214 +       bbr->ack_phase = BBR_ACKS_PROBE_STOPPING;
153215 +       bbr->next_rtt_delivered = tp->delivered;
153216 +       bbr2_set_cycle_idx(sk, BBR_BW_PROBE_DOWN);
153219 +/* Cruise: maintain what we estimate to be a neutral, conservative
153220 + * operating point, without attempting to probe up for bandwidth or down for
153221 + * RTT, and only reducing inflight in response to loss/ECN signals.
153222 + */
153223 +static void bbr2_start_bw_probe_cruise(struct sock *sk)
153225 +       struct bbr *bbr = inet_csk_ca(sk);
153227 +       if (bbr->inflight_lo != ~0U)
153228 +               bbr->inflight_lo = min(bbr->inflight_lo, bbr->inflight_hi);
153230 +       bbr2_set_cycle_idx(sk, BBR_BW_PROBE_CRUISE);
153233 +/* Loss and/or ECN rate is too high while probing.
153234 + * Adapt (once per bw probe) by cutting inflight_hi and then restarting cycle.
153235 + */
153236 +static void bbr2_handle_inflight_too_high(struct sock *sk,
153237 +                                         const struct rate_sample *rs)
153239 +       struct bbr *bbr = inet_csk_ca(sk);
153240 +       const u32 beta = bbr->params.beta;
153242 +       bbr->prev_probe_too_high = 1;
153243 +       bbr->bw_probe_samples = 0;  /* only react once per probe */
153244 +       bbr->debug.event = 'L';     /* Loss/ECN too high */
153245 +       /* If we are app-limited then we are not robustly
153246 +        * probing the max volume of inflight data we think
153247 +        * might be safe (analogous to how app-limited bw
153248 +        * samples are not known to be robustly probing bw).
153249 +        */
153250 +       if (!rs->is_app_limited)
153251 +               bbr->inflight_hi = max_t(u32, rs->tx_in_flight,
153252 +                                        (u64)bbr2_target_inflight(sk) *
153253 +                                        (BBR_UNIT - beta) >> BBR_SCALE);
153254 +       if (bbr->mode == BBR_PROBE_BW && bbr->cycle_idx == BBR_BW_PROBE_UP)
153255 +               bbr2_start_bw_probe_down(sk);
153258 +/* If we're seeing bw and loss samples reflecting our bw probing, adapt
153259 + * using the signals we see. If loss or ECN mark rate gets too high, then adapt
153260 + * inflight_hi downward. If we're able to push inflight higher without such
153261 + * signals, push higher: adapt inflight_hi upward.
153262 + */
153263 +static bool bbr2_adapt_upper_bounds(struct sock *sk,
153264 +                                  const struct rate_sample *rs)
153266 +       struct bbr *bbr = inet_csk_ca(sk);
153268 +       /* Track when we'll see bw/loss samples resulting from our bw probes. */
153269 +       if (bbr->ack_phase == BBR_ACKS_PROBE_STARTING && bbr->round_start)
153270 +               bbr->ack_phase = BBR_ACKS_PROBE_FEEDBACK;
153271 +       if (bbr->ack_phase == BBR_ACKS_PROBE_STOPPING && bbr->round_start) {
153272 +               /* End of samples from bw probing phase. */
153273 +               bbr->bw_probe_samples = 0;
153274 +               bbr->ack_phase = BBR_ACKS_INIT;
153275 +               /* At this point in the cycle, our current bw sample is also
153276 +                * our best recent chance at finding the highest available bw
153277 +                * for this flow. So now is the best time to forget the bw
153278 +                * samples from the previous cycle, by advancing the window.
153279 +                */
153280 +               if (bbr->mode == BBR_PROBE_BW && !rs->is_app_limited)
153281 +                       bbr2_advance_bw_hi_filter(sk);
153282 +               /* If we had an inflight_hi, then probed and pushed inflight all
153283 +                * the way up to hit that inflight_hi without seeing any
153284 +                * high loss/ECN in all the resulting ACKs from that probing,
153285 +                * then probe up again, this time letting inflight persist at
153286 +                * inflight_hi for a round trip, then accelerating beyond.
153287 +                */
153288 +               if (bbr->mode == BBR_PROBE_BW &&
153289 +                   bbr->stopped_risky_probe && !bbr->prev_probe_too_high) {
153290 +                       bbr->debug.event = 'R';  /* reprobe */
153291 +                       bbr2_start_bw_probe_refill(sk, 0);
153292 +                       return true;  /* yes, decided state transition */
153293 +               }
153294 +       }
153296 +       if (bbr2_is_inflight_too_high(sk, rs)) {
153297 +               if (bbr->bw_probe_samples)  /*  sample is from bw probing? */
153298 +                       bbr2_handle_inflight_too_high(sk, rs);
153299 +       } else {
153300 +               /* Loss/ECN rate is declared safe. Adjust upper bound upward. */
153301 +               if (bbr->inflight_hi == ~0U)  /* no excess queue signals yet? */
153302 +                       return false;
153304 +               /* To be resilient to random loss, we must raise inflight_hi
153305 +                * if we observe in any phase that a higher level is safe.
153306 +                */
153307 +               if (rs->tx_in_flight > bbr->inflight_hi) {
153308 +                       bbr->inflight_hi = rs->tx_in_flight;
153309 +                       bbr->debug.event = 'U';  /* raise up inflight_hi */
153310 +               }
153312 +               if (bbr->mode == BBR_PROBE_BW &&
153313 +                   bbr->cycle_idx == BBR_BW_PROBE_UP)
153314 +                       bbr2_probe_inflight_hi_upward(sk, rs);
153315 +       }
153317 +       return false;
153320 +/* Check if it's time to probe for bandwidth now, and if so, kick it off. */
153321 +static bool bbr2_check_time_to_probe_bw(struct sock *sk)
153323 +       struct bbr *bbr = inet_csk_ca(sk);
153324 +       u32 n;
153326 +       /* If we seem to be at an operating point where we are not seeing loss
153327 +        * but we are seeing ECN marks, then when the ECN marks cease we reprobe
153328 +        * quickly (in case a burst of cross-traffic has ceased and freed up bw,
153329 +        * or in case we are sharing with multiplicatively probing traffic).
153330 +        */
153331 +       if (bbr->params.ecn_reprobe_gain && bbr->ecn_eligible &&
153332 +           bbr->ecn_in_cycle && !bbr->loss_in_cycle &&
153333 +           inet_csk(sk)->icsk_ca_state == TCP_CA_Open) {
153334 +               bbr->debug.event = 'A';  /* *A*ll clear to probe *A*gain */
153335 +               /* Calculate n so that when bbr2_raise_inflight_hi_slope()
153336 +                * computes growth_this_round as 2^n it will be roughly the
153337 +                * desired volume of data (inflight_hi*ecn_reprobe_gain).
153338 +                */
153339 +               n = ilog2((((u64)bbr->inflight_hi *
153340 +                           bbr->params.ecn_reprobe_gain) >> BBR_SCALE));
153341 +               bbr2_start_bw_probe_refill(sk, n);
153342 +               return true;
153343 +       }
153345 +       if (bbr2_has_elapsed_in_phase(sk, bbr->probe_wait_us) ||
153346 +           bbr2_is_reno_coexistence_probe_time(sk)) {
153347 +               bbr2_start_bw_probe_refill(sk, 0);
153348 +               return true;
153349 +       }
153350 +       return false;
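+/* Editorial worked example of the ECN reprobe path above (not part of
+ * the original patch; ecn_reprobe_gain defaults to 0, but the planned
+ * experimental value is BBR_UNIT / 2 = 128, per the declaration at the
+ * end of this file): with inflight_hi = 64,
+ *   n = ilog2(64 * 128 >> 8) = ilog2(32) = 5
+ * so REFILL starts with bw_probe_up_rounds = 5 and
+ * bbr2_raise_inflight_hi_slope() begins at growth_this_round = 2^5 = 32,
+ * i.e. probing resumes at about half of inflight_hi instead of ramping
+ * up from a single packet.
+ */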
153353 +/* Is it time to transition from PROBE_DOWN to PROBE_CRUISE? */
153354 +static bool bbr2_check_time_to_cruise(struct sock *sk, u32 inflight, u32 bw)
153356 +       struct bbr *bbr = inet_csk_ca(sk);
153357 +       bool is_under_bdp, is_long_enough;
153359 +       /* Always need to pull inflight down to leave headroom in queue. */
153360 +       if (inflight > bbr2_inflight_with_headroom(sk))
153361 +               return false;
153363 +       is_under_bdp = inflight <= bbr_inflight(sk, bw, BBR_UNIT);
153364 +       if (bbr->params.drain_to_target)
153365 +               return is_under_bdp;
153367 +       is_long_enough = bbr2_has_elapsed_in_phase(sk, bbr->min_rtt_us);
153368 +       return is_under_bdp || is_long_enough;
153371 +/* PROBE_BW state machine: cruise, refill, probe for bw, or drain? */
153372 +static void bbr2_update_cycle_phase(struct sock *sk,
153373 +                                   const struct rate_sample *rs)
153375 +       struct bbr *bbr = inet_csk_ca(sk);
153376 +       bool is_risky = false, is_queuing = false;
153377 +       u32 inflight, bw;
153379 +       if (!bbr_full_bw_reached(sk))
153380 +               return;
153382 +       /* In DRAIN, PROBE_BW, or PROBE_RTT, adjust upper bounds. */
153383 +       if (bbr2_adapt_upper_bounds(sk, rs))
153384 +               return;         /* already decided state transition */
153386 +       if (bbr->mode != BBR_PROBE_BW)
153387 +               return;
153389 +       inflight = bbr_packets_in_net_at_edt(sk, rs->prior_in_flight);
153390 +       bw = bbr_max_bw(sk);
153392 +       switch (bbr->cycle_idx) {
153393 +       /* First we spend most of our time cruising with a pacing_gain of 1.0,
153394 +        * which paces at the estimated bw, to try to fully use the pipe
153395 +        * without building queue. If we encounter loss/ECN marks, we adapt
153396 +        * by slowing down.
153397 +        */
153398 +       case BBR_BW_PROBE_CRUISE:
153399 +               if (bbr2_check_time_to_probe_bw(sk))
153400 +                       return;         /* already decided state transition */
153401 +               break;
153403 +       /* After cruising, when it's time to probe, we first "refill": we send
153404 +        * at the estimated bw to fill the pipe, before probing higher and
153405 +        * knowingly risking overflowing the bottleneck buffer (causing loss).
153406 +        */
153407 +       case BBR_BW_PROBE_REFILL:
153408 +               if (bbr->round_start) {
153409 +                       /* After one full round trip of sending in REFILL, we
153410 +                        * start to see bw samples reflecting our REFILL, which
153411 +                        * may be putting too much data in flight.
153412 +                        */
153413 +                       bbr->bw_probe_samples = 1;
153414 +                       bbr2_start_bw_probe_up(sk);
153415 +               }
153416 +               break;
153418 +       /* After we refill the pipe, we probe by using a pacing_gain > 1.0, to
153419 +        * probe for bw. If we have not seen loss/ECN, we try to raise inflight
153420 +        * to at least pacing_gain*BDP; note that this may take more than
153421 +        * min_rtt if min_rtt is small (e.g. on a LAN).
153422 +        *
153423 +        * We terminate PROBE_UP bandwidth probing upon any of the following:
153424 +        *
153425 +        * (1) We've pushed inflight up to hit the inflight_hi target set in the
153426 +        *     most recent previous bw probe phase. Thus we want to start
153427 +        *     draining the queue immediately because it's very likely the most
153428 +        *     recently sent packets will fill the queue and cause drops.
153429 +        *     (checked here)
153430 +        * (2) We have probed for at least 1*min_rtt_us, and the
153431 +        *     estimated queue is high enough (inflight > 1.25 * estimated_bdp).
153432 +        *     (checked here)
153433 +        * (3) Loss filter says loss rate is "too high".
153434 +        *     (checked in bbr2_is_inflight_too_high())
153435 +        * (4) ECN filter says ECN mark rate is "too high".
153436 +        *     (checked in bbr2_is_inflight_too_high())
153437 +        */
153438 +       case BBR_BW_PROBE_UP:
153439 +               if (bbr->prev_probe_too_high &&
153440 +                   inflight >= bbr->inflight_hi) {
153441 +                       bbr->stopped_risky_probe = 1;
153442 +                       is_risky = true;
153443 +                       bbr->debug.event = 'D';   /* D for danger */
153444 +               } else if (bbr2_has_elapsed_in_phase(sk, bbr->min_rtt_us) &&
153445 +                          inflight >=
153446 +                          bbr_inflight(sk, bw,
153447 +                                       bbr->params.bw_probe_pif_gain)) {
153448 +                       is_queuing = true;
153449 +                       bbr->debug.event = 'Q'; /* building Queue */
153450 +               }
153451 +               if (is_risky || is_queuing) {
153452 +                       bbr->prev_probe_too_high = 0;  /* no loss/ECN (yet) */
153453 +                       bbr2_start_bw_probe_down(sk);  /* restart w/ down */
153454 +               }
153455 +               break;
153457 +       /* After probing in PROBE_UP, we have usually accumulated some data in
153458 +        * the bottleneck buffer (if bw probing didn't find more bw). We next
153459 +        * enter PROBE_DOWN to try to drain any excess data from the queue. To
153460 +        * do this, we use a pacing_gain < 1.0. We hold this pacing gain until
153461 +        * our inflight is less than that target cruising point, which is the
153462 +        * minimum of (a) the amount needed to leave headroom, and (b) the
153463 +        * estimated BDP. Once inflight falls to match the target, we estimate
153464 +        * the queue is drained; persisting would underutilize the pipe.
153465 +        */
153466 +       case BBR_BW_PROBE_DOWN:
153467 +               if (bbr2_check_time_to_probe_bw(sk))
153468 +                       return;         /* already decided state transition */
153469 +               if (bbr2_check_time_to_cruise(sk, inflight, bw))
153470 +                       bbr2_start_bw_probe_cruise(sk);
153471 +               break;
153473 +       default:
153474 +               WARN_ONCE(1, "BBR invalid cycle index %u\n", bbr->cycle_idx);
153475 +       }
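+/* Editorial summary of the phase ordering implemented above (not part of
+ * the original patch): the steady-state cycle is DOWN -> CRUISE ->
+ * (2-3 s or the Reno round bound elapse) -> REFILL -> (one round) ->
+ * UP -> (risky or queuing) -> DOWN. Only UP knowingly risks building a
+ * queue; DOWN and CRUISE keep headroom so the bottleneck can drain.
+ */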
153478 +/* Exiting PROBE_RTT, so return to bandwidth probing in STARTUP or PROBE_BW. */
153479 +static void bbr2_exit_probe_rtt(struct sock *sk)
153481 +       struct bbr *bbr = inet_csk_ca(sk);
153483 +       bbr2_reset_lower_bounds(sk);
153484 +       if (bbr_full_bw_reached(sk)) {
153485 +               bbr->mode = BBR_PROBE_BW;
153486 +               /* Raising inflight after PROBE_RTT may cause loss, so reset
153487 +                * the PROBE_BW clock and schedule the next bandwidth probe for
153488 +                * a friendly and randomized future point in time.
153489 +                */
153490 +               bbr2_start_bw_probe_down(sk);
153491 +               /* Since we are exiting PROBE_RTT, we know inflight is
153492 +                * below our estimated BDP, so it is reasonable to cruise.
153493 +                */
153494 +               bbr2_start_bw_probe_cruise(sk);
153495 +       } else {
153496 +               bbr->mode = BBR_STARTUP;
153497 +       }
153500 +/* Exit STARTUP based on loss rate > 1% and loss gaps in round >= N. Wait until
153501 + * the end of the round in recovery to get a good estimate of how many packets
153502 + * have been lost, and how many we need to drain with a low pacing rate.
153503 + */
153504 +static void bbr2_check_loss_too_high_in_startup(struct sock *sk,
153505 +                                              const struct rate_sample *rs)
153507 +       struct bbr *bbr = inet_csk_ca(sk);
153509 +       if (bbr_full_bw_reached(sk))
153510 +               return;
153512 +       /* For STARTUP exit, check the loss rate at the end of each round trip
153513 +        * of Recovery episodes in STARTUP. We check the loss rate at the end
153514 +        * of the round trip to filter out noisy/low loss and have a better
153515 +        * sense of inflight (extent of loss), so we can drain more accurately.
153516 +        */
153517 +       if (rs->losses && bbr->loss_events_in_round < 0xf)
153518 +               bbr->loss_events_in_round++;  /* update saturating counter */
153519 +       if (bbr->params.full_loss_cnt && bbr->loss_round_start &&
153520 +           inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery &&
153521 +           bbr->loss_events_in_round >= bbr->params.full_loss_cnt &&
153522 +           bbr2_is_inflight_too_high(sk, rs)) {
153523 +               bbr->debug.event = 'P';  /* Packet loss caused STARTUP exit */
153524 +               bbr2_handle_queue_too_high_in_startup(sk);
153525 +               return;
153526 +       }
153527 +       if (bbr->loss_round_start)
153528 +               bbr->loss_events_in_round = 0;
153531 +/* If we are done draining, advance into steady state operation in PROBE_BW. */
153532 +static void bbr2_check_drain(struct sock *sk, const struct rate_sample *rs,
153533 +                            struct bbr_context *ctx)
153535 +       struct bbr *bbr = inet_csk_ca(sk);
153537 +       if (bbr_check_drain(sk, rs, ctx)) {
153538 +               bbr->mode = BBR_PROBE_BW;
153539 +               bbr2_start_bw_probe_down(sk);
153540 +       }
153543 +static void bbr2_update_model(struct sock *sk, const struct rate_sample *rs,
153544 +                             struct bbr_context *ctx)
153546 +       bbr2_update_congestion_signals(sk, rs, ctx);
153547 +       bbr_update_ack_aggregation(sk, rs);
153548 +       bbr2_check_loss_too_high_in_startup(sk, rs);
153549 +       bbr_check_full_bw_reached(sk, rs);
153550 +       bbr2_check_drain(sk, rs, ctx);
153551 +       bbr2_update_cycle_phase(sk, rs);
153552 +       bbr_update_min_rtt(sk, rs);
153555 +/* Fast path for app-limited case.
153557 + * On each ack, we execute the bbr state machine, which primarily consists of:
153558 + * 1) updating the model based on the new rate sample, and
153559 + * 2) updating the control based on the updated model or a state change.
153561 + * There are certain workloads/scenarios, e.g. the app-limited case, where
153562 + * we can skip updating the model, or skip updating both the model and the
153563 + * control. This provides significant softirq CPU savings when processing
153564 + * incoming acks.
153566 + * In the app-limited case, if there is no congestion (loss/ecn) and the
153567 + * observed bw sample is less than the current estimated bw, then we can
153568 + * skip some of the computation in bbr state processing:
153570 + * - if there is no rtt/mode/phase change: in this case, since all the
153571 + *   parameters of the network model are constant, we can skip both the
153572 + *   model and the control update;
153574 + * - otherwise, we can skip the rest of the model update, but we still need
153575 + *   to update the control to account for the new rtt/mode/phase.
153577 + * Returns whether we can take the fast path.
153578 + */
153579 +static bool bbr2_fast_path(struct sock *sk, bool *update_model,
153580 +               const struct rate_sample *rs, struct bbr_context *ctx)
153582 +       struct bbr *bbr = inet_csk_ca(sk);
153583 +       u32 prev_min_rtt_us, prev_mode;
153585 +       if (bbr->params.fast_path && bbr->try_fast_path &&
153586 +           rs->is_app_limited && ctx->sample_bw < bbr_max_bw(sk) &&
153587 +           !bbr->loss_in_round && !bbr->ecn_in_round) {
153588 +               prev_mode = bbr->mode;
153589 +               prev_min_rtt_us = bbr->min_rtt_us;
153590 +               bbr2_check_drain(sk, rs, ctx);
153591 +               bbr2_update_cycle_phase(sk, rs);
153592 +               bbr_update_min_rtt(sk, rs);
153594 +               if (bbr->mode == prev_mode &&
153595 +                   bbr->min_rtt_us == prev_min_rtt_us &&
153596 +                   bbr->try_fast_path)
153597 +                       return true;
153599 +               /* Skip model update, but control still needs to be updated */
153600 +               *update_model = false;
153601 +       }
153602 +       return false;
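+/* Editorial example of when the fast path above fires (not part of the
+ * original patch): a request/response flow goes quiet, so ACKs arrive
+ * with rs->is_app_limited set, a sample bw below the current max filter,
+ * and no loss or ECN in the round. If the drain/cycle/min_rtt checks
+ * leave mode and min_rtt_us unchanged, bbr2_main() skips both the model
+ * and the control update for that ACK via the "goto out" path.
+ */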
153605 +static void bbr2_main(struct sock *sk, const struct rate_sample *rs)
153607 +       struct tcp_sock *tp = tcp_sk(sk);
153608 +       struct bbr *bbr = inet_csk_ca(sk);
153609 +       struct bbr_context ctx = { 0 };
153610 +       bool update_model = true;
153611 +       u32 bw;
153613 +       bbr->debug.event = '.';  /* init to default NOP (no event yet) */
153615 +       bbr_update_round_start(sk, rs, &ctx);
153616 +       if (bbr->round_start) {
153617 +               bbr->rounds_since_probe =
153618 +                       min_t(s32, bbr->rounds_since_probe + 1, 0xFF);
153619 +               bbr2_update_ecn_alpha(sk);
153620 +       }
153622 +       bbr->ecn_in_round  |= rs->is_ece;
153623 +       bbr_calculate_bw_sample(sk, rs, &ctx);
153625 +       if (bbr2_fast_path(sk, &update_model, rs, &ctx))
153626 +               goto out;
153628 +       if (update_model)
153629 +               bbr2_update_model(sk, rs, &ctx);
153631 +       bbr_update_gains(sk);
153632 +       bw = bbr_bw(sk);
153633 +       bbr_set_pacing_rate(sk, bw, bbr->pacing_gain);
153634 +       bbr_set_cwnd(sk, rs, rs->acked_sacked, bw, bbr->cwnd_gain,
153635 +                    tp->snd_cwnd, &ctx);
153636 +       bbr2_bound_cwnd_for_inflight_model(sk);
153638 +out:
153639 +       bbr->prev_ca_state = inet_csk(sk)->icsk_ca_state;
153640 +       bbr->loss_in_cycle |= rs->lost > 0;
153641 +       bbr->ecn_in_cycle  |= rs->delivered_ce > 0;
153643 +       bbr_debug(sk, rs->acked_sacked, rs, &ctx);
153646 +/* Module parameters that are settable by TCP_CONGESTION_PARAMS are declared
153647 + * down here, so that the algorithm functions that use the parameters must use
153648 + * the per-socket parameters; if they accidentally use the global version
153649 + * then there will be a compile error.
153650 + * TODO(ncardwell): move all per-socket parameters down to this section.
153651 + */
153653 +/* On losses, scale down inflight and pacing rate by beta scaled by BBR_SCALE.
153654 + * No loss response when 0. Max allowed value is 255.
153655 + */
153656 +static u32 bbr_beta = BBR_UNIT * 30 / 100;
153658 +/* Gain factor for ECN mark ratio samples, scaled by BBR_SCALE.
153659 + * Max allowed value is 255.
153660 + */
153661 +static u32 bbr_ecn_alpha_gain = BBR_UNIT * 1 / 16;  /* 1/16 = 6.25% */
153663 +/* The initial value for the ecn_alpha state variable. Default and max
153664 + * BBR_UNIT (256), representing 1.0. This allows a flow to respond quickly
153665 + * to congestion if the bottleneck is congested when the flow starts up.
153666 + */
153667 +static u32 bbr_ecn_alpha_init = BBR_UNIT;      /* 1.0, to respond quickly */
153669 +/* On ECN, cut inflight_lo to (1 - ecn_factor * ecn_alpha) scaled by BBR_SCALE.
153670 + * No ECN-based bounding when 0. Max allowed value is 255.
153671 + */
153672 +static u32 bbr_ecn_factor = BBR_UNIT * 1 / 3;      /* 1/3 = 33% */
153674 +/* Estimate bw probing has gone too far if CE ratio exceeds this threshold.
153675 + * Scaled by BBR_SCALE. Disabled when 0. Max allowed is 255.
153676 + */
153677 +static u32 bbr_ecn_thresh = BBR_UNIT * 1 / 2;  /* 1/2 = 50% */
153679 +/* Max RTT (in usec) at which to use sender-side ECN logic.
153680 + * Disabled when 0 (ECN allowed at any RTT).
153681 + * Max allowed for the parameter is 524287 (0x7ffff) us, ~524 ms.
153682 + */
153683 +static u32 bbr_ecn_max_rtt_us = 5000;
153685 +/* If non-zero, then in a cycle with no losses but some ECN marks, after ECN
153686 + * clears, use a multiplicative increase to quickly reprobe bw by
153687 + * starting inflight probing at the given multiple of inflight_hi.
153688 + * Default for this experimental knob is 0 (disabled).
153689 + * Planned value for experiments: BBR_UNIT * 1 / 2 = 128, representing 0.5.
153690 + */
153691 +static u32 bbr_ecn_reprobe_gain;
153693 +/* Estimate bw probing has gone too far if loss rate exceeds this level. */
153694 +static u32 bbr_loss_thresh = BBR_UNIT * 2 / 100;  /* 2% loss */
153696 +/* Exit STARTUP if number of loss marking events in a Recovery round is >= N,
153697 + * and loss rate is higher than bbr_loss_thresh.
153698 + * Disabled if 0. Max allowed value is 15 (0xF).
153699 + */
153700 +static u32 bbr_full_loss_cnt = 8;
153702 +/* Exit STARTUP if number of round trips with ECN mark rate above ecn_thresh
153703 + * meets this count. Max allowed value is 3.
153704 + */
153705 +static u32 bbr_full_ecn_cnt = 2;
153707 +/* Fraction of unutilized headroom to try to leave in path upon high loss. */
153708 +static u32 bbr_inflight_headroom = BBR_UNIT * 15 / 100;
153710 +/* Multiplier to get target inflight (as multiple of BDP) for PROBE_UP phase.
153711 + * Default is 1.25x, as in BBR v1. Max allowed is 511.
153712 + */
153713 +static u32 bbr_bw_probe_pif_gain = BBR_UNIT * 5 / 4;
153715 +/* Multiplier to get Reno-style probe epoch duration as: k * BDP round trips.
153716 + * If zero, disables this BBR v2 Reno-style BDP-scaled coexistence mechanism.
153717 + * Max allowed is 511.
153718 + */
153719 +static u32 bbr_bw_probe_reno_gain = BBR_UNIT;
153721 +/* Max number of packet-timed rounds to wait before probing for bandwidth.  If
153722 + * we want to tolerate 1% random loss per round, and not have this cut our
153723 + * inflight too much, we must probe for bw periodically on roughly this scale.
153724 + * If low, limits Reno/CUBIC coexistence; if high, limits loss tolerance.
153725 + * We aim to be fair with Reno/CUBIC up to a BDP of at least:
153726 + *  BDP = 25Mbps * .030sec /(1514bytes) = 61.9 packets
153727 + */
153728 +static u32 bbr_bw_probe_max_rounds = 63;
153730 +/* Max amount of randomness to inject in round counting for Reno-coexistence.
153731 + * Max value is 15.
153732 + */
153733 +static u32 bbr_bw_probe_rand_rounds = 2;
153735 +/* Use BBR-native probe time scale starting at this many usec.
153736 + * We aim to be fair with Reno/CUBIC up to an inter-loss time epoch of at least:
153737 + *  BDP*RTT = 25Mbps * .030sec /(1514bytes) * 0.030sec = 1.9 secs
153738 + */
153739 +static u32 bbr_bw_probe_base_us = 2 * USEC_PER_SEC;  /* 2 secs */
153741 +/* Use BBR-native probes spread over this many usec: */
153742 +static u32 bbr_bw_probe_rand_us = 1 * USEC_PER_SEC;  /* 1 sec */
153744 +/* Undo the model changes made in loss recovery if recovery was spurious? */
153745 +static bool bbr_undo = true;
153747 +/* Use fast path if app-limited, no loss/ECN, and target cwnd was reached? */
153748 +static bool bbr_fast_path = true;      /* default: enabled */
153750 +/* Use fast ack mode? */
153751 +static int bbr_fast_ack_mode = 1;      /* default: rwnd check off */
153753 +/* How much to additively increase inflight_hi when entering REFILL? */
153754 +static u32 bbr_refill_add_inc;         /* default: disabled */
153756 +module_param_named(beta,                 bbr_beta,                 uint, 0644);
153757 +module_param_named(ecn_alpha_gain,       bbr_ecn_alpha_gain,       uint, 0644);
153758 +module_param_named(ecn_alpha_init,       bbr_ecn_alpha_init,       uint, 0644);
153759 +module_param_named(ecn_factor,           bbr_ecn_factor,           uint, 0644);
153760 +module_param_named(ecn_thresh,           bbr_ecn_thresh,           uint, 0644);
153761 +module_param_named(ecn_max_rtt_us,       bbr_ecn_max_rtt_us,       uint, 0644);
153762 +module_param_named(ecn_reprobe_gain,     bbr_ecn_reprobe_gain,     uint, 0644);
153763 +module_param_named(loss_thresh,          bbr_loss_thresh,          uint, 0664);
153764 +module_param_named(full_loss_cnt,        bbr_full_loss_cnt,        uint, 0664);
153765 +module_param_named(full_ecn_cnt,         bbr_full_ecn_cnt,         uint, 0664);
153766 +module_param_named(inflight_headroom,    bbr_inflight_headroom,    uint, 0664);
153767 +module_param_named(bw_probe_pif_gain,    bbr_bw_probe_pif_gain,    uint, 0664);
153768 +module_param_named(bw_probe_reno_gain,   bbr_bw_probe_reno_gain,   uint, 0664);
153769 +module_param_named(bw_probe_max_rounds,  bbr_bw_probe_max_rounds,  uint, 0664);
153770 +module_param_named(bw_probe_rand_rounds, bbr_bw_probe_rand_rounds, uint, 0664);
153771 +module_param_named(bw_probe_base_us,     bbr_bw_probe_base_us,     uint, 0664);
153772 +module_param_named(bw_probe_rand_us,     bbr_bw_probe_rand_us,     uint, 0664);
153773 +module_param_named(undo,                 bbr_undo,                 bool, 0664);
153774 +module_param_named(fast_path,            bbr_fast_path,            bool, 0664);
153775 +module_param_named(fast_ack_mode,        bbr_fast_ack_mode,        uint, 0664);
153776 +module_param_named(refill_add_inc,       bbr_refill_add_inc,       uint, 0664);
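+/* A usage example (assuming the module is built as tcp_bbr2.ko):
+ * "modprobe tcp_bbr2 beta=64" scales the loss response down to
+ * 64/BBR_UNIT = 25%, instead of the default 30%.
+ */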
153778 +static void bbr2_init(struct sock *sk)
153779 +{
153780 +       struct tcp_sock *tp = tcp_sk(sk);
153781 +       struct bbr *bbr = inet_csk_ca(sk);
153783 +       bbr_init(sk);   /* run shared init code for v1 and v2 */
153785 +       /* BBR v2 parameters: */
153786 +       bbr->params.beta = min_t(u32, 0xFFU, bbr_beta);
153787 +       bbr->params.ecn_alpha_gain = min_t(u32, 0xFFU, bbr_ecn_alpha_gain);
153788 +       bbr->params.ecn_alpha_init = min_t(u32, BBR_UNIT, bbr_ecn_alpha_init);
153789 +       bbr->params.ecn_factor = min_t(u32, 0xFFU, bbr_ecn_factor);
153790 +       bbr->params.ecn_thresh = min_t(u32, 0xFFU, bbr_ecn_thresh);
153791 +       bbr->params.ecn_max_rtt_us = min_t(u32, 0x7ffffU, bbr_ecn_max_rtt_us);
153792 +       bbr->params.ecn_reprobe_gain = min_t(u32, 0x1FF, bbr_ecn_reprobe_gain);
153793 +       bbr->params.loss_thresh = min_t(u32, 0xFFU, bbr_loss_thresh);
153794 +       bbr->params.full_loss_cnt = min_t(u32, 0xFU, bbr_full_loss_cnt);
153795 +       bbr->params.full_ecn_cnt = min_t(u32, 0x3U, bbr_full_ecn_cnt);
153796 +       bbr->params.inflight_headroom =
153797 +               min_t(u32, 0xFFU, bbr_inflight_headroom);
153798 +       bbr->params.bw_probe_pif_gain =
153799 +               min_t(u32, 0x1FFU, bbr_bw_probe_pif_gain);
153800 +       bbr->params.bw_probe_reno_gain =
153801 +               min_t(u32, 0x1FFU, bbr_bw_probe_reno_gain);
153802 +       bbr->params.bw_probe_max_rounds =
153803 +               min_t(u32, 0xFFU, bbr_bw_probe_max_rounds);
153804 +       bbr->params.bw_probe_rand_rounds =
153805 +               min_t(u32, 0xFU, bbr_bw_probe_rand_rounds);
153806 +       bbr->params.bw_probe_base_us =
153807 +               min_t(u32, (1 << 26) - 1, bbr_bw_probe_base_us);
153808 +       bbr->params.bw_probe_rand_us =
153809 +               min_t(u32, (1 << 26) - 1, bbr_bw_probe_rand_us);
153810 +       bbr->params.undo = bbr_undo;
153811 +       bbr->params.fast_path = bbr_fast_path ? 1 : 0;
153812 +       bbr->params.refill_add_inc = min_t(u32, 0x3U, bbr_refill_add_inc);
153814 +       /* BBR v2 state: */
153815 +       bbr->initialized = 1;
153816 +       /* Start sampling ECN mark rate after first full flight is ACKed: */
153817 +       bbr->loss_round_delivered = tp->delivered + 1;
153818 +       bbr->loss_round_start = 0;
153819 +       bbr->undo_bw_lo = 0;
153820 +       bbr->undo_inflight_lo = 0;
153821 +       bbr->undo_inflight_hi = 0;
153822 +       bbr->loss_events_in_round = 0;
153823 +       bbr->startup_ecn_rounds = 0;
153824 +       bbr2_reset_congestion_signals(sk);
153825 +       bbr->bw_lo = ~0U;
153826 +       bbr->bw_hi[0] = 0;
153827 +       bbr->bw_hi[1] = 0;
153828 +       bbr->inflight_lo = ~0U;
153829 +       bbr->inflight_hi = ~0U;
153830 +       bbr->bw_probe_up_cnt = ~0U;
153831 +       bbr->bw_probe_up_acks = 0;
153832 +       bbr->bw_probe_up_rounds = 0;
153833 +       bbr->probe_wait_us = 0;
153834 +       bbr->stopped_risky_probe = 0;
153835 +       bbr->ack_phase = BBR_ACKS_INIT;
153836 +       bbr->rounds_since_probe = 0;
153837 +       bbr->bw_probe_samples = 0;
153838 +       bbr->prev_probe_too_high = 0;
153839 +       bbr->ecn_eligible = 0;
153840 +       bbr->ecn_alpha = bbr->params.ecn_alpha_init;
153841 +       bbr->alpha_last_delivered = 0;
153842 +       bbr->alpha_last_delivered_ce = 0;
153844 +       tp->fast_ack_mode = min_t(u32, 0x2U, bbr_fast_ack_mode);
153845 +}
153847 +/* Core TCP stack informs us that the given skb was just marked lost. */
153848 +static void bbr2_skb_marked_lost(struct sock *sk, const struct sk_buff *skb)
153849 +{
153850 +       struct tcp_sock *tp = tcp_sk(sk);
153851 +       struct bbr *bbr = inet_csk_ca(sk);
153852 +       struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
153853 +       struct rate_sample rs;
153855 +       /* Capture "current" data over the full round trip of loss,
153856 +        * to have a better chance to see the full capacity of the path.
153857 +        */
153858 +       if (!bbr->loss_in_round)  /* first loss in this round trip? */
153859 +               bbr->loss_round_delivered = tp->delivered;  /* set round trip */
153860 +       bbr->loss_in_round = 1;
153861 +       bbr->loss_in_cycle = 1;
153863 +       if (!bbr->bw_probe_samples)
153864 +               return;  /* not an skb sent while probing for bandwidth */
153865 +       if (unlikely(!scb->tx.delivered_mstamp))
153866 +               return;  /* skb was SACKed, reneged, marked lost; ignore it */
153867 +       /* We are probing for bandwidth. Construct a rate sample that
153868 +        * estimates what happened in the flight leading up to this lost skb,
153869 +        * then see if the loss rate went too high, and if so at which packet.
153870 +        */
153871 +       memset(&rs, 0, sizeof(rs));
153872 +       rs.tx_in_flight = scb->tx.in_flight;
153873 +       rs.lost = tp->lost - scb->tx.lost;
153874 +       rs.is_app_limited = scb->tx.is_app_limited;
153875 +       if (bbr2_is_inflight_too_high(sk, &rs)) {
153876 +               rs.tx_in_flight = bbr2_inflight_hi_from_lost_skb(sk, &rs, skb);
153877 +               bbr2_handle_inflight_too_high(sk, &rs);
153878 +       }
153879 +}
153881 +/* Revert short-term model if current loss recovery event was spurious. */
153882 +static u32 bbr2_undo_cwnd(struct sock *sk)
153883 +{
153884 +       struct tcp_sock *tp = tcp_sk(sk);
153885 +       struct bbr *bbr = inet_csk_ca(sk);
153887 +       bbr->debug.undo = 1;
153888 +       bbr->full_bw = 0;   /* spurious slow-down; reset full pipe detection */
153889 +       bbr->full_bw_cnt = 0;
153890 +       bbr->loss_in_round = 0;
153892 +       if (!bbr->params.undo)
153893 +               return tp->snd_cwnd;
153895 +       /* Revert to cwnd and other state saved before loss episode. */
153896 +       bbr->bw_lo = max(bbr->bw_lo, bbr->undo_bw_lo);
153897 +       bbr->inflight_lo = max(bbr->inflight_lo, bbr->undo_inflight_lo);
153898 +       bbr->inflight_hi = max(bbr->inflight_hi, bbr->undo_inflight_hi);
153899 +       return bbr->prior_cwnd;
153900 +}
153902 +/* Entering loss recovery, so save state for when we undo recovery. */
153903 +static u32 bbr2_ssthresh(struct sock *sk)
153904 +{
153905 +       struct bbr *bbr = inet_csk_ca(sk);
153907 +       bbr_save_cwnd(sk);
153908 +       /* For undo, save state that adapts based on loss signal. */
153909 +       bbr->undo_bw_lo         = bbr->bw_lo;
153910 +       bbr->undo_inflight_lo   = bbr->inflight_lo;
153911 +       bbr->undo_inflight_hi   = bbr->inflight_hi;
153912 +       return tcp_sk(sk)->snd_ssthresh;
153913 +}
153915 +static enum tcp_bbr2_phase bbr2_get_phase(struct bbr *bbr)
153916 +{
153917 +       switch (bbr->mode) {
153918 +       case BBR_STARTUP:
153919 +               return BBR2_PHASE_STARTUP;
153920 +       case BBR_DRAIN:
153921 +               return BBR2_PHASE_DRAIN;
153922 +       case BBR_PROBE_BW:
153923 +               break;
153924 +       case BBR_PROBE_RTT:
153925 +               return BBR2_PHASE_PROBE_RTT;
153926 +       default:
153927 +               return BBR2_PHASE_INVALID;
153928 +       }
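+       /* In PROBE_BW, the phase is determined by the current cycle index: */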
153929 +       switch (bbr->cycle_idx) {
153930 +       case BBR_BW_PROBE_UP:
153931 +               return BBR2_PHASE_PROBE_BW_UP;
153932 +       case BBR_BW_PROBE_DOWN:
153933 +               return BBR2_PHASE_PROBE_BW_DOWN;
153934 +       case BBR_BW_PROBE_CRUISE:
153935 +               return BBR2_PHASE_PROBE_BW_CRUISE;
153936 +       case BBR_BW_PROBE_REFILL:
153937 +               return BBR2_PHASE_PROBE_BW_REFILL;
153938 +       default:
153939 +               return BBR2_PHASE_INVALID;
153940 +       }
153941 +}
153943 +static size_t bbr2_get_info(struct sock *sk, u32 ext, int *attr,
153944 +                           union tcp_cc_info *info)
153945 +{
153946 +       if (ext & (1 << (INET_DIAG_BBRINFO - 1)) ||
153947 +           ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
153948 +               struct bbr *bbr = inet_csk_ca(sk);
153949 +               u64 bw = bbr_bw_bytes_per_sec(sk, bbr_bw(sk));
153950 +               u64 bw_hi = bbr_bw_bytes_per_sec(sk, bbr_max_bw(sk));
153951 +               u64 bw_lo = bbr->bw_lo == ~0U ?
153952 +                       ~0ULL : bbr_bw_bytes_per_sec(sk, bbr->bw_lo);
153954 +               memset(&info->bbr2, 0, sizeof(info->bbr2));
153955 +               info->bbr2.bbr_bw_lsb           = (u32)bw;
153956 +               info->bbr2.bbr_bw_msb           = (u32)(bw >> 32);
153957 +               info->bbr2.bbr_min_rtt          = bbr->min_rtt_us;
153958 +               info->bbr2.bbr_pacing_gain      = bbr->pacing_gain;
153959 +               info->bbr2.bbr_cwnd_gain        = bbr->cwnd_gain;
153960 +               info->bbr2.bbr_bw_hi_lsb        = (u32)bw_hi;
153961 +               info->bbr2.bbr_bw_hi_msb        = (u32)(bw_hi >> 32);
153962 +               info->bbr2.bbr_bw_lo_lsb        = (u32)bw_lo;
153963 +               info->bbr2.bbr_bw_lo_msb        = (u32)(bw_lo >> 32);
153964 +               info->bbr2.bbr_mode             = bbr->mode;
153965 +               info->bbr2.bbr_phase            = (__u8)bbr2_get_phase(bbr);
153966 +               info->bbr2.bbr_version          = (__u8)2;
153967 +               info->bbr2.bbr_inflight_lo      = bbr->inflight_lo;
153968 +               info->bbr2.bbr_inflight_hi      = bbr->inflight_hi;
153969 +               info->bbr2.bbr_extra_acked      = bbr_extra_acked(sk);
153970 +               *attr = INET_DIAG_BBRINFO;
153971 +               return sizeof(info->bbr2);
153972 +       }
153973 +       return 0;
153974 +}
153976 +static void bbr2_set_state(struct sock *sk, u8 new_state)
153977 +{
153978 +       struct tcp_sock *tp = tcp_sk(sk);
153979 +       struct bbr *bbr = inet_csk_ca(sk);
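+       /* An RTO (TCP_CA_Loss) resets full-pipe detection; if we were not
+        * probing for bw and inflight_lo is unset, seed it from the
+        * pre-RTO cwnd:
+        */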
153981 +       if (new_state == TCP_CA_Loss) {
153982 +               struct rate_sample rs = { .losses = 1 };
153983 +               struct bbr_context ctx = { 0 };
153985 +               bbr->prev_ca_state = TCP_CA_Loss;
153986 +               bbr->full_bw = 0;
153987 +               if (!bbr2_is_probing_bandwidth(sk) && bbr->inflight_lo == ~0U) {
153988 +                       /* bbr_adapt_lower_bounds() needs cwnd before
153989 +                        * we suffered an RTO, to update inflight_lo:
153990 +                        */
153991 +                       bbr->inflight_lo =
153992 +                               max(tp->snd_cwnd, bbr->prior_cwnd);
153993 +               }
153994 +               bbr_debug(sk, 0, &rs, &ctx);
153995 +       } else if (bbr->prev_ca_state == TCP_CA_Loss &&
153996 +                  new_state != TCP_CA_Loss) {
153997 +               tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd);
153998 +               bbr->try_fast_path = 0; /* bound cwnd using latest model */
153999 +       }
154000 +}
154002 +static struct tcp_congestion_ops tcp_bbr2_cong_ops __read_mostly = {
154003 +       .flags          = TCP_CONG_NON_RESTRICTED | TCP_CONG_WANTS_CE_EVENTS,
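+       /* TCP_CONG_WANTS_CE_EVENTS: have __tcp_ecn_check_ce() report
+        * CA_EVENT_ECN_IS_CE / CA_EVENT_ECN_NO_CE to this module.
+        */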
154004 +       .name           = "bbr2",
154005 +       .owner          = THIS_MODULE,
154006 +       .init           = bbr2_init,
154007 +       .cong_control   = bbr2_main,
154008 +       .sndbuf_expand  = bbr_sndbuf_expand,
154009 +       .skb_marked_lost = bbr2_skb_marked_lost,
154010 +       .undo_cwnd      = bbr2_undo_cwnd,
154011 +       .cwnd_event     = bbr_cwnd_event,
154012 +       .ssthresh       = bbr2_ssthresh,
154013 +       .tso_segs       = bbr_tso_segs,
154014 +       .get_info       = bbr2_get_info,
154015 +       .set_state      = bbr2_set_state,
154016 +};
154018 +static int __init bbr_register(void)
154019 +{
154020 +       BUILD_BUG_ON(sizeof(struct bbr) > ICSK_CA_PRIV_SIZE);
154021 +       return tcp_register_congestion_control(&tcp_bbr2_cong_ops);
154022 +}
154024 +static void __exit bbr_unregister(void)
154025 +{
154026 +       tcp_unregister_congestion_control(&tcp_bbr2_cong_ops);
154027 +}
154029 +module_init(bbr_register);
154030 +module_exit(bbr_unregister);
154032 +MODULE_AUTHOR("Van Jacobson <vanj@google.com>");
154033 +MODULE_AUTHOR("Neal Cardwell <ncardwell@google.com>");
154034 +MODULE_AUTHOR("Yuchung Cheng <ycheng@google.com>");
154035 +MODULE_AUTHOR("Soheil Hassas Yeganeh <soheil@google.com>");
154036 +MODULE_AUTHOR("Priyaranjan Jha <priyarjha@google.com>");
154037 +MODULE_AUTHOR("Yousuk Seung <ysseung@google.com>");
154038 +MODULE_AUTHOR("Kevin Yang <yyd@google.com>");
154039 +MODULE_AUTHOR("Arjun Roy <arjunroy@google.com>");
154041 +MODULE_LICENSE("Dual BSD/GPL");
154042 +MODULE_DESCRIPTION("TCP BBR (Bottleneck Bandwidth and RTT)");
154043 diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
154044 index 563d016e7478..153ed9010c0c 100644
154045 --- a/net/ipv4/tcp_cong.c
154046 +++ b/net/ipv4/tcp_cong.c
154047 @@ -179,6 +179,7 @@ void tcp_init_congestion_control(struct sock *sk)
154048         struct inet_connection_sock *icsk = inet_csk(sk);
154050         tcp_sk(sk)->prior_ssthresh = 0;
154051 +       tcp_sk(sk)->fast_ack_mode = 0;
154052         if (icsk->icsk_ca_ops->init)
154053                 icsk->icsk_ca_ops->init(sk);
154054         if (tcp_ca_needs_ecn(sk))
154055 @@ -230,6 +231,10 @@ int tcp_set_default_congestion_control(struct net *net, const char *name)
154056                 ret = -ENOENT;
154057         } else if (!bpf_try_module_get(ca, ca->owner)) {
154058                 ret = -EBUSY;
154059 +       } else if (!net_eq(net, &init_net) &&
154060 +                       !(ca->flags & TCP_CONG_NON_RESTRICTED)) {
154061 +               /* Only init netns can set default to a restricted algorithm */
154062 +               ret = -EPERM;
154063         } else {
154064                 prev = xchg(&net->ipv4.tcp_congestion_control, ca);
154065                 if (prev)
154066 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
154067 index 69a545db80d2..45aaba87ce8e 100644
154068 --- a/net/ipv4/tcp_input.c
154069 +++ b/net/ipv4/tcp_input.c
154070 @@ -348,7 +348,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
154071                         tcp_enter_quickack_mode(sk, 2);
154072                 break;
154073         case INET_ECN_CE:
154074 -               if (tcp_ca_needs_ecn(sk))
154075 +               if (tcp_ca_wants_ce_events(sk))
154076                         tcp_ca_event(sk, CA_EVENT_ECN_IS_CE);
154078                 if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
154079 @@ -359,7 +359,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
154080                 tp->ecn_flags |= TCP_ECN_SEEN;
154081                 break;
154082         default:
154083 -               if (tcp_ca_needs_ecn(sk))
154084 +               if (tcp_ca_wants_ce_events(sk))
154085                         tcp_ca_event(sk, CA_EVENT_ECN_NO_CE);
154086                 tp->ecn_flags |= TCP_ECN_SEEN;
154087                 break;
154088 @@ -1039,7 +1039,12 @@ static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
154089   */
154090  static void tcp_notify_skb_loss_event(struct tcp_sock *tp, const struct sk_buff *skb)
154091  {
154092 +       struct sock *sk = (struct sock *)tp;
154093 +       const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
154095         tp->lost += tcp_skb_pcount(skb);
154096 +       if (ca_ops->skb_marked_lost)
154097 +               ca_ops->skb_marked_lost(sk, skb);
154098  }
154100  void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
154101 @@ -1420,6 +1425,17 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
154102         WARN_ON_ONCE(tcp_skb_pcount(skb) < pcount);
154103         tcp_skb_pcount_add(skb, -pcount);
154105 +       /* Adjust tx.in_flight as pcount is shifted from skb to prev. */
154106 +       if (WARN_ONCE(TCP_SKB_CB(skb)->tx.in_flight < pcount,
154107 +                     "prev in_flight: %u skb in_flight: %u pcount: %u",
154108 +                     TCP_SKB_CB(prev)->tx.in_flight,
154109 +                     TCP_SKB_CB(skb)->tx.in_flight,
154110 +                     pcount))
154111 +               TCP_SKB_CB(skb)->tx.in_flight = 0;
154112 +       else
154113 +               TCP_SKB_CB(skb)->tx.in_flight -= pcount;
154114 +       TCP_SKB_CB(prev)->tx.in_flight += pcount;
154116         /* When we're adding to gso_segs == 1, gso_size will be zero,
154117          * in theory this shouldn't be necessary but as long as DSACK
154118          * code can come after this skb later on it's better to keep
154119 @@ -3182,7 +3198,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
154120         long seq_rtt_us = -1L;
154121         long ca_rtt_us = -1L;
154122         u32 pkts_acked = 0;
154123 -       u32 last_in_flight = 0;
154124         bool rtt_update;
154125         int flag = 0;
154127 @@ -3218,7 +3233,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
154128                         if (!first_ackt)
154129                                 first_ackt = last_ackt;
154131 -                       last_in_flight = TCP_SKB_CB(skb)->tx.in_flight;
154132                         if (before(start_seq, reord))
154133                                 reord = start_seq;
154134                         if (!after(scb->end_seq, tp->high_seq))
154135 @@ -3284,8 +3298,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
154136                 seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt);
154137                 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt);
154139 -               if (pkts_acked == 1 && last_in_flight < tp->mss_cache &&
154140 -                   last_in_flight && !prior_sacked && fully_acked &&
154141 +               if (pkts_acked == 1 && fully_acked && !prior_sacked &&
154142 +                   (tp->snd_una - prior_snd_una) < tp->mss_cache &&
154143                     sack->rate->prior_delivered + 1 == tp->delivered &&
154144                     !(flag & (FLAG_CA_ALERT | FLAG_SYN_ACKED))) {
154145                         /* Conservatively mark a delayed ACK. It's typically
154146 @@ -3342,9 +3356,10 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
154148         if (icsk->icsk_ca_ops->pkts_acked) {
154149                 struct ack_sample sample = { .pkts_acked = pkts_acked,
154150 -                                            .rtt_us = sack->rate->rtt_us,
154151 -                                            .in_flight = last_in_flight };
154152 +                                            .rtt_us = sack->rate->rtt_us };
154154 +               sample.in_flight = tp->mss_cache *
154155 +                       (tp->delivered - sack->rate->prior_delivered);
154156                 icsk->icsk_ca_ops->pkts_acked(sk, &sample);
154157         }
154159 @@ -3742,6 +3757,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
154161         prior_fack = tcp_is_sack(tp) ? tcp_highest_sack_seq(tp) : tp->snd_una;
154162         rs.prior_in_flight = tcp_packets_in_flight(tp);
154163 +       tcp_rate_check_app_limited(sk);
154165         /* ts_recent update must be made after we are sure that the packet
154166          * is in window.
154167 @@ -3839,6 +3855,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
154168         delivered = tcp_newly_delivered(sk, delivered, flag);
154169         lost = tp->lost - lost;                 /* freshly marked lost */
154170         rs.is_ack_delayed = !!(flag & FLAG_ACK_MAYBE_DELAYED);
154171 +       rs.is_ece = !!(flag & FLAG_ECE);
154172         tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate);
154173         tcp_cong_control(sk, ack, delivered, flag, sack_state.rate);
154174         tcp_xmit_recovery(sk, rexmit);
154175 @@ -5399,13 +5416,14 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
154177             /* More than one full frame received... */
154178         if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
154179 +            (tp->fast_ack_mode == 1 ||
154180              /* ... and right edge of window advances far enough.
154181               * (tcp_recvmsg() will send ACK otherwise).
154182               * If application uses SO_RCVLOWAT, we want send ack now if
154183               * we have not received enough bytes to satisfy the condition.
154184               */
154185 -           (tp->rcv_nxt - tp->copied_seq < sk->sk_rcvlowat ||
154186 -            __tcp_select_window(sk) >= tp->rcv_wnd)) ||
154187 +             (tp->rcv_nxt - tp->copied_seq < sk->sk_rcvlowat ||
154188 +              __tcp_select_window(sk) >= tp->rcv_wnd))) ||
154189             /* We ACK each frame or... */
154190             tcp_in_quickack_mode(sk) ||
154191             /* Protocol state mandates a one-time immediate ACK */
154192 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
154193 index fbf140a770d8..90d939375b29 100644
154194 --- a/net/ipv4/tcp_output.c
154195 +++ b/net/ipv4/tcp_output.c
154196 @@ -1256,8 +1256,6 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
154197         tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
154198         skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
154199         if (clone_it) {
154200 -               TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
154201 -                       - tp->snd_una;
154202                 oskb = skb;
154204                 tcp_skb_tsorted_save(oskb) {
154205 @@ -1536,7 +1534,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
154206  {
154207         struct tcp_sock *tp = tcp_sk(sk);
154208         struct sk_buff *buff;
154209 -       int nsize, old_factor;
154210 +       int nsize, old_factor, inflight_prev;
154211         long limit;
154212         int nlen;
154213         u8 flags;
154214 @@ -1615,6 +1613,15 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
154216                 if (diff)
154217                         tcp_adjust_pcount(sk, skb, diff);
154219 +               /* Set buff tx.in_flight as if buff were sent by itself. */
154220 +               inflight_prev = TCP_SKB_CB(skb)->tx.in_flight - old_factor;
154221 +               if (WARN_ONCE(inflight_prev < 0,
154222 +                             "inconsistent: tx.in_flight: %u old_factor: %d",
154223 +                             TCP_SKB_CB(skb)->tx.in_flight, old_factor))
154224 +                       inflight_prev = 0;
154225 +               TCP_SKB_CB(buff)->tx.in_flight = inflight_prev +
154226 +                                                tcp_skb_pcount(buff);
154227         }
154229         /* Link BUFF into the send queue. */
154230 @@ -1982,13 +1989,12 @@ static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
154231  static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
154232  {
154233         const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
154234 -       u32 min_tso, tso_segs;
154236 -       min_tso = ca_ops->min_tso_segs ?
154237 -                       ca_ops->min_tso_segs(sk) :
154238 -                       sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs;
154239 +       u32 tso_segs;
154241 -       tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
154242 +       tso_segs = ca_ops->tso_segs ?
154243 +               ca_ops->tso_segs(sk, mss_now) :
154244 +               tcp_tso_autosize(sk, mss_now,
154245 +                                sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
154246         return min_t(u32, tso_segs, sk->sk_gso_max_segs);
154247  }
154249 @@ -2628,6 +2634,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
154250                         skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache;
154251                         list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
154252                         tcp_init_tso_segs(skb, mss_now);
154253 +                       tcp_set_tx_in_flight(sk, skb);
154254                         goto repair; /* Skip network transmission */
154255                 }
154257 diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
154258 index 0de693565963..796fa6e5310c 100644
154259 --- a/net/ipv4/tcp_rate.c
154260 +++ b/net/ipv4/tcp_rate.c
154261 @@ -34,6 +34,24 @@
154262   * ready to send in the write queue.
154263   */
154265 +void tcp_set_tx_in_flight(struct sock *sk, struct sk_buff *skb)
154266 +{
154267 +       struct tcp_sock *tp = tcp_sk(sk);
154268 +       u32 in_flight;
154270 +       /* Check, sanitize, and record packets in flight after skb was sent. */
154271 +       in_flight = tcp_packets_in_flight(tp) + tcp_skb_pcount(skb);
154272 +       if (WARN_ONCE(in_flight > TCPCB_IN_FLIGHT_MAX,
154273 +                     "insane in_flight %u cc %s mss %u "
154274 +                     "cwnd %u pif %u %u %u %u\n",
154275 +                     in_flight, inet_csk(sk)->icsk_ca_ops->name,
154276 +                     tp->mss_cache, tp->snd_cwnd,
154277 +                     tp->packets_out, tp->retrans_out,
154278 +                     tp->sacked_out, tp->lost_out))
154279 +               in_flight = TCPCB_IN_FLIGHT_MAX;
154280 +       TCP_SKB_CB(skb)->tx.in_flight = in_flight;
154281 +}
154283  /* Snapshot the current delivery information in the skb, to generate
154284   * a rate sample later when the skb is (s)acked in tcp_rate_skb_delivered().
154285   */
154286 @@ -65,7 +83,10 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
154287         TCP_SKB_CB(skb)->tx.first_tx_mstamp     = tp->first_tx_mstamp;
154288         TCP_SKB_CB(skb)->tx.delivered_mstamp    = tp->delivered_mstamp;
154289         TCP_SKB_CB(skb)->tx.delivered           = tp->delivered;
154290 +       TCP_SKB_CB(skb)->tx.delivered_ce        = tp->delivered_ce;
154291 +       TCP_SKB_CB(skb)->tx.lost                = tp->lost;
154292         TCP_SKB_CB(skb)->tx.is_app_limited      = tp->app_limited ? 1 : 0;
154293 +       tcp_set_tx_in_flight(sk, skb);
154296  /* When an skb is sacked or acked, we fill in the rate sample with the (prior)
154297 @@ -86,16 +107,20 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
154299         if (!rs->prior_delivered ||
154300             after(scb->tx.delivered, rs->prior_delivered)) {
154301 +               rs->prior_lost       = scb->tx.lost;
154302 +               rs->prior_delivered_ce  = scb->tx.delivered_ce;
154303                 rs->prior_delivered  = scb->tx.delivered;
154304                 rs->prior_mstamp     = scb->tx.delivered_mstamp;
154305                 rs->is_app_limited   = scb->tx.is_app_limited;
154306                 rs->is_retrans       = scb->sacked & TCPCB_RETRANS;
154307 +               rs->tx_in_flight     = scb->tx.in_flight;
154309                 /* Record send time of most recently ACKed packet: */
154310                 tp->first_tx_mstamp  = tcp_skb_timestamp_us(skb);
154311                 /* Find the duration of the "send phase" of this window: */
154312 -               rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
154313 -                                                    scb->tx.first_tx_mstamp);
154314 +               rs->interval_us      = tcp_stamp32_us_delta(
154315 +                                               tp->first_tx_mstamp,
154316 +                                               scb->tx.first_tx_mstamp);
154318         }
154319         /* Mark off the skb delivered once it's sacked to avoid being
154320 @@ -137,6 +162,11 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
154321                 return;
154322         }
154323         rs->delivered   = tp->delivered - rs->prior_delivered;
154324 +       rs->lost        = tp->lost - rs->prior_lost;
154326 +       rs->delivered_ce = tp->delivered_ce - rs->prior_delivered_ce;
154327 +       /* delivered_ce occupies less than 32 bits in the skb control block */
154328 +       rs->delivered_ce &= TCPCB_DELIVERED_CE_MASK;
154330         /* Model sending data and receiving ACKs as separate pipeline phases
154331          * for a window. Usually the ACK phase is longer, but with ACK
154332 @@ -144,7 +174,7 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
154333          * longer phase.
154334          */
154335         snd_us = rs->interval_us;                               /* send phase */
154336 -       ack_us = tcp_stamp_us_delta(tp->tcp_mstamp,
154337 +       ack_us = tcp_stamp32_us_delta(tp->tcp_mstamp,
154338                                     rs->prior_mstamp); /* ack phase */
154339         rs->interval_us = max(snd_us, ack_us);
154341 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
154342 index 4ef08079ccfa..b5b24caa8ba0 100644
154343 --- a/net/ipv4/tcp_timer.c
154344 +++ b/net/ipv4/tcp_timer.c
154345 @@ -607,6 +607,7 @@ void tcp_write_timer_handler(struct sock *sk)
154346                 goto out;
154347         }
154349 +       tcp_rate_check_app_limited(sk);
154350         tcp_mstamp_refresh(tcp_sk(sk));
154351         event = icsk->icsk_pending;
154353 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
154354 index 99d743eb9dc4..c586a6bb8c6d 100644
154355 --- a/net/ipv4/udp.c
154356 +++ b/net/ipv4/udp.c
154357 @@ -2664,9 +2664,12 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
154359         case UDP_GRO:
154360                 lock_sock(sk);
154362 +               /* when enabling GRO, accept the related GSO packet type */
154363                 if (valbool)
154364                         udp_tunnel_encap_enable(sk->sk_socket);
154365                 up->gro_enabled = valbool;
154366 +               up->accept_udp_l4 = valbool;
154367                 release_sock(sk);
154368                 break;
154370 diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
154371 index c5b4b586570f..25134a3548e9 100644
154372 --- a/net/ipv4/udp_offload.c
154373 +++ b/net/ipv4/udp_offload.c
154374 @@ -515,21 +515,24 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
154375         unsigned int off = skb_gro_offset(skb);
154376         int flush = 1;
154378 +       /* we can do L4 aggregation only if the packet can't land in a tunnel
154379 +        * otherwise we could corrupt the inner stream
154380 +        */
154381         NAPI_GRO_CB(skb)->is_flist = 0;
154382 -       if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
154383 -               NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled: 1;
154384 +       if (!sk || !udp_sk(sk)->gro_receive) {
154385 +               if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
154386 +                       NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled : 1;
154388 -       if ((!sk && (skb->dev->features & NETIF_F_GRO_UDP_FWD)) ||
154389 -           (sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist) {
154390 -               pp = call_gro_receive(udp_gro_receive_segment, head, skb);
154391 +               if ((!sk && (skb->dev->features & NETIF_F_GRO_UDP_FWD)) ||
154392 +                   (sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist)
154393 +                       pp = call_gro_receive(udp_gro_receive_segment, head, skb);
154394                 return pp;
154395         }
154397 -       if (!sk || NAPI_GRO_CB(skb)->encap_mark ||
154398 +       if (NAPI_GRO_CB(skb)->encap_mark ||
154399             (uh->check && skb->ip_summed != CHECKSUM_PARTIAL &&
154400              NAPI_GRO_CB(skb)->csum_cnt == 0 &&
154401 -            !NAPI_GRO_CB(skb)->csum_valid) ||
154402 -           !udp_sk(sk)->gro_receive)
154403 +            !NAPI_GRO_CB(skb)->csum_valid))
154404                 goto out;
154406         /* mark that this skb passed once through the tunnel gro layer */
154407 diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
154408 index 1baf43aacb2e..bc224f917bbd 100644
154409 --- a/net/ipv6/ip6_gre.c
154410 +++ b/net/ipv6/ip6_gre.c
154411 @@ -387,7 +387,6 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
154412         if (!(nt->parms.o_flags & TUNNEL_SEQ))
154413                 dev->features |= NETIF_F_LLTX;
154415 -       dev_hold(dev);
154416         ip6gre_tunnel_link(ign, nt);
154417         return nt;
154419 @@ -1496,6 +1495,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
154420         }
154421         ip6gre_tnl_init_features(dev);
154423 +       dev_hold(dev);
154424         return 0;
154426  cleanup_dst_cache_init:
154427 @@ -1538,8 +1538,6 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
154428         strcpy(tunnel->parms.name, dev->name);
154430         tunnel->hlen            = sizeof(struct ipv6hdr) + 4;
154432 -       dev_hold(dev);
154433  }
154435  static struct inet6_protocol ip6gre_protocol __read_mostly = {
154436 @@ -1889,6 +1887,7 @@ static int ip6erspan_tap_init(struct net_device *dev)
154437         dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
154438         ip6erspan_tnl_link_config(tunnel, 1);
154440 +       dev_hold(dev);
154441         return 0;
154443  cleanup_dst_cache_init:
154444 @@ -1988,8 +1987,6 @@ static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev,
154445         if (tb[IFLA_MTU])
154446                 ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
154448 -       dev_hold(dev);
154450  out:
154451         return err;
154452  }
154453 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
154454 index 42fe7db6bbb3..d42f471b0d65 100644
154455 --- a/net/ipv6/ip6_tunnel.c
154456 +++ b/net/ipv6/ip6_tunnel.c
154457 @@ -266,7 +266,6 @@ static int ip6_tnl_create2(struct net_device *dev)
154459         strcpy(t->parms.name, dev->name);
154461 -       dev_hold(dev);
154462         ip6_tnl_link(ip6n, t);
154463         return 0;
154465 @@ -1882,6 +1881,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
154466         dev->min_mtu = ETH_MIN_MTU;
154467         dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
154469 +       dev_hold(dev);
154470         return 0;
154472  destroy_dst:
154473 @@ -1925,7 +1925,6 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
154474         struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
154476         t->parms.proto = IPPROTO_IPV6;
154477 -       dev_hold(dev);
154479         rcu_assign_pointer(ip6n->tnls_wc[0], t);
154480         return 0;
154481 diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
154482 index e0cc32e45880..2d048e21abbb 100644
154483 --- a/net/ipv6/ip6_vti.c
154484 +++ b/net/ipv6/ip6_vti.c
154485 @@ -193,7 +193,6 @@ static int vti6_tnl_create2(struct net_device *dev)
154487         strcpy(t->parms.name, dev->name);
154489 -       dev_hold(dev);
154490         vti6_tnl_link(ip6n, t);
154492         return 0;
154493 @@ -934,6 +933,7 @@ static inline int vti6_dev_init_gen(struct net_device *dev)
154494         dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
154495         if (!dev->tstats)
154496                 return -ENOMEM;
154497 +       dev_hold(dev);
154498         return 0;
154499  }
154501 @@ -965,7 +965,6 @@ static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev)
154502         struct vti6_net *ip6n = net_generic(net, vti6_net_id);
154504         t->parms.proto = IPPROTO_IPV6;
154505 -       dev_hold(dev);
154507         rcu_assign_pointer(ip6n->tnls_wc[0], t);
154508         return 0;
154509 diff --git a/net/ipv6/mcast_snoop.c b/net/ipv6/mcast_snoop.c
154510 index d3d6b6a66e5f..04d5fcdfa6e0 100644
154511 --- a/net/ipv6/mcast_snoop.c
154512 +++ b/net/ipv6/mcast_snoop.c
154513 @@ -109,7 +109,7 @@ static int ipv6_mc_check_mld_msg(struct sk_buff *skb)
154514         struct mld_msg *mld;
154516         if (!ipv6_mc_may_pull(skb, len))
154517 -               return -EINVAL;
154518 +               return -ENODATA;
154520         mld = (struct mld_msg *)skb_transport_header(skb);
154522 @@ -122,7 +122,7 @@ static int ipv6_mc_check_mld_msg(struct sk_buff *skb)
154523         case ICMPV6_MGM_QUERY:
154524                 return ipv6_mc_check_mld_query(skb);
154525         default:
154526 -               return -ENOMSG;
154527 +               return -ENODATA;
154528         }
154529  }
154531 @@ -131,7 +131,7 @@ static inline __sum16 ipv6_mc_validate_checksum(struct sk_buff *skb)
154532         return skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo);
154533  }
154535 -int ipv6_mc_check_icmpv6(struct sk_buff *skb)
154536 +static int ipv6_mc_check_icmpv6(struct sk_buff *skb)
154537  {
154538         unsigned int len = skb_transport_offset(skb) + sizeof(struct icmp6hdr);
154539         unsigned int transport_len = ipv6_transport_len(skb);
154540 @@ -150,7 +150,6 @@ int ipv6_mc_check_icmpv6(struct sk_buff *skb)
154542         return 0;
154543  }
154544 -EXPORT_SYMBOL(ipv6_mc_check_icmpv6);
154546  /**
154547   * ipv6_mc_check_mld - checks whether this is a sane MLD packet
154548 @@ -161,7 +160,10 @@ EXPORT_SYMBOL(ipv6_mc_check_icmpv6);
154549   *
154550   * -EINVAL: A broken packet was detected, i.e. it violates some internet
154551   *  standard
154552 - * -ENOMSG: IP header validation succeeded but it is not an MLD packet.
154553 + * -ENOMSG: IP header validation succeeded but it is not an ICMPv6 packet
154554 + *  with a hop-by-hop option.
154555 + * -ENODATA: IP+ICMPv6 header with hop-by-hop option validation succeeded
154556 + *  but it is not an MLD packet.
154557   * -ENOMEM: A memory allocation failure happened.
154558   *
154559   * Caller needs to set the skb network header and free any returned skb if it
154560 diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
154561 index 9fdccf0718b5..fcc9ba2c80e9 100644
154562 --- a/net/ipv6/sit.c
154563 +++ b/net/ipv6/sit.c
154564 @@ -218,8 +218,6 @@ static int ipip6_tunnel_create(struct net_device *dev)
154566         ipip6_tunnel_clone_6rd(dev, sitn);
154568 -       dev_hold(dev);
154570         ipip6_tunnel_link(sitn, t);
154571         return 0;
154573 @@ -1456,7 +1454,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
154574                 dev->tstats = NULL;
154575                 return err;
154576         }
154578 +       dev_hold(dev);
154579         return 0;
154580  }
154582 @@ -1472,7 +1470,6 @@ static void __net_init ipip6_fb_tunnel_init(struct net_device *dev)
154583         iph->ihl                = 5;
154584         iph->ttl                = 64;
154586 -       dev_hold(dev);
154587         rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
154588  }
154590 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
154591 index 1b9c82616606..0331f3a3c40e 100644
154592 --- a/net/mac80211/main.c
154593 +++ b/net/mac80211/main.c
154594 @@ -1141,8 +1141,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
154595         if (local->hw.wiphy->max_scan_ie_len)
154596                 local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len;
154598 -       WARN_ON(!ieee80211_cs_list_valid(local->hw.cipher_schemes,
154599 -                                        local->hw.n_cipher_schemes));
154600 +       if (WARN_ON(!ieee80211_cs_list_valid(local->hw.cipher_schemes,
154601 +                                            local->hw.n_cipher_schemes))) {
154602 +               result = -EINVAL;
154603 +               goto fail_workqueue;
154604 +       }
154606         result = ieee80211_init_cipher_suites(local);
154607         if (result < 0)
154608 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
154609 index 96f487fc0071..0fe91dc9817e 100644
154610 --- a/net/mac80211/mlme.c
154611 +++ b/net/mac80211/mlme.c
154612 @@ -1295,6 +1295,11 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_sub_if_data *sdata)
154614         sdata->vif.csa_active = false;
154615         ifmgd->csa_waiting_bcn = false;
154616 +       /*
154617 +        * If the CSA IE is still present on the beacon after the switch,
154618 +        * we need to consider it as a new CSA (possibly to self).
154619 +        */
154620 +       ifmgd->beacon_crc_valid = false;
154622         ret = drv_post_channel_switch(sdata);
154623         if (ret) {
154624 @@ -1400,11 +1405,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
154625                 ch_switch.delay = csa_ie.max_switch_time;
154626         }
154628 -       if (res < 0) {
154629 -               ieee80211_queue_work(&local->hw,
154630 -                                    &ifmgd->csa_connection_drop_work);
154631 -               return;
154632 -       }
154633 +       if (res < 0)
154634 +               goto lock_and_drop_connection;
154636         if (beacon && sdata->vif.csa_active && !ifmgd->csa_waiting_bcn) {
154637                 if (res)
154638 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
154639 index 3b3bcefbf657..28422d687096 100644
154640 --- a/net/mac80211/tx.c
154641 +++ b/net/mac80211/tx.c
154642 @@ -2267,17 +2267,6 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
154643                                                     payload[7]);
154644         }
154646 -       /* Initialize skb->priority for QoS frames. If the DONT_REORDER flag
154647 -        * is set, stick to the default value for skb->priority to assure
154648 -        * frames injected with this flag are not reordered relative to each
154649 -        * other.
154650 -        */
154651 -       if (ieee80211_is_data_qos(hdr->frame_control) &&
154652 -           !(info->control.flags & IEEE80211_TX_CTRL_DONT_REORDER)) {
154653 -               u8 *p = ieee80211_get_qos_ctl(hdr);
154654 -               skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
154655 -       }
154657         rcu_read_lock();
154659         /*
154660 @@ -2341,6 +2330,15 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
154662         info->band = chandef->chan->band;
154664 +       /* Initialize skb->priority according to frame type and TID class,
154665 +        * with respect to the sub interface that the frame will actually
154666 +        * be transmitted on. If the DONT_REORDER flag is set, the original
154667 +        * skb-priority is preserved to assure frames injected with this
154668 +        * flag are not reordered relative to each other.
154669 +        */
154670 +       ieee80211_select_queue_80211(sdata, skb, hdr);
154671 +       skb_set_queue_mapping(skb, ieee80211_ac_from_tid(skb->priority));
154673         /* remove the injection radiotap header */
154674         skb_pull(skb, len_rthdr);
154676 diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
154677 index 4bde960e19dc..65e5d3eb1078 100644
154678 --- a/net/mptcp/protocol.c
154679 +++ b/net/mptcp/protocol.c
154680 @@ -399,6 +399,14 @@ static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
154681         return false;
154682  }
154684 +static void mptcp_set_datafin_timeout(const struct sock *sk)
154685 +{
154686 +       struct inet_connection_sock *icsk = inet_csk(sk);
154688 +       mptcp_sk(sk)->timer_ival = min(TCP_RTO_MAX,
154689 +                                      TCP_RTO_MIN << icsk->icsk_retransmits);
154690 +}
154692  static void mptcp_set_timeout(const struct sock *sk, const struct sock *ssk)
154693  {
154694         long tout = ssk && inet_csk(ssk)->icsk_pending ?
154695 @@ -1052,7 +1060,7 @@ static void __mptcp_clean_una(struct sock *sk)
154696         }
154698         if (snd_una == READ_ONCE(msk->snd_nxt)) {
154699 -               if (msk->timer_ival)
154700 +               if (msk->timer_ival && !mptcp_data_fin_enabled(msk))
154701                         mptcp_stop_timer(sk);
154702         } else {
154703                 mptcp_reset_timer(sk);
154704 @@ -1275,7 +1283,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
154705         int avail_size;
154706         size_t ret = 0;
154708 -       pr_debug("msk=%p ssk=%p sending dfrag at seq=%lld len=%d already sent=%d",
154709 +       pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u",
154710                  msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);
154712         /* compute send limit */
154713 @@ -1693,7 +1701,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
154714                         if (!msk->first_pending)
154715                                 WRITE_ONCE(msk->first_pending, dfrag);
154716                 }
154717 -               pr_debug("msk=%p dfrag at seq=%lld len=%d sent=%d new=%d", msk,
154718 +               pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d", msk,
154719                          dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
154720                          !dfrag_collapsed);
154722 @@ -2276,8 +2284,19 @@ static void __mptcp_retrans(struct sock *sk)
154724         __mptcp_clean_una_wakeup(sk);
154725         dfrag = mptcp_rtx_head(sk);
154726 -       if (!dfrag)
154727 +       if (!dfrag) {
154728 +               if (mptcp_data_fin_enabled(msk)) {
154729 +                       struct inet_connection_sock *icsk = inet_csk(sk);
154731 +                       icsk->icsk_retransmits++;
154732 +                       mptcp_set_datafin_timeout(sk);
154733 +                       mptcp_send_ack(msk);
154735 +                       goto reset_timer;
154736 +               }
154738                 return;
154739 +       }
154741         ssk = mptcp_subflow_get_retrans(msk);
154742         if (!ssk)
154743 @@ -2460,6 +2479,8 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
154744                         pr_debug("Sending DATA_FIN on subflow %p", ssk);
154745                         mptcp_set_timeout(sk, ssk);
154746                         tcp_send_ack(ssk);
154747 +                       if (!mptcp_timer_pending(sk))
154748 +                               mptcp_reset_timer(sk);
154749                 }
154750                 break;
154751         }
154752 diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
154753 index d17d39ccdf34..4fe7acaa472f 100644
154754 --- a/net/mptcp/subflow.c
154755 +++ b/net/mptcp/subflow.c
154756 @@ -524,8 +524,7 @@ static void mptcp_sock_destruct(struct sock *sk)
154757          * ESTABLISHED state and will not have the SOCK_DEAD flag.
154758          * Both result in warnings from inet_sock_destruct.
154759          */
154761 -       if (sk->sk_state == TCP_ESTABLISHED) {
154762 +       if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
154763                 sk->sk_state = TCP_CLOSE;
154764                 WARN_ON_ONCE(sk->sk_socket);
154765                 sock_orphan(sk);
154766 diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
154767 index c6c0cb465664..313d1c8ff066 100644
154768 --- a/net/netfilter/nf_conntrack_standalone.c
154769 +++ b/net/netfilter/nf_conntrack_standalone.c
154770 @@ -1060,16 +1060,10 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
154771         nf_conntrack_standalone_init_dccp_sysctl(net, table);
154772         nf_conntrack_standalone_init_gre_sysctl(net, table);
154774 -       /* Don't allow unprivileged users to alter certain sysctls */
154775 -       if (net->user_ns != &init_user_ns) {
154776 +       /* Don't allow non-init_net ns to alter global sysctls */
154777 +       if (!net_eq(&init_net, net)) {
154778                 table[NF_SYSCTL_CT_MAX].mode = 0444;
154779                 table[NF_SYSCTL_CT_EXPECT_MAX].mode = 0444;
154780 -               table[NF_SYSCTL_CT_HELPER].mode = 0444;
154781 -#ifdef CONFIG_NF_CONNTRACK_EVENTS
154782 -               table[NF_SYSCTL_CT_EVENTS].mode = 0444;
154783 -#endif
154784 -               table[NF_SYSCTL_CT_BUCKETS].mode = 0444;
154785 -       } else if (!net_eq(&init_net, net)) {
154786                 table[NF_SYSCTL_CT_BUCKETS].mode = 0444;
154787         }
154789 diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
154790 index 589d2f6978d3..878ed49d0c56 100644
154791 --- a/net/netfilter/nf_tables_api.c
154792 +++ b/net/netfilter/nf_tables_api.c
154793 @@ -6246,9 +6246,9 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
154794         INIT_LIST_HEAD(&obj->list);
154795         return err;
154796  err_trans:
154797 -       kfree(obj->key.name);
154798 -err_userdata:
154799         kfree(obj->udata);
154800 +err_userdata:
154801 +       kfree(obj->key.name);
154802  err_strdup:
154803         if (obj->ops->destroy)
154804                 obj->ops->destroy(&ctx, obj);
154805 diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
154806 index 9ae14270c543..2b00f7f47693 100644
154807 --- a/net/netfilter/nf_tables_offload.c
154808 +++ b/net/netfilter/nf_tables_offload.c
154809 @@ -45,6 +45,48 @@ void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
154810                 offsetof(struct nft_flow_key, control);
154813 +struct nft_offload_ethertype {
154814 +       __be16 value;
154815 +       __be16 mask;
154816 +};
154818 +static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
154819 +                                       struct nft_flow_rule *flow)
154820 +{
154821 +       struct nft_flow_match *match = &flow->match;
154822 +       struct nft_offload_ethertype ethertype;
154824 +       if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL) &&
154825 +           match->key.basic.n_proto != htons(ETH_P_8021Q) &&
154826 +           match->key.basic.n_proto != htons(ETH_P_8021AD))
154827 +               return;
154829 +       ethertype.value = match->key.basic.n_proto;
154830 +       ethertype.mask = match->mask.basic.n_proto;
154832 +       if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_VLAN) &&
154833 +           (match->key.vlan.vlan_tpid == htons(ETH_P_8021Q) ||
154834 +            match->key.vlan.vlan_tpid == htons(ETH_P_8021AD))) {
154835 +               match->key.basic.n_proto = match->key.cvlan.vlan_tpid;
154836 +               match->mask.basic.n_proto = match->mask.cvlan.vlan_tpid;
154837 +               match->key.cvlan.vlan_tpid = match->key.vlan.vlan_tpid;
154838 +               match->mask.cvlan.vlan_tpid = match->mask.vlan.vlan_tpid;
154839 +               match->key.vlan.vlan_tpid = ethertype.value;
154840 +               match->mask.vlan.vlan_tpid = ethertype.mask;
154841 +               match->dissector.offset[FLOW_DISSECTOR_KEY_CVLAN] =
154842 +                       offsetof(struct nft_flow_key, cvlan);
154843 +               match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN);
154844 +       } else {
154845 +               match->key.basic.n_proto = match->key.vlan.vlan_tpid;
154846 +               match->mask.basic.n_proto = match->mask.vlan.vlan_tpid;
154847 +               match->key.vlan.vlan_tpid = ethertype.value;
154848 +               match->mask.vlan.vlan_tpid = ethertype.mask;
154849 +               match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] =
154850 +                       offsetof(struct nft_flow_key, vlan);
154851 +               match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
154852 +       }
154853 +}
154855  struct nft_flow_rule *nft_flow_rule_create(struct net *net,
154856                                            const struct nft_rule *rule)
154857  {
154858 @@ -89,6 +131,8 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net,
154860                 expr = nft_expr_next(expr);
154861         }
154862 +       nft_flow_rule_transfer_vlan(ctx, flow);
154864         flow->proto = ctx->dep.l3num;
154865         kfree(ctx);
154867 diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
154868 index 916a3c7f9eaf..79fbf37291f3 100644
154869 --- a/net/netfilter/nfnetlink_osf.c
154870 +++ b/net/netfilter/nfnetlink_osf.c
154871 @@ -186,6 +186,8 @@ static const struct tcphdr *nf_osf_hdr_ctx_init(struct nf_osf_hdr_ctx *ctx,
154873                 ctx->optp = skb_header_pointer(skb, ip_hdrlen(skb) +
154874                                 sizeof(struct tcphdr), ctx->optsize, opts);
154875 +               if (!ctx->optp)
154876 +                       return NULL;
154877         }
154879         return tcp;
154880 diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
154881 index eb6a43a180bb..47b6d05f1ae6 100644
154882 --- a/net/netfilter/nft_cmp.c
154883 +++ b/net/netfilter/nft_cmp.c
154884 @@ -114,19 +114,56 @@ static int nft_cmp_dump(struct sk_buff *skb, const struct nft_expr *expr)
154885         return -1;
154888 +union nft_cmp_offload_data {
154889 +       u16     val16;
154890 +       u32     val32;
154891 +       u64     val64;
154892 +};
154894 +static void nft_payload_n2h(union nft_cmp_offload_data *data,
154895 +                           const u8 *val, u32 len)
154897 +       switch (len) {
154898 +       case 2:
154899 +               data->val16 = ntohs(*((u16 *)val));
154900 +               break;
154901 +       case 4:
154902 +               data->val32 = ntohl(*((u32 *)val));
154903 +               break;
154904 +       case 8:
154905 +               data->val64 = be64_to_cpu(*((u64 *)val));
154906 +               break;
154907 +       default:
154908 +               WARN_ON_ONCE(1);
154909 +               break;
154910 +       }
154913  static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
154914                              struct nft_flow_rule *flow,
154915                              const struct nft_cmp_expr *priv)
154917         struct nft_offload_reg *reg = &ctx->regs[priv->sreg];
154918 +       union nft_cmp_offload_data _data, _datamask;
154919         u8 *mask = (u8 *)&flow->match.mask;
154920         u8 *key = (u8 *)&flow->match.key;
154921 +       u8 *data, *datamask;
154923         if (priv->op != NFT_CMP_EQ || priv->len > reg->len)
154924                 return -EOPNOTSUPP;
154926 -       memcpy(key + reg->offset, &priv->data, reg->len);
154927 -       memcpy(mask + reg->offset, &reg->mask, reg->len);
154928 +       if (reg->flags & NFT_OFFLOAD_F_NETWORK2HOST) {
154929 +               nft_payload_n2h(&_data, (u8 *)&priv->data, reg->len);
154930 +               nft_payload_n2h(&_datamask, (u8 *)&reg->mask, reg->len);
154931 +               data = (u8 *)&_data;
154932 +               datamask = (u8 *)&_datamask;
154933 +       } else {
154934 +               data = (u8 *)&priv->data;
154935 +               datamask = (u8 *)&reg->mask;
154936 +       }
154938 +       memcpy(key + reg->offset, data, reg->len);
154939 +       memcpy(mask + reg->offset, datamask, reg->len);
154941         flow->match.dissector.used_keys |= BIT(reg->key);
154942         flow->match.dissector.offset[reg->key] = reg->base_offset;
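
nft_payload_n2h() converts a 2-, 4- or 8-byte big-endian selector into host order so that registers flagged NFT_OFFLOAD_F_NETWORK2HOST (the VLAN TCI in the next file) reach drivers in the representation flow offload expects. A standalone rendering of the dispatch, using glibc's be64toh in place of be64_to_cpu and memcpy to sidestep the alignment-punning cast (a userspace sketch, not the kernel code):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>
#include <endian.h>

union offload_data {
        uint16_t val16;
        uint32_t val32;
        uint64_t val64;
};

static void payload_n2h(union offload_data *data, const uint8_t *val,
                        uint32_t len)
{
        switch (len) {
        case 2:
                memcpy(&data->val16, val, 2);
                data->val16 = ntohs(data->val16);
                break;
        case 4:
                memcpy(&data->val32, val, 4);
                data->val32 = ntohl(data->val32);
                break;
        case 8:
                memcpy(&data->val64, val, 8);
                data->val64 = be64toh(data->val64);
                break;
        default: /* the kernel WARNs here; lengths are validated earlier */
                break;
        }
}

int main(void)
{
        uint8_t wire_port[2] = { 0x01, 0xbb }; /* 443 in network order */
        union offload_data d;

        payload_n2h(&d, wire_port, sizeof(wire_port));
        printf("%u\n", d.val16); /* prints 443 */
        return 0;
}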
154943 diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
154944 index cb1c8c231880..501c5b24cc39 100644
154945 --- a/net/netfilter/nft_payload.c
154946 +++ b/net/netfilter/nft_payload.c
154947 @@ -226,8 +226,9 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
154948                 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
154949                         return -EOPNOTSUPP;
154951 -               NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
154952 -                                 vlan_tci, sizeof(__be16), reg);
154953 +               NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan,
154954 +                                       vlan_tci, sizeof(__be16), reg,
154955 +                                       NFT_OFFLOAD_F_NETWORK2HOST);
154956                 break;
154957         case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
154958                 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
154959 @@ -241,16 +242,18 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
154960                 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
154961                         return -EOPNOTSUPP;
154963 -               NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
154964 -                                 vlan_tci, sizeof(__be16), reg);
154965 +               NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
154966 +                                       vlan_tci, sizeof(__be16), reg,
154967 +                                       NFT_OFFLOAD_F_NETWORK2HOST);
154968                 break;
154969         case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
154970                                                         sizeof(struct vlan_hdr):
154971                 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
154972                         return -EOPNOTSUPP;
154974 -               NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
154975 +               NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
154976                                   vlan_tpid, sizeof(__be16), reg);
154977 +               nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
154978                 break;
154979         default:
154980                 return -EOPNOTSUPP;
154981 diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
154982 index bf618b7ec1ae..560c2cda52ee 100644
154983 --- a/net/netfilter/nft_set_hash.c
154984 +++ b/net/netfilter/nft_set_hash.c
154985 @@ -406,9 +406,17 @@ static void nft_rhash_destroy(const struct nft_set *set)
154986                                     (void *)set);
154989 +/* Number of buckets is stored in u32, so cap our result to 1U<<31 */
154990 +#define NFT_MAX_BUCKETS (1U << 31)
154992  static u32 nft_hash_buckets(u32 size)
154994 -       return roundup_pow_of_two(size * 4 / 3);
154995 +       u64 val = div_u64((u64)size * 4, 3);
154997 +       if (val >= NFT_MAX_BUCKETS)
154998 +               return NFT_MAX_BUCKETS;
155000 +       return roundup_pow_of_two(val);
155003  static bool nft_rhash_estimate(const struct nft_set_desc *desc, u32 features,
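
The old expression computed size * 4 / 3 in 32-bit arithmetic, so any set size above 2^30 wrapped before roundup_pow_of_two() ran and produced a tiny bucket count. The fix widens the intermediate to u64 and caps the result at 1U << 31, since the bucket count is stored in a u32 and rounding up to 2^32 would not fit. The same computation in isolation (roundup_pow_of_two is open-coded because the kernel helper is not available in userspace):

#include <stdio.h>
#include <stdint.h>

#define NFT_MAX_BUCKETS (1U << 31)

static uint32_t roundup_pow_of_two64(uint64_t v)
{
        uint64_t r = 1;

        while (r < v)
                r <<= 1;
        return (uint32_t)r;
}

static uint32_t hash_buckets(uint32_t size)
{
        uint64_t val = (uint64_t)size * 4 / 3;

        if (val >= NFT_MAX_BUCKETS)
                return NFT_MAX_BUCKETS;
        return roundup_pow_of_two64(val);
}

int main(void)
{
        /* 0xffffffff * 4 would wrap in 32-bit math and round up to a
         * tiny bucket count; the u64 path caps it instead. */
        printf("%u\n", hash_buckets(0xffffffffu)); /* 2147483648 */
        printf("%u\n", hash_buckets(768));         /* 1024 */
        return 0;
}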
155004 diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
155005 index 75625d13e976..498a0bf6f044 100644
155006 --- a/net/netfilter/xt_SECMARK.c
155007 +++ b/net/netfilter/xt_SECMARK.c
155008 @@ -24,10 +24,9 @@ MODULE_ALIAS("ip6t_SECMARK");
155009  static u8 mode;
155011  static unsigned int
155012 -secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
155013 +secmark_tg(struct sk_buff *skb, const struct xt_secmark_target_info_v1 *info)
155015         u32 secmark = 0;
155016 -       const struct xt_secmark_target_info *info = par->targinfo;
155018         switch (mode) {
155019         case SECMARK_MODE_SEL:
155020 @@ -41,7 +40,7 @@ secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
155021         return XT_CONTINUE;
155024 -static int checkentry_lsm(struct xt_secmark_target_info *info)
155025 +static int checkentry_lsm(struct xt_secmark_target_info_v1 *info)
155027         int err;
155029 @@ -73,15 +72,15 @@ static int checkentry_lsm(struct xt_secmark_target_info *info)
155030         return 0;
155033 -static int secmark_tg_check(const struct xt_tgchk_param *par)
155034 +static int
155035 +secmark_tg_check(const char *table, struct xt_secmark_target_info_v1 *info)
155037 -       struct xt_secmark_target_info *info = par->targinfo;
155038         int err;
155040 -       if (strcmp(par->table, "mangle") != 0 &&
155041 -           strcmp(par->table, "security") != 0) {
155042 +       if (strcmp(table, "mangle") != 0 &&
155043 +           strcmp(table, "security") != 0) {
155044                 pr_info_ratelimited("only valid in \'mangle\' or \'security\' table, not \'%s\'\n",
155045 -                                   par->table);
155046 +                                   table);
155047                 return -EINVAL;
155048         }
155050 @@ -116,25 +115,76 @@ static void secmark_tg_destroy(const struct xt_tgdtor_param *par)
155051         }
155054 -static struct xt_target secmark_tg_reg __read_mostly = {
155055 -       .name       = "SECMARK",
155056 -       .revision   = 0,
155057 -       .family     = NFPROTO_UNSPEC,
155058 -       .checkentry = secmark_tg_check,
155059 -       .destroy    = secmark_tg_destroy,
155060 -       .target     = secmark_tg,
155061 -       .targetsize = sizeof(struct xt_secmark_target_info),
155062 -       .me         = THIS_MODULE,
155063 +static int secmark_tg_check_v0(const struct xt_tgchk_param *par)
155065 +       struct xt_secmark_target_info *info = par->targinfo;
155066 +       struct xt_secmark_target_info_v1 newinfo = {
155067 +               .mode   = info->mode,
155068 +       };
155069 +       int ret;
155071 +       memcpy(newinfo.secctx, info->secctx, SECMARK_SECCTX_MAX);
155073 +       ret = secmark_tg_check(par->table, &newinfo);
155074 +       info->secid = newinfo.secid;
155076 +       return ret;
155079 +static unsigned int
155080 +secmark_tg_v0(struct sk_buff *skb, const struct xt_action_param *par)
155082 +       const struct xt_secmark_target_info *info = par->targinfo;
155083 +       struct xt_secmark_target_info_v1 newinfo = {
155084 +               .secid  = info->secid,
155085 +       };
155087 +       return secmark_tg(skb, &newinfo);
155090 +static int secmark_tg_check_v1(const struct xt_tgchk_param *par)
155092 +       return secmark_tg_check(par->table, par->targinfo);
155095 +static unsigned int
155096 +secmark_tg_v1(struct sk_buff *skb, const struct xt_action_param *par)
155098 +       return secmark_tg(skb, par->targinfo);
155101 +static struct xt_target secmark_tg_reg[] __read_mostly = {
155102 +       {
155103 +               .name           = "SECMARK",
155104 +               .revision       = 0,
155105 +               .family         = NFPROTO_UNSPEC,
155106 +               .checkentry     = secmark_tg_check_v0,
155107 +               .destroy        = secmark_tg_destroy,
155108 +               .target         = secmark_tg_v0,
155109 +               .targetsize     = sizeof(struct xt_secmark_target_info),
155110 +               .me             = THIS_MODULE,
155111 +       },
155112 +       {
155113 +               .name           = "SECMARK",
155114 +               .revision       = 1,
155115 +               .family         = NFPROTO_UNSPEC,
155116 +               .checkentry     = secmark_tg_check_v1,
155117 +               .destroy        = secmark_tg_destroy,
155118 +               .target         = secmark_tg_v1,
155119 +               .targetsize     = sizeof(struct xt_secmark_target_info_v1),
155120 +               .usersize       = offsetof(struct xt_secmark_target_info_v1, secid),
155121 +               .me             = THIS_MODULE,
155122 +       },
155123 +};
155125  static int __init secmark_tg_init(void)
155127 -       return xt_register_target(&secmark_tg_reg);
155128 +       return xt_register_targets(secmark_tg_reg, ARRAY_SIZE(secmark_tg_reg));
155131  static void __exit secmark_tg_exit(void)
155133 -       xt_unregister_target(&secmark_tg_reg);
155134 +       xt_unregister_targets(secmark_tg_reg, ARRAY_SIZE(secmark_tg_reg));
155137  module_init(secmark_tg_init);
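
The rework keeps a single implementation that operates on the v1 layout and reduces revision 0 to a shim: convert the old targinfo in, run the shared path, and copy the computed secid back out, so both ABI revisions stay supported from one body of logic. (The .usersize line above additionally marks secid as kernel-private, so it is never copied back to or compared against userspace.) A userspace model of the shim pattern, with illustrative layouts rather than the real xt_secmark ABI:

#include <stdio.h>
#include <string.h>

#define SECCTX_MAX 256

struct info_v0 { unsigned char mode; unsigned int secid; char secctx[SECCTX_MAX]; };
struct info_v1 { unsigned char mode; unsigned int secid; char secctx[SECCTX_MAX]; };

/* Shared check path works only on the newest layout. */
static int check_common(struct info_v1 *info)
{
        info->secid = 42; /* stand-in for the LSM context lookup */
        return 0;
}

/* v0 entry point: convert, run the common path, copy results back. */
static int check_v0(struct info_v0 *old)
{
        struct info_v1 tmp = { .mode = old->mode };
        int ret;

        memcpy(tmp.secctx, old->secctx, SECCTX_MAX);
        ret = check_common(&tmp);
        old->secid = tmp.secid;
        return ret;
}

int main(void)
{
        struct info_v0 legacy = { .mode = 1 };

        check_v0(&legacy);
        printf("secid=%u\n", legacy.secid);
        return 0;
}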
155138 diff --git a/net/nfc/digital_dep.c b/net/nfc/digital_dep.c
155139 index 5971fb6f51cc..dc21b4141b0a 100644
155140 --- a/net/nfc/digital_dep.c
155141 +++ b/net/nfc/digital_dep.c
155142 @@ -1273,6 +1273,8 @@ static void digital_tg_recv_dep_req(struct nfc_digital_dev *ddev, void *arg,
155143         }
155145         rc = nfc_tm_data_received(ddev->nfc_dev, resp);
155146 +       if (rc)
155147 +               resp = NULL;
155149  exit:
155150         kfree_skb(ddev->chaining_skb);
155151 diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
155152 index a3b46f888803..53dbe733f998 100644
155153 --- a/net/nfc/llcp_sock.c
155154 +++ b/net/nfc/llcp_sock.c
155155 @@ -109,12 +109,14 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
155156                                           GFP_KERNEL);
155157         if (!llcp_sock->service_name) {
155158                 nfc_llcp_local_put(llcp_sock->local);
155159 +               llcp_sock->local = NULL;
155160                 ret = -ENOMEM;
155161                 goto put_dev;
155162         }
155163         llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
155164         if (llcp_sock->ssap == LLCP_SAP_MAX) {
155165                 nfc_llcp_local_put(llcp_sock->local);
155166 +               llcp_sock->local = NULL;
155167                 kfree(llcp_sock->service_name);
155168                 llcp_sock->service_name = NULL;
155169                 ret = -EADDRINUSE;
155170 @@ -709,6 +711,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
155171         llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
155172         if (llcp_sock->ssap == LLCP_SAP_MAX) {
155173                 nfc_llcp_local_put(llcp_sock->local);
155174 +               llcp_sock->local = NULL;
155175                 ret = -ENOMEM;
155176                 goto put_dev;
155177         }
155178 @@ -756,6 +759,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
155179  sock_llcp_release:
155180         nfc_llcp_put_ssap(local, llcp_sock->ssap);
155181         nfc_llcp_local_put(llcp_sock->local);
155182 +       llcp_sock->local = NULL;
155184  put_dev:
155185         nfc_put_device(dev);
155186 diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
155187 index 59257400697d..142d71c8d652 100644
155188 --- a/net/nfc/nci/core.c
155189 +++ b/net/nfc/nci/core.c
155190 @@ -1191,6 +1191,7 @@ EXPORT_SYMBOL(nci_allocate_device);
155191  void nci_free_device(struct nci_dev *ndev)
155193         nfc_free_device(ndev->nfc_dev);
155194 +       nci_hci_deallocate(ndev);
155195         kfree(ndev);
155197  EXPORT_SYMBOL(nci_free_device);
155198 diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
155199 index 6b275a387a92..96865142104f 100644
155200 --- a/net/nfc/nci/hci.c
155201 +++ b/net/nfc/nci/hci.c
155202 @@ -792,3 +792,8 @@ struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev)
155204         return hdev;
155207 +void nci_hci_deallocate(struct nci_dev *ndev)
155209 +       kfree(ndev->hci_dev);
155211 diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
155212 index 92a0b67b2728..77d924ab8cdb 100644
155213 --- a/net/openvswitch/actions.c
155214 +++ b/net/openvswitch/actions.c
155215 @@ -827,17 +827,17 @@ static void ovs_fragment(struct net *net, struct vport *vport,
155216         }
155218         if (key->eth.type == htons(ETH_P_IP)) {
155219 -               struct dst_entry ovs_dst;
155220 +               struct rtable ovs_rt = { 0 };
155221                 unsigned long orig_dst;
155223                 prepare_frag(vport, skb, orig_network_offset,
155224                              ovs_key_mac_proto(key));
155225 -               dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
155226 +               dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
155227                          DST_OBSOLETE_NONE, DST_NOCOUNT);
155228 -               ovs_dst.dev = vport->dev;
155229 +               ovs_rt.dst.dev = vport->dev;
155231                 orig_dst = skb->_skb_refdst;
155232 -               skb_dst_set_noref(skb, &ovs_dst);
155233 +               skb_dst_set_noref(skb, &ovs_rt.dst);
155234                 IPCB(skb)->frag_max_size = mru;
155236                 ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
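
This hunk, and the matching sch_frag.c hunk further down, replace an on-stack dst_entry with an on-stack rtable whose dst member is passed along: the IPv4 fragmentation path may container_of() the dst back to its enclosing rtable, which walks off the end of a bare dst_entry. The hazard in miniature (types are made up for the demo):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct dst { void *dev; };
struct rt  { struct dst dst; unsigned int rt_mtu; };

static unsigned int consumer_mtu(struct dst *d)
{
        /* Assumes d is embedded in struct rt, as IPv4 output code does. */
        return container_of(d, struct rt, dst)->rt_mtu;
}

int main(void)
{
        struct rt full = { .rt_mtu = 1500 };
        struct dst bare = { 0 };

        printf("%u\n", consumer_mtu(&full.dst)); /* fine: 1500 */
        /* consumer_mtu(&bare) would read past the object; hence the
         * switch from a stack dst_entry to a stack rtable above. */
        (void)bare;
        return 0;
}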
155237 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
155238 index e24b2841c643..9611e41c7b8b 100644
155239 --- a/net/packet/af_packet.c
155240 +++ b/net/packet/af_packet.c
155241 @@ -1359,7 +1359,7 @@ static unsigned int fanout_demux_rollover(struct packet_fanout *f,
155242         struct packet_sock *po, *po_next, *po_skip = NULL;
155243         unsigned int i, j, room = ROOM_NONE;
155245 -       po = pkt_sk(f->arr[idx]);
155246 +       po = pkt_sk(rcu_dereference(f->arr[idx]));
155248         if (try_self) {
155249                 room = packet_rcv_has_room(po, skb);
155250 @@ -1371,7 +1371,7 @@ static unsigned int fanout_demux_rollover(struct packet_fanout *f,
155252         i = j = min_t(int, po->rollover->sock, num - 1);
155253         do {
155254 -               po_next = pkt_sk(f->arr[i]);
155255 +               po_next = pkt_sk(rcu_dereference(f->arr[i]));
155256                 if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
155257                     packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
155258                         if (i != j)
155259 @@ -1466,7 +1466,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
155260         if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
155261                 idx = fanout_demux_rollover(f, skb, idx, true, num);
155263 -       po = pkt_sk(f->arr[idx]);
155264 +       po = pkt_sk(rcu_dereference(f->arr[idx]));
155265         return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
155268 @@ -1480,7 +1480,7 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po)
155269         struct packet_fanout *f = po->fanout;
155271         spin_lock(&f->lock);
155272 -       f->arr[f->num_members] = sk;
155273 +       rcu_assign_pointer(f->arr[f->num_members], sk);
155274         smp_wmb();
155275         f->num_members++;
155276         if (f->num_members == 1)
155277 @@ -1495,11 +1495,14 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
155279         spin_lock(&f->lock);
155280         for (i = 0; i < f->num_members; i++) {
155281 -               if (f->arr[i] == sk)
155282 +               if (rcu_dereference_protected(f->arr[i],
155283 +                                             lockdep_is_held(&f->lock)) == sk)
155284                         break;
155285         }
155286         BUG_ON(i >= f->num_members);
155287 -       f->arr[i] = f->arr[f->num_members - 1];
155288 +       rcu_assign_pointer(f->arr[i],
155289 +                          rcu_dereference_protected(f->arr[f->num_members - 1],
155290 +                                                    lockdep_is_held(&f->lock)));
155291         f->num_members--;
155292         if (f->num_members == 0)
155293                 __dev_remove_pack(&f->prot_hook);
155294 diff --git a/net/packet/internal.h b/net/packet/internal.h
155295 index 5f61e59ebbff..48af35b1aed2 100644
155296 --- a/net/packet/internal.h
155297 +++ b/net/packet/internal.h
155298 @@ -94,7 +94,7 @@ struct packet_fanout {
155299         spinlock_t              lock;
155300         refcount_t              sk_ref;
155301         struct packet_type      prot_hook ____cacheline_aligned_in_smp;
155302 -       struct sock             *arr[];
155303 +       struct sock     __rcu   *arr[];
155306  struct packet_rollover {
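
The __rcu annotation makes sparse and lockdep enforce the access discipline used in the af_packet hunks: lock-free readers go through rcu_dereference(), while writers, already serialized by f->lock, publish with rcu_assign_pointer() and read existing slots with rcu_dereference_protected(). A kernel-style sketch of those rules (illustrative only, not a buildable module):

struct fanout_demo {
        spinlock_t lock;
        struct sock __rcu *arr[8];
};

/* Reader: inside rcu_read_lock()ed sections, e.g. packet_rcv_fanout(). */
static struct sock *demo_get(struct fanout_demo *f, unsigned int i)
{
        return rcu_dereference(f->arr[i]);
}

/* Writer: slots change only under f->lock, so a plain read of another
 * slot is safe; rcu_dereference_protected() documents that fact for
 * lockdep instead of issuing a read barrier. */
static void demo_swap_last(struct fanout_demo *f, unsigned int i,
                           unsigned int last)
{
        spin_lock(&f->lock);
        rcu_assign_pointer(f->arr[i],
                           rcu_dereference_protected(f->arr[last],
                                                     lockdep_is_held(&f->lock)));
        spin_unlock(&f->lock);
}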
155307 diff --git a/net/qrtr/mhi.c b/net/qrtr/mhi.c
155308 index 2bf2b1943e61..fa611678af05 100644
155309 --- a/net/qrtr/mhi.c
155310 +++ b/net/qrtr/mhi.c
155311 @@ -50,6 +50,9 @@ static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
155312         struct qrtr_mhi_dev *qdev = container_of(ep, struct qrtr_mhi_dev, ep);
155313         int rc;
155315 +       if (skb->sk)
155316 +               sock_hold(skb->sk);
155318         rc = skb_linearize(skb);
155319         if (rc)
155320                 goto free_skb;
155321 @@ -59,12 +62,11 @@ static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
155322         if (rc)
155323                 goto free_skb;
155325 -       if (skb->sk)
155326 -               sock_hold(skb->sk);
155328         return rc;
155330  free_skb:
155331 +       if (skb->sk)
155332 +               sock_put(skb->sk);
155333         kfree_skb(skb);
155335         return rc;
155336 diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
155337 index 16e888a9601d..48fdf7293dea 100644
155338 --- a/net/sched/act_ct.c
155339 +++ b/net/sched/act_ct.c
155340 @@ -732,7 +732,8 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
155341  #endif
155342         }
155344 -       *qdisc_skb_cb(skb) = cb;
155345 +       if (err != -EINPROGRESS)
155346 +               *qdisc_skb_cb(skb) = cb;
155347         skb_clear_hash(skb);
155348         skb->ignore_df = 1;
155349         return err;
155350 @@ -967,7 +968,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
155351         err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
155352         if (err == -EINPROGRESS) {
155353                 retval = TC_ACT_STOLEN;
155354 -               goto out;
155355 +               goto out_clear;
155356         }
155357         if (err)
155358                 goto drop;
155359 @@ -1030,7 +1031,6 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
155360  out_push:
155361         skb_push_rcsum(skb, nh_ofs);
155363 -out:
155364         qdisc_skb_cb(skb)->post_ct = true;
155365  out_clear:
155366         tcf_action_update_bstats(&c->common, skb);
155367 diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
155368 index c69a4ba9c33f..3035f96c6e6c 100644
155369 --- a/net/sched/cls_flower.c
155370 +++ b/net/sched/cls_flower.c
155371 @@ -209,16 +209,16 @@ static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
155372                                   struct fl_flow_key *key,
155373                                   struct fl_flow_key *mkey)
155375 -       __be16 min_mask, max_mask, min_val, max_val;
155376 +       u16 min_mask, max_mask, min_val, max_val;
155378 -       min_mask = htons(filter->mask->key.tp_range.tp_min.dst);
155379 -       max_mask = htons(filter->mask->key.tp_range.tp_max.dst);
155380 -       min_val = htons(filter->key.tp_range.tp_min.dst);
155381 -       max_val = htons(filter->key.tp_range.tp_max.dst);
155382 +       min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
155383 +       max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
155384 +       min_val = ntohs(filter->key.tp_range.tp_min.dst);
155385 +       max_val = ntohs(filter->key.tp_range.tp_max.dst);
155387         if (min_mask && max_mask) {
155388 -               if (htons(key->tp_range.tp.dst) < min_val ||
155389 -                   htons(key->tp_range.tp.dst) > max_val)
155390 +               if (ntohs(key->tp_range.tp.dst) < min_val ||
155391 +                   ntohs(key->tp_range.tp.dst) > max_val)
155392                         return false;
155394                 /* skb does not have min and max values */
155395 @@ -232,16 +232,16 @@ static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
155396                                   struct fl_flow_key *key,
155397                                   struct fl_flow_key *mkey)
155399 -       __be16 min_mask, max_mask, min_val, max_val;
155400 +       u16 min_mask, max_mask, min_val, max_val;
155402 -       min_mask = htons(filter->mask->key.tp_range.tp_min.src);
155403 -       max_mask = htons(filter->mask->key.tp_range.tp_max.src);
155404 -       min_val = htons(filter->key.tp_range.tp_min.src);
155405 -       max_val = htons(filter->key.tp_range.tp_max.src);
155406 +       min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
155407 +       max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
155408 +       min_val = ntohs(filter->key.tp_range.tp_min.src);
155409 +       max_val = ntohs(filter->key.tp_range.tp_max.src);
155411         if (min_mask && max_mask) {
155412 -               if (htons(key->tp_range.tp.src) < min_val ||
155413 -                   htons(key->tp_range.tp.src) > max_val)
155414 +               if (ntohs(key->tp_range.tp.src) < min_val ||
155415 +                   ntohs(key->tp_range.tp.src) > max_val)
155416                         return false;
155418                 /* skb does not have min and max values */
155419 @@ -783,16 +783,16 @@ static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
155420                        TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
155422         if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
155423 -           htons(key->tp_range.tp_max.dst) <=
155424 -           htons(key->tp_range.tp_min.dst)) {
155425 +           ntohs(key->tp_range.tp_max.dst) <=
155426 +           ntohs(key->tp_range.tp_min.dst)) {
155427                 NL_SET_ERR_MSG_ATTR(extack,
155428                                     tb[TCA_FLOWER_KEY_PORT_DST_MIN],
155429                                     "Invalid destination port range (min must be strictly smaller than max)");
155430                 return -EINVAL;
155431         }
155432         if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
155433 -           htons(key->tp_range.tp_max.src) <=
155434 -           htons(key->tp_range.tp_min.src)) {
155435 +           ntohs(key->tp_range.tp_max.src) <=
155436 +           ntohs(key->tp_range.tp_min.src)) {
155437                 NL_SET_ERR_MSG_ATTR(extack,
155438                                     tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
155439                                     "Invalid source port range (min must be strictly smaller than max)");
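
Numeric range checks on __be16 ports only make sense in host byte order, because the raw big-endian representation sorts incorrectly on little-endian hosts. (htons() and ntohs() perform the same byte swap, so the old code already compared host-order values; the rewrite makes the direction, and the u16 typing, honest for sparse.) A small demonstration of why the conversion matters at all:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
        uint16_t a = htons(256); /* bytes 01 00, value 0x0001 on LE */
        uint16_t b = htons(255); /* bytes 00 ff, value 0xff00 on LE */

        printf("raw:  256 < 255? %s\n", a < b ? "yes" : "no"); /* yes (!) */
        printf("host: 256 < 255? %s\n",
               ntohs(a) < ntohs(b) ? "yes" : "no");            /* no */
        return 0;
}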
155440 diff --git a/net/sched/sch_frag.c b/net/sched/sch_frag.c
155441 index e1e77d3fb6c0..8c06381391d6 100644
155442 --- a/net/sched/sch_frag.c
155443 +++ b/net/sched/sch_frag.c
155444 @@ -90,16 +90,16 @@ static int sch_fragment(struct net *net, struct sk_buff *skb,
155445         }
155447         if (skb_protocol(skb, true) == htons(ETH_P_IP)) {
155448 -               struct dst_entry sch_frag_dst;
155449 +               struct rtable sch_frag_rt = { 0 };
155450                 unsigned long orig_dst;
155452                 sch_frag_prepare_frag(skb, xmit);
155453 -               dst_init(&sch_frag_dst, &sch_frag_dst_ops, NULL, 1,
155454 +               dst_init(&sch_frag_rt.dst, &sch_frag_dst_ops, NULL, 1,
155455                          DST_OBSOLETE_NONE, DST_NOCOUNT);
155456 -               sch_frag_dst.dev = skb->dev;
155457 +               sch_frag_rt.dst.dev = skb->dev;
155459                 orig_dst = skb->_skb_refdst;
155460 -               skb_dst_set_noref(skb, &sch_frag_dst);
155461 +               skb_dst_set_noref(skb, &sch_frag_rt.dst);
155462                 IPCB(skb)->frag_max_size = mru;
155464                 ret = ip_do_fragment(net, skb->sk, skb, sch_frag_xmit);
155465 diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
155466 index 8287894541e3..909c798b7403 100644
155467 --- a/net/sched/sch_taprio.c
155468 +++ b/net/sched/sch_taprio.c
155469 @@ -901,6 +901,12 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
155471                 list_for_each_entry(entry, &new->entries, list)
155472                         cycle = ktime_add_ns(cycle, entry->interval);
155474 +               if (!cycle) {
155475 +                       NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
155476 +                       return -EINVAL;
155477 +               }
155479                 new->cycle_time = cycle;
155480         }
155482 diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
155483 index f77484df097b..da4ce0947c3a 100644
155484 --- a/net/sctp/sm_make_chunk.c
155485 +++ b/net/sctp/sm_make_chunk.c
155486 @@ -3147,7 +3147,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
155487                  * primary.
155488                  */
155489                 if (af->is_any(&addr))
155490 -                       memcpy(&addr.v4, sctp_source(asconf), sizeof(addr));
155491 +                       memcpy(&addr, sctp_source(asconf), sizeof(addr));
155493                 if (security_sctp_bind_connect(asoc->ep->base.sk,
155494                                                SCTP_PARAM_SET_PRIMARY,
155495 diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
155496 index af2b7041fa4e..73bb4c6e9201 100644
155497 --- a/net/sctp/sm_statefuns.c
155498 +++ b/net/sctp/sm_statefuns.c
155499 @@ -1852,20 +1852,35 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
155500                         SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
155501         sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_ASCONF_QUEUE, SCTP_NULL());
155503 -       repl = sctp_make_cookie_ack(new_asoc, chunk);
155504 +       /* Update the content of current association. */
155505 +       if (sctp_assoc_update((struct sctp_association *)asoc, new_asoc)) {
155506 +               struct sctp_chunk *abort;
155508 +               abort = sctp_make_abort(asoc, NULL, sizeof(struct sctp_errhdr));
155509 +               if (abort) {
155510 +                       sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
155511 +                       sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
155512 +               }
155513 +               sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED));
155514 +               sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
155515 +                               SCTP_PERR(SCTP_ERROR_RSRC_LOW));
155516 +               SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
155517 +               SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
155518 +               goto nomem;
155519 +       }
155521 +       repl = sctp_make_cookie_ack(asoc, chunk);
155522         if (!repl)
155523                 goto nomem;
155525         /* Report association restart to upper layer. */
155526         ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_RESTART, 0,
155527 -                                            new_asoc->c.sinit_num_ostreams,
155528 -                                            new_asoc->c.sinit_max_instreams,
155529 +                                            asoc->c.sinit_num_ostreams,
155530 +                                            asoc->c.sinit_max_instreams,
155531                                              NULL, GFP_ATOMIC);
155532         if (!ev)
155533                 goto nomem_ev;
155535 -       /* Update the content of current association. */
155536 -       sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
155537         sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
155538         if ((sctp_state(asoc, SHUTDOWN_PENDING) ||
155539              sctp_state(asoc, SHUTDOWN_SENT)) &&
155540 @@ -1929,7 +1944,8 @@ static enum sctp_disposition sctp_sf_do_dupcook_b(
155541         sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
155542         sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
155543                         SCTP_STATE(SCTP_STATE_ESTABLISHED));
155544 -       SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
155545 +       if (asoc->state < SCTP_STATE_ESTABLISHED)
155546 +               SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
155547         sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
155549         repl = sctp_make_cookie_ack(new_asoc, chunk);
155550 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
155551 index b9b3d899a611..4ae428f2f2c5 100644
155552 --- a/net/sctp/socket.c
155553 +++ b/net/sctp/socket.c
155554 @@ -357,6 +357,18 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
155555         return af;
155558 +static void sctp_auto_asconf_init(struct sctp_sock *sp)
155560 +       struct net *net = sock_net(&sp->inet.sk);
155562 +       if (net->sctp.default_auto_asconf) {
155563 +               spin_lock(&net->sctp.addr_wq_lock);
155564 +               list_add_tail(&sp->auto_asconf_list, &net->sctp.auto_asconf_splist);
155565 +               spin_unlock(&net->sctp.addr_wq_lock);
155566 +               sp->do_auto_asconf = 1;
155567 +       }
155570  /* Bind a local address either to an endpoint or to an association.  */
155571  static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
155573 @@ -418,8 +430,10 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
155574                 return -EADDRINUSE;
155576         /* Refresh ephemeral port.  */
155577 -       if (!bp->port)
155578 +       if (!bp->port) {
155579                 bp->port = inet_sk(sk)->inet_num;
155580 +               sctp_auto_asconf_init(sp);
155581 +       }
155583         /* Add the address to the bind address list.
155584          * Use GFP_ATOMIC since BHs will be disabled.
155585 @@ -1520,9 +1534,11 @@ static void sctp_close(struct sock *sk, long timeout)
155587         /* Supposedly, no process has access to the socket, but
155588          * the net layers still may.
155589 +        * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
155590 +        * held and that should be grabbed before socket lock.
155591          */
155592 -       local_bh_disable();
155593 -       bh_lock_sock(sk);
155594 +       spin_lock_bh(&net->sctp.addr_wq_lock);
155595 +       bh_lock_sock_nested(sk);
155597         /* Hold the sock, since sk_common_release() will put sock_put()
155598          * and we have just a little more cleanup.
155599 @@ -1531,7 +1547,7 @@ static void sctp_close(struct sock *sk, long timeout)
155600         sk_common_release(sk);
155602         bh_unlock_sock(sk);
155603 -       local_bh_enable();
155604 +       spin_unlock_bh(&net->sctp.addr_wq_lock);
155606         sock_put(sk);
155608 @@ -4991,16 +5007,6 @@ static int sctp_init_sock(struct sock *sk)
155609         sk_sockets_allocated_inc(sk);
155610         sock_prot_inuse_add(net, sk->sk_prot, 1);
155612 -       if (net->sctp.default_auto_asconf) {
155613 -               spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
155614 -               list_add_tail(&sp->auto_asconf_list,
155615 -                   &net->sctp.auto_asconf_splist);
155616 -               sp->do_auto_asconf = 1;
155617 -               spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
155618 -       } else {
155619 -               sp->do_auto_asconf = 0;
155620 -       }
155622         local_bh_enable();
155624         return 0;
155625 @@ -5025,9 +5031,7 @@ static void sctp_destroy_sock(struct sock *sk)
155627         if (sp->do_auto_asconf) {
155628                 sp->do_auto_asconf = 0;
155629 -               spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
155630                 list_del(&sp->auto_asconf_list);
155631 -               spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
155632         }
155633         sctp_endpoint_free(sp->ep);
155634         local_bh_disable();
155635 @@ -9398,6 +9402,8 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
155636                         return err;
155637         }
155639 +       sctp_auto_asconf_init(newsp);
155641         /* Move any messages in the old socket's receive queue that are for the
155642          * peeled off association to the new socket's receive queue.
155643          */
155644 diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
155645 index 47340b3b514f..cb23cca72c24 100644
155646 --- a/net/smc/af_smc.c
155647 +++ b/net/smc/af_smc.c
155648 @@ -2162,6 +2162,9 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
155649         struct smc_sock *smc;
155650         int val, rc;
155652 +       if (level == SOL_TCP && optname == TCP_ULP)
155653 +               return -EOPNOTSUPP;
155655         smc = smc_sk(sk);
155657         /* generic setsockopts reaching us here always apply to the
155658 @@ -2186,7 +2189,6 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
155659         if (rc || smc->use_fallback)
155660                 goto out;
155661         switch (optname) {
155662 -       case TCP_ULP:
155663         case TCP_FASTOPEN:
155664         case TCP_FASTOPEN_CONNECT:
155665         case TCP_FASTOPEN_KEY:
155666 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
155667 index 612f0a641f4c..f555d335e910 100644
155668 --- a/net/sunrpc/clnt.c
155669 +++ b/net/sunrpc/clnt.c
155670 @@ -1799,7 +1799,6 @@ call_allocate(struct rpc_task *task)
155672         status = xprt->ops->buf_alloc(task);
155673         trace_rpc_buf_alloc(task, status);
155674 -       xprt_inject_disconnect(xprt);
155675         if (status == 0)
155676                 return;
155677         if (status != -ENOMEM) {
155678 @@ -2457,12 +2456,6 @@ call_decode(struct rpc_task *task)
155679                 task->tk_flags &= ~RPC_CALL_MAJORSEEN;
155680         }
155682 -       /*
155683 -        * Ensure that we see all writes made by xprt_complete_rqst()
155684 -        * before it changed req->rq_reply_bytes_recvd.
155685 -        */
155686 -       smp_rmb();
155688         /*
155689          * Did we ever call xprt_complete_rqst()? If not, we should assume
155690          * the message is incomplete.
155691 @@ -2471,6 +2464,11 @@ call_decode(struct rpc_task *task)
155692         if (!req->rq_reply_bytes_recvd)
155693                 goto out;
155695 +       /* Ensure that we see all writes made by xprt_complete_rqst()
155696 +        * before it changed req->rq_reply_bytes_recvd.
155697 +        */
155698 +       smp_rmb();
155700         req->rq_rcv_buf.len = req->rq_private_buf.len;
155701         trace_rpc_xdr_recvfrom(task, &req->rq_rcv_buf);
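
The barrier is only useful between the two reads it separates: first observe rq_reply_bytes_recvd, then fence, then read the reply data that xprt_complete_rqst() wrote before its own write barrier. Issuing smp_rmb() before the flag test, as the old code did, ordered nothing useful. The same pairing in portable C11 atomics (an analogue of the kernel primitives, not the sunrpc code; build with -pthread):

#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

static int payload;
static atomic_int ready;

static void *writer(void *arg)
{
        payload = 123;                   /* write the data...          */
        atomic_store_explicit(&ready, 1, /* ...then publish the flag   */
                              memory_order_release);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, writer, NULL);
        while (!atomic_load_explicit(&ready, memory_order_acquire))
                ;                        /* flag first...              */
        /* ...the acquire above orders this read after the flag check,
         * which is what moving smp_rmb() after the test of
         * rq_reply_bytes_recvd achieves in the hunk. */
        printf("%d\n", payload);
        pthread_join(t, NULL);
        return 0;
}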
155703 diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
155704 index d76dc9d95d16..0de918cb3d90 100644
155705 --- a/net/sunrpc/svc.c
155706 +++ b/net/sunrpc/svc.c
155707 @@ -846,7 +846,8 @@ void
155708  svc_rqst_free(struct svc_rqst *rqstp)
155710         svc_release_buffer(rqstp);
155711 -       put_page(rqstp->rq_scratch_page);
155712 +       if (rqstp->rq_scratch_page)
155713 +               put_page(rqstp->rq_scratch_page);
155714         kfree(rqstp->rq_resp);
155715         kfree(rqstp->rq_argp);
155716         kfree(rqstp->rq_auth_data);
155717 diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
155718 index 2e2f007dfc9f..7cde41a936a4 100644
155719 --- a/net/sunrpc/svcsock.c
155720 +++ b/net/sunrpc/svcsock.c
155721 @@ -1171,7 +1171,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
155722         tcp_sock_set_cork(svsk->sk_sk, true);
155723         err = svc_tcp_sendmsg(svsk->sk_sock, xdr, marker, &sent);
155724         xdr_free_bvec(xdr);
155725 -       trace_svcsock_tcp_send(xprt, err < 0 ? err : sent);
155726 +       trace_svcsock_tcp_send(xprt, err < 0 ? (long)err : sent);
155727         if (err < 0 || sent != (xdr->len + sizeof(marker)))
155728                 goto out_close;
155729         if (atomic_dec_and_test(&svsk->sk_sendqlen))
155730 diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
155731 index 691ccf8049a4..20fe31b1b776 100644
155732 --- a/net/sunrpc/xprt.c
155733 +++ b/net/sunrpc/xprt.c
155734 @@ -698,9 +698,9 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
155735         const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
155736         int status = 0;
155738 -       if (time_before(jiffies, req->rq_minortimeo))
155739 -               return status;
155740         if (time_before(jiffies, req->rq_majortimeo)) {
155741 +               if (time_before(jiffies, req->rq_minortimeo))
155742 +                       return status;
155743                 if (to->to_exponential)
155744                         req->rq_timeout <<= 1;
155745                 else
155746 @@ -1469,8 +1469,6 @@ bool xprt_prepare_transmit(struct rpc_task *task)
155747         struct rpc_xprt *xprt = req->rq_xprt;
155749         if (!xprt_lock_write(xprt, task)) {
155750 -               trace_xprt_transmit_queued(xprt, task);
155752                 /* Race breaker: someone may have transmitted us */
155753                 if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
155754                         rpc_wake_up_queued_task_set_status(&xprt->sending,
155755 @@ -1483,7 +1481,10 @@ bool xprt_prepare_transmit(struct rpc_task *task)
155757  void xprt_end_transmit(struct rpc_task *task)
155759 -       xprt_release_write(task->tk_rqstp->rq_xprt, task);
155760 +       struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
155762 +       xprt_inject_disconnect(xprt);
155763 +       xprt_release_write(xprt, task);
155766  /**
155767 @@ -1885,7 +1886,6 @@ void xprt_release(struct rpc_task *task)
155768         spin_unlock(&xprt->transport_lock);
155769         if (req->rq_buffer)
155770                 xprt->ops->buf_free(task);
155771 -       xprt_inject_disconnect(xprt);
155772         xdr_free_bvec(&req->rq_rcv_buf);
155773         xdr_free_bvec(&req->rq_snd_buf);
155774         if (req->rq_cred != NULL)
155775 diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
155776 index 766a1048a48a..aca2228095db 100644
155777 --- a/net/sunrpc/xprtrdma/frwr_ops.c
155778 +++ b/net/sunrpc/xprtrdma/frwr_ops.c
155779 @@ -257,6 +257,7 @@ int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device)
155780         ep->re_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
155781         ep->re_attr.cap.max_recv_wr = ep->re_max_requests;
155782         ep->re_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
155783 +       ep->re_attr.cap.max_recv_wr += RPCRDMA_MAX_RECV_BATCH;
155784         ep->re_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */
155786         ep->re_max_rdma_segs =
155787 @@ -575,7 +576,6 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
155788                 mr = container_of(frwr, struct rpcrdma_mr, frwr);
155789                 bad_wr = bad_wr->next;
155791 -               list_del_init(&mr->mr_list);
155792                 frwr_mr_recycle(mr);
155793         }
155795 diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
155796 index 292f066d006e..21ddd78a8c35 100644
155797 --- a/net/sunrpc/xprtrdma/rpc_rdma.c
155798 +++ b/net/sunrpc/xprtrdma/rpc_rdma.c
155799 @@ -1430,9 +1430,10 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
155800                 credits = 1;    /* don't deadlock */
155801         else if (credits > r_xprt->rx_ep->re_max_requests)
155802                 credits = r_xprt->rx_ep->re_max_requests;
155803 +       rpcrdma_post_recvs(r_xprt, credits + (buf->rb_bc_srv_max_requests << 1),
155804 +                          false);
155805         if (buf->rb_credits != credits)
155806                 rpcrdma_update_cwnd(r_xprt, credits);
155807 -       rpcrdma_post_recvs(r_xprt, false);
155809         req = rpcr_to_rdmar(rqst);
155810         if (unlikely(req->rl_reply))
155811 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
155812 index 52c759a8543e..3669661457c1 100644
155813 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
155814 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
155815 @@ -958,7 +958,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
155816         p = xdr_reserve_space(&sctxt->sc_stream,
155817                               rpcrdma_fixed_maxsz * sizeof(*p));
155818         if (!p)
155819 -               goto err0;
155820 +               goto err1;
155822         ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res);
155823         if (ret < 0)
155824 @@ -970,11 +970,11 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
155825         *p = pcl_is_empty(&rctxt->rc_reply_pcl) ? rdma_msg : rdma_nomsg;
155827         if (svc_rdma_encode_read_list(sctxt) < 0)
155828 -               goto err0;
155829 +               goto err1;
155830         if (svc_rdma_encode_write_list(rctxt, sctxt) < 0)
155831 -               goto err0;
155832 +               goto err1;
155833         if (svc_rdma_encode_reply_chunk(rctxt, sctxt, ret) < 0)
155834 -               goto err0;
155835 +               goto err1;
155837         ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp);
155838         if (ret < 0)
155839 diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
155840 index 78d29d1bcc20..09953597d055 100644
155841 --- a/net/sunrpc/xprtrdma/transport.c
155842 +++ b/net/sunrpc/xprtrdma/transport.c
155843 @@ -262,8 +262,10 @@ xprt_rdma_connect_worker(struct work_struct *work)
155844   * xprt_rdma_inject_disconnect - inject a connection fault
155845   * @xprt: transport context
155846   *
155847 - * If @xprt is connected, disconnect it to simulate spurious connection
155848 - * loss.
155849 + * If @xprt is connected, disconnect it to simulate spurious
155850 + * connection loss. Caller must hold @xprt's send lock to
155851 + * ensure that data structures and hardware resources are
155852 + * stable during the rdma_disconnect() call.
155853   */
155854  static void
155855  xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
155856 diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
155857 index ec912cf9c618..f3fffc74ab0f 100644
155858 --- a/net/sunrpc/xprtrdma/verbs.c
155859 +++ b/net/sunrpc/xprtrdma/verbs.c
155860 @@ -535,7 +535,7 @@ int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt)
155861          * outstanding Receives.
155862          */
155863         rpcrdma_ep_get(ep);
155864 -       rpcrdma_post_recvs(r_xprt, true);
155865 +       rpcrdma_post_recvs(r_xprt, 1, true);
155867         rc = rdma_connect(ep->re_id, &ep->re_remote_cma);
155868         if (rc)
155869 @@ -1364,21 +1364,21 @@ int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
155870  /**
155871   * rpcrdma_post_recvs - Refill the Receive Queue
155872   * @r_xprt: controlling transport instance
155873 - * @temp: mark Receive buffers to be deleted after use
155874 + * @needed: current credit grant
155875 + * @temp: mark Receive buffers to be deleted after one use
155876   *
155877   */
155878 -void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
155879 +void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp)
155881         struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
155882         struct rpcrdma_ep *ep = r_xprt->rx_ep;
155883         struct ib_recv_wr *wr, *bad_wr;
155884         struct rpcrdma_rep *rep;
155885 -       int needed, count, rc;
155886 +       int count, rc;
155888         rc = 0;
155889         count = 0;
155891 -       needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
155892         if (likely(ep->re_receive_count > needed))
155893                 goto out;
155894         needed -= ep->re_receive_count;
155895 diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
155896 index fe3be985e239..28af11fbe643 100644
155897 --- a/net/sunrpc/xprtrdma/xprt_rdma.h
155898 +++ b/net/sunrpc/xprtrdma/xprt_rdma.h
155899 @@ -461,7 +461,7 @@ int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt);
155900  void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt);
155902  int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
155903 -void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp);
155904 +void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp);
155907   * Buffer calls - xprtrdma/verbs.c
155908 diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
155909 index 97710ce36047..c89ce47c56cf 100644
155910 --- a/net/tipc/crypto.c
155911 +++ b/net/tipc/crypto.c
155912 @@ -1492,6 +1492,8 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
155913         /* Allocate statistic structure */
155914         c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
155915         if (!c->stats) {
155916 +               if (c->wq)
155917 +                       destroy_workqueue(c->wq);
155918                 kfree_sensitive(c);
155919                 return -ENOMEM;
155920         }
155921 diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
155922 index 5a1ce64039f7..0749df80454d 100644
155923 --- a/net/tipc/netlink_compat.c
155924 +++ b/net/tipc/netlink_compat.c
155925 @@ -696,7 +696,7 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
155926         if (err)
155927                 return err;
155929 -       link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
155930 +       link_info.dest = htonl(nla_get_flag(link[TIPC_NLA_LINK_DEST]));
155931         link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
155932         nla_strscpy(link_info.str, link[TIPC_NLA_LINK_NAME],
155933                     TIPC_MAX_LINK_NAME);
155934 diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
155935 index e4370b1b7494..902cb6dd710b 100644
155936 --- a/net/vmw_vsock/virtio_transport_common.c
155937 +++ b/net/vmw_vsock/virtio_transport_common.c
155938 @@ -733,6 +733,23 @@ static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
155939         return t->send_pkt(reply);
155942 +/* This function should be called with sk_lock held and SOCK_DONE set */
155943 +static void virtio_transport_remove_sock(struct vsock_sock *vsk)
155945 +       struct virtio_vsock_sock *vvs = vsk->trans;
155946 +       struct virtio_vsock_pkt *pkt, *tmp;
155948 +       /* We don't need to take rx_lock, as the socket is closing and we are
155949 +        * removing it.
155950 +        */
155951 +       list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
155952 +               list_del(&pkt->list);
155953 +               virtio_transport_free_pkt(pkt);
155954 +       }
155956 +       vsock_remove_sock(vsk);
155959  static void virtio_transport_wait_close(struct sock *sk, long timeout)
155961         if (timeout) {
155962 @@ -765,7 +782,7 @@ static void virtio_transport_do_close(struct vsock_sock *vsk,
155963             (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
155964                 vsk->close_work_scheduled = false;
155966 -               vsock_remove_sock(vsk);
155967 +               virtio_transport_remove_sock(vsk);
155969                 /* Release refcnt obtained when we scheduled the timeout */
155970                 sock_put(sk);
155971 @@ -828,22 +845,15 @@ static bool virtio_transport_close(struct vsock_sock *vsk)
155973  void virtio_transport_release(struct vsock_sock *vsk)
155975 -       struct virtio_vsock_sock *vvs = vsk->trans;
155976 -       struct virtio_vsock_pkt *pkt, *tmp;
155977         struct sock *sk = &vsk->sk;
155978         bool remove_sock = true;
155980         if (sk->sk_type == SOCK_STREAM)
155981                 remove_sock = virtio_transport_close(vsk);
155983 -       list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
155984 -               list_del(&pkt->list);
155985 -               virtio_transport_free_pkt(pkt);
155986 -       }
155988         if (remove_sock) {
155989                 sock_set_flag(sk, SOCK_DONE);
155990 -               vsock_remove_sock(vsk);
155991 +               virtio_transport_remove_sock(vsk);
155992         }
155994  EXPORT_SYMBOL_GPL(virtio_transport_release);
155995 diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
155996 index 8b65323207db..1c9ecb18b8e6 100644
155997 --- a/net/vmw_vsock/vmci_transport.c
155998 +++ b/net/vmw_vsock/vmci_transport.c
155999 @@ -568,8 +568,7 @@ vmci_transport_queue_pair_alloc(struct vmci_qp **qpair,
156000                                peer, flags, VMCI_NO_PRIVILEGE_FLAGS);
156001  out:
156002         if (err < 0) {
156003 -               pr_err("Could not attach to queue pair with %d\n",
156004 -                      err);
156005 +               pr_err_once("Could not attach to queue pair with %d\n", err);
156006                 err = vmci_transport_error_to_vsock_error(err);
156007         }
156009 diff --git a/net/wireless/core.c b/net/wireless/core.c
156010 index a2785379df6e..589ee5a69a2e 100644
156011 --- a/net/wireless/core.c
156012 +++ b/net/wireless/core.c
156013 @@ -332,14 +332,29 @@ static void cfg80211_event_work(struct work_struct *work)
156014  void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev)
156016         struct wireless_dev *wdev, *tmp;
156017 +       bool found = false;
156019         ASSERT_RTNL();
156020 -       lockdep_assert_wiphy(&rdev->wiphy);
156022 +       list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
156023 +               if (wdev->nl_owner_dead) {
156024 +                       if (wdev->netdev)
156025 +                               dev_close(wdev->netdev);
156026 +                       found = true;
156027 +               }
156028 +       }
156030 +       if (!found)
156031 +               return;
156033 +       wiphy_lock(&rdev->wiphy);
156034         list_for_each_entry_safe(wdev, tmp, &rdev->wiphy.wdev_list, list) {
156035 -               if (wdev->nl_owner_dead)
156036 +               if (wdev->nl_owner_dead) {
156037 +                       cfg80211_leave(rdev, wdev);
156038                         rdev_del_virtual_intf(rdev, wdev);
156039 +               }
156040         }
156041 +       wiphy_unlock(&rdev->wiphy);
156044  static void cfg80211_destroy_iface_wk(struct work_struct *work)
156045 @@ -350,9 +365,7 @@ static void cfg80211_destroy_iface_wk(struct work_struct *work)
156046                             destroy_work);
156048         rtnl_lock();
156049 -       wiphy_lock(&rdev->wiphy);
156050         cfg80211_destroy_ifaces(rdev);
156051 -       wiphy_unlock(&rdev->wiphy);
156052         rtnl_unlock();
156055 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
156056 index b1df42e4f1eb..a5224da63832 100644
156057 --- a/net/wireless/nl80211.c
156058 +++ b/net/wireless/nl80211.c
156059 @@ -3929,7 +3929,7 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
156060         return err;
156063 -static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
156064 +static int _nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
156066         struct cfg80211_registered_device *rdev = info->user_ptr[0];
156067         struct vif_params params;
156068 @@ -3938,9 +3938,6 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
156069         int err;
156070         enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED;
156072 -       /* to avoid failing a new interface creation due to pending removal */
156073 -       cfg80211_destroy_ifaces(rdev);
156075         memset(&params, 0, sizeof(params));
156077         if (!info->attrs[NL80211_ATTR_IFNAME])
156078 @@ -4028,6 +4025,21 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
156079         return genlmsg_reply(msg, info);
156082 +static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
156084 +       struct cfg80211_registered_device *rdev = info->user_ptr[0];
156085 +       int ret;
156087 +       /* to avoid failing a new interface creation due to pending removal */
156088 +       cfg80211_destroy_ifaces(rdev);
156090 +       wiphy_lock(&rdev->wiphy);
156091 +       ret = _nl80211_new_interface(skb, info);
156092 +       wiphy_unlock(&rdev->wiphy);
156094 +       return ret;
156097  static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info)
156099         struct cfg80211_registered_device *rdev = info->user_ptr[0];
156100 @@ -15040,7 +15052,9 @@ static const struct genl_small_ops nl80211_small_ops[] = {
156101                 .doit = nl80211_new_interface,
156102                 .flags = GENL_UNS_ADMIN_PERM,
156103                 .internal_flags = NL80211_FLAG_NEED_WIPHY |
156104 -                                 NL80211_FLAG_NEED_RTNL,
156105 +                                 NL80211_FLAG_NEED_RTNL |
156106 +                                 /* we take the wiphy mutex later ourselves */
156107 +                                 NL80211_FLAG_NO_WIPHY_MTX,
156108         },
156109         {
156110                 .cmd = NL80211_CMD_DEL_INTERFACE,
156111 diff --git a/net/wireless/scan.c b/net/wireless/scan.c
156112 index 758eb7d2a706..caa8eafbd583 100644
156113 --- a/net/wireless/scan.c
156114 +++ b/net/wireless/scan.c
156115 @@ -1751,6 +1751,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
156117                 if (rdev->bss_entries >= bss_entries_limit &&
156118                     !cfg80211_bss_expire_oldest(rdev)) {
156119 +                       if (!list_empty(&new->hidden_list))
156120 +                               list_del(&new->hidden_list);
156121                         kfree(new);
156122                         goto drop;
156123                 }
156124 diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
156125 index 4faabd1ecfd1..143979ea4165 100644
156126 --- a/net/xdp/xsk.c
156127 +++ b/net/xdp/xsk.c
156128 @@ -454,12 +454,16 @@ static int xsk_generic_xmit(struct sock *sk)
156129         struct sk_buff *skb;
156130         unsigned long flags;
156131         int err = 0;
156132 +       u32 hr, tr;
156134         mutex_lock(&xs->mutex);
156136         if (xs->queue_id >= xs->dev->real_num_tx_queues)
156137                 goto out;
156139 +       hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
156140 +       tr = xs->dev->needed_tailroom;
156142         while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
156143                 char *buffer;
156144                 u64 addr;
156145 @@ -471,11 +475,13 @@ static int xsk_generic_xmit(struct sock *sk)
156146                 }
156148                 len = desc.len;
156149 -               skb = sock_alloc_send_skb(sk, len, 1, &err);
156150 +               skb = sock_alloc_send_skb(sk, hr + len + tr, 1, &err);
156151                 if (unlikely(!skb))
156152                         goto out;
156154 +               skb_reserve(skb, hr);
156155                 skb_put(skb, len);
156157                 addr = desc.addr;
156158                 buffer = xsk_buff_raw_get_data(xs->pool, addr);
156159                 err = skb_store_bits(skb, 0, buffer, len);
156160 diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
156161 index 2823b7c3302d..40f359bf2044 100644
156162 --- a/net/xdp/xsk_queue.h
156163 +++ b/net/xdp/xsk_queue.h
156164 @@ -128,13 +128,12 @@ static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
156165  static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
156166                                             struct xdp_desc *desc)
156168 -       u64 chunk, chunk_end;
156169 +       u64 chunk;
156171 -       chunk = xp_aligned_extract_addr(pool, desc->addr);
156172 -       chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len);
156173 -       if (chunk != chunk_end)
156174 +       if (desc->len > pool->chunk_size)
156175                 return false;
156177 +       chunk = xp_aligned_extract_addr(pool, desc->addr);
156178         if (chunk >= pool->addrs_cnt)
156179                 return false;
156181 diff --git a/samples/bpf/tracex1_kern.c b/samples/bpf/tracex1_kern.c
156182 index 3f4599c9a202..ef30d2b353b0 100644
156183 --- a/samples/bpf/tracex1_kern.c
156184 +++ b/samples/bpf/tracex1_kern.c
156185 @@ -26,7 +26,7 @@
156186  SEC("kprobe/__netif_receive_skb_core")
156187  int bpf_prog1(struct pt_regs *ctx)
156189 -       /* attaches to kprobe netif_receive_skb,
156190 +       /* attaches to kprobe __netif_receive_skb_core,
156191          * looks for packets on loopback device and prints them
156192          */
156193         char devname[IFNAMSIZ];
156194 @@ -35,7 +35,7 @@ int bpf_prog1(struct pt_regs *ctx)
156195         int len;
156197         /* non-portable! works for the given kernel only */
156198 -       skb = (struct sk_buff *) PT_REGS_PARM1(ctx);
156199 +       bpf_probe_read_kernel(&skb, sizeof(skb), (void *)PT_REGS_PARM1(ctx));
156200         dev = _(skb->dev);
156201         len = _(skb->len);
156203 diff --git a/samples/kfifo/bytestream-example.c b/samples/kfifo/bytestream-example.c
156204 index c406f03ee551..5a90aa527877 100644
156205 --- a/samples/kfifo/bytestream-example.c
156206 +++ b/samples/kfifo/bytestream-example.c
156207 @@ -122,8 +122,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf,
156208         ret = kfifo_from_user(&test, buf, count, &copied);
156210         mutex_unlock(&write_lock);
156211 +       if (ret)
156212 +               return ret;
156214 -       return ret ? ret : copied;
156215 +       return copied;
156218  static ssize_t fifo_read(struct file *file, char __user *buf,
156219 @@ -138,8 +140,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
156220         ret = kfifo_to_user(&test, buf, count, &copied);
156222         mutex_unlock(&read_lock);
156223 +       if (ret)
156224 +               return ret;
156226 -       return ret ? ret : copied;
156227 +       return copied;
156230  static const struct proc_ops fifo_proc_ops = {
156231 diff --git a/samples/kfifo/inttype-example.c b/samples/kfifo/inttype-example.c
156232 index 78977fc4a23f..e5403d8c971a 100644
156233 --- a/samples/kfifo/inttype-example.c
156234 +++ b/samples/kfifo/inttype-example.c
156235 @@ -115,8 +115,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf,
156236         ret = kfifo_from_user(&test, buf, count, &copied);
156238         mutex_unlock(&write_lock);
156239 +       if (ret)
156240 +               return ret;
156242 -       return ret ? ret : copied;
156243 +       return copied;
156246  static ssize_t fifo_read(struct file *file, char __user *buf,
156247 @@ -131,8 +133,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
156248         ret = kfifo_to_user(&test, buf, count, &copied);
156250         mutex_unlock(&read_lock);
156251 +       if (ret)
156252 +               return ret;
156254 -       return ret ? ret : copied;
156255 +       return copied;
156258  static const struct proc_ops fifo_proc_ops = {
156259 diff --git a/samples/kfifo/record-example.c b/samples/kfifo/record-example.c
156260 index c507998a2617..f64f3d62d6c2 100644
156261 --- a/samples/kfifo/record-example.c
156262 +++ b/samples/kfifo/record-example.c
156263 @@ -129,8 +129,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf,
156264         ret = kfifo_from_user(&test, buf, count, &copied);
156266         mutex_unlock(&write_lock);
156267 +       if (ret)
156268 +               return ret;
156270 -       return ret ? ret : copied;
156271 +       return copied;
156274  static ssize_t fifo_read(struct file *file, char __user *buf,
156275 @@ -145,8 +147,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
156276         ret = kfifo_to_user(&test, buf, count, &copied);
156278         mutex_unlock(&read_lock);
156279 +       if (ret)
156280 +               return ret;
156282 -       return ret ? ret : copied;
156283 +       return copied;
156286  static const struct proc_ops fifo_proc_ops = {
156287 diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
156288 index 066beffca09a..4ca5579af4e4 100644
156289 --- a/scripts/Makefile.modpost
156290 +++ b/scripts/Makefile.modpost
156291 @@ -68,7 +68,20 @@ else
156292  ifeq ($(KBUILD_EXTMOD),)
156294  input-symdump := vmlinux.symvers
156295 -output-symdump := Module.symvers
156296 +output-symdump := modules-only.symvers
156298 +quiet_cmd_cat = GEN     $@
156299 +      cmd_cat = cat $(real-prereqs) > $@
156301 +ifneq ($(wildcard vmlinux.symvers),)
156303 +__modpost: Module.symvers
156304 +Module.symvers: vmlinux.symvers modules-only.symvers FORCE
156305 +       $(call if_changed,cat)
156307 +targets += Module.symvers
156309 +endif
156311  else
156313 diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
156314 index e0f965529166..af814b39b876 100644
156315 --- a/scripts/kconfig/nconf.c
156316 +++ b/scripts/kconfig/nconf.c
156317 @@ -504,8 +504,8 @@ static int get_mext_match(const char *match_str, match_f flag)
156318         else if (flag == FIND_NEXT_MATCH_UP)
156319                 --match_start;
156321 +       match_start = (match_start + items_num) % items_num;
156322         index = match_start;
156323 -       index = (index + items_num) % items_num;
156324         while (true) {
156325                 char *str = k_menu_items[index].str;
156326                 if (strcasestr(str, match_str) != NULL)
156327 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
156328 index 24725e50c7b4..10c3fba26f03 100644
156329 --- a/scripts/mod/modpost.c
156330 +++ b/scripts/mod/modpost.c
156331 @@ -2423,19 +2423,6 @@ static void read_dump(const char *fname)
156332         fatal("parse error in symbol dump file\n");
156335 -/* For normal builds always dump all symbols.
156336 - * For external modules only dump symbols
156337 - * that are not read from kernel Module.symvers.
156338 - **/
156339 -static int dump_sym(struct symbol *sym)
156341 -       if (!external_module)
156342 -               return 1;
156343 -       if (sym->module->from_dump)
156344 -               return 0;
156345 -       return 1;
156348  static void write_dump(const char *fname)
156350         struct buffer buf = { };
156351 @@ -2446,7 +2433,7 @@ static void write_dump(const char *fname)
156352         for (n = 0; n < SYMBOL_HASH_SIZE ; n++) {
156353                 symbol = symbolhash[n];
156354                 while (symbol) {
156355 -                       if (dump_sym(symbol)) {
156356 +                       if (!symbol->module->from_dump) {
156357                                 namespace = symbol->namespace;
156358                                 buf_printf(&buf, "0x%08x\t%s\t%s\t%s\t%s\n",
156359                                            symbol->crc, symbol->name,
156360 diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
156361 index 867860ea57da..7b83a1aaec98 100755
156362 --- a/scripts/recordmcount.pl
156363 +++ b/scripts/recordmcount.pl
156364 @@ -392,7 +392,7 @@ if ($arch eq "x86_64") {
156365      $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
156366  } elsif ($arch eq "riscv") {
156367      $function_regex = "^([0-9a-fA-F]+)\\s+<([^.0-9][0-9a-zA-Z_\\.]+)>:";
156368 -    $mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL\\s_mcount\$";
156369 +    $mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL(_PLT)?\\s_?mcount\$";
156370      $type = ".quad";
156371      $alignment = 2;
156372  } elsif ($arch eq "nds32") {
156373 diff --git a/scripts/setlocalversion b/scripts/setlocalversion
156374 index bb709eda96cd..cf323fa660b6 100755
156375 --- a/scripts/setlocalversion
156376 +++ b/scripts/setlocalversion
156377 @@ -54,7 +54,7 @@ scm_version()
156378                         # If only the short version is requested, don't bother
156379                         # running further git commands
156380                         if $short; then
156381 -                               echo "+"
156382 +                       #       echo "+"
156383                                 return
156384                         fi
156385                         # If we are past a tagged commit (like
156386 diff --git a/security/commoncap.c b/security/commoncap.c
156387 index 1c519c875217..5cdeb73ca8fa 100644
156388 --- a/security/commoncap.c
156389 +++ b/security/commoncap.c
156390 @@ -400,7 +400,7 @@ int cap_inode_getsecurity(struct user_namespace *mnt_userns,
156391                                       &tmpbuf, size, GFP_NOFS);
156392         dput(dentry);
156394 -       if (ret < 0)
156395 +       if (ret < 0 || !tmpbuf)
156396                 return ret;
156398         fs_ns = inode->i_sb->s_user_ns;
156399 diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
156400 index e22e510ae92d..4e081e650047 100644
156401 --- a/security/integrity/ima/ima_template.c
156402 +++ b/security/integrity/ima/ima_template.c
156403 @@ -494,8 +494,8 @@ int ima_restore_measurement_list(loff_t size, void *buf)
156404                         }
156405                 }
156407 -               entry->pcr = !ima_canonical_fmt ? *(hdr[HDR_PCR].data) :
156408 -                            le32_to_cpu(*(hdr[HDR_PCR].data));
156409 +               entry->pcr = !ima_canonical_fmt ? *(u32 *)(hdr[HDR_PCR].data) :
156410 +                            le32_to_cpu(*(u32 *)(hdr[HDR_PCR].data));
156411                 ret = ima_restore_measurement_entry(entry);
156412                 if (ret < 0)
156413                         break;
156414 diff --git a/security/keys/trusted-keys/trusted_tpm1.c b/security/keys/trusted-keys/trusted_tpm1.c
156415 index 493eb91ed017..56c9b48460d9 100644
156416 --- a/security/keys/trusted-keys/trusted_tpm1.c
156417 +++ b/security/keys/trusted-keys/trusted_tpm1.c
156418 @@ -500,10 +500,12 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
156420         ret = tpm_get_random(chip, td->nonceodd, TPM_NONCE_SIZE);
156421         if (ret < 0)
156422 -               return ret;
156423 +               goto out;
156425 -       if (ret != TPM_NONCE_SIZE)
156426 -               return -EIO;
156427 +       if (ret != TPM_NONCE_SIZE) {
156428 +               ret = -EIO;
156429 +               goto out;
156430 +       }
156432         ordinal = htonl(TPM_ORD_SEAL);
156433         datsize = htonl(datalen);
156434 @@ -791,13 +793,33 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
156435                                 return -EINVAL;
156436                         break;
156437                 case Opt_blobauth:
156438 -                       if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE)
156439 -                               return -EINVAL;
156440 -                       res = hex2bin(opt->blobauth, args[0].from,
156441 -                                     SHA1_DIGEST_SIZE);
156442 -                       if (res < 0)
156443 -                               return -EINVAL;
156444 +                       /*
156445 +                        * TPM 1.2 authorizations are sha1 hashes passed in as
156446 +                        * hex strings.  TPM 2.0 authorizations are simple
156447 +                        * passwords (although it can take a hash as well)
156448 +                        */
156449 +                       opt->blobauth_len = strlen(args[0].from);
156451 +                       if (opt->blobauth_len == 2 * TPM_DIGEST_SIZE) {
156452 +                               res = hex2bin(opt->blobauth, args[0].from,
156453 +                                             TPM_DIGEST_SIZE);
156454 +                               if (res < 0)
156455 +                                       return -EINVAL;
156457 +                               opt->blobauth_len = TPM_DIGEST_SIZE;
156458 +                               break;
156459 +                       }
156461 +                       if (tpm2 && opt->blobauth_len <= sizeof(opt->blobauth)) {
156462 +                               memcpy(opt->blobauth, args[0].from,
156463 +                                      opt->blobauth_len);
156464 +                               break;
156465 +                       }
156467 +                       return -EINVAL;
156469                         break;
156471                 case Opt_migratable:
156472                         if (*args[0].from == '0')
156473                                 pay->migratable = 0;
156474 diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c
156475 index c87c4df8703d..4c19d3abddbe 100644
156476 --- a/security/keys/trusted-keys/trusted_tpm2.c
156477 +++ b/security/keys/trusted-keys/trusted_tpm2.c
156478 @@ -97,10 +97,12 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
156479                              TPM_DIGEST_SIZE);
156481         /* sensitive */
156482 -       tpm_buf_append_u16(&buf, 4 + TPM_DIGEST_SIZE + payload->key_len + 1);
156483 +       tpm_buf_append_u16(&buf, 4 + options->blobauth_len + payload->key_len + 1);
156485 +       tpm_buf_append_u16(&buf, options->blobauth_len);
156486 +       if (options->blobauth_len)
156487 +               tpm_buf_append(&buf, options->blobauth, options->blobauth_len);
156489 -       tpm_buf_append_u16(&buf, TPM_DIGEST_SIZE);
156490 -       tpm_buf_append(&buf, options->blobauth, TPM_DIGEST_SIZE);
156491         tpm_buf_append_u16(&buf, payload->key_len + 1);
156492         tpm_buf_append(&buf, payload->key, payload->key_len);
156493         tpm_buf_append_u8(&buf, payload->migratable);
156494 @@ -265,7 +267,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
156495                              NULL /* nonce */, 0,
156496                              TPM2_SA_CONTINUE_SESSION,
156497                              options->blobauth /* hmac */,
156498 -                            TPM_DIGEST_SIZE);
156499 +                            options->blobauth_len);
156501         rc = tpm_transmit_cmd(chip, &buf, 6, "unsealing");
156502         if (rc > 0)
156503 diff --git a/security/security.c b/security/security.c
156504 index 5ac96b16f8fa..8ef0ce0faba7 100644
156505 --- a/security/security.c
156506 +++ b/security/security.c
156507 @@ -727,24 +727,28 @@ int security_binder_set_context_mgr(struct task_struct *mgr)
156509         return call_int_hook(binder_set_context_mgr, 0, mgr);
156511 +EXPORT_SYMBOL(security_binder_set_context_mgr);
156513  int security_binder_transaction(struct task_struct *from,
156514                                 struct task_struct *to)
156516         return call_int_hook(binder_transaction, 0, from, to);
156518 +EXPORT_SYMBOL(security_binder_transaction);
156520  int security_binder_transfer_binder(struct task_struct *from,
156521                                     struct task_struct *to)
156523         return call_int_hook(binder_transfer_binder, 0, from, to);
156525 +EXPORT_SYMBOL(security_binder_transfer_binder);
156527  int security_binder_transfer_file(struct task_struct *from,
156528                                   struct task_struct *to, struct file *file)
156530         return call_int_hook(binder_transfer_file, 0, from, to, file);
156532 +EXPORT_SYMBOL(security_binder_transfer_file);
156534  int security_ptrace_access_check(struct task_struct *child, unsigned int mode)
156536 diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
156537 index ba2e01a6955c..62d19bccf3de 100644
156538 --- a/security/selinux/include/classmap.h
156539 +++ b/security/selinux/include/classmap.h
156540 @@ -242,11 +242,12 @@ struct security_class_mapping secclass_map[] = {
156541         { "infiniband_endport",
156542           { "manage_subnet", NULL } },
156543         { "bpf",
156544 -         {"map_create", "map_read", "map_write", "prog_load", "prog_run"} },
156545 +         { "map_create", "map_read", "map_write", "prog_load", "prog_run",
156546 +           NULL } },
156547         { "xdp_socket",
156548           { COMMON_SOCK_PERMS, NULL } },
156549         { "perf_event",
156550 -         {"open", "cpu", "kernel", "tracepoint", "read", "write"} },
156551 +         { "open", "cpu", "kernel", "tracepoint", "read", "write", NULL } },
156552         { "lockdown",
156553           { "integrity", "confidentiality", NULL } },
156554         { "anon_inode",
156555 diff --git a/sound/core/init.c b/sound/core/init.c
156556 index 45f4b01de23f..ef41f5b3a240 100644
156557 --- a/sound/core/init.c
156558 +++ b/sound/core/init.c
156559 @@ -398,10 +398,8 @@ int snd_card_disconnect(struct snd_card *card)
156560                 return 0;
156561         }
156562         card->shutdown = 1;
156563 -       spin_unlock(&card->files_lock);
156565         /* replace file->f_op with special dummy operations */
156566 -       spin_lock(&card->files_lock);
156567         list_for_each_entry(mfile, &card->files_list, list) {
156568                 /* it's critical part, use endless loop */
156569                 /* we have no room to fail */
156570 diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig
156571 index 25778765cbfe..9897bd26a438 100644
156572 --- a/sound/firewire/Kconfig
156573 +++ b/sound/firewire/Kconfig
156574 @@ -38,7 +38,7 @@ config SND_OXFW
156575            * Mackie(Loud) Onyx 1640i (former model)
156576            * Mackie(Loud) Onyx Satellite
156577            * Mackie(Loud) Tapco Link.Firewire
156578 -          * Mackie(Loud) d.2 pro/d.4 pro
156579 +          * Mackie(Loud) d.4 pro
156580            * Mackie(Loud) U.420/U.420d
156581            * TASCAM FireOne
156582            * Stanton Controllers & Systems 1 Deck/Mixer
156583 @@ -84,7 +84,7 @@ config SND_BEBOB
156584           * PreSonus FIREBOX/FIREPOD/FP10/Inspire1394
156585           * BridgeCo RDAudio1/Audio5
156586           * Mackie Onyx 1220/1620/1640 (FireWire I/O Card)
156587 -         * Mackie d.2 (FireWire Option)
156588 +         * Mackie d.2 (FireWire Option) and d.2 Pro
156589           * Stanton FinalScratch 2 (ScratchAmp)
156590           * Tascam IF-FW/DM
156591           * Behringer XENIX UFX 1204/1604
156592 diff --git a/sound/firewire/amdtp-stream-trace.h b/sound/firewire/amdtp-stream-trace.h
156593 index 26e7cb555d3c..aa53c13b89d3 100644
156594 --- a/sound/firewire/amdtp-stream-trace.h
156595 +++ b/sound/firewire/amdtp-stream-trace.h
156596 @@ -14,8 +14,8 @@
156597  #include <linux/tracepoint.h>
156599  TRACE_EVENT(amdtp_packet,
156600 -       TP_PROTO(const struct amdtp_stream *s, u32 cycles, const __be32 *cip_header, unsigned int payload_length, unsigned int data_blocks, unsigned int data_block_counter, unsigned int index),
156601 -       TP_ARGS(s, cycles, cip_header, payload_length, data_blocks, data_block_counter, index),
156602 +       TP_PROTO(const struct amdtp_stream *s, u32 cycles, const __be32 *cip_header, unsigned int payload_length, unsigned int data_blocks, unsigned int data_block_counter, unsigned int packet_index, unsigned int index),
156603 +       TP_ARGS(s, cycles, cip_header, payload_length, data_blocks, data_block_counter, packet_index, index),
156604         TP_STRUCT__entry(
156605                 __field(unsigned int, second)
156606                 __field(unsigned int, cycle)
156607 @@ -48,7 +48,7 @@ TRACE_EVENT(amdtp_packet,
156608                 __entry->payload_quadlets = payload_length / sizeof(__be32);
156609                 __entry->data_blocks = data_blocks;
156610                 __entry->data_block_counter = data_block_counter,
156611 -               __entry->packet_index = s->packet_index;
156612 +               __entry->packet_index = packet_index;
156613                 __entry->irq = !!in_interrupt();
156614                 __entry->index = index;
156615         ),
156616 diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
156617 index 4e2f2bb7879f..e0faa6601966 100644
156618 --- a/sound/firewire/amdtp-stream.c
156619 +++ b/sound/firewire/amdtp-stream.c
156620 @@ -526,7 +526,7 @@ static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
156621         }
156623         trace_amdtp_packet(s, cycle, cip_header, payload_length, data_blocks,
156624 -                          data_block_counter, index);
156625 +                          data_block_counter, s->packet_index, index);
156628  static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
156629 @@ -630,21 +630,27 @@ static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
156630                                unsigned int *payload_length,
156631                                unsigned int *data_blocks,
156632                                unsigned int *data_block_counter,
156633 -                              unsigned int *syt, unsigned int index)
156634 +                              unsigned int *syt, unsigned int packet_index, unsigned int index)
156636         const __be32 *cip_header;
156637 +       unsigned int cip_header_size;
156638         int err;
156640         *payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;
156641 -       if (*payload_length > s->ctx_data.tx.ctx_header_size +
156642 -                                       s->ctx_data.tx.max_ctx_payload_length) {
156644 +       if (!(s->flags & CIP_NO_HEADER))
156645 +               cip_header_size = 8;
156646 +       else
156647 +               cip_header_size = 0;
156649 +       if (*payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
156650                 dev_err(&s->unit->device,
156651                         "Detect jumbo payload: %04x %04x\n",
156652 -                       *payload_length, s->ctx_data.tx.max_ctx_payload_length);
156653 +                       *payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
156654                 return -EIO;
156655         }
156657 -       if (!(s->flags & CIP_NO_HEADER)) {
156658 +       if (cip_header_size > 0) {
156659                 cip_header = ctx_header + 2;
156660                 err = check_cip_header(s, cip_header, *payload_length,
156661                                        data_blocks, data_block_counter, syt);
156662 @@ -662,7 +668,7 @@ static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
156663         }
156665         trace_amdtp_packet(s, cycle, cip_header, *payload_length, *data_blocks,
156666 -                          *data_block_counter, index);
156667 +                          *data_block_counter, packet_index, index);
156669         return err;
156671 @@ -701,12 +707,13 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
156672                                      unsigned int packets)
156674         unsigned int dbc = s->data_block_counter;
156675 +       unsigned int packet_index = s->packet_index;
156676 +       unsigned int queue_size = s->queue_size;
156677         int i;
156678         int err;
156680         for (i = 0; i < packets; ++i) {
156681                 struct pkt_desc *desc = descs + i;
156682 -               unsigned int index = (s->packet_index + i) % s->queue_size;
156683                 unsigned int cycle;
156684                 unsigned int payload_length;
156685                 unsigned int data_blocks;
156686 @@ -715,7 +722,7 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
156687                 cycle = compute_cycle_count(ctx_header[1]);
156689                 err = parse_ir_ctx_header(s, cycle, ctx_header, &payload_length,
156690 -                                         &data_blocks, &dbc, &syt, i);
156691 +                                         &data_blocks, &dbc, &syt, packet_index, i);
156692                 if (err < 0)
156693                         return err;
156695 @@ -723,13 +730,15 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
156696                 desc->syt = syt;
156697                 desc->data_blocks = data_blocks;
156698                 desc->data_block_counter = dbc;
156699 -               desc->ctx_payload = s->buffer.packets[index].buffer;
156700 +               desc->ctx_payload = s->buffer.packets[packet_index].buffer;
156702                 if (!(s->flags & CIP_DBC_IS_END_EVENT))
156703                         dbc = (dbc + desc->data_blocks) & 0xff;
156705                 ctx_header +=
156706                         s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
156708 +               packet_index = (packet_index + 1) % queue_size;
156709         }
156711         s->data_block_counter = dbc;
156712 @@ -1065,23 +1074,22 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
156713                 s->data_block_counter = 0;
156714         }
156716 -       /* initialize packet buffer */
156717 +       // initialize packet buffer.
156718 +       max_ctx_payload_size = amdtp_stream_get_max_payload(s);
156719         if (s->direction == AMDTP_IN_STREAM) {
156720                 dir = DMA_FROM_DEVICE;
156721                 type = FW_ISO_CONTEXT_RECEIVE;
156722 -               if (!(s->flags & CIP_NO_HEADER))
156723 +               if (!(s->flags & CIP_NO_HEADER)) {
156724 +                       max_ctx_payload_size -= 8;
156725                         ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
156726 -               else
156727 +               } else {
156728                         ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
156730 -               max_ctx_payload_size = amdtp_stream_get_max_payload(s) -
156731 -                                      ctx_header_size;
156732 +               }
156733         } else {
156734                 dir = DMA_TO_DEVICE;
156735                 type = FW_ISO_CONTEXT_TRANSMIT;
156736                 ctx_header_size = 0;    // No effect for IT context.
156738 -               max_ctx_payload_size = amdtp_stream_get_max_payload(s);
156739                 if (!(s->flags & CIP_NO_HEADER))
156740                         max_ctx_payload_size -= IT_PKT_HEADER_SIZE_CIP;
156741         }
156742 diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
156743 index 2c8e3392a490..daeecfa8b9aa 100644
156744 --- a/sound/firewire/bebob/bebob.c
156745 +++ b/sound/firewire/bebob/bebob.c
156746 @@ -387,7 +387,7 @@ static const struct ieee1394_device_id bebob_id_table[] = {
156747         SND_BEBOB_DEV_ENTRY(VEN_BRIDGECO, 0x00010049, &spec_normal),
156748         /* Mackie, Onyx 1220/1620/1640 (Firewire I/O Card) */
156749         SND_BEBOB_DEV_ENTRY(VEN_MACKIE2, 0x00010065, &spec_normal),
156750 -       /* Mackie, d.2 (Firewire Option) */
156751 +       // Mackie, d.2 (Firewire option card) and d.2 Pro (the card is built-in).
156752         SND_BEBOB_DEV_ENTRY(VEN_MACKIE1, 0x00010067, &spec_normal),
156753         /* Stanton, ScratchAmp */
156754         SND_BEBOB_DEV_ENTRY(VEN_STANTON, 0x00000001, &spec_normal),
156755 diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
156756 index bbae04793c50..c18017e0a3d9 100644
156757 --- a/sound/firewire/bebob/bebob_stream.c
156758 +++ b/sound/firewire/bebob/bebob_stream.c
156759 @@ -517,20 +517,22 @@ int snd_bebob_stream_init_duplex(struct snd_bebob *bebob)
156760  static int keep_resources(struct snd_bebob *bebob, struct amdtp_stream *stream,
156761                           unsigned int rate, unsigned int index)
156763 -       struct snd_bebob_stream_formation *formation;
156764 +       unsigned int pcm_channels;
156765 +       unsigned int midi_ports;
156766         struct cmp_connection *conn;
156767         int err;
156769         if (stream == &bebob->tx_stream) {
156770 -               formation = bebob->tx_stream_formations + index;
156771 +               pcm_channels = bebob->tx_stream_formations[index].pcm;
156772 +               midi_ports = bebob->midi_input_ports;
156773                 conn = &bebob->out_conn;
156774         } else {
156775 -               formation = bebob->rx_stream_formations + index;
156776 +               pcm_channels = bebob->rx_stream_formations[index].pcm;
156777 +               midi_ports = bebob->midi_output_ports;
156778                 conn = &bebob->in_conn;
156779         }
156781 -       err = amdtp_am824_set_parameters(stream, rate, formation->pcm,
156782 -                                        formation->midi, false);
156783 +       err = amdtp_am824_set_parameters(stream, rate, pcm_channels, midi_ports, false);
156784         if (err < 0)
156785                 return err;
156787 diff --git a/sound/firewire/dice/dice-alesis.c b/sound/firewire/dice/dice-alesis.c
156788 index 0916864511d5..27c13b9cc9ef 100644
156789 --- a/sound/firewire/dice/dice-alesis.c
156790 +++ b/sound/firewire/dice/dice-alesis.c
156791 @@ -16,7 +16,7 @@ alesis_io14_tx_pcm_chs[MAX_STREAMS][SND_DICE_RATE_MODE_COUNT] = {
156792  static const unsigned int
156793  alesis_io26_tx_pcm_chs[MAX_STREAMS][SND_DICE_RATE_MODE_COUNT] = {
156794         {10, 10, 4},    /* Tx0 = Analog + S/PDIF. */
156795 -       {16, 8, 0},     /* Tx1 = ADAT1 + ADAT2. */
156796 +       {16, 4, 0},     /* Tx1 = ADAT1 + ADAT2 (available at low rate). */
156799  int snd_dice_detect_alesis_formats(struct snd_dice *dice)
156800 diff --git a/sound/firewire/dice/dice-tcelectronic.c b/sound/firewire/dice/dice-tcelectronic.c
156801 index a8875d24ba2a..43a3bcb15b3d 100644
156802 --- a/sound/firewire/dice/dice-tcelectronic.c
156803 +++ b/sound/firewire/dice/dice-tcelectronic.c
156804 @@ -38,8 +38,8 @@ static const struct dice_tc_spec konnekt_24d = {
156807  static const struct dice_tc_spec konnekt_live = {
156808 -       .tx_pcm_chs = {{16, 16, 16}, {0, 0, 0} },
156809 -       .rx_pcm_chs = {{16, 16, 16}, {0, 0, 0} },
156810 +       .tx_pcm_chs = {{16, 16, 6}, {0, 0, 0} },
156811 +       .rx_pcm_chs = {{16, 16, 6}, {0, 0, 0} },
156812         .has_midi = true,
156815 diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
156816 index 1f1e3236efb8..9eea25c46dc7 100644
156817 --- a/sound/firewire/oxfw/oxfw.c
156818 +++ b/sound/firewire/oxfw/oxfw.c
156819 @@ -355,7 +355,6 @@ static const struct ieee1394_device_id oxfw_id_table[] = {
156820          *  Onyx-i series (former models):      0x081216
156821          *  Mackie Onyx Satellite:              0x00200f
156822          *  Tapco LINK.firewire 4x6:            0x000460
156823 -        *  d.2 pro:                            Unknown
156824          *  d.4 pro:                            Unknown
156825          *  U.420:                              Unknown
156826          *  U.420d:                             Unknown
156827 diff --git a/sound/isa/sb/emu8000.c b/sound/isa/sb/emu8000.c
156828 index 0aa545ac6e60..1c90421a88dc 100644
156829 --- a/sound/isa/sb/emu8000.c
156830 +++ b/sound/isa/sb/emu8000.c
156831 @@ -1029,8 +1029,10 @@ snd_emu8000_create_mixer(struct snd_card *card, struct snd_emu8000 *emu)
156833         memset(emu->controls, 0, sizeof(emu->controls));
156834         for (i = 0; i < EMU8000_NUM_CONTROLS; i++) {
156835 -               if ((err = snd_ctl_add(card, emu->controls[i] = snd_ctl_new1(mixer_defs[i], emu))) < 0)
156836 +               if ((err = snd_ctl_add(card, emu->controls[i] = snd_ctl_new1(mixer_defs[i], emu))) < 0) {
156837 +                       emu->controls[i] = NULL;
156838                         goto __error;
156839 +               }
156840         }
156841         return 0;
156843 diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c
156844 index 8635a2b6b36b..4789345a8fdd 100644
156845 --- a/sound/isa/sb/sb16_csp.c
156846 +++ b/sound/isa/sb/sb16_csp.c
156847 @@ -1045,10 +1045,14 @@ static int snd_sb_qsound_build(struct snd_sb_csp * p)
156849         spin_lock_init(&p->q_lock);
156851 -       if ((err = snd_ctl_add(card, p->qsound_switch = snd_ctl_new1(&snd_sb_qsound_switch, p))) < 0)
156852 +       if ((err = snd_ctl_add(card, p->qsound_switch = snd_ctl_new1(&snd_sb_qsound_switch, p))) < 0) {
156853 +               p->qsound_switch = NULL;
156854                 goto __error;
156855 -       if ((err = snd_ctl_add(card, p->qsound_space = snd_ctl_new1(&snd_sb_qsound_space, p))) < 0)
156856 +       }
156857 +       if ((err = snd_ctl_add(card, p->qsound_space = snd_ctl_new1(&snd_sb_qsound_space, p))) < 0) {
156858 +               p->qsound_space = NULL;
156859                 goto __error;
156860 +       }
156862         return 0;
156864 diff --git a/sound/isa/sb/sb8.c b/sound/isa/sb/sb8.c
156865 index 6c9d534ce8b6..95290ffe5c6e 100644
156866 --- a/sound/isa/sb/sb8.c
156867 +++ b/sound/isa/sb/sb8.c
156868 @@ -95,10 +95,6 @@ static int snd_sb8_probe(struct device *pdev, unsigned int dev)
156870         /* block the 0x388 port to avoid PnP conflicts */
156871         acard->fm_res = request_region(0x388, 4, "SoundBlaster FM");
156872 -       if (!acard->fm_res) {
156873 -               err = -EBUSY;
156874 -               goto _err;
156875 -       }
156877         if (port[dev] != SNDRV_AUTO_PORT) {
156878                 if ((err = snd_sbdsp_create(card, port[dev], irq[dev],
156879 diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
156880 index f5cba7afd1c6..ff0fb2d16d82 100644
156881 --- a/sound/pci/hda/hda_generic.c
156882 +++ b/sound/pci/hda/hda_generic.c
156883 @@ -1202,11 +1202,17 @@ static const char *get_line_out_pfx(struct hda_codec *codec, int ch,
156884                 *index = ch;
156885                 return "Headphone";
156886         case AUTO_PIN_LINE_OUT:
156887 -               /* This deals with the case where we have two DACs and
156888 -                * one LO, one HP and one Speaker */
156889 -               if (!ch && cfg->speaker_outs && cfg->hp_outs) {
156890 -                       bool hp_lo_shared = !path_has_mixer(codec, spec->hp_paths[0], ctl_type);
156891 -                       bool spk_lo_shared = !path_has_mixer(codec, spec->speaker_paths[0], ctl_type);
156892 +               /* This deals with the case where one HP or one Speaker or
156893 +                * one HP + one Speaker need to share the DAC with LO
156894 +                */
156895 +               if (!ch) {
156896 +                       bool hp_lo_shared = false, spk_lo_shared = false;
156898 +                       if (cfg->speaker_outs)
156899 +                               spk_lo_shared = !path_has_mixer(codec,
156900 +                                                               spec->speaker_paths[0], ctl_type);
156901 +                       if (cfg->hp_outs)
156902 +                               hp_lo_shared = !path_has_mixer(codec, spec->hp_paths[0], ctl_type);
156903                         if (hp_lo_shared && spk_lo_shared)
156904                                 return spec->vmaster_mute.hook ? "PCM" : "Master";
156905                         if (hp_lo_shared)
156906 diff --git a/sound/pci/hda/ideapad_s740_helper.c b/sound/pci/hda/ideapad_s740_helper.c
156907 new file mode 100644
156908 index 000000000000..564b9086e52d
156909 --- /dev/null
156910 +++ b/sound/pci/hda/ideapad_s740_helper.c
156911 @@ -0,0 +1,492 @@
156912 +// SPDX-License-Identifier: GPL-2.0
156913 +/* Fixes for Lenovo Ideapad S740, to be included from codec driver */
156915 +static const struct hda_verb alc285_ideapad_s740_coefs[] = {
156916 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x10 },
156917 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0320 },
156918 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
156919 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0041 },
156920 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
156921 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0041 },
156922 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
156923 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
156924 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
156925 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
156926 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
156927 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
156928 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
156929 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
156930 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
156931 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
156932 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
156933 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
156934 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
156935 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
156936 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
156937 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x007f },
156938 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
156939 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
156940 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
156941 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
156942 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x007f },
156943 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
156944 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
156945 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
156946 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
156947 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
156948 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
156949 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
156950 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
156951 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
156952 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
156953 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
156954 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
156955 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
156956 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
156957 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
156958 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
156959 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
156960 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
156961 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x003c },
156962 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
156963 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0011 },
156964 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
156965 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
156966 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x003c },
156967 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
156968 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0011 },
156969 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
156970 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
156971 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
156972 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
156973 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000c },
156974 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
156975 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
156976 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
156977 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
156978 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000c },
156979 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
156980 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
156981 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
156982 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
156983 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
156984 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
156985 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000f },
156986 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
156987 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0042 },
156988 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
156989 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
156990 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000f },
156991 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
156992 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0042 },
156993 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
156994 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
156995 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
156996 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
156997 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
156998 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
156999 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
157000 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157001 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157002 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
157003 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157004 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
157005 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157006 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157007 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157008 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157009 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0003 },
157010 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157011 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0009 },
157012 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157013 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157014 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0003 },
157015 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157016 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0009 },
157017 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157018 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157019 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157020 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157021 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001c },
157022 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157023 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x004c },
157024 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157025 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157026 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001c },
157027 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157028 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x004c },
157029 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157030 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157031 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157032 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157033 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001d },
157034 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157035 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x004e },
157036 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157037 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157038 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001d },
157039 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157040 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x004e },
157041 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157042 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157043 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157044 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157045 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001b },
157046 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157047 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
157048 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157049 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157050 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001b },
157051 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157052 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
157053 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157054 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157055 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157056 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157057 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0019 },
157058 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157059 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0025 },
157060 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157061 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157062 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0019 },
157063 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157064 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0025 },
157065 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157066 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157067 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157068 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157069 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0018 },
157070 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157071 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0037 },
157072 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157073 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157074 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0018 },
157075 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157076 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0037 },
157077 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157078 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157079 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157080 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157081 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
157082 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157083 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
157084 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157085 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157086 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
157087 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157088 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
157089 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157090 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157091 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157092 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157093 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0016 },
157094 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157095 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0076 },
157096 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157097 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157098 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0016 },
157099 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157100 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0076 },
157101 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157102 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157103 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157104 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157105 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0017 },
157106 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157107 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
157108 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157109 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157110 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0017 },
157111 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157112 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
157113 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157114 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157115 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157116 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157117 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
157118 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157119 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
157120 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157121 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157122 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
157123 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157124 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
157125 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157126 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157127 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157128 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157129 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0007 },
157130 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157131 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0086 },
157132 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157133 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157134 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0007 },
157135 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157136 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0086 },
157137 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157138 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157139 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157140 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157141 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
157142 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157143 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
157144 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157145 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157146 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
157147 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157148 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
157149 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157150 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157151 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157152 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157153 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
157154 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157155 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157156 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157157 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157158 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
157159 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157160 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157161 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157162 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
157163 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0042 },
157164 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
157165 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0042 },
157166 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157167 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157168 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157169 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157170 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157171 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157172 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157173 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157174 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157175 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157176 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157177 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157178 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157179 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157180 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157181 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x007f },
157182 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157183 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157184 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157185 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157186 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x007f },
157187 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157188 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157189 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157190 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157191 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157192 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157193 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
157194 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157195 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
157196 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157197 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157198 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
157199 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157200 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
157201 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157202 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157203 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157204 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157205 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x003c },
157206 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157207 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0011 },
157208 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157209 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157210 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x003c },
157211 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157212 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0011 },
157213 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157214 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157215 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157216 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157217 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000c },
157218 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157219 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x002a },
157220 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157221 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157222 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000c },
157223 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157224 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x002a },
157225 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157226 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157227 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157228 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157229 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000f },
157230 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157231 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0046 },
157232 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157233 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157234 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000f },
157235 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157236 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0046 },
157237 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157238 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157239 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157240 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157241 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
157242 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157243 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0044 },
157244 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157245 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157246 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
157247 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157248 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0044 },
157249 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157250 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157251 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157252 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157253 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0003 },
157254 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157255 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0009 },
157256 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157257 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157258 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0003 },
157259 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157260 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0009 },
157261 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157262 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157263 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157264 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157265 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001c },
157266 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157267 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x004c },
157268 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157269 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157270 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001c },
157271 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157272 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x004c },
157273 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157274 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157275 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157276 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157277 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157278 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157279 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001b },
157280 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157281 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
157282 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157283 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157284 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001b },
157285 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157286 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
157287 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157288 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157289 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157290 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157291 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0019 },
157292 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157293 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0025 },
157294 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157295 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157296 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0019 },
157297 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157298 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0025 },
157299 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157300 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157301 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157302 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157303 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0018 },
157304 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157305 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0037 },
157306 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157307 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157308 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0018 },
157309 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157310 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0037 },
157311 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157312 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157313 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157314 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157315 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
157316 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157317 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
157318 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157319 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157320 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
157321 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157322 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
157323 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157324 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157325 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157326 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157327 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0016 },
157328 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157329 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0076 },
157330 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157331 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157332 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0016 },
157333 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157334 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0076 },
157335 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157336 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157337 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157338 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157339 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0017 },
157340 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157341 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
157342 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157343 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157344 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0017 },
157345 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157346 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
157347 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157348 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157349 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157350 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157351 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
157352 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157353 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
157354 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157355 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157356 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
157357 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157358 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
157359 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157360 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157361 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157362 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157363 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0007 },
157364 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157365 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0086 },
157366 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157367 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157368 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0007 },
157369 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157370 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0086 },
157371 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157372 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157373 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157374 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157375 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
157376 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157377 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
157378 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157379 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157380 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
157381 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157382 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
157383 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157384 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157385 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157386 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157387 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
157388 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157389 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157390 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157391 +{}
157392 +};
157393 +
157394 +static void alc285_fixup_ideapad_s740_coef(struct hda_codec *codec,
157395 +                                          const struct hda_fixup *fix,
157396 +                                          int action)
157397 +{
157398 +       switch (action) {
157399 +       case HDA_FIXUP_ACT_PRE_PROBE:
157400 +               snd_hda_add_verbs(codec, alc285_ideapad_s740_coefs);
157401 +               break;
157402 +       }
157403 +}
157404 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
157405 index dfef9c17e140..d111258c6f45 100644
157406 --- a/sound/pci/hda/patch_conexant.c
157407 +++ b/sound/pci/hda/patch_conexant.c
157408 @@ -930,18 +930,18 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
157409         SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
157410         SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
157411         SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
157412 -       SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
157413 -       SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
157414 -       SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
157415 -       SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
157416 -       SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
157417         SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
157418         SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
157419 +       SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
157420         SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
157421 -       SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
157422 -       SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
157423 +       SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
157424         SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
157425         SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
157426 +       SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
157427 +       SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
157428 +       SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
157429 +       SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
157430 +       SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
157431         SND_PCI_QUIRK(0x103c, 0x8402, "HP ProBook 645 G4", CXT_FIXUP_MUTE_LED_GPIO),
157432         SND_PCI_QUIRK(0x103c, 0x8427, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
157433         SND_PCI_QUIRK(0x103c, 0x844f, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
157434 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
157435 index 45ae845e82df..4b2cc8cb55c4 100644
157436 --- a/sound/pci/hda/patch_hdmi.c
157437 +++ b/sound/pci/hda/patch_hdmi.c
157438 @@ -1848,16 +1848,12 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
157439          */
157440         if (spec->intel_hsw_fixup) {
157441                 /*
157442 -                * On Intel platforms, device entries number is
157443 -                * changed dynamically. If there is a DP MST
157444 -                * hub connected, the device entries number is 3.
157445 -                * Otherwise, it is 1.
157446 -                * Here we manually set dev_num to 3, so that
157447 -                * we can initialize all the device entries when
157448 -                * bootup statically.
157449 +                * On Intel platforms, device entries count returned
157450 +                * by AC_PAR_DEVLIST_LEN is dynamic, and depends on
157451 +                * the type of receiver that is connected. Allocate pin
157452 +                * structures based on worst case.
157453                  */
157454 -               dev_num = 3;
157455 -               spec->dev_num = 3;
157456 +               dev_num = spec->dev_num;
157457         } else if (spec->dyn_pcm_assign && codec->dp_mst) {
157458                 dev_num = snd_hda_get_num_devices(codec, pin_nid) + 1;
157459                 /*
157460 @@ -2658,7 +2654,7 @@ static void generic_acomp_pin_eld_notify(void *audio_ptr, int port, int dev_id)
157461         /* skip notification during system suspend (but not in runtime PM);
157462          * the state will be updated at resume
157463          */
157464 -       if (snd_power_get_state(codec->card) != SNDRV_CTL_POWER_D0)
157465 +       if (codec->core.dev.power.power_state.event == PM_EVENT_SUSPEND)
157466                 return;
157467         /* ditto during suspend/resume process itself */
157468         if (snd_hdac_is_in_pm(&codec->core))
157469 @@ -2844,7 +2840,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port, int pipe)
157470         /* skip notification during system suspend (but not in runtime PM);
157471          * the state will be updated at resume
157472          */
157473 -       if (snd_power_get_state(codec->card) != SNDRV_CTL_POWER_D0)
157474 +       if (codec->core.dev.power.power_state.event == PM_EVENT_SUSPEND)
157475                 return;
157476         /* ditto during suspend/resume process itself */
157477         if (snd_hdac_is_in_pm(&codec->core))
157478 @@ -2942,7 +2938,7 @@ static int parse_intel_hdmi(struct hda_codec *codec)
157480  /* Intel Haswell and onwards; audio component with eld notifier */
157481  static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
157482 -                                const int *port_map, int port_num)
157483 +                                const int *port_map, int port_num, int dev_num)
157484  {
157485         struct hdmi_spec *spec;
157486         int err;
157487 @@ -2957,6 +2953,7 @@ static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
157488         spec->port_map = port_map;
157489         spec->port_num = port_num;
157490         spec->intel_hsw_fixup = true;
157491 +       spec->dev_num = dev_num;
157493         intel_haswell_enable_all_pins(codec, true);
157494         intel_haswell_fixup_enable_dp12(codec);
157495 @@ -2982,12 +2979,12 @@ static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
157497  static int patch_i915_hsw_hdmi(struct hda_codec *codec)
157498  {
157499 -       return intel_hsw_common_init(codec, 0x08, NULL, 0);
157500 +       return intel_hsw_common_init(codec, 0x08, NULL, 0, 3);
157501  }
157503  static int patch_i915_glk_hdmi(struct hda_codec *codec)
157504  {
157505 -       return intel_hsw_common_init(codec, 0x0b, NULL, 0);
157506 +       return intel_hsw_common_init(codec, 0x0b, NULL, 0, 3);
157507  }
157509  static int patch_i915_icl_hdmi(struct hda_codec *codec)
157510 @@ -2998,7 +2995,7 @@ static int patch_i915_icl_hdmi(struct hda_codec *codec)
157511          */
157512         static const int map[] = {0x0, 0x4, 0x6, 0x8, 0xa, 0xb};
157514 -       return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map));
157515 +       return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 3);
157516  }
157518  static int patch_i915_tgl_hdmi(struct hda_codec *codec)
157519 @@ -3010,7 +3007,7 @@ static int patch_i915_tgl_hdmi(struct hda_codec *codec)
157520         static const int map[] = {0x4, 0x6, 0x8, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
157521         int ret;
157523 -       ret = intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map));
157524 +       ret = intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 4);
157525         if (!ret) {
157526                 struct hdmi_spec *spec = codec->spec;
157528 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
157529 index a7544b77d3f7..43a63db4ab6a 100644
157530 --- a/sound/pci/hda/patch_realtek.c
157531 +++ b/sound/pci/hda/patch_realtek.c
157532 @@ -395,7 +395,6 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
157533         case 0x10ec0282:
157534         case 0x10ec0283:
157535         case 0x10ec0286:
157536 -       case 0x10ec0287:
157537         case 0x10ec0288:
157538         case 0x10ec0285:
157539         case 0x10ec0298:
157540 @@ -406,6 +405,10 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
157541         case 0x10ec0275:
157542                 alc_update_coef_idx(codec, 0xe, 0, 1<<0);
157543                 break;
157544 +       case 0x10ec0287:
157545 +               alc_update_coef_idx(codec, 0x10, 1<<9, 0);
157546 +               alc_write_coef_idx(codec, 0x8, 0x4ab7);
157547 +               break;
157548         case 0x10ec0293:
157549                 alc_update_coef_idx(codec, 0xa, 1<<13, 0);
157550                 break;
157551 @@ -2470,13 +2473,13 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
157552                       ALC882_FIXUP_ACER_ASPIRE_8930G),
157553         SND_PCI_QUIRK(0x1025, 0x0146, "Acer Aspire 6935G",
157554                       ALC882_FIXUP_ACER_ASPIRE_8930G),
157555 +       SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G",
157556 +                     ALC882_FIXUP_ACER_ASPIRE_4930G),
157557 +       SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210),
157558         SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G",
157559                       ALC882_FIXUP_ACER_ASPIRE_4930G),
157560         SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G",
157561                       ALC882_FIXUP_ACER_ASPIRE_4930G),
157562 -       SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G",
157563 -                     ALC882_FIXUP_ACER_ASPIRE_4930G),
157564 -       SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210),
157565         SND_PCI_QUIRK(0x1025, 0x021e, "Acer Aspire 5739G",
157566                       ALC882_FIXUP_ACER_ASPIRE_4930G),
157567         SND_PCI_QUIRK(0x1025, 0x0259, "Acer Aspire 5935", ALC889_FIXUP_DAC_ROUTE),
157568 @@ -2489,11 +2492,11 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
157569         SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
157570         SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
157571         SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
157572 +       SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
157573 +       SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
157574         SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
157575         SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
157576         SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP),
157577 -       SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
157578 -       SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
157580         /* All Apple entries are in codec SSIDs */
157581         SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
157582 @@ -2536,9 +2539,19 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
157583         SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
157584         SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
157585         SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
157586 +       SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
157587 +       SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
157588 +       SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
157589 +       SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
157590 +       SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
157591 +       SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
157592 +       SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
157593 +       SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
157594 +       SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
157595 +       SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
157596         SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
157597         SND_PCI_QUIRK(0x1558, 0x9506, "Clevo P955HQ", ALC1220_FIXUP_CLEVO_P950),
157598 -       SND_PCI_QUIRK(0x1558, 0x950A, "Clevo P955H[PR]", ALC1220_FIXUP_CLEVO_P950),
157599 +       SND_PCI_QUIRK(0x1558, 0x950a, "Clevo P955H[PR]", ALC1220_FIXUP_CLEVO_P950),
157600         SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
157601         SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
157602         SND_PCI_QUIRK(0x1558, 0x95e3, "Clevo P955[ER]T", ALC1220_FIXUP_CLEVO_P950),
157603 @@ -2548,14 +2561,6 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
157604         SND_PCI_QUIRK(0x1558, 0x96e1, "Clevo P960[ER][CDFN]-K", ALC1220_FIXUP_CLEVO_P950),
157605         SND_PCI_QUIRK(0x1558, 0x97e1, "Clevo P970[ER][CDFN]", ALC1220_FIXUP_CLEVO_P950),
157606         SND_PCI_QUIRK(0x1558, 0x97e2, "Clevo P970RC-M", ALC1220_FIXUP_CLEVO_P950),
157607 -       SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
157608 -       SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
157609 -       SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
157610 -       SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
157611 -       SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
157612 -       SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
157613 -       SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
157614 -       SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
157615         SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
157616         SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
157617         SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
157618 @@ -4329,6 +4334,35 @@ static void alc245_fixup_hp_x360_amp(struct hda_codec *codec,
157619         }
157620  }
157622 +/* toggle GPIO2 each time a stream is started; we use the PREPARE state instead */
157623 +static void alc274_hp_envy_pcm_hook(struct hda_pcm_stream *hinfo,
157624 +                                   struct hda_codec *codec,
157625 +                                   struct snd_pcm_substream *substream,
157626 +                                   int action)
157627 +{
157628 +       switch (action) {
157629 +       case HDA_GEN_PCM_ACT_PREPARE:
157630 +               alc_update_gpio_data(codec, 0x04, true);
157631 +               break;
157632 +       case HDA_GEN_PCM_ACT_CLEANUP:
157633 +               alc_update_gpio_data(codec, 0x04, false);
157634 +               break;
157635 +       }
157636 +}
157638 +static void alc274_fixup_hp_envy_gpio(struct hda_codec *codec,
157639 +                                     const struct hda_fixup *fix,
157640 +                                     int action)
157641 +{
157642 +       struct alc_spec *spec = codec->spec;
157644 +       if (action == HDA_FIXUP_ACT_PROBE) {
157645 +               spec->gpio_mask |= 0x04;
157646 +               spec->gpio_dir |= 0x04;
157647 +               spec->gen.pcm_playback_hook = alc274_hp_envy_pcm_hook;
157648 +       }
157651  static void alc_update_coef_led(struct hda_codec *codec,
157652                                 struct alc_coef_led *led,
157653                                 bool polarity, bool on)
157654 @@ -4438,6 +4472,25 @@ static void alc236_fixup_hp_mute_led(struct hda_codec *codec,
157655         alc236_fixup_hp_coef_micmute_led(codec, fix, action);
157656  }
157658 +static void alc236_fixup_hp_micmute_led_vref(struct hda_codec *codec,
157659 +                               const struct hda_fixup *fix, int action)
157660 +{
157661 +       struct alc_spec *spec = codec->spec;
157663 +       if (action == HDA_FIXUP_ACT_PRE_PROBE) {
157664 +               spec->cap_mute_led_nid = 0x1a;
157665 +               snd_hda_gen_add_micmute_led_cdev(codec, vref_micmute_led_set);
157666 +               codec->power_filter = led_power_filter;
157667 +       }
157668 +}
157670 +static void alc236_fixup_hp_mute_led_micmute_vref(struct hda_codec *codec,
157671 +                               const struct hda_fixup *fix, int action)
157672 +{
157673 +       alc236_fixup_hp_mute_led_coefbit(codec, fix, action);
157674 +       alc236_fixup_hp_micmute_led_vref(codec, fix, action);
157675 +}
157677  #if IS_REACHABLE(CONFIG_INPUT)
157678  static void gpio2_mic_hotkey_event(struct hda_codec *codec,
157679                                    struct hda_jack_callback *event)
157680 @@ -5667,6 +5720,18 @@ static void alc_fixup_tpt470_dacs(struct hda_codec *codec,
157681                 spec->gen.preferred_dacs = preferred_pairs;
157682  }
157684 +static void alc295_fixup_asus_dacs(struct hda_codec *codec,
157685 +                                  const struct hda_fixup *fix, int action)
157686 +{
157687 +       static const hda_nid_t preferred_pairs[] = {
157688 +               0x17, 0x02, 0x21, 0x03, 0
157689 +       };
157690 +       struct alc_spec *spec = codec->spec;
157692 +       if (action == HDA_FIXUP_ACT_PRE_PROBE)
157693 +               spec->gen.preferred_dacs = preferred_pairs;
157694 +}
157696  static void alc_shutup_dell_xps13(struct hda_codec *codec)
157697  {
157698         struct alc_spec *spec = codec->spec;
157699 @@ -6182,6 +6247,35 @@ static void alc294_fixup_gx502_hp(struct hda_codec *codec,
157700         }
157701  }
157703 +static void alc294_gu502_toggle_output(struct hda_codec *codec,
157704 +                                      struct hda_jack_callback *cb)
157705 +{
157706 +       /* Windows sets 0x10 to 0x8420 for Node 0x20, which is
157707 +        * responsible for switching between speakers and headphones
157708 +        */
157709 +       if (snd_hda_jack_detect_state(codec, 0x21) == HDA_JACK_PRESENT)
157710 +               alc_write_coef_idx(codec, 0x10, 0x8420);
157711 +       else
157712 +               alc_write_coef_idx(codec, 0x10, 0x0a20);
157713 +}
157715 +static void alc294_fixup_gu502_hp(struct hda_codec *codec,
157716 +                                 const struct hda_fixup *fix, int action)
157717 +{
157718 +       if (!is_jack_detectable(codec, 0x21))
157719 +               return;
157721 +       switch (action) {
157722 +       case HDA_FIXUP_ACT_PRE_PROBE:
157723 +               snd_hda_jack_detect_enable_callback(codec, 0x21,
157724 +                               alc294_gu502_toggle_output);
157725 +               break;
157726 +       case HDA_FIXUP_ACT_INIT:
157727 +               alc294_gu502_toggle_output(codec, NULL);
157728 +               break;
157729 +       }
157730 +}
157732  static void  alc285_fixup_hp_gpio_amp_init(struct hda_codec *codec,
157733                               const struct hda_fixup *fix, int action)
157734  {
157735 @@ -6232,6 +6326,9 @@ static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
157736  /* for alc295_fixup_hp_top_speakers */
157737  #include "hp_x360_helper.c"
157739 +/* for alc285_fixup_ideapad_s740_coef() */
157740 +#include "ideapad_s740_helper.c"
157742  enum {
157743         ALC269_FIXUP_GPIO2,
157744         ALC269_FIXUP_SONY_VAIO,
157745 @@ -6396,10 +6493,14 @@ enum {
157746         ALC294_FIXUP_ASUS_GX502_HP,
157747         ALC294_FIXUP_ASUS_GX502_PINS,
157748         ALC294_FIXUP_ASUS_GX502_VERBS,
157749 +       ALC294_FIXUP_ASUS_GU502_HP,
157750 +       ALC294_FIXUP_ASUS_GU502_PINS,
157751 +       ALC294_FIXUP_ASUS_GU502_VERBS,
157752         ALC285_FIXUP_HP_GPIO_LED,
157753         ALC285_FIXUP_HP_MUTE_LED,
157754         ALC236_FIXUP_HP_GPIO_LED,
157755         ALC236_FIXUP_HP_MUTE_LED,
157756 +       ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
157757         ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
157758         ALC295_FIXUP_ASUS_MIC_NO_PRESENCE,
157759         ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS,
157760 @@ -6415,10 +6516,13 @@ enum {
157761         ALC269_FIXUP_LEMOTE_A1802,
157762         ALC269_FIXUP_LEMOTE_A190X,
157763         ALC256_FIXUP_INTEL_NUC8_RUGGED,
157764 +       ALC233_FIXUP_INTEL_NUC8_DMIC,
157765 +       ALC233_FIXUP_INTEL_NUC8_BOOST,
157766         ALC256_FIXUP_INTEL_NUC10,
157767         ALC255_FIXUP_XIAOMI_HEADSET_MIC,
157768         ALC274_FIXUP_HP_MIC,
157769         ALC274_FIXUP_HP_HEADSET_MIC,
157770 +       ALC274_FIXUP_HP_ENVY_GPIO,
157771         ALC256_FIXUP_ASUS_HPE,
157772         ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
157773         ALC287_FIXUP_HP_GPIO_LED,
157774 @@ -6427,6 +6531,10 @@ enum {
157775         ALC282_FIXUP_ACER_DISABLE_LINEOUT,
157776         ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST,
157777         ALC256_FIXUP_ACER_HEADSET_MIC,
157778 +       ALC285_FIXUP_IDEAPAD_S740_COEF,
157779 +       ALC295_FIXUP_ASUS_DACS,
157780 +       ALC295_FIXUP_HP_OMEN,
157781 +       ALC285_FIXUP_HP_SPECTRE_X360,
157782  };
157784  static const struct hda_fixup alc269_fixups[] = {
157785 @@ -7136,6 +7244,16 @@ static const struct hda_fixup alc269_fixups[] = {
157786                 .type = HDA_FIXUP_FUNC,
157787                 .v.func = alc233_fixup_lenovo_line2_mic_hotkey,
157788         },
157789 +       [ALC233_FIXUP_INTEL_NUC8_DMIC] = {
157790 +               .type = HDA_FIXUP_FUNC,
157791 +               .v.func = alc_fixup_inv_dmic,
157792 +               .chained = true,
157793 +               .chain_id = ALC233_FIXUP_INTEL_NUC8_BOOST,
157794 +       },
157795 +       [ALC233_FIXUP_INTEL_NUC8_BOOST] = {
157796 +               .type = HDA_FIXUP_FUNC,
157797 +               .v.func = alc269_fixup_limit_int_mic_boost
157798 +       },
157799         [ALC255_FIXUP_DELL_SPK_NOISE] = {
157800                 .type = HDA_FIXUP_FUNC,
157801                 .v.func = alc_fixup_disable_aamix,
157802 @@ -7619,6 +7737,35 @@ static const struct hda_fixup alc269_fixups[] = {
157803                 .type = HDA_FIXUP_FUNC,
157804                 .v.func = alc294_fixup_gx502_hp,
157805         },
157806 +       [ALC294_FIXUP_ASUS_GU502_PINS] = {
157807 +               .type = HDA_FIXUP_PINS,
157808 +               .v.pins = (const struct hda_pintbl[]) {
157809 +                       { 0x19, 0x01a11050 }, /* rear HP mic */
157810 +                       { 0x1a, 0x01a11830 }, /* rear external mic */
157811 +                       { 0x21, 0x012110f0 }, /* rear HP out */
157812 +                       { }
157813 +               },
157814 +               .chained = true,
157815 +               .chain_id = ALC294_FIXUP_ASUS_GU502_VERBS
157816 +       },
157817 +       [ALC294_FIXUP_ASUS_GU502_VERBS] = {
157818 +               .type = HDA_FIXUP_VERBS,
157819 +               .v.verbs = (const struct hda_verb[]) {
157820 +                       /* set 0x15 to HP-OUT ctrl */
157821 +                       { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0 },
157822 +                       /* unmute the 0x15 amp */
157823 +                       { 0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000 },
157824 +                       /* set 0x1b to HP-OUT */
157825 +                       { 0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24 },
157826 +                       { }
157827 +               },
157828 +               .chained = true,
157829 +               .chain_id = ALC294_FIXUP_ASUS_GU502_HP
157830 +       },
157831 +       [ALC294_FIXUP_ASUS_GU502_HP] = {
157832 +               .type = HDA_FIXUP_FUNC,
157833 +               .v.func = alc294_fixup_gu502_hp,
157834 +       },
157835         [ALC294_FIXUP_ASUS_COEF_1B] = {
157836                 .type = HDA_FIXUP_VERBS,
157837                 .v.verbs = (const struct hda_verb[]) {
157838 @@ -7646,6 +7793,10 @@ static const struct hda_fixup alc269_fixups[] = {
157839                 .type = HDA_FIXUP_FUNC,
157840                 .v.func = alc236_fixup_hp_mute_led,
157841         },
157842 +       [ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF] = {
157843 +               .type = HDA_FIXUP_FUNC,
157844 +               .v.func = alc236_fixup_hp_mute_led_micmute_vref,
157845 +       },
157846         [ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET] = {
157847                 .type = HDA_FIXUP_VERBS,
157848                 .v.verbs = (const struct hda_verb[]) {
157849 @@ -7844,6 +7995,10 @@ static const struct hda_fixup alc269_fixups[] = {
157850                 .chained = true,
157851                 .chain_id = ALC274_FIXUP_HP_MIC
157852         },
157853 +       [ALC274_FIXUP_HP_ENVY_GPIO] = {
157854 +               .type = HDA_FIXUP_FUNC,
157855 +               .v.func = alc274_fixup_hp_envy_gpio,
157856 +       },
157857         [ALC256_FIXUP_ASUS_HPE] = {
157858                 .type = HDA_FIXUP_VERBS,
157859                 .v.verbs = (const struct hda_verb[]) {
157860 @@ -7901,6 +8056,45 @@ static const struct hda_fixup alc269_fixups[] = {
157861                 .chained = true,
157862                 .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
157863         },
157864 +       [ALC285_FIXUP_IDEAPAD_S740_COEF] = {
157865 +               .type = HDA_FIXUP_FUNC,
157866 +               .v.func = alc285_fixup_ideapad_s740_coef,
157867 +               .chained = true,
157868 +               .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
157869 +       },
157870 +       [ALC295_FIXUP_ASUS_DACS] = {
157871 +               .type = HDA_FIXUP_FUNC,
157872 +               .v.func = alc295_fixup_asus_dacs,
157873 +       },
157874 +       [ALC295_FIXUP_HP_OMEN] = {
157875 +               .type = HDA_FIXUP_PINS,
157876 +               .v.pins = (const struct hda_pintbl[]) {
157877 +                       { 0x12, 0xb7a60130 },
157878 +                       { 0x13, 0x40000000 },
157879 +                       { 0x14, 0x411111f0 },
157880 +                       { 0x16, 0x411111f0 },
157881 +                       { 0x17, 0x90170110 },
157882 +                       { 0x18, 0x411111f0 },
157883 +                       { 0x19, 0x02a11030 },
157884 +                       { 0x1a, 0x411111f0 },
157885 +                       { 0x1b, 0x04a19030 },
157886 +                       { 0x1d, 0x40600001 },
157887 +                       { 0x1e, 0x411111f0 },
157888 +                       { 0x21, 0x03211020 },
157889 +                       {}
157890 +               },
157891 +               .chained = true,
157892 +               .chain_id = ALC269_FIXUP_HP_LINE1_MIC1_LED,
157893 +       },
157894 +       [ALC285_FIXUP_HP_SPECTRE_X360] = {
157895 +               .type = HDA_FIXUP_PINS,
157896 +               .v.pins = (const struct hda_pintbl[]) {
157897 +                       { 0x14, 0x90170110 }, /* enable top speaker */
157898 +                       {}
157899 +               },
157900 +               .chained = true,
157901 +               .chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1,
157902 +       },
157903  };
157905  static const struct snd_pci_quirk alc269_fixup_tbl[] = {
157906 @@ -7909,12 +8103,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
157907         SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
157908         SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
157909         SND_PCI_QUIRK(0x1025, 0x072d, "Acer Aspire V5-571G", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
157910 -       SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
157911         SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
157912         SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
157913         SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
157914         SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
157915         SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
157916 +       SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
157917         SND_PCI_QUIRK(0x1025, 0x0840, "Acer Aspire E1", ALC269VB_FIXUP_ASPIRE_E1_COEF),
157918         SND_PCI_QUIRK(0x1025, 0x101c, "Acer Veriton N2510G", ALC269_FIXUP_LIFEBOOK),
157919         SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
157920 @@ -7970,8 +8164,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
157921         SND_PCI_QUIRK(0x1028, 0x0738, "Dell Precision 5820", ALC269_FIXUP_NO_SHUTUP),
157922         SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
157923         SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
157924 -       SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
157925         SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
157926 +       SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
157927         SND_PCI_QUIRK(0x1028, 0x080c, "Dell WYSE", ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE),
157928         SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
157929         SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
157930 @@ -7981,8 +8175,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
157931         SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
157932         SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
157933         SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
157934 -       SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
157935         SND_PCI_QUIRK(0x1028, 0x097d, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
157936 +       SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
157937         SND_PCI_QUIRK(0x1028, 0x098d, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
157938         SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
157939         SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
157940 @@ -7993,35 +8187,18 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
157941         SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
157942         SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
157943         SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
157944 -       SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY),
157945 -       /* ALC282 */
157946         SND_PCI_QUIRK(0x103c, 0x21f9, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
157947         SND_PCI_QUIRK(0x103c, 0x2210, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
157948         SND_PCI_QUIRK(0x103c, 0x2214, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
157949 +       SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
157950 +       SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
157951 +       SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
157952 +       SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
157953         SND_PCI_QUIRK(0x103c, 0x2236, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
157954         SND_PCI_QUIRK(0x103c, 0x2237, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
157955         SND_PCI_QUIRK(0x103c, 0x2238, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
157956         SND_PCI_QUIRK(0x103c, 0x2239, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
157957         SND_PCI_QUIRK(0x103c, 0x224b, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
157958 -       SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
157959 -       SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
157960 -       SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
157961 -       SND_PCI_QUIRK(0x103c, 0x226e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
157962 -       SND_PCI_QUIRK(0x103c, 0x2271, "HP", ALC286_FIXUP_HP_GPIO_LED),
157963 -       SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC280_FIXUP_HP_DOCK_PINS),
157964 -       SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC280_FIXUP_HP_DOCK_PINS),
157965 -       SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
157966 -       SND_PCI_QUIRK(0x103c, 0x22b2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
157967 -       SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
157968 -       SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
157969 -       SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
157970 -       SND_PCI_QUIRK(0x103c, 0x22db, "HP", ALC280_FIXUP_HP_9480M),
157971 -       SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
157972 -       SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
157973 -       /* ALC290 */
157974 -       SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
157975 -       SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
157976 -       SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
157977         SND_PCI_QUIRK(0x103c, 0x2253, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
157978         SND_PCI_QUIRK(0x103c, 0x2254, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
157979         SND_PCI_QUIRK(0x103c, 0x2255, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
157980 @@ -8029,28 +8206,45 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
157981         SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
157982         SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
157983         SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED),
157984 +       SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY),
157985         SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
157986         SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
157987         SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
157988         SND_PCI_QUIRK(0x103c, 0x2265, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
157989 +       SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
157990 +       SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
157991 +       SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
157992 +       SND_PCI_QUIRK(0x103c, 0x226e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
157993 +       SND_PCI_QUIRK(0x103c, 0x2271, "HP", ALC286_FIXUP_HP_GPIO_LED),
157994         SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
157995 +       SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC280_FIXUP_HP_DOCK_PINS),
157996         SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
157997 +       SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC280_FIXUP_HP_DOCK_PINS),
157998         SND_PCI_QUIRK(0x103c, 0x2278, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
157999         SND_PCI_QUIRK(0x103c, 0x227f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
158000         SND_PCI_QUIRK(0x103c, 0x2282, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
158001         SND_PCI_QUIRK(0x103c, 0x228b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
158002         SND_PCI_QUIRK(0x103c, 0x228e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
158003 +       SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
158004 +       SND_PCI_QUIRK(0x103c, 0x22b2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
158005 +       SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
158006 +       SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
158007 +       SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
158008         SND_PCI_QUIRK(0x103c, 0x22c5, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
158009         SND_PCI_QUIRK(0x103c, 0x22c7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
158010         SND_PCI_QUIRK(0x103c, 0x22c8, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
158011 -       SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
158012 +       SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
158013 +       SND_PCI_QUIRK(0x103c, 0x22db, "HP", ALC280_FIXUP_HP_9480M),
158014 +       SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
158015 +       SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
158016         SND_PCI_QUIRK(0x103c, 0x2334, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
158017         SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
158018         SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
158019         SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
158020 -       SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
158021         SND_PCI_QUIRK(0x103c, 0x802e, "HP Z240 SFF", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
158022         SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
158023 +       SND_PCI_QUIRK(0x103c, 0x8077, "HP", ALC256_FIXUP_HP_HEADSET_MIC),
158024 +       SND_PCI_QUIRK(0x103c, 0x8158, "HP", ALC256_FIXUP_HP_HEADSET_MIC),
158025         SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
158026         SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
158027         SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
158028 @@ -8059,10 +8253,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
158029         SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
158030         SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
158031         SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
158032 +       SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
158033         SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
158034 +       SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
158035         SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
158036 +       SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
158037         SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
158038         SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
158039 +       SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
158040         SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
158041         SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
158042         SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
158043 @@ -8087,16 +8285,19 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
158044         SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
158045         SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
158046         SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
158047 +       SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
158048         SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
158049         SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
158050         SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
158051 -       SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
158052         SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
158053 +       SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
158054         SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
158055         SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
158056         SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
158057         SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
158058 +       SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
158059         SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
158060 +       SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
158061         SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
158062         SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
158063         SND_PCI_QUIRK(0x1043, 0x194e, "ASUS UX563FD", ALC294_FIXUP_ASUS_HPE),
158064 @@ -8109,31 +8310,32 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
158065         SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
158066         SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
158067         SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
158068 -       SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
158069         SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
158070         SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
158071         SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
158072 +       SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
158073 +       SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
158074         SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
158075 -       SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
158076         SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
158077         SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
158078         SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
158079         SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
158080         SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
158081         SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101),
158082 -       SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
158083 -       SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
158084         SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2),
158085         SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
158086         SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
158087         SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX),
158088 +       SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
158089 +       SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
158090         SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
158091         SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT),
158092         SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
158093 -       SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
158094         SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
158095 +       SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
158096         SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
158097         SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
158098 +       SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
158099         SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
158100         SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
158101         SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
158102 @@ -8143,9 +8345,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
158103         SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
158104         SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
158105         SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NP930XCJ-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
158106 -       SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
158107         SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
158108         SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
158109 +       SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
158110         SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
158111         SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
158112         SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
158113 @@ -8168,12 +8370,19 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
158114         SND_PCI_QUIRK(0x1558, 0x50b8, "Clevo NK50SZ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158115         SND_PCI_QUIRK(0x1558, 0x50d5, "Clevo NP50D5", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158116         SND_PCI_QUIRK(0x1558, 0x50f0, "Clevo NH50A[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158117 +       SND_PCI_QUIRK(0x1558, 0x50f2, "Clevo NH50E[PR]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158118         SND_PCI_QUIRK(0x1558, 0x50f3, "Clevo NH58DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158119 +       SND_PCI_QUIRK(0x1558, 0x50f5, "Clevo NH55EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158120 +       SND_PCI_QUIRK(0x1558, 0x50f6, "Clevo NH55DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158121         SND_PCI_QUIRK(0x1558, 0x5101, "Clevo S510WU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158122         SND_PCI_QUIRK(0x1558, 0x5157, "Clevo W517GU1", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158123         SND_PCI_QUIRK(0x1558, 0x51a1, "Clevo NS50MU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158124         SND_PCI_QUIRK(0x1558, 0x70a1, "Clevo NB70T[HJK]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158125         SND_PCI_QUIRK(0x1558, 0x70b3, "Clevo NK70SB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158126 +       SND_PCI_QUIRK(0x1558, 0x70f2, "Clevo NH79EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158127 +       SND_PCI_QUIRK(0x1558, 0x70f3, "Clevo NH77DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158128 +       SND_PCI_QUIRK(0x1558, 0x70f4, "Clevo NH77EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158129 +       SND_PCI_QUIRK(0x1558, 0x70f6, "Clevo NH77DPQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158130         SND_PCI_QUIRK(0x1558, 0x8228, "Clevo NR40BU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158131         SND_PCI_QUIRK(0x1558, 0x8520, "Clevo NH50D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158132         SND_PCI_QUIRK(0x1558, 0x8521, "Clevo NH77D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158133 @@ -8191,9 +8400,17 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
158134         SND_PCI_QUIRK(0x1558, 0x8a51, "Clevo NH70RCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158135         SND_PCI_QUIRK(0x1558, 0x8d50, "Clevo NH55RCQ-M", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158136         SND_PCI_QUIRK(0x1558, 0x951d, "Clevo N950T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158137 +       SND_PCI_QUIRK(0x1558, 0x9600, "Clevo N960K[PR]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158138         SND_PCI_QUIRK(0x1558, 0x961d, "Clevo N960S[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158139         SND_PCI_QUIRK(0x1558, 0x971d, "Clevo N970T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158140         SND_PCI_QUIRK(0x1558, 0xa500, "Clevo NL53RU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158141 +       SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL5XNU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158142 +       SND_PCI_QUIRK(0x1558, 0xb018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158143 +       SND_PCI_QUIRK(0x1558, 0xb019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158144 +       SND_PCI_QUIRK(0x1558, 0xb022, "Clevo NH77D[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158145 +       SND_PCI_QUIRK(0x1558, 0xc018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158146 +       SND_PCI_QUIRK(0x1558, 0xc019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158147 +       SND_PCI_QUIRK(0x1558, 0xc022, "Clevo NH77[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
158148         SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
158149         SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
158150         SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
158151 @@ -8201,9 +8418,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
158152         SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
158153         SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
158154         SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
158155 +       SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
158156         SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST),
158157         SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
158158 -       SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
158159         SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
158160         SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
158161         SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
158162 @@ -8244,9 +8461,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
158163         SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
158164         SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
158165         SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
158166 +       SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
158167         SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
158168         SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
158169         SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
158170 +       SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
158171         SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
158172         SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
158173         SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
158174 @@ -8265,20 +8484,19 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
158175         SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
158176         SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
158177         SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
158178 -       SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
158179         SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
158180         SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
158181         SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20),
158182         SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
158183         SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101),
158184         SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
158185 +       SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
158186 +       SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
158187         SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
158188         SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
158189         SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
158190         SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
158191 -       SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
158192 -       SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
158193 -       SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
158194 +       SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
158195         SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
158196         SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
158198 @@ -8457,6 +8675,8 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
158199         {.id = ALC255_FIXUP_XIAOMI_HEADSET_MIC, .name = "alc255-xiaomi-headset"},
158200         {.id = ALC274_FIXUP_HP_MIC, .name = "alc274-hp-mic-detect"},
158201         {.id = ALC245_FIXUP_HP_X360_AMP, .name = "alc245-hp-x360-amp"},
158202 +       {.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"},
158203 +       {.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"},
158204         {}
158205  };
158206  #define ALC225_STANDARD_PINS \
158207 @@ -8733,12 +8953,17 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
158208                 {0x12, 0x90a60130},
158209                 {0x19, 0x03a11020},
158210                 {0x21, 0x0321101f}),
158211 -       SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
158212 +       SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
158213 +               {0x12, 0x90a60130},
158214                 {0x14, 0x90170110},
158215                 {0x19, 0x04a11040},
158216                 {0x21, 0x04211020}),
158217         SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
158218 -               {0x12, 0x90a60130},
158219 +               {0x14, 0x90170110},
158220 +               {0x19, 0x04a11040},
158221 +               {0x1d, 0x40600001},
158222 +               {0x21, 0x04211020}),
158223 +       SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
158224                 {0x14, 0x90170110},
158225                 {0x19, 0x04a11040},
158226                 {0x21, 0x04211020}),
158227 @@ -9224,8 +9449,7 @@ static const struct snd_pci_quirk alc861_fixup_tbl[] = {
158228         SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", ALC861_FIXUP_ASUS_A6RP),
158229         SND_PCI_QUIRK_VENDOR(0x1043, "ASUS laptop", ALC861_FIXUP_AMP_VREF_0F),
158230         SND_PCI_QUIRK(0x1462, 0x7254, "HP DX2200", ALC861_FIXUP_NO_JACK_DETECT),
158231 -       SND_PCI_QUIRK(0x1584, 0x2b01, "Haier W18", ALC861_FIXUP_AMP_VREF_0F),
158232 -       SND_PCI_QUIRK(0x1584, 0x0000, "Uniwill ECS M31EI", ALC861_FIXUP_AMP_VREF_0F),
158233 +       SND_PCI_QUIRK_VENDOR(0x1584, "Haier/Uniwill", ALC861_FIXUP_AMP_VREF_0F),
158234         SND_PCI_QUIRK(0x1734, 0x10c7, "FSC Amilo Pi1505", ALC861_FIXUP_FSC_AMILO_PI1505),
158235         {}
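
The hunk above collapses the two per-device Haier/Uniwill rows into one vendor-wide entry. As a reference for how such SSID quirk tables are consumed, here is a minimal, self-contained C sketch of the lookup; the struct layout, the match_any flag and the fixup IDs are assumptions for illustration, not the real SND_PCI_QUIRK machinery.

#include <stdint.h>
#include <stdio.h>

struct ssid_quirk {
	uint16_t subvendor;
	uint16_t subdevice;   /* ignored when match_any is set */
	int      match_any;   /* vendor-wide entry, like SND_PCI_QUIRK_VENDOR() */
	const char *name;
	int      fixup;
};

/* Illustrative fixup IDs, not the real ALC861 enum values. */
enum { FIXUP_NONE, FIXUP_AMP_VREF_0F, FIXUP_FSC_AMILO_PI1505 };

static const struct ssid_quirk quirks[] = {
	/* one vendor-wide entry replaces the per-device Haier/Uniwill rows */
	{ 0x1584, 0x0000, 1, "Haier/Uniwill",    FIXUP_AMP_VREF_0F },
	{ 0x1734, 0x10c7, 0, "FSC Amilo Pi1505", FIXUP_FSC_AMILO_PI1505 },
	{ 0 }
};

static int lookup_fixup(uint16_t sv, uint16_t sd)
{
	for (const struct ssid_quirk *q = quirks; q->subvendor; q++)
		if (q->subvendor == sv && (q->match_any || q->subdevice == sd))
			return q->fixup;
	return FIXUP_NONE;
}

int main(void)
{
	/* Both Haier W18 (0x2b01) and any other 0x1584 SSID hit the same row. */
	printf("%d %d\n", lookup_fixup(0x1584, 0x2b01), lookup_fixup(0x1734, 0x10c7));
	return 0;
}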
158237 @@ -10020,6 +10244,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
158238         SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
158239         SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC),
158240         SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
158241 +       SND_PCI_QUIRK(0x1025, 0x0566, "Acer Aspire Ethos 8951G", ALC669_FIXUP_ACER_ASPIRE_ETHOS),
158242         SND_PCI_QUIRK(0x1025, 0x123c, "Acer Nitro N50-600", ALC662_FIXUP_ACER_NITRO_HEADSET_MODE),
158243         SND_PCI_QUIRK(0x1025, 0x124e, "Acer 2660G", ALC662_FIXUP_ACER_X2660G_HEADSET_MODE),
158244         SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
158245 @@ -10036,9 +10261,9 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
158246         SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
158247         SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
158248         SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
158249 -       SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
158250         SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
158251         SND_PCI_QUIRK(0x1043, 0x12ff, "ASUS G751", ALC668_FIXUP_ASUS_G751),
158252 +       SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
158253         SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
158254         SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
158255         SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
158256 @@ -10058,7 +10283,6 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
158257         SND_PCI_QUIRK(0x1b0a, 0x01b8, "ACER Veriton", ALC662_FIXUP_ACER_VERITON),
158258         SND_PCI_QUIRK(0x1b35, 0x1234, "CZC ET26", ALC662_FIXUP_CZC_ET26),
158259         SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
158260 -       SND_PCI_QUIRK(0x1025, 0x0566, "Acer Aspire Ethos 8951G", ALC669_FIXUP_ACER_ASPIRE_ETHOS),
158262  #if 0
158263         /* Below is a quirk table taken from the old code.
158264 diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
158265 index 35903d1a1cbd..5b124c4ad572 100644
158266 --- a/sound/pci/intel8x0.c
158267 +++ b/sound/pci/intel8x0.c
158268 @@ -331,6 +331,7 @@ struct ichdev {
158269         unsigned int ali_slot;                  /* ALI DMA slot */
158270         struct ac97_pcm *pcm;
158271         int pcm_open_flag;
158272 +       unsigned int prepared:1;
158273         unsigned int suspended: 1;
158276 @@ -691,6 +692,9 @@ static inline void snd_intel8x0_update(struct intel8x0 *chip, struct ichdev *ich
158277         int status, civ, i, step;
158278         int ack = 0;
158280 +       if (!ichdev->prepared || ichdev->suspended)
158281 +               return;
158283         spin_lock_irqsave(&chip->reg_lock, flags);
158284         status = igetbyte(chip, port + ichdev->roff_sr);
158285         civ = igetbyte(chip, port + ICH_REG_OFF_CIV);
158286 @@ -881,6 +885,7 @@ static int snd_intel8x0_hw_params(struct snd_pcm_substream *substream,
158287         if (ichdev->pcm_open_flag) {
158288                 snd_ac97_pcm_close(ichdev->pcm);
158289                 ichdev->pcm_open_flag = 0;
158290 +               ichdev->prepared = 0;
158291         }
158292         err = snd_ac97_pcm_open(ichdev->pcm, params_rate(hw_params),
158293                                 params_channels(hw_params),
158294 @@ -902,6 +907,7 @@ static int snd_intel8x0_hw_free(struct snd_pcm_substream *substream)
158295         if (ichdev->pcm_open_flag) {
158296                 snd_ac97_pcm_close(ichdev->pcm);
158297                 ichdev->pcm_open_flag = 0;
158298 +               ichdev->prepared = 0;
158299         }
158300         return 0;
158302 @@ -976,6 +982,7 @@ static int snd_intel8x0_pcm_prepare(struct snd_pcm_substream *substream)
158303                         ichdev->pos_shift = (runtime->sample_bits > 16) ? 2 : 1;
158304         }
158305         snd_intel8x0_setup_periods(chip, ichdev);
158306 +       ichdev->prepared = 1;
158307         return 0;
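
For reference, the guard added above can be modeled in a few lines of portable C; the struct and function names below are invented for the sketch, and only the prepared/suspended bitfields mirror the patch.

#include <stdio.h>

/* Minimal model of the ichdev state added above. */
struct ichdev_model {
	unsigned int prepared:1;
	unsigned int suspended:1;
};

static void update(struct ichdev_model *d)
{
	/* Mirrors the new early return: a stray interrupt that arrives
	 * before prepare() or during suspend must not touch the ring
	 * buffer registers. */
	if (!d->prepared || d->suspended)
		return;
	puts("advancing DMA period");
}

int main(void)
{
	struct ichdev_model d = { 0, 0 };
	update(&d);          /* ignored: not prepared yet */
	d.prepared = 1;
	update(&d);          /* serviced */
	d.suspended = 1;
	update(&d);          /* ignored again while suspended */
	return 0;
}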
158310 diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
158311 index cdc4b6106252..159c40ec680d 100644
158312 --- a/sound/pci/maestro3.c
158313 +++ b/sound/pci/maestro3.c
158314 @@ -1990,7 +1990,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip)
158315                 outw(0, io + GPIO_DATA);
158316                 outw(dir | GPO_PRIMARY_AC97, io + GPIO_DIRECTION);
158318 -               schedule_timeout_uninterruptible(msecs_to_jiffies(delay1));
158319 +               schedule_msec_hrtimeout_uninterruptible((delay1));
158321                 outw(GPO_PRIMARY_AC97, io + GPIO_DATA);
158322                 udelay(5);
158323 @@ -1998,7 +1998,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip)
158324                 outw(IO_SRAM_ENABLE | SERIAL_AC_LINK_ENABLE, io + RING_BUS_CTRL_A);
158325                 outw(~0, io + GPIO_MASK);
158327 -               schedule_timeout_uninterruptible(msecs_to_jiffies(delay2));
158328 +               schedule_msec_hrtimeout_uninterruptible((delay2));
158330                 if (! snd_m3_try_read_vendor(chip))
158331                         break;
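
This is part of the xanmod-wide swap of jiffy-granularity sleeps for hrtimer-backed millisecond sleeps. The payoff is easiest to see in a runnable model of the msecs-to-jiffies rounding that the old calls relied on; the HZ values and the rounding formula below are a user-space approximation of the kernel helper.

#include <stdio.h>

/* A jiffy-based sleep can only be a whole number of ticks, so short
 * delays inflate as HZ drops.  schedule_msec_hrtimeout_*() avoids this
 * by arming an hrtimer instead. */
static unsigned long msecs_to_jiffies_model(unsigned int ms, unsigned int hz)
{
	return (ms * hz + 999) / 1000;   /* round up to a full tick */
}

int main(void)
{
	const unsigned int hz[] = { 100, 250, 1000 };
	for (unsigned i = 0; i < 3; i++) {
		unsigned long j = msecs_to_jiffies_model(1, hz[i]);
		printf("HZ=%-4u: 1ms sleep -> %lu tick(s) = %lu ms actual\n",
		       hz[i], j, j * 1000 / hz[i]);
	}
	return 0;
}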
158332 diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
158333 index 4cf879c42dc4..720297cbdf87 100644
158334 --- a/sound/pci/rme9652/hdsp.c
158335 +++ b/sound/pci/rme9652/hdsp.c
158336 @@ -5390,7 +5390,8 @@ static int snd_hdsp_free(struct hdsp *hdsp)
158337         if (hdsp->port)
158338                 pci_release_regions(hdsp->pci);
158340 -       pci_disable_device(hdsp->pci);
158341 +       if (pci_is_enabled(hdsp->pci))
158342 +               pci_disable_device(hdsp->pci);
158343         return 0;
158346 diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
158347 index 8d900c132f0f..97a0bff96b28 100644
158348 --- a/sound/pci/rme9652/hdspm.c
158349 +++ b/sound/pci/rme9652/hdspm.c
158350 @@ -6883,7 +6883,8 @@ static int snd_hdspm_free(struct hdspm * hdspm)
158351         if (hdspm->port)
158352                 pci_release_regions(hdspm->pci);
158354 -       pci_disable_device(hdspm->pci);
158355 +       if (pci_is_enabled(hdspm->pci))
158356 +               pci_disable_device(hdspm->pci);
158357         return 0;
158360 diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
158361 index 4df992e846f2..7a4d395abcee 100644
158362 --- a/sound/pci/rme9652/rme9652.c
158363 +++ b/sound/pci/rme9652/rme9652.c
158364 @@ -1731,7 +1731,8 @@ static int snd_rme9652_free(struct snd_rme9652 *rme9652)
158365         if (rme9652->port)
158366                 pci_release_regions(rme9652->pci);
158368 -       pci_disable_device(rme9652->pci);
158369 +       if (pci_is_enabled(rme9652->pci))
158370 +               pci_disable_device(rme9652->pci);
158371         return 0;
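
The same guard is applied to hdsp.c, hdspm.c and rme9652.c above: only disable the PCI device if it is still enabled, so a free path that runs before the device was ever enabled does not unbalance the enable count. A toy model, with fake_* stand-ins replacing the real PCI API:

#include <stdio.h>

/* Toy stand-in for the PCI enable state; pci_is_enabled() in the real
 * kernel checks a reference count on struct pci_dev. */
struct fake_pci_dev { int enable_cnt; };

static int  fake_pci_is_enabled(struct fake_pci_dev *d) { return d->enable_cnt > 0; }
static void fake_pci_disable(struct fake_pci_dev *d)    { d->enable_cnt--; puts("disabled"); }

static void card_free(struct fake_pci_dev *d)
{
	/* The guarded form from the hunks above. */
	if (fake_pci_is_enabled(d))
		fake_pci_disable(d);
}

int main(void)
{
	struct fake_pci_dev never_enabled = { 0 }, enabled = { 1 };
	card_free(&never_enabled);  /* no-op instead of an underflow */
	card_free(&enabled);        /* prints "disabled" exactly once */
	return 0;
}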
158374 diff --git a/sound/soc/codecs/ak5558.c b/sound/soc/codecs/ak5558.c
158375 index 85bdd0534180..80b3b162ca5b 100644
158376 --- a/sound/soc/codecs/ak5558.c
158377 +++ b/sound/soc/codecs/ak5558.c
158378 @@ -272,7 +272,7 @@ static void ak5558_power_off(struct ak5558_priv *ak5558)
158379         if (!ak5558->reset_gpiod)
158380                 return;
158382 -       gpiod_set_value_cansleep(ak5558->reset_gpiod, 0);
158383 +       gpiod_set_value_cansleep(ak5558->reset_gpiod, 1);
158384         usleep_range(1000, 2000);
158387 @@ -281,7 +281,7 @@ static void ak5558_power_on(struct ak5558_priv *ak5558)
158388         if (!ak5558->reset_gpiod)
158389                 return;
158391 -       gpiod_set_value_cansleep(ak5558->reset_gpiod, 1);
158392 +       gpiod_set_value_cansleep(ak5558->reset_gpiod, 0);
158393         usleep_range(1000, 2000);
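
The swap above is easy to misread: gpiod_set_value_cansleep() takes a logical value, so when the reset line is flagged active-low in the device tree, logical 1 drives the pin low and asserts reset. A tiny runnable model of that mapping; the struct is invented, and only the active-low rule reflects the gpiod convention.

#include <stdio.h>

struct gpio_model { int active_low; };

/* Logical-to-physical mapping done by the gpiod layer. */
static int physical_level(const struct gpio_model *g, int logical)
{
	return g->active_low ? !logical : logical;
}

int main(void)
{
	struct gpio_model reset = { .active_low = 1 };
	/* After the fix: power_off asserts reset with logical 1 ... */
	printf("power_off: logical 1 -> pin %d (reset asserted)\n",
	       physical_level(&reset, 1));
	/* ... and power_on releases it with logical 0. */
	printf("power_on:  logical 0 -> pin %d (reset released)\n",
	       physical_level(&reset, 0));
	return 0;
}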
158396 diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
158397 index 8abe232ca4a4..ff23a7d4d2ac 100644
158398 --- a/sound/soc/codecs/rt286.c
158399 +++ b/sound/soc/codecs/rt286.c
158400 @@ -171,6 +171,9 @@ static bool rt286_readable_register(struct device *dev, unsigned int reg)
158401         case RT286_PROC_COEF:
158402         case RT286_SET_AMP_GAIN_ADC_IN1:
158403         case RT286_SET_AMP_GAIN_ADC_IN2:
158404 +       case RT286_SET_GPIO_MASK:
158405 +       case RT286_SET_GPIO_DIRECTION:
158406 +       case RT286_SET_GPIO_DATA:
158407         case RT286_SET_POWER(RT286_DAC_OUT1):
158408         case RT286_SET_POWER(RT286_DAC_OUT2):
158409         case RT286_SET_POWER(RT286_ADC_IN1):
158410 @@ -1117,12 +1120,11 @@ static const struct dmi_system_id force_combo_jack_table[] = {
158411         { }
158414 -static const struct dmi_system_id dmi_dell_dino[] = {
158415 +static const struct dmi_system_id dmi_dell[] = {
158416         {
158417 -               .ident = "Dell Dino",
158418 +               .ident = "Dell",
158419                 .matches = {
158420                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
158421 -                       DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343")
158422                 }
158423         },
158424         { }
158425 @@ -1133,7 +1135,7 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
158427         struct rt286_platform_data *pdata = dev_get_platdata(&i2c->dev);
158428         struct rt286_priv *rt286;
158429 -       int i, ret, val;
158430 +       int i, ret, vendor_id;
158432         rt286 = devm_kzalloc(&i2c->dev, sizeof(*rt286),
158433                                 GFP_KERNEL);
158434 @@ -1149,14 +1151,15 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
158435         }
158437         ret = regmap_read(rt286->regmap,
158438 -               RT286_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &val);
158439 +               RT286_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &vendor_id);
158440         if (ret != 0) {
158441                 dev_err(&i2c->dev, "I2C error %d\n", ret);
158442                 return ret;
158443         }
158444 -       if (val != RT286_VENDOR_ID && val != RT288_VENDOR_ID) {
158445 +       if (vendor_id != RT286_VENDOR_ID && vendor_id != RT288_VENDOR_ID) {
158446                 dev_err(&i2c->dev,
158447 -                       "Device with ID register %#x is not rt286\n", val);
158448 +                       "Device with ID register %#x is not rt286\n",
158449 +                       vendor_id);
158450                 return -ENODEV;
158451         }
158453 @@ -1180,8 +1183,8 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
158454         if (pdata)
158455                 rt286->pdata = *pdata;
158457 -       if (dmi_check_system(force_combo_jack_table) ||
158458 -               dmi_check_system(dmi_dell_dino))
158459 +       if ((vendor_id == RT288_VENDOR_ID && dmi_check_system(dmi_dell)) ||
158460 +               dmi_check_system(force_combo_jack_table))
158461                 rt286->pdata.cbj_en = true;
158463         regmap_write(rt286->regmap, RT286_SET_AUDIO_POWER, AC_PWRST_D3);
158464 @@ -1220,7 +1223,7 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
158465         regmap_update_bits(rt286->regmap, RT286_DEPOP_CTRL3, 0xf777, 0x4737);
158466         regmap_update_bits(rt286->regmap, RT286_DEPOP_CTRL4, 0x00ff, 0x003f);
158468 -       if (dmi_check_system(dmi_dell_dino)) {
158469 +       if (vendor_id == RT288_VENDOR_ID && dmi_check_system(dmi_dell)) {
158470                 regmap_update_bits(rt286->regmap,
158471                         RT286_SET_GPIO_MASK, 0x40, 0x40);
158472                 regmap_update_bits(rt286->regmap,
158473 diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
158474 index 653da3eaf355..d77d12902594 100644
158475 --- a/sound/soc/codecs/rt5631.c
158476 +++ b/sound/soc/codecs/rt5631.c
158477 @@ -417,7 +417,7 @@ static void onebit_depop_mute_stage(struct snd_soc_component *component, int ena
158478         hp_zc = snd_soc_component_read(component, RT5631_INT_ST_IRQ_CTRL_2);
158479         snd_soc_component_write(component, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff);
158480         if (enable) {
158481 -               schedule_timeout_uninterruptible(msecs_to_jiffies(10));
158482 +               schedule_msec_hrtimeout_uninterruptible((10));
158483                 /* config one-bit depop parameter */
158484                 rt5631_write_index(component, RT5631_SPK_INTL_CTRL, 0x307f);
158485                 snd_soc_component_update_bits(component, RT5631_HP_OUT_VOL,
158486 @@ -529,7 +529,7 @@ static void depop_seq_mute_stage(struct snd_soc_component *component, int enable
158487         hp_zc = snd_soc_component_read(component, RT5631_INT_ST_IRQ_CTRL_2);
158488         snd_soc_component_write(component, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff);
158489         if (enable) {
158490 -               schedule_timeout_uninterruptible(msecs_to_jiffies(10));
158491 +               schedule_msec_hrtimeout_uninterruptible((10));
158493                 /* config depop sequence parameter */
158494                 rt5631_write_index(component, RT5631_SPK_INTL_CTRL, 0x302f);
158495 diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
158496 index 4063aac2a443..dd69d874bad2 100644
158497 --- a/sound/soc/codecs/rt5670.c
158498 +++ b/sound/soc/codecs/rt5670.c
158499 @@ -2980,6 +2980,18 @@ static const struct dmi_system_id dmi_platform_intel_quirks[] = {
158500                                                  RT5670_GPIO1_IS_IRQ |
158501                                                  RT5670_JD_MODE3),
158502         },
158503 +       {
158504 +               .callback = rt5670_quirk_cb,
158505 +               .ident = "Dell Venue 10 Pro 5055",
158506 +               .matches = {
158507 +                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
158508 +                       DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"),
158509 +               },
158510 +               .driver_data = (unsigned long *)(RT5670_DMIC_EN |
158511 +                                                RT5670_DMIC2_INR |
158512 +                                                RT5670_GPIO1_IS_IRQ |
158513 +                                                RT5670_JD_MODE1),
158514 +       },
158515         {
158516                 .callback = rt5670_quirk_cb,
158517                 .ident = "Aegex 10 tablet (RU2)",
158518 diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
158519 index f04f88c8d425..b689f26fc4be 100644
158520 --- a/sound/soc/codecs/tlv320aic32x4.c
158521 +++ b/sound/soc/codecs/tlv320aic32x4.c
158522 @@ -577,12 +577,12 @@ static const struct regmap_range_cfg aic32x4_regmap_pages[] = {
158523                 .window_start = 0,
158524                 .window_len = 128,
158525                 .range_min = 0,
158526 -               .range_max = AIC32X4_RMICPGAVOL,
158527 +               .range_max = AIC32X4_REFPOWERUP,
158528         },
158531  const struct regmap_config aic32x4_regmap_config = {
158532 -       .max_register = AIC32X4_RMICPGAVOL,
158533 +       .max_register = AIC32X4_REFPOWERUP,
158534         .ranges = aic32x4_regmap_pages,
158535         .num_ranges = ARRAY_SIZE(aic32x4_regmap_pages),
158537 @@ -1243,6 +1243,10 @@ int aic32x4_probe(struct device *dev, struct regmap *regmap)
158538         if (ret)
158539                 goto err_disable_regulators;
158541 +       ret = aic32x4_register_clocks(dev, aic32x4->mclk_name);
158542 +       if (ret)
158543 +               goto err_disable_regulators;
158545         ret = devm_snd_soc_register_component(dev,
158546                         &soc_component_dev_aic32x4, &aic32x4_dai, 1);
158547         if (ret) {
158548 @@ -1250,10 +1254,6 @@ int aic32x4_probe(struct device *dev, struct regmap *regmap)
158549                 goto err_disable_regulators;
158550         }
158552 -       ret = aic32x4_register_clocks(dev, aic32x4->mclk_name);
158553 -       if (ret)
158554 -               goto err_disable_regulators;
158556         return 0;
158558  err_disable_regulators:
158559 diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
158560 index 15d42ce3b21d..897fced9589b 100644
158561 --- a/sound/soc/codecs/wm8350.c
158562 +++ b/sound/soc/codecs/wm8350.c
158563 @@ -234,10 +234,10 @@ static void wm8350_pga_work(struct work_struct *work)
158564                     out2->ramp == WM8350_RAMP_UP) {
158565                         /* delay is longer over 0dB as increases are larger */
158566                         if (i >= WM8350_OUTn_0dB)
158567 -                               schedule_timeout_interruptible(msecs_to_jiffies
158568 +                               schedule_msec_hrtimeout_interruptible(
158569                                                                (2));
158570                         else
158571 -                               schedule_timeout_interruptible(msecs_to_jiffies
158572 +                               schedule_msec_hrtimeout_interruptible(
158573                                                                (1));
158574                 } else
158575                         udelay(50);     /* doesn't matter if we delay longer */
158576 @@ -1121,7 +1121,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component,
158577                                          (platform->dis_out4 << 6));
158579                         /* wait for discharge */
158580 -                       schedule_timeout_interruptible(msecs_to_jiffies
158581 +                       schedule_msec_hrtimeout_interruptible(
158582                                                        (platform->
158583                                                         cap_discharge_msecs));
158585 @@ -1137,7 +1137,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component,
158586                                          WM8350_VBUFEN);
158588                         /* wait for vmid */
158589 -                       schedule_timeout_interruptible(msecs_to_jiffies
158590 +                       schedule_msec_hrtimeout_interruptible(
158591                                                        (platform->
158592                                                         vmid_charge_msecs));
158594 @@ -1188,7 +1188,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component,
158595                 wm8350_reg_write(wm8350, WM8350_POWER_MGMT_1, pm1);
158597                 /* wait */
158598 -               schedule_timeout_interruptible(msecs_to_jiffies
158599 +               schedule_msec_hrtimeout_interruptible(
158600                                                (platform->
158601                                                 vmid_discharge_msecs));
158603 @@ -1206,7 +1206,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component,
158604                                  pm1 | WM8350_OUTPUT_DRAIN_EN);
158606                 /* wait */
158607 -               schedule_timeout_interruptible(msecs_to_jiffies
158608 +               schedule_msec_hrtimeout_interruptible(
158609                                                (platform->drain_msecs));
158611                 pm1 &= ~WM8350_BIASEN;
158612 diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
158613 index a9a6d766a176..45bf31de6282 100644
158614 --- a/sound/soc/codecs/wm8900.c
158615 +++ b/sound/soc/codecs/wm8900.c
158616 @@ -1104,7 +1104,7 @@ static int wm8900_set_bias_level(struct snd_soc_component *component,
158617                 /* Need to let things settle before stopping the clock
158618                  * to ensure that restart works, see "Stopping the
158619                  * master clock" in the datasheet. */
158620 -               schedule_timeout_interruptible(msecs_to_jiffies(1));
158621 +               schedule_msec_hrtimeout_interruptible(1);
158622                 snd_soc_component_write(component, WM8900_REG_POWER2,
158623                              WM8900_REG_POWER2_SYSCLK_ENA);
158624                 break;
158625 diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
158626 index cda9cd935d4f..9e621a254392 100644
158627 --- a/sound/soc/codecs/wm8960.c
158628 +++ b/sound/soc/codecs/wm8960.c
158629 @@ -608,10 +608,6 @@ static const int bclk_divs[] = {
158630   *             - lrclk      = sysclk / dac_divs
158631   *             - 10 * bclk  = sysclk / bclk_divs
158632   *
158633 - *     If we cannot find an exact match for (sysclk, lrclk, bclk)
158634 - *     triplet, we relax the bclk such that bclk is chosen as the
158635 - *     closest available frequency greater than expected bclk.
158637   * @wm8960: codec private data
158638   * @mclk: MCLK used to derive sysclk
158639   * @sysclk_idx: sysclk_divs index for found sysclk
158640 @@ -629,7 +625,7 @@ int wm8960_configure_sysclk(struct wm8960_priv *wm8960, int mclk,
158642         int sysclk, bclk, lrclk;
158643         int i, j, k;
158644 -       int diff, closest = mclk;
158645 +       int diff;
158647         /* marker for no match */
158648         *bclk_idx = -1;
158649 @@ -653,12 +649,6 @@ int wm8960_configure_sysclk(struct wm8960_priv *wm8960, int mclk,
158650                                         *bclk_idx = k;
158651                                         break;
158652                                 }
158653 -                               if (diff > 0 && closest > diff) {
158654 -                                       *sysclk_idx = i;
158655 -                                       *dac_idx = j;
158656 -                                       *bclk_idx = k;
158657 -                                       closest = diff;
158658 -                               }
158659                         }
158660                         if (k != ARRAY_SIZE(bclk_divs))
158661                                 break;
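
With the closest-match fallback deleted above, wm8960_configure_sysclk() accepts only exact (sysclk, lrclk, bclk) divisor triplets. A self-contained sketch of that brute-force exact search, using cut-down illustrative divisor tables rather than the real WM8960 ones:

#include <stdio.h>

static const int sysclk_divs[] = { 1, 2 };
static const int dac_divs[]    = { 256, 384, 512 };
static const int bclk_divs[]   = { 10, 15, 20, 30, 40 };   /* x10 fixed point */

static int find_exact(int mclk, int lrclk, int bclk)
{
	for (unsigned i = 0; i < sizeof sysclk_divs / sizeof *sysclk_divs; i++) {
		int sysclk = mclk / sysclk_divs[i];
		for (unsigned j = 0; j < sizeof dac_divs / sizeof *dac_divs; j++) {
			if (sysclk != lrclk * dac_divs[j])
				continue;
			for (unsigned k = 0; k < sizeof bclk_divs / sizeof *bclk_divs; k++)
				if (sysclk * 10 / bclk_divs[k] == bclk)
					return 0;   /* exact triplet found */
		}
	}
	return -1;  /* no match: caller must try another MCLK, no approximation */
}

int main(void)
{
	/* 12.288MHz MCLK, 48kHz LRCLK, 3.072MHz BCLK resolves exactly. */
	printf("%d\n", find_exact(12288000, 48000, 3072000));
	return 0;
}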
158662 diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
158663 index e0ce32dd4a81..eb91c0282aad 100644
158664 --- a/sound/soc/codecs/wm9713.c
158665 +++ b/sound/soc/codecs/wm9713.c
158666 @@ -199,7 +199,7 @@ static int wm9713_voice_shutdown(struct snd_soc_dapm_widget *w,
158668         /* Gracefully shut down the voice interface. */
158669         snd_soc_component_update_bits(component, AC97_HANDSET_RATE, 0x0f00, 0x0200);
158670 -       schedule_timeout_interruptible(msecs_to_jiffies(1));
158671 +       schedule_msec_hrtimeout_interruptible(1);
158672         snd_soc_component_update_bits(component, AC97_HANDSET_RATE, 0x0f00, 0x0f00);
158673         snd_soc_component_update_bits(component, AC97_EXTENDED_MID, 0x1000, 0x1000);
158675 @@ -868,7 +868,7 @@ static int wm9713_set_pll(struct snd_soc_component *component,
158676         wm9713->pll_in = freq_in;
158678         /* wait 10ms AC97 link frames for the link to stabilise */
158679 -       schedule_timeout_interruptible(msecs_to_jiffies(10));
158680 +       schedule_msec_hrtimeout_interruptible((10));
158681         return 0;
158684 diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
158685 index 8c5cdcdc8713..e81b5cf0d37a 100644
158686 --- a/sound/soc/generic/audio-graph-card.c
158687 +++ b/sound/soc/generic/audio-graph-card.c
158688 @@ -380,7 +380,7 @@ static int graph_dai_link_of(struct asoc_simple_priv *priv,
158689         struct device_node *top = dev->of_node;
158690         struct asoc_simple_dai *cpu_dai;
158691         struct asoc_simple_dai *codec_dai;
158692 -       int ret, single_cpu;
158693 +       int ret, single_cpu = 0;
158695         /* Do it only CPU turn */
158696         if (!li->cpu)
158697 diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
158698 index 75365c7bb393..d916ec69c24f 100644
158699 --- a/sound/soc/generic/simple-card.c
158700 +++ b/sound/soc/generic/simple-card.c
158701 @@ -258,7 +258,7 @@ static int simple_dai_link_of(struct asoc_simple_priv *priv,
158702         struct device_node *plat = NULL;
158703         char prop[128];
158704         char *prefix = "";
158705 -       int ret, single_cpu;
158706 +       int ret, single_cpu = 0;
158708         /*
158709          *       |CPU   |Codec   : turn
158710 diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
158711 index 4e0248d2accc..7c5038803be7 100644
158712 --- a/sound/soc/intel/Makefile
158713 +++ b/sound/soc/intel/Makefile
158714 @@ -5,7 +5,7 @@ obj-$(CONFIG_SND_SOC) += common/
158715  # Platform Support
158716  obj-$(CONFIG_SND_SST_ATOM_HIFI2_PLATFORM) += atom/
158717  obj-$(CONFIG_SND_SOC_INTEL_CATPT) += catpt/
158718 -obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += skylake/
158719 +obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON) += skylake/
158720  obj-$(CONFIG_SND_SOC_INTEL_KEEMBAY) += keembay/
158722  # Machine support
158723 diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
158724 index 5d48cc359c3d..22912cab5e63 100644
158725 --- a/sound/soc/intel/boards/bytcr_rt5640.c
158726 +++ b/sound/soc/intel/boards/bytcr_rt5640.c
158727 @@ -482,6 +482,9 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
158728                         DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TAF"),
158729                 },
158730                 .driver_data = (void *)(BYT_RT5640_IN1_MAP |
158731 +                                       BYT_RT5640_JD_SRC_JD2_IN4N |
158732 +                                       BYT_RT5640_OVCD_TH_2000UA |
158733 +                                       BYT_RT5640_OVCD_SF_0P75 |
158734                                         BYT_RT5640_MONO_SPEAKER |
158735                                         BYT_RT5640_DIFF_MIC |
158736                                         BYT_RT5640_SSP0_AIF2 |
158737 @@ -515,6 +518,23 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
158738                                         BYT_RT5640_SSP0_AIF1 |
158739                                         BYT_RT5640_MCLK_EN),
158740         },
158741 +       {
158742 +               /* Chuwi Hi8 (CWI509) */
158743 +               .matches = {
158744 +                       DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
158745 +                       DMI_MATCH(DMI_BOARD_NAME, "BYT-PA03C"),
158746 +                       DMI_MATCH(DMI_SYS_VENDOR, "ilife"),
158747 +                       DMI_MATCH(DMI_PRODUCT_NAME, "S806"),
158748 +               },
158749 +               .driver_data = (void *)(BYT_RT5640_IN1_MAP |
158750 +                                       BYT_RT5640_JD_SRC_JD2_IN4N |
158751 +                                       BYT_RT5640_OVCD_TH_2000UA |
158752 +                                       BYT_RT5640_OVCD_SF_0P75 |
158753 +                                       BYT_RT5640_MONO_SPEAKER |
158754 +                                       BYT_RT5640_DIFF_MIC |
158755 +                                       BYT_RT5640_SSP0_AIF1 |
158756 +                                       BYT_RT5640_MCLK_EN),
158757 +       },
158758         {
158759                 .matches = {
158760                         DMI_MATCH(DMI_SYS_VENDOR, "Circuitco"),
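
The new Chuwi Hi8 entry follows the usual DMI-quirk pattern: match firmware identity strings, then OR together per-board flag bits. A minimal user-space model follows; the struct, flag names and values are invented, and only the shape of the table mirrors the driver.

#include <stdio.h>
#include <string.h>

#define QUIRK_IN1_MAP      (1u << 0)
#define QUIRK_MONO_SPEAKER (1u << 1)
#define QUIRK_DIFF_MIC     (1u << 2)
#define QUIRK_SSP0_AIF1    (1u << 3)
#define QUIRK_MCLK_EN      (1u << 4)

struct dmi_quirk {
	const char *sys_vendor;
	const char *product_name;
	unsigned int flags;
};

static const struct dmi_quirk table[] = {
	/* models the Chuwi Hi8 entry added above */
	{ "ilife", "S806", QUIRK_IN1_MAP | QUIRK_MONO_SPEAKER |
			   QUIRK_DIFF_MIC | QUIRK_SSP0_AIF1 | QUIRK_MCLK_EN },
	{ 0 }
};

static unsigned int dmi_quirks(const char *vendor, const char *product)
{
	for (const struct dmi_quirk *q = table; q->sys_vendor; q++)
		if (!strcmp(q->sys_vendor, vendor) &&
		    !strcmp(q->product_name, product))
			return q->flags;
	return 0;   /* no quirk: fall back to the default routing */
}

int main(void)
{
	printf("flags=%#x\n", dmi_quirks("ilife", "S806"));
	return 0;
}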
158761 diff --git a/sound/soc/intel/boards/kbl_da7219_max98927.c b/sound/soc/intel/boards/kbl_da7219_max98927.c
158762 index cc9a2509ace2..e0149cf6127d 100644
158763 --- a/sound/soc/intel/boards/kbl_da7219_max98927.c
158764 +++ b/sound/soc/intel/boards/kbl_da7219_max98927.c
158765 @@ -282,11 +282,33 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
158766         struct snd_interval *chan = hw_param_interval(params,
158767                         SNDRV_PCM_HW_PARAM_CHANNELS);
158768         struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
158769 -       struct snd_soc_dpcm *dpcm = container_of(
158770 -                       params, struct snd_soc_dpcm, hw_params);
158771 -       struct snd_soc_dai_link *fe_dai_link = dpcm->fe->dai_link;
158772 -       struct snd_soc_dai_link *be_dai_link = dpcm->be->dai_link;
158773 +       struct snd_soc_dpcm *dpcm, *rtd_dpcm = NULL;
158775 +       /*
158776 +        * The following loop runs only for the playback stream.
158777 +        * On this platform, there is only one playback device on every SSP.
158778 +        */
158779 +       for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_PLAYBACK, dpcm) {
158780 +               rtd_dpcm = dpcm;
158781 +               break;
158782 +       }
158784 +       /*
158785 +        * The following loop runs only for the capture stream.
158786 +        * On this platform, there is only one capture device on every SSP.
158787 +        */
158788 +       for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_CAPTURE, dpcm) {
158789 +               rtd_dpcm = dpcm;
158790 +               break;
158791 +       }
158793 +       if (!rtd_dpcm)
158794 +               return -EINVAL;
158796 +       /*
158797 +        * The above 2 loops are mutually exclusive based on the stream direction,
158798 +        * thus the rtd_dpcm variable will never be overwritten.
158799 +        */
158800         /*
158801          * Topology for kblda7219m98373 & kblmax98373 supports only S24_LE,
158802          * where as kblda7219m98927 & kblmax98927 supports S16_LE by default.
158803 @@ -309,9 +331,9 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
158804         /*
158805          * The ADSP will convert the FE rate to 48k, stereo, 24 bit
158806          */
158807 -       if (!strcmp(fe_dai_link->name, "Kbl Audio Port") ||
158808 -           !strcmp(fe_dai_link->name, "Kbl Audio Headset Playback") ||
158809 -           !strcmp(fe_dai_link->name, "Kbl Audio Capture Port")) {
158810 +       if (!strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Port") ||
158811 +           !strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Headset Playback") ||
158812 +           !strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Capture Port")) {
158813                 rate->min = rate->max = 48000;
158814                 chan->min = chan->max = 2;
158815                 snd_mask_none(fmt);
158816 @@ -322,7 +344,7 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
158817          * The speaker on the SSP0 supports S16_LE and not S24_LE.
158818          * thus changing the mask here
158819          */
158820 -       if (!strcmp(be_dai_link->name, "SSP0-Codec"))
158821 +       if (!strcmp(rtd_dpcm->be->dai_link->name, "SSP0-Codec"))
158822                 snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
158824         return 0;
158825 diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
158826 index 8adce6417b02..ecd3f90f4bbe 100644
158827 --- a/sound/soc/intel/boards/sof_sdw.c
158828 +++ b/sound/soc/intel/boards/sof_sdw.c
158829 @@ -187,6 +187,17 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
158830                                         SOF_RT715_DAI_ID_FIX |
158831                                         SOF_SDW_FOUR_SPK),
158832         },
158833 +       /* AlderLake devices */
158834 +       {
158835 +               .callback = sof_sdw_quirk_cb,
158836 +               .matches = {
158837 +                       DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
158838 +                       DMI_MATCH(DMI_PRODUCT_NAME, "Alder Lake Client Platform"),
158839 +               },
158840 +               .driver_data = (void *)(SOF_RT711_JD_SRC_JD1 |
158841 +                                       SOF_SDW_TGL_HDMI |
158842 +                                       SOF_SDW_PCH_DMIC),
158843 +       },
158844         {}
158847 diff --git a/sound/soc/intel/boards/sof_wm8804.c b/sound/soc/intel/boards/sof_wm8804.c
158848 index a46ba13e8eb0..6a181e45143d 100644
158849 --- a/sound/soc/intel/boards/sof_wm8804.c
158850 +++ b/sound/soc/intel/boards/sof_wm8804.c
158851 @@ -124,7 +124,11 @@ static int sof_wm8804_hw_params(struct snd_pcm_substream *substream,
158852         }
158854         snd_soc_dai_set_clkdiv(codec_dai, WM8804_MCLK_DIV, mclk_div);
158855 -       snd_soc_dai_set_pll(codec_dai, 0, 0, sysclk, mclk_freq);
158856 +       ret = snd_soc_dai_set_pll(codec_dai, 0, 0, sysclk, mclk_freq);
158857 +       if (ret < 0) {
158858 +               dev_err(rtd->card->dev, "Failed to set WM8804 PLL\n");
158859 +               return ret;
158860 +       }
158862         ret = snd_soc_dai_set_sysclk(codec_dai, WM8804_TX_CLKSRC_PLL,
158863                                      sysclk, SND_SOC_CLOCK_OUT);
158864 diff --git a/sound/soc/intel/skylake/Makefile b/sound/soc/intel/skylake/Makefile
158865 index dd39149b89b1..1c4649bccec5 100644
158866 --- a/sound/soc/intel/skylake/Makefile
158867 +++ b/sound/soc/intel/skylake/Makefile
158868 @@ -7,7 +7,7 @@ ifdef CONFIG_DEBUG_FS
158869    snd-soc-skl-objs += skl-debug.o
158870  endif
158872 -obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += snd-soc-skl.o
158873 +obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON) += snd-soc-skl.o
158875  #Skylake Clock device support
158876  snd-soc-skl-ssp-clk-objs := skl-ssp-clk.o
158877 diff --git a/sound/soc/qcom/qdsp6/q6afe-clocks.c b/sound/soc/qcom/qdsp6/q6afe-clocks.c
158878 index f0362f061652..9431656283cd 100644
158879 --- a/sound/soc/qcom/qdsp6/q6afe-clocks.c
158880 +++ b/sound/soc/qcom/qdsp6/q6afe-clocks.c
158881 @@ -11,33 +11,29 @@
158882  #include <linux/slab.h>
158883  #include "q6afe.h"
158885 -#define Q6AFE_CLK(id) &(struct q6afe_clk) {            \
158886 +#define Q6AFE_CLK(id) {                                        \
158887                 .clk_id = id,                           \
158888                 .afe_clk_id     = Q6AFE_##id,           \
158889                 .name = #id,                            \
158890 -               .attributes = LPASS_CLK_ATTRIBUTE_COUPLE_NO, \
158891                 .rate = 19200000,                       \
158892 -               .hw.init = &(struct clk_init_data) {    \
158893 -                       .ops = &clk_q6afe_ops,          \
158894 -                       .name = #id,                    \
158895 -               },                                      \
158896         }
158898 -#define Q6AFE_VOTE_CLK(id, blkid, n) &(struct q6afe_clk) { \
158899 +#define Q6AFE_VOTE_CLK(id, blkid, n) {                 \
158900                 .clk_id = id,                           \
158901                 .afe_clk_id = blkid,                    \
158902 -               .name = #n,                             \
158903 -               .hw.init = &(struct clk_init_data) {    \
158904 -                       .ops = &clk_vote_q6afe_ops,     \
158905 -                       .name = #id,                    \
158906 -               },                                      \
158907 +               .name = n,                              \
158908         }
158910 -struct q6afe_clk {
158911 -       struct device *dev;
158912 +struct q6afe_clk_init {
158913         int clk_id;
158914         int afe_clk_id;
158915         char *name;
158916 +       int rate;
158919 +struct q6afe_clk {
158920 +       struct device *dev;
158921 +       int afe_clk_id;
158922         int attributes;
158923         int rate;
158924         uint32_t handle;
158925 @@ -48,8 +44,7 @@ struct q6afe_clk {
158927  struct q6afe_cc {
158928         struct device *dev;
158929 -       struct q6afe_clk **clks;
158930 -       int num_clks;
158931 +       struct q6afe_clk *clks[Q6AFE_MAX_CLK_ID];
158934  static int clk_q6afe_prepare(struct clk_hw *hw)
158935 @@ -105,7 +100,7 @@ static int clk_vote_q6afe_block(struct clk_hw *hw)
158936         struct q6afe_clk *clk = to_q6afe_clk(hw);
158938         return q6afe_vote_lpass_core_hw(clk->dev, clk->afe_clk_id,
158939 -                                       clk->name, &clk->handle);
158940 +                                       clk_hw_get_name(&clk->hw), &clk->handle);
158943  static void clk_unvote_q6afe_block(struct clk_hw *hw)
158944 @@ -120,84 +115,76 @@ static const struct clk_ops clk_vote_q6afe_ops = {
158945         .unprepare      = clk_unvote_q6afe_block,
158948 -static struct q6afe_clk *q6afe_clks[Q6AFE_MAX_CLK_ID] = {
158949 -       [LPASS_CLK_ID_PRI_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_MI2S_IBIT),
158950 -       [LPASS_CLK_ID_PRI_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_MI2S_EBIT),
158951 -       [LPASS_CLK_ID_SEC_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_MI2S_IBIT),
158952 -       [LPASS_CLK_ID_SEC_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_MI2S_EBIT),
158953 -       [LPASS_CLK_ID_TER_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_MI2S_IBIT),
158954 -       [LPASS_CLK_ID_TER_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_MI2S_EBIT),
158955 -       [LPASS_CLK_ID_QUAD_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_MI2S_IBIT),
158956 -       [LPASS_CLK_ID_QUAD_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_MI2S_EBIT),
158957 -       [LPASS_CLK_ID_SPEAKER_I2S_IBIT] =
158958 -                               Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_IBIT),
158959 -       [LPASS_CLK_ID_SPEAKER_I2S_EBIT] =
158960 -                               Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_EBIT),
158961 -       [LPASS_CLK_ID_SPEAKER_I2S_OSR] =
158962 -                               Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_OSR),
158963 -       [LPASS_CLK_ID_QUI_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_IBIT),
158964 -       [LPASS_CLK_ID_QUI_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_EBIT),
158965 -       [LPASS_CLK_ID_SEN_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEN_MI2S_IBIT),
158966 -       [LPASS_CLK_ID_SEN_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEN_MI2S_EBIT),
158967 -       [LPASS_CLK_ID_INT0_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT0_MI2S_IBIT),
158968 -       [LPASS_CLK_ID_INT1_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT1_MI2S_IBIT),
158969 -       [LPASS_CLK_ID_INT2_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT2_MI2S_IBIT),
158970 -       [LPASS_CLK_ID_INT3_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT3_MI2S_IBIT),
158971 -       [LPASS_CLK_ID_INT4_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT4_MI2S_IBIT),
158972 -       [LPASS_CLK_ID_INT5_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT5_MI2S_IBIT),
158973 -       [LPASS_CLK_ID_INT6_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT6_MI2S_IBIT),
158974 -       [LPASS_CLK_ID_QUI_MI2S_OSR] = Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_OSR),
158975 -       [LPASS_CLK_ID_PRI_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_PCM_IBIT),
158976 -       [LPASS_CLK_ID_PRI_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_PCM_EBIT),
158977 -       [LPASS_CLK_ID_SEC_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_PCM_IBIT),
158978 -       [LPASS_CLK_ID_SEC_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_PCM_EBIT),
158979 -       [LPASS_CLK_ID_TER_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_PCM_IBIT),
158980 -       [LPASS_CLK_ID_TER_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_PCM_EBIT),
158981 -       [LPASS_CLK_ID_QUAD_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_PCM_IBIT),
158982 -       [LPASS_CLK_ID_QUAD_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_PCM_EBIT),
158983 -       [LPASS_CLK_ID_QUIN_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_PCM_IBIT),
158984 -       [LPASS_CLK_ID_QUIN_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_PCM_EBIT),
158985 -       [LPASS_CLK_ID_QUI_PCM_OSR] = Q6AFE_CLK(LPASS_CLK_ID_QUI_PCM_OSR),
158986 -       [LPASS_CLK_ID_PRI_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_TDM_IBIT),
158987 -       [LPASS_CLK_ID_PRI_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_TDM_EBIT),
158988 -       [LPASS_CLK_ID_SEC_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_TDM_IBIT),
158989 -       [LPASS_CLK_ID_SEC_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_TDM_EBIT),
158990 -       [LPASS_CLK_ID_TER_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_TDM_IBIT),
158991 -       [LPASS_CLK_ID_TER_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_TDM_EBIT),
158992 -       [LPASS_CLK_ID_QUAD_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_TDM_IBIT),
158993 -       [LPASS_CLK_ID_QUAD_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_TDM_EBIT),
158994 -       [LPASS_CLK_ID_QUIN_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_IBIT),
158995 -       [LPASS_CLK_ID_QUIN_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_EBIT),
158996 -       [LPASS_CLK_ID_QUIN_TDM_OSR] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_OSR),
158997 -       [LPASS_CLK_ID_MCLK_1] = Q6AFE_CLK(LPASS_CLK_ID_MCLK_1),
158998 -       [LPASS_CLK_ID_MCLK_2] = Q6AFE_CLK(LPASS_CLK_ID_MCLK_2),
158999 -       [LPASS_CLK_ID_MCLK_3] = Q6AFE_CLK(LPASS_CLK_ID_MCLK_3),
159000 -       [LPASS_CLK_ID_MCLK_4] = Q6AFE_CLK(LPASS_CLK_ID_MCLK_4),
159001 -       [LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE] =
159002 -               Q6AFE_CLK(LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE),
159003 -       [LPASS_CLK_ID_INT_MCLK_0] = Q6AFE_CLK(LPASS_CLK_ID_INT_MCLK_0),
159004 -       [LPASS_CLK_ID_INT_MCLK_1] = Q6AFE_CLK(LPASS_CLK_ID_INT_MCLK_1),
159005 -       [LPASS_CLK_ID_WSA_CORE_MCLK] = Q6AFE_CLK(LPASS_CLK_ID_WSA_CORE_MCLK),
159006 -       [LPASS_CLK_ID_WSA_CORE_NPL_MCLK] =
159007 -                               Q6AFE_CLK(LPASS_CLK_ID_WSA_CORE_NPL_MCLK),
159008 -       [LPASS_CLK_ID_VA_CORE_MCLK] = Q6AFE_CLK(LPASS_CLK_ID_VA_CORE_MCLK),
159009 -       [LPASS_CLK_ID_TX_CORE_MCLK] = Q6AFE_CLK(LPASS_CLK_ID_TX_CORE_MCLK),
159010 -       [LPASS_CLK_ID_TX_CORE_NPL_MCLK] =
159011 -                       Q6AFE_CLK(LPASS_CLK_ID_TX_CORE_NPL_MCLK),
159012 -       [LPASS_CLK_ID_RX_CORE_MCLK] = Q6AFE_CLK(LPASS_CLK_ID_RX_CORE_MCLK),
159013 -       [LPASS_CLK_ID_RX_CORE_NPL_MCLK] =
159014 -                               Q6AFE_CLK(LPASS_CLK_ID_RX_CORE_NPL_MCLK),
159015 -       [LPASS_CLK_ID_VA_CORE_2X_MCLK] =
159016 -                               Q6AFE_CLK(LPASS_CLK_ID_VA_CORE_2X_MCLK),
159017 -       [LPASS_HW_AVTIMER_VOTE] = Q6AFE_VOTE_CLK(LPASS_HW_AVTIMER_VOTE,
159018 -                                                Q6AFE_LPASS_CORE_AVTIMER_BLOCK,
159019 -                                                "LPASS_AVTIMER_MACRO"),
159020 -       [LPASS_HW_MACRO_VOTE] = Q6AFE_VOTE_CLK(LPASS_HW_MACRO_VOTE,
159021 -                                               Q6AFE_LPASS_CORE_HW_MACRO_BLOCK,
159022 -                                               "LPASS_HW_MACRO"),
159023 -       [LPASS_HW_DCODEC_VOTE] = Q6AFE_VOTE_CLK(LPASS_HW_DCODEC_VOTE,
159024 -                                       Q6AFE_LPASS_CORE_HW_DCODEC_BLOCK,
159025 -                                       "LPASS_HW_DCODEC"),
159026 +static const struct q6afe_clk_init q6afe_clks[] = {
159027 +       Q6AFE_CLK(LPASS_CLK_ID_PRI_MI2S_IBIT),
159028 +       Q6AFE_CLK(LPASS_CLK_ID_PRI_MI2S_EBIT),
159029 +       Q6AFE_CLK(LPASS_CLK_ID_SEC_MI2S_IBIT),
159030 +       Q6AFE_CLK(LPASS_CLK_ID_SEC_MI2S_EBIT),
159031 +       Q6AFE_CLK(LPASS_CLK_ID_TER_MI2S_IBIT),
159032 +       Q6AFE_CLK(LPASS_CLK_ID_TER_MI2S_EBIT),
159033 +       Q6AFE_CLK(LPASS_CLK_ID_QUAD_MI2S_IBIT),
159034 +       Q6AFE_CLK(LPASS_CLK_ID_QUAD_MI2S_EBIT),
159035 +       Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_IBIT),
159036 +       Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_EBIT),
159037 +       Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_OSR),
159038 +       Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_IBIT),
159039 +       Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_EBIT),
159040 +       Q6AFE_CLK(LPASS_CLK_ID_SEN_MI2S_IBIT),
159041 +       Q6AFE_CLK(LPASS_CLK_ID_SEN_MI2S_EBIT),
159042 +       Q6AFE_CLK(LPASS_CLK_ID_INT0_MI2S_IBIT),
159043 +       Q6AFE_CLK(LPASS_CLK_ID_INT1_MI2S_IBIT),
159044 +       Q6AFE_CLK(LPASS_CLK_ID_INT2_MI2S_IBIT),
159045 +       Q6AFE_CLK(LPASS_CLK_ID_INT3_MI2S_IBIT),
159046 +       Q6AFE_CLK(LPASS_CLK_ID_INT4_MI2S_IBIT),
159047 +       Q6AFE_CLK(LPASS_CLK_ID_INT5_MI2S_IBIT),
159048 +       Q6AFE_CLK(LPASS_CLK_ID_INT6_MI2S_IBIT),
159049 +       Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_OSR),
159050 +       Q6AFE_CLK(LPASS_CLK_ID_PRI_PCM_IBIT),
159051 +       Q6AFE_CLK(LPASS_CLK_ID_PRI_PCM_EBIT),
159052 +       Q6AFE_CLK(LPASS_CLK_ID_SEC_PCM_IBIT),
159053 +       Q6AFE_CLK(LPASS_CLK_ID_SEC_PCM_EBIT),
159054 +       Q6AFE_CLK(LPASS_CLK_ID_TER_PCM_IBIT),
159055 +       Q6AFE_CLK(LPASS_CLK_ID_TER_PCM_EBIT),
159056 +       Q6AFE_CLK(LPASS_CLK_ID_QUAD_PCM_IBIT),
159057 +       Q6AFE_CLK(LPASS_CLK_ID_QUAD_PCM_EBIT),
159058 +       Q6AFE_CLK(LPASS_CLK_ID_QUIN_PCM_IBIT),
159059 +       Q6AFE_CLK(LPASS_CLK_ID_QUIN_PCM_EBIT),
159060 +       Q6AFE_CLK(LPASS_CLK_ID_QUI_PCM_OSR),
159061 +       Q6AFE_CLK(LPASS_CLK_ID_PRI_TDM_IBIT),
159062 +       Q6AFE_CLK(LPASS_CLK_ID_PRI_TDM_EBIT),
159063 +       Q6AFE_CLK(LPASS_CLK_ID_SEC_TDM_IBIT),
159064 +       Q6AFE_CLK(LPASS_CLK_ID_SEC_TDM_EBIT),
159065 +       Q6AFE_CLK(LPASS_CLK_ID_TER_TDM_IBIT),
159066 +       Q6AFE_CLK(LPASS_CLK_ID_TER_TDM_EBIT),
159067 +       Q6AFE_CLK(LPASS_CLK_ID_QUAD_TDM_IBIT),
159068 +       Q6AFE_CLK(LPASS_CLK_ID_QUAD_TDM_EBIT),
159069 +       Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_IBIT),
159070 +       Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_EBIT),
159071 +       Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_OSR),
159072 +       Q6AFE_CLK(LPASS_CLK_ID_MCLK_1),
159073 +       Q6AFE_CLK(LPASS_CLK_ID_MCLK_2),
159074 +       Q6AFE_CLK(LPASS_CLK_ID_MCLK_3),
159075 +       Q6AFE_CLK(LPASS_CLK_ID_MCLK_4),
159076 +       Q6AFE_CLK(LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE),
159077 +       Q6AFE_CLK(LPASS_CLK_ID_INT_MCLK_0),
159078 +       Q6AFE_CLK(LPASS_CLK_ID_INT_MCLK_1),
159079 +       Q6AFE_CLK(LPASS_CLK_ID_WSA_CORE_MCLK),
159080 +       Q6AFE_CLK(LPASS_CLK_ID_WSA_CORE_NPL_MCLK),
159081 +       Q6AFE_CLK(LPASS_CLK_ID_VA_CORE_MCLK),
159082 +       Q6AFE_CLK(LPASS_CLK_ID_TX_CORE_MCLK),
159083 +       Q6AFE_CLK(LPASS_CLK_ID_TX_CORE_NPL_MCLK),
159084 +       Q6AFE_CLK(LPASS_CLK_ID_RX_CORE_MCLK),
159085 +       Q6AFE_CLK(LPASS_CLK_ID_RX_CORE_NPL_MCLK),
159086 +       Q6AFE_CLK(LPASS_CLK_ID_VA_CORE_2X_MCLK),
159087 +       Q6AFE_VOTE_CLK(LPASS_HW_AVTIMER_VOTE,
159088 +                      Q6AFE_LPASS_CORE_AVTIMER_BLOCK,
159089 +                      "LPASS_AVTIMER_MACRO"),
159090 +       Q6AFE_VOTE_CLK(LPASS_HW_MACRO_VOTE,
159091 +                      Q6AFE_LPASS_CORE_HW_MACRO_BLOCK,
159092 +                      "LPASS_HW_MACRO"),
159093 +       Q6AFE_VOTE_CLK(LPASS_HW_DCODEC_VOTE,
159094 +                      Q6AFE_LPASS_CORE_HW_DCODEC_BLOCK,
159095 +                      "LPASS_HW_DCODEC"),
159098  static struct clk_hw *q6afe_of_clk_hw_get(struct of_phandle_args *clkspec,
159099 @@ -207,7 +194,7 @@ static struct clk_hw *q6afe_of_clk_hw_get(struct of_phandle_args *clkspec,
159100         unsigned int idx = clkspec->args[0];
159101         unsigned int attr = clkspec->args[1];
159103 -       if (idx >= cc->num_clks || attr > LPASS_CLK_ATTRIBUTE_COUPLE_DIVISOR) {
159104 +       if (idx >= Q6AFE_MAX_CLK_ID || attr > LPASS_CLK_ATTRIBUTE_COUPLE_DIVISOR) {
159105                 dev_err(cc->dev, "Invalid clk specifier (%d, %d)\n", idx, attr);
159106                 return ERR_PTR(-EINVAL);
159107         }
159108 @@ -230,20 +217,36 @@ static int q6afe_clock_dev_probe(struct platform_device *pdev)
159109         if (!cc)
159110                 return -ENOMEM;
159112 -       cc->clks = &q6afe_clks[0];
159113 -       cc->num_clks = ARRAY_SIZE(q6afe_clks);
159114 +       cc->dev = dev;
159115         for (i = 0; i < ARRAY_SIZE(q6afe_clks); i++) {
159116 -               if (!q6afe_clks[i])
159117 -                       continue;
159118 +               unsigned int id = q6afe_clks[i].clk_id;
159119 +               struct clk_init_data init = {
159120 +                       .name =  q6afe_clks[i].name,
159121 +               };
159122 +               struct q6afe_clk *clk;
159124 +               clk = devm_kzalloc(dev, sizeof(*clk), GFP_KERNEL);
159125 +               if (!clk)
159126 +                       return -ENOMEM;
159128 +               clk->dev = dev;
159129 +               clk->afe_clk_id = q6afe_clks[i].afe_clk_id;
159130 +               clk->rate = q6afe_clks[i].rate;
159131 +               clk->hw.init = &init;
159133 +               if (clk->rate)
159134 +                       init.ops = &clk_q6afe_ops;
159135 +               else
159136 +                       init.ops = &clk_vote_q6afe_ops;
159138 -               q6afe_clks[i]->dev = dev;
159139 +               cc->clks[id] = clk;
159141 -               ret = devm_clk_hw_register(dev, &q6afe_clks[i]->hw);
159142 +               ret = devm_clk_hw_register(dev, &clk->hw);
159143                 if (ret)
159144                         return ret;
159145         }
159147 -       ret = of_clk_add_hw_provider(dev->of_node, q6afe_of_clk_hw_get, cc);
159148 +       ret = devm_of_clk_add_hw_provider(dev, q6afe_of_clk_hw_get, cc);
159149         if (ret)
159150                 return ret;
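
The q6afe-clocks rework above replaces a static array of pre-built, mutable clock objects with a const template table that probe() instantiates per device, picking the normal ops for fixed-rate clocks and the vote ops for rate-0 entries. A standalone C model of that pattern; names loosely follow the patch, and the ops strings stand in for clk_ops pointers.

#include <stdio.h>
#include <stdlib.h>

struct clk_init { int id; const char *name; int rate; };
struct clk_inst { const char *name; int rate; const char *ops; };

static const struct clk_init inits[] = {
	{ 0, "PRI_MI2S_IBIT", 19200000 },   /* fixed-rate clock   */
	{ 1, "LPASS_HW_MACRO",        0 },  /* rate 0 => vote clk */
};

#define MAX_CLK 2

int main(void)
{
	struct clk_inst *clks[MAX_CLK] = { 0 };

	/* Instantiate the const table: nothing global is mutated, so
	 * multiple device instances stay independent. */
	for (unsigned i = 0; i < sizeof inits / sizeof *inits; i++) {
		struct clk_inst *c = calloc(1, sizeof *c);
		if (!c)
			return 1;
		c->name = inits[i].name;
		c->rate = inits[i].rate;
		/* same dispatch as the patch: rate set -> normal ops,
		 * otherwise the vote/unvote ops */
		c->ops = c->rate ? "clk_q6afe_ops" : "clk_vote_q6afe_ops";
		clks[inits[i].id] = c;
	}

	for (unsigned i = 0; i < MAX_CLK; i++)
		printf("%s -> %s\n", clks[i]->name, clks[i]->ops);

	for (unsigned i = 0; i < MAX_CLK; i++)
		free(clks[i]);
	return 0;
}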
159152 diff --git a/sound/soc/qcom/qdsp6/q6afe.c b/sound/soc/qcom/qdsp6/q6afe.c
159153 index cad1cd1bfdf0..4327b72162ec 100644
159154 --- a/sound/soc/qcom/qdsp6/q6afe.c
159155 +++ b/sound/soc/qcom/qdsp6/q6afe.c
159156 @@ -1681,7 +1681,7 @@ int q6afe_unvote_lpass_core_hw(struct device *dev, uint32_t hw_block_id,
159157  EXPORT_SYMBOL(q6afe_unvote_lpass_core_hw);
159159  int q6afe_vote_lpass_core_hw(struct device *dev, uint32_t hw_block_id,
159160 -                            char *client_name, uint32_t *client_handle)
159161 +                            const char *client_name, uint32_t *client_handle)
159163         struct q6afe *afe = dev_get_drvdata(dev->parent);
159164         struct afe_cmd_remote_lpass_core_hw_vote_request *vote_cfg;
159165 diff --git a/sound/soc/qcom/qdsp6/q6afe.h b/sound/soc/qcom/qdsp6/q6afe.h
159166 index 22e10269aa10..3845b56c0ed3 100644
159167 --- a/sound/soc/qcom/qdsp6/q6afe.h
159168 +++ b/sound/soc/qcom/qdsp6/q6afe.h
159169 @@ -236,7 +236,7 @@ int q6afe_port_set_sysclk(struct q6afe_port *port, int clk_id,
159170  int q6afe_set_lpass_clock(struct device *dev, int clk_id, int clk_src,
159171                           int clk_root, unsigned int freq);
159172  int q6afe_vote_lpass_core_hw(struct device *dev, uint32_t hw_block_id,
159173 -                            char *client_name, uint32_t *client_handle);
159174 +                            const char *client_name, uint32_t *client_handle);
159175  int q6afe_unvote_lpass_core_hw(struct device *dev, uint32_t hw_block_id,
159176                                uint32_t client_handle);
159177  #endif /* __Q6AFE_H__ */
159178 diff --git a/sound/soc/samsung/tm2_wm5110.c b/sound/soc/samsung/tm2_wm5110.c
159179 index 9300fef9bf26..125e07f65d2b 100644
159180 --- a/sound/soc/samsung/tm2_wm5110.c
159181 +++ b/sound/soc/samsung/tm2_wm5110.c
159182 @@ -553,7 +553,7 @@ static int tm2_probe(struct platform_device *pdev)
159184                 ret = of_parse_phandle_with_args(dev->of_node, "i2s-controller",
159185                                                  cells_name, i, &args);
159186 -               if (!args.np) {
159187 +               if (ret) {
159188                         dev_err(dev, "i2s-controller property parse error: %d\n", i);
159189                         ret = -EINVAL;
159190                         goto dai_node_put;
159191 diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
159192 index 1029d8d9d800..d2b4632d9c2a 100644
159193 --- a/sound/soc/sh/rcar/core.c
159194 +++ b/sound/soc/sh/rcar/core.c
159195 @@ -1428,8 +1428,75 @@ static int rsnd_hw_params(struct snd_soc_component *component,
159196                 }
159197                 if (io->converted_chan)
159198                         dev_dbg(dev, "convert channels = %d\n", io->converted_chan);
159199 -               if (io->converted_rate)
159200 +               if (io->converted_rate) {
159201 +                       /*
159202 +                        * SRC supports conversion rates from params_rate(hw_params)/k_down
159203 +                        * up to params_rate(hw_params)*k_up, where k_up is always 6 and
159204 +                        * k_down depends on the number of channels and the SRC unit.
159205 +                        * So every SRC unit can upsample audio up to 6 times regardless
159206 +                        * of its number of channels, and every SRC unit can downsample
159207 +                        * 2-channel audio up to 6 times as well.
159208 +                        */
159209 +                       int k_up = 6;
159210 +                       int k_down = 6;
159211 +                       int channel;
159212 +                       struct rsnd_mod *src_mod = rsnd_io_to_mod_src(io);
159214                         dev_dbg(dev, "convert rate     = %d\n", io->converted_rate);
159216 +                       channel = io->converted_chan ? io->converted_chan :
159217 +                                 params_channels(hw_params);
159219 +                       switch (rsnd_mod_id(src_mod)) {
159220 +                       /*
159221 +                        * SRC0 can downsample 4, 6 and 8 channel audio up to 4 times.
159222 +                        * SRC1, SRC3 and SRC4 can downsample 4 channel audio
159223 +                        * up to 4 times.
159224 +                        * SRC1, SRC3 and SRC4 can downsample 6 and 8 channel audio
159225 +                        * no more than twice.
159226 +                        */
159227 +                       case 1:
159228 +                       case 3:
159229 +                       case 4:
159230 +                               if (channel > 4) {
159231 +                                       k_down = 2;
159232 +                                       break;
159233 +                               }
159234 +                               fallthrough;
159235 +                       case 0:
159236 +                               if (channel > 2)
159237 +                                       k_down = 4;
159238 +                               break;
159240 +                       /* Other SRC units do not support more than 2 channels */
159241 +                       default:
159242 +                               if (channel > 2)
159243 +                                       return -EINVAL;
159244 +                       }
159246 +                       if (params_rate(hw_params) > io->converted_rate * k_down) {
159247 +                               hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->min =
159248 +                                       io->converted_rate * k_down;
159249 +                               hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->max =
159250 +                                       io->converted_rate * k_down;
159251 +                               hw_params->cmask |= SNDRV_PCM_HW_PARAM_RATE;
159252 +                       } else if (params_rate(hw_params) * k_up < io->converted_rate) {
159253 +                               hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->min =
159254 +                                       (io->converted_rate + k_up - 1) / k_up;
159255 +                               hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->max =
159256 +                                       (io->converted_rate + k_up - 1) / k_up;
159257 +                               hw_params->cmask |= SNDRV_PCM_HW_PARAM_RATE;
159258 +                       }
159260 +                       /*
159261 +                        * TBD: Max SRC input and output rates also depend on number
159262 +                        * of channels and SRC unit:
159263 +                        * SRC1, SRC3 and SRC4 do not support more than 128kHz
159264 +                        * for 6 channel and 96kHz for 8 channel audio.
159265 +                        * Perhaps this function should return -EINVAL if the input or
159266 +                        * the output rate exceeds these limits.
159267 +                        */
159268 +               }
159269         }
159271         return rsnd_dai_call(hw_params, io, substream, hw_params);
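The hunk above pins the ALSA rate interval so the SRC never has to upsample by more than k_up or downsample by more than k_down. A minimal standalone sketch of that clamping arithmetic, with hypothetical rates and no ALSA types:

    #include <stdio.h>

    /* Clamp a requested stream rate against a fixed converter rate,
     * mirroring the k_up/k_down bounds in the hunk: the stream rate
     * may be at most k_down times the converted rate, and at least
     * converted_rate / k_up (rounded up). */
    static unsigned int clamp_rate(unsigned int requested,
                                   unsigned int converted_rate,
                                   unsigned int k_up, unsigned int k_down)
    {
            if (requested > converted_rate * k_down)
                    return converted_rate * k_down;
            if (requested * k_up < converted_rate)
                    return (converted_rate + k_up - 1) / k_up; /* ceiling */
            return requested;
    }

    int main(void)
    {
            /* 6-channel audio on SRC1: downsampling is limited to 2x */
            printf("%u\n", clamp_rate(192000, 48000, 6, 2)); /* 96000 */
            /* upsampling is limited to 6x: 8000 * 6 < 96000 */
            printf("%u\n", clamp_rate(8000, 96000, 6, 6));   /* 16000 */
            return 0;
    }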
159272 diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
159273 index d0ded427a836..042207c11651 100644
159274 --- a/sound/soc/sh/rcar/ssi.c
159275 +++ b/sound/soc/sh/rcar/ssi.c
159276 @@ -507,10 +507,15 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
159277                          struct rsnd_priv *priv)
159279         struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
159280 +       int ret;
159282         if (!rsnd_ssi_is_run_mods(mod, io))
159283                 return 0;
159285 +       ret = rsnd_ssi_master_clk_start(mod, io);
159286 +       if (ret < 0)
159287 +               return ret;
159289         ssi->usrcnt++;
159291         rsnd_mod_power_on(mod);
159292 @@ -792,7 +797,6 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
159293                                                        SSI_SYS_STATUS(i * 2),
159294                                                        0xf << (id * 4));
159295                                         stop = true;
159296 -                                       break;
159297                                 }
159298                         }
159299                         break;
159300 @@ -810,7 +814,6 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
159301                                                 SSI_SYS_STATUS((i * 2) + 1),
159302                                                 0xf << 4);
159303                                         stop = true;
159304 -                                       break;
159305                                 }
159306                         }
159307                         break;
159308 @@ -1060,13 +1063,6 @@ static int rsnd_ssi_pio_pointer(struct rsnd_mod *mod,
159309         return 0;
159312 -static int rsnd_ssi_prepare(struct rsnd_mod *mod,
159313 -                           struct rsnd_dai_stream *io,
159314 -                           struct rsnd_priv *priv)
159316 -       return rsnd_ssi_master_clk_start(mod, io);
159319  static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
159320         .name           = SSI_NAME,
159321         .probe          = rsnd_ssi_common_probe,
159322 @@ -1079,7 +1075,6 @@ static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
159323         .pointer        = rsnd_ssi_pio_pointer,
159324         .pcm_new        = rsnd_ssi_pcm_new,
159325         .hw_params      = rsnd_ssi_hw_params,
159326 -       .prepare        = rsnd_ssi_prepare,
159327         .get_status     = rsnd_ssi_get_status,
159330 @@ -1166,7 +1161,6 @@ static struct rsnd_mod_ops rsnd_ssi_dma_ops = {
159331         .pcm_new        = rsnd_ssi_pcm_new,
159332         .fallback       = rsnd_ssi_fallback,
159333         .hw_params      = rsnd_ssi_hw_params,
159334 -       .prepare        = rsnd_ssi_prepare,
159335         .get_status     = rsnd_ssi_get_status,
159338 diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
159339 index 246a5e32e22a..b4810266f5e5 100644
159340 --- a/sound/soc/soc-compress.c
159341 +++ b/sound/soc/soc-compress.c
159342 @@ -153,7 +153,9 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
159343         fe->dpcm[stream].state = SND_SOC_DPCM_STATE_OPEN;
159344         fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
159346 +       mutex_lock_nested(&fe->card->pcm_mutex, fe->card->pcm_subclass);
159347         snd_soc_runtime_activate(fe, stream);
159348 +       mutex_unlock(&fe->card->pcm_mutex);
159350         mutex_unlock(&fe->card->mutex);
159352 @@ -181,7 +183,9 @@ static int soc_compr_free_fe(struct snd_compr_stream *cstream)
159354         mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
159356 +       mutex_lock_nested(&fe->card->pcm_mutex, fe->card->pcm_subclass);
159357         snd_soc_runtime_deactivate(fe, stream);
159358 +       mutex_unlock(&fe->card->pcm_mutex);
159360         fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
159362 diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
159363 index b005f9eadd71..2f75a449c45c 100644
159364 --- a/sound/soc/soc-dapm.c
159365 +++ b/sound/soc/soc-dapm.c
159366 @@ -154,7 +154,7 @@ static void dapm_assert_locked(struct snd_soc_dapm_context *dapm)
159367  static void pop_wait(u32 pop_time)
159369         if (pop_time)
159370 -               schedule_timeout_uninterruptible(msecs_to_jiffies(pop_time));
159371 +               schedule_msec_hrtimeout_uninterruptible(pop_time);
159374  __printf(3, 4)
159375 diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
159376 index 6740df541508..3d22c1be6f3d 100644
159377 --- a/sound/soc/tegra/tegra30_i2s.c
159378 +++ b/sound/soc/tegra/tegra30_i2s.c
159379 @@ -58,8 +58,18 @@ static int tegra30_i2s_runtime_resume(struct device *dev)
159380         }
159382         regcache_cache_only(i2s->regmap, false);
159383 +       regcache_mark_dirty(i2s->regmap);
159385 +       ret = regcache_sync(i2s->regmap);
159386 +       if (ret)
159387 +               goto disable_clocks;
159389         return 0;
159391 +disable_clocks:
159392 +       clk_disable_unprepare(i2s->clk_i2s);
159394 +       return ret;
159397  static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
159398 @@ -551,37 +561,11 @@ static int tegra30_i2s_platform_remove(struct platform_device *pdev)
159399         return 0;
159402 -#ifdef CONFIG_PM_SLEEP
159403 -static int tegra30_i2s_suspend(struct device *dev)
159405 -       struct tegra30_i2s *i2s = dev_get_drvdata(dev);
159407 -       regcache_mark_dirty(i2s->regmap);
159409 -       return 0;
159412 -static int tegra30_i2s_resume(struct device *dev)
159414 -       struct tegra30_i2s *i2s = dev_get_drvdata(dev);
159415 -       int ret;
159417 -       ret = pm_runtime_get_sync(dev);
159418 -       if (ret < 0) {
159419 -               pm_runtime_put(dev);
159420 -               return ret;
159421 -       }
159422 -       ret = regcache_sync(i2s->regmap);
159423 -       pm_runtime_put(dev);
159425 -       return ret;
159427 -#endif
159429  static const struct dev_pm_ops tegra30_i2s_pm_ops = {
159430         SET_RUNTIME_PM_OPS(tegra30_i2s_runtime_suspend,
159431                            tegra30_i2s_runtime_resume, NULL)
159432 -       SET_SYSTEM_SLEEP_PM_OPS(tegra30_i2s_suspend, tegra30_i2s_resume)
159433 +       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
159434 +                               pm_runtime_force_resume)
159437  static struct platform_driver tegra30_i2s_driver = {
159438 diff --git a/sound/usb/card.c b/sound/usb/card.c
159439 index 0826a437f8fc..7b7526d3a56e 100644
159440 --- a/sound/usb/card.c
159441 +++ b/sound/usb/card.c
159442 @@ -181,9 +181,8 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
159443                                 ctrlif, interface);
159444                         return -EINVAL;
159445                 }
159446 -               usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
159448 -               return 0;
159449 +               return usb_driver_claim_interface(&usb_audio_driver, iface,
159450 +                                                 USB_AUDIO_IFACE_UNUSED);
159451         }
159453         if ((altsd->bInterfaceClass != USB_CLASS_AUDIO &&
159454 @@ -203,7 +202,8 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
159456         if (! snd_usb_parse_audio_interface(chip, interface)) {
159457                 usb_set_interface(dev, interface, 0); /* reset the current interface */
159458 -               usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
159459 +               return usb_driver_claim_interface(&usb_audio_driver, iface,
159460 +                                                 USB_AUDIO_IFACE_UNUSED);
159461         }
159463         return 0;
159464 @@ -862,7 +862,7 @@ static void usb_audio_disconnect(struct usb_interface *intf)
159465         struct snd_card *card;
159466         struct list_head *p;
159468 -       if (chip == (void *)-1L)
159469 +       if (chip == USB_AUDIO_IFACE_UNUSED)
159470                 return;
159472         card = chip->card;
159473 @@ -992,7 +992,7 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
159474         struct usb_mixer_interface *mixer;
159475         struct list_head *p;
159477 -       if (chip == (void *)-1L)
159478 +       if (chip == USB_AUDIO_IFACE_UNUSED)
159479                 return 0;
159481         if (!chip->num_suspended_intf++) {
159482 @@ -1022,7 +1022,7 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
159483         struct list_head *p;
159484         int err = 0;
159486 -       if (chip == (void *)-1L)
159487 +       if (chip == USB_AUDIO_IFACE_UNUSED)
159488                 return 0;
159490         atomic_inc(&chip->active); /* avoid autopm */
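USB_AUDIO_IFACE_UNUSED, defined later in usbaudio.h by this same patch, names the bare (void *)-1L sentinel that card.c had been passing around as interface driver-data. A tiny sketch of the pattern, with made-up names:

    #include <stdio.h>

    /* Hypothetical stand-in for the change above: a named sentinel
     * instead of a magic (void *)-1L scattered through the driver. */
    #define IFACE_UNUSED ((void *)-1L)

    static void disconnect(void *drvdata)
    {
            /* Interfaces claimed only to mark them "ours but unused"
             * carry the sentinel, not a real device pointer. */
            if (drvdata == IFACE_UNUSED) {
                    puts("unused interface, nothing to tear down");
                    return;
            }
            puts("tearing down real device state");
    }

    int main(void)
    {
            disconnect(IFACE_UNUSED);
            return 0;
    }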
159491 diff --git a/sound/usb/clock.c b/sound/usb/clock.c
159492 index a746802d0ac3..17bbde73d4d1 100644
159493 --- a/sound/usb/clock.c
159494 +++ b/sound/usb/clock.c
159495 @@ -296,7 +296,7 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
159497         selector = snd_usb_find_clock_selector(chip->ctrl_intf, entity_id);
159498         if (selector) {
159499 -               int ret, i, cur;
159500 +               int ret, i, cur, err;
159502                 if (selector->bNrInPins == 1) {
159503                         ret = 1;
159504 @@ -324,13 +324,17 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
159505                 ret = __uac_clock_find_source(chip, fmt,
159506                                               selector->baCSourceID[ret - 1],
159507                                               visited, validate);
159508 +               if (ret > 0) {
159509 +                       err = uac_clock_selector_set_val(chip, entity_id, cur);
159510 +                       if (err < 0)
159511 +                               return err;
159512 +               }
159514                 if (!validate || ret > 0 || !chip->autoclock)
159515                         return ret;
159517                 /* The current clock source is invalid, try others. */
159518                 for (i = 1; i <= selector->bNrInPins; i++) {
159519 -                       int err;
159521                         if (i == cur)
159522                                 continue;
159524 @@ -396,7 +400,7 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip,
159526         selector = snd_usb_find_clock_selector_v3(chip->ctrl_intf, entity_id);
159527         if (selector) {
159528 -               int ret, i, cur;
159529 +               int ret, i, cur, err;
159531                 /* the entity ID we are looking for is a selector.
159532                  * find out what it currently selects */
159533 @@ -418,6 +422,12 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip,
159534                 ret = __uac3_clock_find_source(chip, fmt,
159535                                                selector->baCSourceID[ret - 1],
159536                                                visited, validate);
159537 +               if (ret > 0) {
159538 +                       err = uac_clock_selector_set_val(chip, entity_id, cur);
159539 +                       if (err < 0)
159540 +                               return err;
159541 +               }
159543                 if (!validate || ret > 0 || !chip->autoclock)
159544                         return ret;
159546 diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
159547 index 102d53515a76..933586a895e7 100644
159548 --- a/sound/usb/endpoint.c
159549 +++ b/sound/usb/endpoint.c
159550 @@ -1442,11 +1442,11 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep)
159551         if (snd_BUG_ON(!atomic_read(&ep->running)))
159552                 return;
159554 -       if (ep->sync_source)
159555 -               WRITE_ONCE(ep->sync_source->sync_sink, NULL);
159557 -       if (!atomic_dec_return(&ep->running))
159558 +       if (!atomic_dec_return(&ep->running)) {
159559 +               if (ep->sync_source)
159560 +                       WRITE_ONCE(ep->sync_source->sync_sink, NULL);
159561                 stop_urbs(ep, false);
159562 +       }
159565  /**
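The endpoint.c change above defers clearing sync_source->sync_sink until the running count actually reaches zero, so stopping one of two streams that share the endpoint no longer breaks the other stream's sync pairing. A small C11 sketch of the same refcount-guarded teardown (names and values are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int running = 2;        /* two streams share the endpoint */
    static void *sync_sink = (void *)0x1; /* placeholder for the real link */

    static void endpoint_stop(void)
    {
            /* Only the last user unlinks shared state and stops hardware. */
            if (atomic_fetch_sub(&running, 1) - 1 == 0) {
                    sync_sink = NULL;
                    puts("stopping urbs");
            } else {
                    puts("still in use, leaving sync_sink alone");
            }
    }

    int main(void)
    {
            endpoint_stop(); /* first stop: endpoint still running */
            endpoint_stop(); /* second stop: full teardown */
            return 0;
    }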
159566 diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
159567 index a030dd65eb28..9602929b7de9 100644
159568 --- a/sound/usb/line6/driver.c
159569 +++ b/sound/usb/line6/driver.c
159570 @@ -699,6 +699,10 @@ static int line6_init_cap_control(struct usb_line6 *line6)
159571                 line6->buffer_message = kmalloc(LINE6_MIDI_MESSAGE_MAXLEN, GFP_KERNEL);
159572                 if (!line6->buffer_message)
159573                         return -ENOMEM;
159575 +               ret = line6_init_midi(line6);
159576 +               if (ret < 0)
159577 +                       return ret;
159578         } else {
159579                 ret = line6_hwdep_init(line6);
159580                 if (ret < 0)
159581 diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
159582 index fdbdfb7bce92..fa8e8faf3eb3 100644
159583 --- a/sound/usb/line6/pcm.c
159584 +++ b/sound/usb/line6/pcm.c
159585 @@ -127,7 +127,7 @@ static void line6_wait_clear_audio_urbs(struct snd_line6_pcm *line6pcm,
159586                 if (!alive)
159587                         break;
159588                 set_current_state(TASK_UNINTERRUPTIBLE);
159589 -               schedule_timeout(1);
159590 +               schedule_min_hrtimeout();
159591         } while (--timeout > 0);
159592         if (alive)
159593                 dev_err(line6pcm->line6->ifcdev,
159594 diff --git a/sound/usb/line6/pod.c b/sound/usb/line6/pod.c
159595 index cd44cb5f1310..16e644330c4d 100644
159596 --- a/sound/usb/line6/pod.c
159597 +++ b/sound/usb/line6/pod.c
159598 @@ -376,11 +376,6 @@ static int pod_init(struct usb_line6 *line6,
159599         if (err < 0)
159600                 return err;
159602 -       /* initialize MIDI subsystem: */
159603 -       err = line6_init_midi(line6);
159604 -       if (err < 0)
159605 -               return err;
159607         /* initialize PCM subsystem: */
159608         err = line6_init_pcm(line6, &pod_pcm_properties);
159609         if (err < 0)
159610 diff --git a/sound/usb/line6/variax.c b/sound/usb/line6/variax.c
159611 index ed158f04de80..c2245aa93b08 100644
159612 --- a/sound/usb/line6/variax.c
159613 +++ b/sound/usb/line6/variax.c
159614 @@ -159,7 +159,6 @@ static int variax_init(struct usb_line6 *line6,
159615                        const struct usb_device_id *id)
159617         struct usb_line6_variax *variax = line6_to_variax(line6);
159618 -       int err;
159620         line6->process_message = line6_variax_process_message;
159621         line6->disconnect = line6_variax_disconnect;
159622 @@ -172,11 +171,6 @@ static int variax_init(struct usb_line6 *line6,
159623         if (variax->buffer_activate == NULL)
159624                 return -ENOMEM;
159626 -       /* initialize MIDI subsystem: */
159627 -       err = line6_init_midi(&variax->line6);
159628 -       if (err < 0)
159629 -               return err;
159631         /* initiate startup procedure: */
159632         schedule_delayed_work(&line6->startup_work,
159633                               msecs_to_jiffies(VARIAX_STARTUP_DELAY1));
159634 diff --git a/sound/usb/midi.c b/sound/usb/midi.c
159635 index 0c23fa6d8525..fa91290ad89d 100644
159636 --- a/sound/usb/midi.c
159637 +++ b/sound/usb/midi.c
159638 @@ -1332,7 +1332,7 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi *umidi,
159640   error:
159641         snd_usbmidi_in_endpoint_delete(ep);
159642 -       return -ENOMEM;
159643 +       return err;
159647 @@ -1889,8 +1889,12 @@ static int snd_usbmidi_get_ms_info(struct snd_usb_midi *umidi,
159648                 ms_ep = find_usb_ms_endpoint_descriptor(hostep);
159649                 if (!ms_ep)
159650                         continue;
159651 +               if (ms_ep->bLength <= sizeof(*ms_ep))
159652 +                       continue;
159653                 if (ms_ep->bNumEmbMIDIJack > 0x10)
159654                         continue;
159655 +               if (ms_ep->bLength < sizeof(*ms_ep) + ms_ep->bNumEmbMIDIJack)
159656 +                       continue;
159657                 if (usb_endpoint_dir_out(ep)) {
159658                         if (endpoints[epidx].out_ep) {
159659                                 if (++epidx >= MIDI_MAX_ENDPOINTS) {
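The two bLength checks added above guard against malformed or malicious USB MIDI streaming endpoint descriptors whose advertised jack count does not fit inside the descriptor. A cut-down model of that validation, using a hypothetical two-byte header rather than the real struct usb_ms_endpoint_descriptor:

    #include <stdint.h>
    #include <stdio.h>

    /* A variable-length descriptor must be long enough for its fixed
     * header *and* for the array whose length the header advertises. */
    struct ms_ep_desc {
            uint8_t bLength;
            uint8_t bNumEmbMIDIJack;
            /* followed by bNumEmbMIDIJack jack IDs */
    };

    static int ms_ep_valid(const uint8_t *buf, size_t buflen)
    {
            const struct ms_ep_desc *d = (const void *)buf;

            if (buflen < sizeof(*d) || d->bLength <= sizeof(*d))
                    return 0;   /* no room for any jacks at all */
            if (d->bLength < sizeof(*d) + d->bNumEmbMIDIJack)
                    return 0;   /* claims more jacks than it has bytes */
            if (d->bLength > buflen)
                    return 0;   /* descriptor overruns the buffer */
            return 1;
    }

    int main(void)
    {
            uint8_t bogus[] = { 4, 9, 1, 2 };  /* says 9 jacks, holds 2 */
            uint8_t good[]  = { 4, 2, 1, 2 };  /* says 2 jacks, holds 2 */

            printf("bogus: %d\n", ms_ep_valid(bogus, sizeof(bogus)));
            printf("good:  %d\n", ms_ep_valid(good, sizeof(good)));
            return 0;
    }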
159660 diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
159661 index 646deb6244b1..c5794e83fd80 100644
159662 --- a/sound/usb/mixer_maps.c
159663 +++ b/sound/usb/mixer_maps.c
159664 @@ -337,6 +337,13 @@ static const struct usbmix_name_map bose_companion5_map[] = {
159665         { 0 }   /* terminator */
159668 +/* Sennheiser Communications Headset [PC 8]; the device reports its dB range with a negative maximum of -6 */
159669 +static const struct usbmix_dB_map sennheiser_pc8_dB = {-9500, 0};
159670 +static const struct usbmix_name_map sennheiser_pc8_map[] = {
159671 +       { 9, NULL, .dB = &sennheiser_pc8_dB },
159672 +       { 0 }   /* terminator */
159676   * Dell usb dock with ALC4020 codec had a firmware problem where it got
159677   * screwed up when zero volume is passed; just skip it as a workaround
159678 @@ -593,6 +600,11 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
159679                 .id = USB_ID(0x17aa, 0x1046),
159680                 .map = lenovo_p620_rear_map,
159681         },
159682 +       {
159683 +               /* Sennheiser Communications Headset [PC 8] */
159684 +               .id = USB_ID(0x1395, 0x0025),
159685 +               .map = sennheiser_pc8_map,
159686 +       },
159687         { 0 } /* terminator */
159690 diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
159691 index 1165a5ac60f2..8a8fe2b980a1 100644
159692 --- a/sound/usb/quirks-table.h
159693 +++ b/sound/usb/quirks-table.h
159694 @@ -2376,6 +2376,16 @@ YAMAHA_DEVICE(0x7010, "UB99"),
159695         }
159699 +       USB_DEVICE_VENDOR_SPEC(0x0944, 0x0204),
159700 +       .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
159701 +               .vendor_name = "KORG, Inc.",
159702 +               /* .product_name = "ToneLab EX", */
159703 +               .ifnum = 3,
159704 +               .type = QUIRK_MIDI_STANDARD_INTERFACE,
159705 +       }
159708  /* AKAI devices */
159710         USB_DEVICE(0x09e8, 0x0062),
159711 @@ -3817,6 +3827,69 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
159712                 }
159713         }
159716 +       /*
159717 +        * Pioneer DJ DJM-850
159718 +        * 8 channels playback and 8 channels capture @ 44.1/48/96kHz S24LE
159719 +        * Playback on EP 0x05
159720 +        * Capture on EP 0x86
159721 +        */
159722 +       USB_DEVICE_VENDOR_SPEC(0x08e4, 0x0163),
159723 +       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
159724 +               .ifnum = QUIRK_ANY_INTERFACE,
159725 +               .type = QUIRK_COMPOSITE,
159726 +               .data = (const struct snd_usb_audio_quirk[]) {
159727 +                       {
159728 +                               .ifnum = 0,
159729 +                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
159730 +                               .data = &(const struct audioformat) {
159731 +                                       .formats = SNDRV_PCM_FMTBIT_S24_3LE,
159732 +                                       .channels = 8,
159733 +                                       .iface = 0,
159734 +                                       .altsetting = 1,
159735 +                                       .altset_idx = 1,
159736 +                                       .endpoint = 0x05,
159737 +                                       .ep_attr = USB_ENDPOINT_XFER_ISOC|
159738 +                                           USB_ENDPOINT_SYNC_ASYNC|
159739 +                                               USB_ENDPOINT_USAGE_DATA,
159740 +                                       .rates = SNDRV_PCM_RATE_44100|
159741 +                                               SNDRV_PCM_RATE_48000|
159742 +                                               SNDRV_PCM_RATE_96000,
159743 +                                       .rate_min = 44100,
159744 +                                       .rate_max = 96000,
159745 +                                       .nr_rates = 3,
159746 +                                       .rate_table = (unsigned int[]) { 44100, 48000, 96000 }
159747 +                               }
159748 +                       },
159749 +                       {
159750 +                               .ifnum = 0,
159751 +                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
159752 +                               .data = &(const struct audioformat) {
159753 +                                       .formats = SNDRV_PCM_FMTBIT_S24_3LE,
159754 +                                       .channels = 8,
159755 +                                       .iface = 0,
159756 +                                       .altsetting = 1,
159757 +                                       .altset_idx = 1,
159758 +                                       .endpoint = 0x86,
159759 +                                       .ep_idx = 1,
159760 +                                       .ep_attr = USB_ENDPOINT_XFER_ISOC|
159761 +                                               USB_ENDPOINT_SYNC_ASYNC|
159762 +                                               USB_ENDPOINT_USAGE_DATA,
159763 +                                       .rates = SNDRV_PCM_RATE_44100|
159764 +                                               SNDRV_PCM_RATE_48000|
159765 +                                               SNDRV_PCM_RATE_96000,
159766 +                                       .rate_min = 44100,
159767 +                                       .rate_max = 96000,
159768 +                                       .nr_rates = 3,
159769 +                                       .rate_table = (unsigned int[]) { 44100, 48000, 96000 }
159770 +                               }
159771 +                       },
159772 +                       {
159773 +                               .ifnum = -1
159774 +                       }
159775 +               }
159776 +       }
159779         /*
159780          * Pioneer DJ DJM-450
159781 diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
159782 index 176437a441e6..8b8bee3c3dd6 100644
159783 --- a/sound/usb/quirks.c
159784 +++ b/sound/usb/quirks.c
159785 @@ -55,8 +55,12 @@ static int create_composite_quirk(struct snd_usb_audio *chip,
159786                 if (!iface)
159787                         continue;
159788                 if (quirk->ifnum != probed_ifnum &&
159789 -                   !usb_interface_claimed(iface))
159790 -                       usb_driver_claim_interface(driver, iface, (void *)-1L);
159791 +                   !usb_interface_claimed(iface)) {
159792 +                       err = usb_driver_claim_interface(driver, iface,
159793 +                                                        USB_AUDIO_IFACE_UNUSED);
159794 +                       if (err < 0)
159795 +                               return err;
159796 +               }
159797         }
159799         return 0;
159800 @@ -426,8 +430,12 @@ static int create_autodetect_quirks(struct snd_usb_audio *chip,
159801                         continue;
159803                 err = create_autodetect_quirk(chip, iface, driver);
159804 -               if (err >= 0)
159805 -                       usb_driver_claim_interface(driver, iface, (void *)-1L);
159806 +               if (err >= 0) {
159807 +                       err = usb_driver_claim_interface(driver, iface,
159808 +                                                        USB_AUDIO_IFACE_UNUSED);
159809 +                       if (err < 0)
159810 +                               return err;
159811 +               }
159812         }
159814         return 0;
159815 @@ -1503,6 +1511,10 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
159816         case USB_ID(0x2b73, 0x0013): /* Pioneer DJM-450 */
159817                 pioneer_djm_set_format_quirk(subs, 0x0082);
159818                 break;
159819 +       case USB_ID(0x08e4, 0x017f): /* Pioneer DJM-750 */
159820 +       case USB_ID(0x08e4, 0x0163): /* Pioneer DJM-850 */
159821 +               pioneer_djm_set_format_quirk(subs, 0x0086);
159822 +               break;
159823         }
159826 diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
159827 index 60b9dd7df6bb..8794c8658ab9 100644
159828 --- a/sound/usb/usbaudio.h
159829 +++ b/sound/usb/usbaudio.h
159830 @@ -61,6 +61,8 @@ struct snd_usb_audio {
159831         struct media_intf_devnode *ctl_intf_media_devnode;
159834 +#define USB_AUDIO_IFACE_UNUSED ((void *)-1L)
159836  #define usb_audio_err(chip, fmt, args...) \
159837         dev_err(&(chip)->dev->dev, fmt, ##args)
159838  #define usb_audio_warn(chip, fmt, args...) \
159839 diff --git a/tools/arch/x86/include/asm/unistd_64.h b/tools/arch/x86/include/asm/unistd_64.h
159840 index 4205ed4158bf..b65c51e8d675 100644
159841 --- a/tools/arch/x86/include/asm/unistd_64.h
159842 +++ b/tools/arch/x86/include/asm/unistd_64.h
159843 @@ -17,3 +17,15 @@
159844  #ifndef __NR_setns
159845  #define __NR_setns 308
159846  #endif
159848 +#ifndef __NR_futex_wait
159849 +# define __NR_futex_wait 443
159850 +#endif
159852 +#ifndef __NR_futex_wake
159853 +# define __NR_futex_wake 444
159854 +#endif
159856 +#ifndef __NR_futex_requeue
159857 +# define __NR_futex_requeue 446
159858 +#endif
159859 diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
159860 index fe9e7b3a4b50..1326fff3629b 100644
159861 --- a/tools/bpf/bpftool/btf.c
159862 +++ b/tools/bpf/bpftool/btf.c
159863 @@ -538,6 +538,7 @@ static int do_dump(int argc, char **argv)
159864                         NEXT_ARG();
159865                         if (argc < 1) {
159866                                 p_err("expecting value for 'format' option\n");
159867 +                               err = -EINVAL;
159868                                 goto done;
159869                         }
159870                         if (strcmp(*argv, "c") == 0) {
159871 @@ -547,11 +548,13 @@ static int do_dump(int argc, char **argv)
159872                         } else {
159873                                 p_err("unrecognized format specifier: '%s', possible values: raw, c",
159874                                       *argv);
159875 +                               err = -EINVAL;
159876                                 goto done;
159877                         }
159878                         NEXT_ARG();
159879                 } else {
159880                         p_err("unrecognized option: '%s'", *argv);
159881 +                       err = -EINVAL;
159882                         goto done;
159883                 }
159884         }
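The bpftool fix above makes sure err holds a real error code before each early "goto done", because the shared exit path returns err as-is. The same pattern in isolation; parse() and its message are invented for the example:

    #include <errno.h>
    #include <stdio.h>

    static int parse(const char *arg)
    {
            int err = 0;

            if (!arg) {
                    fprintf(stderr, "expecting value for 'format' option\n");
                    err = -EINVAL;  /* the kind of line the patch adds */
                    goto done;
            }
            /* ... parse arg ... */
    done:
            /* shared cleanup would go here */
            return err;
    }

    int main(void)
    {
            printf("%d\n", parse(NULL)); /* -22, not a stale 0 */
            return 0;
    }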
159885 diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
159886 index b86f450e6fce..d9afb730136a 100644
159887 --- a/tools/bpf/bpftool/main.c
159888 +++ b/tools/bpf/bpftool/main.c
159889 @@ -276,7 +276,7 @@ static int do_batch(int argc, char **argv)
159890         int n_argc;
159891         FILE *fp;
159892         char *cp;
159893 -       int err;
159894 +       int err = 0;
159895         int i;
159897         if (argc < 2) {
159898 @@ -370,7 +370,6 @@ static int do_batch(int argc, char **argv)
159899         } else {
159900                 if (!json_output)
159901                         printf("processed %d commands\n", lines);
159902 -               err = 0;
159903         }
159904  err_close:
159905         if (fp != stdin)
159906 diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
159907 index b400364ee054..09ae0381205b 100644
159908 --- a/tools/bpf/bpftool/map.c
159909 +++ b/tools/bpf/bpftool/map.c
159910 @@ -100,7 +100,7 @@ static int do_dump_btf(const struct btf_dumper *d,
159911                        void *value)
159913         __u32 value_id;
159914 -       int ret;
159915 +       int ret = 0;
159917         /* start of key-value pair */
159918         jsonw_start_object(d->jw);
159919 diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
159920 index ce58cff99b66..2a6adca37fe9 100644
159921 --- a/tools/include/uapi/asm-generic/unistd.h
159922 +++ b/tools/include/uapi/asm-generic/unistd.h
159923 @@ -864,8 +864,17 @@ __SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
159924  #define __NR_mount_setattr 442
159925  __SYSCALL(__NR_mount_setattr, sys_mount_setattr)
159927 +#define __NR_futex_wait 443
159928 +__SYSCALL(__NR_futex_wait, sys_futex_wait)
159930 +#define __NR_futex_wake 444
159931 +__SYSCALL(__NR_futex_wake, sys_futex_wake)
159933 +#define __NR_futex_waitv 445
159934 +__SC_COMP(__NR_futex_waitv, sys_futex_waitv, compat_sys_futex_waitv)
159936  #undef __NR_syscalls
159937 -#define __NR_syscalls 443
159938 +#define __NR_syscalls 446
159941   * 32 bit systems traditionally used different
159942 diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
159943 index 53b3e199fb25..09ebe3db5f2f 100644
159944 --- a/tools/lib/bpf/bpf_core_read.h
159945 +++ b/tools/lib/bpf/bpf_core_read.h
159946 @@ -88,11 +88,19 @@ enum bpf_enum_value_kind {
159947         const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
159948         unsigned long long val;                                               \
159949                                                                               \
159950 +       /* This is a so-called barrier_var() operation that makes the         \
159951 +        * specified variable a "black box" for the optimizing compiler.      \
159952 +        * It forces the compiler to perform the BYTE_OFFSET relocation on p  \
159953 +        * and to use its calculated value in the switch below, instead of    \
159954 +        * applying the same relocation 4 times, once per memory load.        \
159955 +        */                                                                   \
159956 +       asm volatile("" : "=r"(p) : "0"(p));                                  \
159957 +                                                                             \
159958         switch (__CORE_RELO(s, field, BYTE_SIZE)) {                           \
159959 -       case 1: val = *(const unsigned char *)p;                              \
159960 -       case 2: val = *(const unsigned short *)p;                             \
159961 -       case 4: val = *(const unsigned int *)p;                               \
159962 -       case 8: val = *(const unsigned long long *)p;                         \
159963 +       case 1: val = *(const unsigned char *)p; break;                       \
159964 +       case 2: val = *(const unsigned short *)p; break;                      \
159965 +       case 4: val = *(const unsigned int *)p; break;                        \
159966 +       case 8: val = *(const unsigned long long *)p; break;                  \
159967         }                                                                     \
159968         val <<= __CORE_RELO(s, field, LSHIFT_U64);                            \
159969         if (__CORE_RELO(s, field, SIGNED))                                    \
159970 diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
159971 index f9ef37707888..1c2e91ee041d 100644
159972 --- a/tools/lib/bpf/bpf_tracing.h
159973 +++ b/tools/lib/bpf/bpf_tracing.h
159974 @@ -413,20 +413,38 @@ typeof(name(0)) name(struct pt_regs *ctx)                             \
159975  }                                                                          \
159976  static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
159978 +#define ___bpf_fill0(arr, p, x) do {} while (0)
159979 +#define ___bpf_fill1(arr, p, x) arr[p] = x
159980 +#define ___bpf_fill2(arr, p, x, args...) arr[p] = x; ___bpf_fill1(arr, p + 1, args)
159981 +#define ___bpf_fill3(arr, p, x, args...) arr[p] = x; ___bpf_fill2(arr, p + 1, args)
159982 +#define ___bpf_fill4(arr, p, x, args...) arr[p] = x; ___bpf_fill3(arr, p + 1, args)
159983 +#define ___bpf_fill5(arr, p, x, args...) arr[p] = x; ___bpf_fill4(arr, p + 1, args)
159984 +#define ___bpf_fill6(arr, p, x, args...) arr[p] = x; ___bpf_fill5(arr, p + 1, args)
159985 +#define ___bpf_fill7(arr, p, x, args...) arr[p] = x; ___bpf_fill6(arr, p + 1, args)
159986 +#define ___bpf_fill8(arr, p, x, args...) arr[p] = x; ___bpf_fill7(arr, p + 1, args)
159987 +#define ___bpf_fill9(arr, p, x, args...) arr[p] = x; ___bpf_fill8(arr, p + 1, args)
159988 +#define ___bpf_fill10(arr, p, x, args...) arr[p] = x; ___bpf_fill9(arr, p + 1, args)
159989 +#define ___bpf_fill11(arr, p, x, args...) arr[p] = x; ___bpf_fill10(arr, p + 1, args)
159990 +#define ___bpf_fill12(arr, p, x, args...) arr[p] = x; ___bpf_fill11(arr, p + 1, args)
159991 +#define ___bpf_fill(arr, args...) \
159992 +       ___bpf_apply(___bpf_fill, ___bpf_narg(args))(arr, 0, args)
159995   * BPF_SEQ_PRINTF to wrap bpf_seq_printf to-be-printed values
159996   * in a structure.
159997   */
159998 -#define BPF_SEQ_PRINTF(seq, fmt, args...)                                  \
159999 -       ({                                                                  \
160000 -               _Pragma("GCC diagnostic push")                              \
160001 -               _Pragma("GCC diagnostic ignored \"-Wint-conversion\"")      \
160002 -               static const char ___fmt[] = fmt;                           \
160003 -               unsigned long long ___param[] = { args };                   \
160004 -               _Pragma("GCC diagnostic pop")                               \
160005 -               int ___ret = bpf_seq_printf(seq, ___fmt, sizeof(___fmt),    \
160006 -                                           ___param, sizeof(___param));    \
160007 -               ___ret;                                                     \
160008 -       })
160009 +#define BPF_SEQ_PRINTF(seq, fmt, args...)                      \
160010 +({                                                             \
160011 +       static const char ___fmt[] = fmt;                       \
160012 +       unsigned long long ___param[___bpf_narg(args)];         \
160013 +                                                               \
160014 +       _Pragma("GCC diagnostic push")                          \
160015 +       _Pragma("GCC diagnostic ignored \"-Wint-conversion\"")  \
160016 +       ___bpf_fill(___param, args);                            \
160017 +       _Pragma("GCC diagnostic pop")                           \
160018 +                                                               \
160019 +       bpf_seq_printf(seq, ___fmt, sizeof(___fmt),             \
160020 +                      ___param, sizeof(___param));             \
160023  #endif
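The ___bpf_fillN chain above unrolls array population at preprocessing time, one macro level per argument, so BPF_SEQ_PRINTF can fill ___param[] element by element. A trimmed three-level version showing the expansion:

    #include <stdio.h>

    /* Each level stores one argument and recurses on the rest, so a
     * variadic macro can populate an array without a runtime loop. */
    #define fill0(arr, p)
    #define fill1(arr, p, x)       arr[p] = x
    #define fill2(arr, p, x, ...)  arr[p] = x; fill1(arr, p + 1, __VA_ARGS__)
    #define fill3(arr, p, x, ...)  arr[p] = x; fill2(arr, p + 1, __VA_ARGS__)

    int main(void)
    {
            long param[3];

            fill3(param, 0, 10, 20, 30); /* expands to three assignments */
            printf("%ld %ld %ld\n", param[0], param[1], param[2]);
            return 0;
    }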
160024 diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
160025 index 1237bcd1dd17..5b8a6ea44b38 100644
160026 --- a/tools/lib/bpf/btf.h
160027 +++ b/tools/lib/bpf/btf.h
160028 @@ -173,6 +173,7 @@ struct btf_dump_emit_type_decl_opts {
160029         int indent_level;
160030         /* strip all the const/volatile/restrict mods */
160031         bool strip_mods;
160032 +       size_t :0;
160034  #define btf_dump_emit_type_decl_opts__last_field strip_mods
160036 diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
160037 index 3c35eb401931..3d690d4e785c 100644
160038 --- a/tools/lib/bpf/libbpf.h
160039 +++ b/tools/lib/bpf/libbpf.h
160040 @@ -507,6 +507,7 @@ struct xdp_link_info {
160041  struct bpf_xdp_set_link_opts {
160042         size_t sz;
160043         int old_fd;
160044 +       size_t :0;
160046  #define bpf_xdp_set_link_opts__last_field old_fd
160048 diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c
160049 index e7a8d847161f..1d80ad4e0de8 100644
160050 --- a/tools/lib/bpf/ringbuf.c
160051 +++ b/tools/lib/bpf/ringbuf.c
160052 @@ -202,9 +202,11 @@ static inline int roundup_len(__u32 len)
160053         return (len + 7) / 8 * 8;
160056 -static int ringbuf_process_ring(struct ring* r)
160057 +static int64_t ringbuf_process_ring(struct ring* r)
160059 -       int *len_ptr, len, err, cnt = 0;
160060 +       int *len_ptr, len, err;
160061 +       /* 64-bit to avoid overflow in case of extreme application behavior */
160062 +       int64_t cnt = 0;
160063         unsigned long cons_pos, prod_pos;
160064         bool got_new_data;
160065         void *sample;
160066 @@ -244,12 +246,14 @@ static int ringbuf_process_ring(struct ring* r)
160069  /* Consume available ring buffer(s) data without event polling.
160070 - * Returns number of records consumed across all registered ring buffers, or
160071 - * negative number if any of the callbacks return error.
160072 + * Returns the number of records consumed across all registered ring buffers
160073 + * (or INT_MAX, whichever is less), or a negative number if any of the
160074 + * callbacks returns an error.
160075   */
160076  int ring_buffer__consume(struct ring_buffer *rb)
160078 -       int i, err, res = 0;
160079 +       int64_t err, res = 0;
160080 +       int i;
160082         for (i = 0; i < rb->ring_cnt; i++) {
160083                 struct ring *ring = &rb->rings[i];
160084 @@ -259,18 +263,24 @@ int ring_buffer__consume(struct ring_buffer *rb)
160085                         return err;
160086                 res += err;
160087         }
160088 +       if (res > INT_MAX)
160089 +               return INT_MAX;
160090         return res;
160093  /* Poll for available data and consume records, if any are available.
160094 - * Returns number of records consumed, or negative number, if any of the
160095 - * registered callbacks returned error.
160096 + * Returns the number of records consumed (or INT_MAX, whichever is less),
160097 + * or a negative number if any of the registered callbacks returned an error.
160098   */
160099  int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
160101 -       int i, cnt, err, res = 0;
160102 +       int i, cnt;
160103 +       int64_t err, res = 0;
160105         cnt = epoll_wait(rb->epoll_fd, rb->events, rb->ring_cnt, timeout_ms);
160106 +       if (cnt < 0)
160107 +               return -errno;
160109         for (i = 0; i < cnt; i++) {
160110                 __u32 ring_id = rb->events[i].data.fd;
160111                 struct ring *ring = &rb->rings[ring_id];
160112 @@ -280,7 +290,9 @@ int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
160113                         return err;
160114                 res += err;
160115         }
160116 -       return cnt < 0 ? -errno : res;
160117 +       if (res > INT_MAX)
160118 +               return INT_MAX;
160119 +       return res;
160122  /* Get an fd that can be used to sleep until data is available in the ring(s) */
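ringbuf.c now accumulates in int64_t and clamps at the API boundary, since a plain int sum of per-ring counts could wrap negative and masquerade as an error. The same shape in miniature:

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Accumulate per-ring counts in 64 bits, then clamp to INT_MAX so
     * the public int return type cannot overflow into error territory. */
    static int consume_all(const int *per_ring, int nrings)
    {
            int64_t res = 0;

            for (int i = 0; i < nrings; i++)
                    res += per_ring[i];
            if (res > INT_MAX)
                    return INT_MAX;
            return (int)res;
    }

    int main(void)
    {
            int counts[] = { INT_MAX, 1000 }; /* would overflow a plain int */

            printf("%d\n", consume_all(counts, 2)); /* prints INT_MAX */
            return 0;
    }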
160123 diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h
160124 index d82054225fcc..4d0c02ba3f7d 100644
160125 --- a/tools/lib/perf/include/perf/event.h
160126 +++ b/tools/lib/perf/include/perf/event.h
160127 @@ -8,6 +8,8 @@
160128  #include <linux/bpf.h>
160129  #include <sys/types.h> /* pid_t */
160131 +#define event_contains(obj, mem) ((obj).header.size > offsetof(typeof(obj), mem))
160133  struct perf_record_mmap {
160134         struct perf_event_header header;
160135         __u32                    pid, tid;
160136 @@ -346,8 +348,9 @@ struct perf_record_time_conv {
160137         __u64                    time_zero;
160138         __u64                    time_cycles;
160139         __u64                    time_mask;
160140 -       bool                     cap_user_time_zero;
160141 -       bool                     cap_user_time_short;
160142 +       __u8                     cap_user_time_zero;
160143 +       __u8                     cap_user_time_short;
160144 +       __u8                     reserved[6];   /* For alignment */
160147  struct perf_record_header_feature {
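perf_record_time_conv grows two fixed-width flags plus explicit padding, and the event_contains() macro added above lets readers probe whether the producer actually wrote the new tail fields by comparing header.size against the field offset. A miniature of that macro with invented record types (GNU C typeof, as in the macro itself):

    #include <stddef.h>
    #include <stdio.h>

    struct header { unsigned short size; };
    struct record {
            struct header header;
            unsigned long long time_zero;
            unsigned char cap_user_time_zero; /* newer, optional field */
    };

    /* A record "contains" a field only if the producer wrote at least
     * that many bytes, per the size recorded in its header. */
    #define event_contains(obj, mem) \
            ((obj).header.size > offsetof(typeof(obj), mem))

    int main(void)
    {
            struct record old = {
                    .header.size = offsetof(struct record, cap_user_time_zero)
            };
            struct record new_ = { .header.size = sizeof(struct record) };

            printf("old has field: %d\n", (int)event_contains(old, cap_user_time_zero));
            printf("new has field: %d\n", (int)event_contains(new_, cap_user_time_zero));
            return 0;
    }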
160148 diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
160149 index d8e59d31399a..c955cd683e22 100644
160150 --- a/tools/perf/Makefile.config
160151 +++ b/tools/perf/Makefile.config
160152 @@ -530,6 +530,7 @@ ifndef NO_LIBELF
160153        ifdef LIBBPF_DYNAMIC
160154          ifeq ($(feature-libbpf), 1)
160155            EXTLIBS += -lbpf
160156 +          $(call detected,CONFIG_LIBBPF_DYNAMIC)
160157          else
160158            dummy := $(error Error: No libbpf devel library found, please install libbpf-devel);
160159          endif
160160 diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
160161 index 7bf01cbe582f..86d1b0fae558 100644
160162 --- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
160163 +++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
160164 @@ -364,6 +364,10 @@
160165  440    common  process_madvise         sys_process_madvise
160166  441    common  epoll_pwait2            sys_epoll_pwait2
160167  442    common  mount_setattr           sys_mount_setattr
160168 +443    common  futex_wait              sys_futex_wait
160169 +444    common  futex_wake              sys_futex_wake
160170 +445    common  futex_waitv             sys_futex_waitv
160171 +446    common  futex_requeue           sys_futex_requeue
160174  # Due to a historical design error, certain syscalls are numbered differently
160175 diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
160176 index eac36afab2b3..12346844b354 100644
160177 --- a/tools/perf/bench/bench.h
160178 +++ b/tools/perf/bench/bench.h
160179 @@ -38,9 +38,13 @@ int bench_mem_memcpy(int argc, const char **argv);
160180  int bench_mem_memset(int argc, const char **argv);
160181  int bench_mem_find_bit(int argc, const char **argv);
160182  int bench_futex_hash(int argc, const char **argv);
160183 +int bench_futex2_hash(int argc, const char **argv);
160184  int bench_futex_wake(int argc, const char **argv);
160185 +int bench_futex2_wake(int argc, const char **argv);
160186  int bench_futex_wake_parallel(int argc, const char **argv);
160187 +int bench_futex2_wake_parallel(int argc, const char **argv);
160188  int bench_futex_requeue(int argc, const char **argv);
160189 +int bench_futex2_requeue(int argc, const char **argv);
160190  /* pi futexes */
160191  int bench_futex_lock_pi(int argc, const char **argv);
160192  int bench_epoll_wait(int argc, const char **argv);
160193 diff --git a/tools/perf/bench/futex-hash.c b/tools/perf/bench/futex-hash.c
160194 index b65373ce5c4f..1068749af40c 100644
160195 --- a/tools/perf/bench/futex-hash.c
160196 +++ b/tools/perf/bench/futex-hash.c
160197 @@ -33,7 +33,7 @@ static unsigned int nthreads = 0;
160198  static unsigned int nsecs    = 10;
160199  /* amount of futexes per thread */
160200  static unsigned int nfutexes = 1024;
160201 -static bool fshared = false, done = false, silent = false;
160202 +static bool fshared = false, done = false, silent = false, futex2 = false;
160203  static int futex_flag = 0;
160205  struct timeval bench__start, bench__end, bench__runtime;
160206 @@ -85,7 +85,10 @@ static void *workerfn(void *arg)
160207                          * such as internal waitqueue handling, thus enlarging
160208                          * the critical region protected by hb->lock.
160209                          */
160210 -                       ret = futex_wait(&w->futex[i], 1234, NULL, futex_flag);
160211 +                       if (!futex2)
160212 +                               ret = futex_wait(&w->futex[i], 1234, NULL, futex_flag);
160213 +                       else
160214 +                               ret = futex2_wait(&w->futex[i], 1234, futex_flag, NULL);
160215                         if (!silent &&
160216                             (!ret || errno != EAGAIN || errno != EWOULDBLOCK))
160217                                 warn("Non-expected futex return call");
160218 @@ -116,7 +119,7 @@ static void print_summary(void)
160219                (int)bench__runtime.tv_sec);
160222 -int bench_futex_hash(int argc, const char **argv)
160223 +static int __bench_futex_hash(int argc, const char **argv)
160225         int ret = 0;
160226         cpu_set_t cpuset;
160227 @@ -148,7 +151,9 @@ int bench_futex_hash(int argc, const char **argv)
160228         if (!worker)
160229                 goto errmem;
160231 -       if (!fshared)
160232 +       if (futex2)
160233 +               futex_flag = FUTEX_32 | (fshared * FUTEX_SHARED_FLAG);
160234 +       else if (!fshared)
160235                 futex_flag = FUTEX_PRIVATE_FLAG;
160237         printf("Run summary [PID %d]: %d threads, each operating on %d [%s] futexes for %d secs.\n\n",
160238 @@ -228,3 +233,14 @@ int bench_futex_hash(int argc, const char **argv)
160239  errmem:
160240         err(EXIT_FAILURE, "calloc");
160243 +int bench_futex_hash(int argc, const char **argv)
160245 +       return __bench_futex_hash(argc, argv);
160248 +int bench_futex2_hash(int argc, const char **argv)
160250 +       futex2 = true;
160251 +       return __bench_futex_hash(argc, argv);
160253 diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c
160254 index 5fa23295ee5f..6cdd649b54f4 100644
160255 --- a/tools/perf/bench/futex-requeue.c
160256 +++ b/tools/perf/bench/futex-requeue.c
160257 @@ -2,8 +2,8 @@
160259   * Copyright (C) 2013  Davidlohr Bueso <davidlohr@hp.com>
160260   *
160261 - * futex-requeue: Block a bunch of threads on futex1 and requeue them
160262 - *                on futex2, N at a time.
160263 + * futex-requeue: Block a bunch of threads on addr1 and requeue them
160264 + *                on addr2, N at a time.
160265   *
160266   * This program is particularly useful to measure the latency of nthread
160267   * requeues without waking up any tasks -- thus mimicking a regular futex_wait.
160268 @@ -28,7 +28,10 @@
160269  #include <stdlib.h>
160270  #include <sys/time.h>
160272 -static u_int32_t futex1 = 0, futex2 = 0;
160273 +static u_int32_t addr1 = 0, addr2 = 0;
160275 +static struct futex_requeue rq1 = { .uaddr = &addr1, .flags = FUTEX_32 };
160276 +static struct futex_requeue rq2 = { .uaddr = &addr2, .flags = FUTEX_32 };
160279   * How many tasks to requeue at a time.
160280 @@ -37,7 +40,7 @@ static u_int32_t futex1 = 0, futex2 = 0;
160281  static unsigned int nrequeue = 1;
160283  static pthread_t *worker;
160284 -static bool done = false, silent = false, fshared = false;
160285 +static bool done = false, silent = false, fshared = false, futex2 = false;
160286  static pthread_mutex_t thread_lock;
160287  static pthread_cond_t thread_parent, thread_worker;
160288  static struct stats requeuetime_stats, requeued_stats;
160289 @@ -79,7 +82,11 @@ static void *workerfn(void *arg __maybe_unused)
160290         pthread_cond_wait(&thread_worker, &thread_lock);
160291         pthread_mutex_unlock(&thread_lock);
160293 -       futex_wait(&futex1, 0, NULL, futex_flag);
160294 +       if (!futex2)
160295 +               futex_wait(&addr1, 0, NULL, futex_flag);
160296 +       else
160297 +               futex2_wait(&addr1, 0, futex_flag, NULL);
160299         return NULL;
160302 @@ -111,7 +118,7 @@ static void toggle_done(int sig __maybe_unused,
160303         done = true;
160306 -int bench_futex_requeue(int argc, const char **argv)
160307 +static int __bench_futex_requeue(int argc, const char **argv)
160309         int ret = 0;
160310         unsigned int i, j;
160311 @@ -139,15 +146,20 @@ int bench_futex_requeue(int argc, const char **argv)
160312         if (!worker)
160313                 err(EXIT_FAILURE, "calloc");
160315 -       if (!fshared)
160316 +       if (futex2) {
160317 +               futex_flag = FUTEX_32 | (fshared * FUTEX_SHARED_FLAG);
160318 +               rq1.flags |= FUTEX_SHARED_FLAG * fshared;
160319 +               rq2.flags |= FUTEX_SHARED_FLAG * fshared;
160320 +       } else if (!fshared) {
160321                 futex_flag = FUTEX_PRIVATE_FLAG;
160322 +       }
160324         if (nrequeue > nthreads)
160325                 nrequeue = nthreads;
160327         printf("Run summary [PID %d]: Requeuing %d threads (from [%s] %p to %p), "
160328                "%d at a time.\n\n",  getpid(), nthreads,
160329 -              fshared ? "shared":"private", &futex1, &futex2, nrequeue);
160330 +              fshared ? "shared":"private", &addr1, &addr2, nrequeue);
160332         init_stats(&requeued_stats);
160333         init_stats(&requeuetime_stats);
160334 @@ -176,11 +188,15 @@ int bench_futex_requeue(int argc, const char **argv)
160335                 gettimeofday(&start, NULL);
160336                 while (nrequeued < nthreads) {
160337                         /*
160338 -                        * Do not wakeup any tasks blocked on futex1, allowing
160339 +                        * Do not wakeup any tasks blocked on addr1, allowing
160340                          * us to really measure futex_wait functionality.
160341                          */
160342 -                       nrequeued += futex_cmp_requeue(&futex1, 0, &futex2, 0,
160343 -                                                      nrequeue, futex_flag);
160344 +                       if (!futex2)
160345 +                               nrequeued += futex_cmp_requeue(&addr1, 0, &addr2,
160346 +                                                       0, nrequeue, futex_flag);
160347 +                       else
160348 +                               nrequeued += futex2_requeue(&rq1, &rq2,
160349 +                                                       0, nrequeue, 0, 0);
160350                 }
160352                 gettimeofday(&end, NULL);
160353 @@ -194,8 +210,12 @@ int bench_futex_requeue(int argc, const char **argv)
160354                                j + 1, nrequeued, nthreads, runtime.tv_usec / (double)USEC_PER_MSEC);
160355                 }
160357 -               /* everybody should be blocked on futex2, wake'em up */
160358 -               nrequeued = futex_wake(&futex2, nrequeued, futex_flag);
160359 +               /* everybody should be blocked on addr2, wake'em up */
160360 +               if (!futex2)
160361 +                       nrequeued = futex_wake(&addr2, nrequeued, futex_flag);
160362 +               else
160363 +                       nrequeued = futex2_wake(&addr2, nrequeued, futex_flag);
160365                 if (nthreads != nrequeued)
160366                         warnx("couldn't wakeup all tasks (%d/%d)", nrequeued, nthreads);
160368 @@ -220,3 +240,14 @@ int bench_futex_requeue(int argc, const char **argv)
160369         usage_with_options(bench_futex_requeue_usage, options);
160370         exit(EXIT_FAILURE);
160373 +int bench_futex_requeue(int argc, const char **argv)
160375 +       return __bench_futex_requeue(argc, argv);
160378 +int bench_futex2_requeue(int argc, const char **argv)
160380 +       futex2 = true;
160381 +       return __bench_futex_requeue(argc, argv);
160383 diff --git a/tools/perf/bench/futex-wake-parallel.c b/tools/perf/bench/futex-wake-parallel.c
160384 index 6e6f5247e1fe..cac90fc0bfb3 100644
160385 --- a/tools/perf/bench/futex-wake-parallel.c
160386 +++ b/tools/perf/bench/futex-wake-parallel.c
160387 @@ -17,6 +17,12 @@ int bench_futex_wake_parallel(int argc __maybe_unused, const char **argv __maybe
160388         pr_err("%s: pthread_barrier_t unavailable, disabling this test...\n", __func__);
160389         return 0;
160392 +int bench_futex2_wake_parallel(int argc __maybe_unused, const char **argv __maybe_unused)
160394 +       pr_err("%s: pthread_barrier_t unavailable, disabling this test...\n", __func__);
160395 +       return 0;
160397  #else /* HAVE_PTHREAD_BARRIER */
160398  /* For the CLR_() macros */
160399  #include <string.h>
160400 @@ -47,7 +53,7 @@ static unsigned int nwakes = 1;
160401  static u_int32_t futex = 0;
160403  static pthread_t *blocked_worker;
160404 -static bool done = false, silent = false, fshared = false;
160405 +static bool done = false, silent = false, fshared = false, futex2 = false;
160406  static unsigned int nblocked_threads = 0, nwaking_threads = 0;
160407  static pthread_mutex_t thread_lock;
160408  static pthread_cond_t thread_parent, thread_worker;
160409 @@ -78,7 +84,11 @@ static void *waking_workerfn(void *arg)
160411         gettimeofday(&start, NULL);
160413 -       waker->nwoken = futex_wake(&futex, nwakes, futex_flag);
160414 +       if (!futex2)
160415 +               waker->nwoken = futex_wake(&futex, nwakes, futex_flag);
160416 +       else
160417 +               waker->nwoken = futex2_wake(&futex, nwakes, futex_flag);
160419         if (waker->nwoken != nwakes)
160420                 warnx("couldn't wakeup all tasks (%d/%d)",
160421                       waker->nwoken, nwakes);
160422 @@ -129,8 +139,13 @@ static void *blocked_workerfn(void *arg __maybe_unused)
160423         pthread_mutex_unlock(&thread_lock);
160425         while (1) { /* handle spurious wakeups */
160426 -               if (futex_wait(&futex, 0, NULL, futex_flag) != EINTR)
160427 -                       break;
160428 +               if (!futex2) {
160429 +                       if (futex_wait(&futex, 0, NULL, futex_flag) != EINTR)
160430 +                               break;
160431 +               } else {
160432 +                       if (futex2_wait(&futex, 0, futex_flag, NULL) != EINTR)
160433 +                               break;
160434 +               }
160435         }
160437         pthread_exit(NULL);
160438 @@ -217,7 +232,7 @@ static void toggle_done(int sig __maybe_unused,
160439         done = true;
160442 -int bench_futex_wake_parallel(int argc, const char **argv)
160443 +static int __bench_futex_wake_parallel(int argc, const char **argv)
160445         int ret = 0;
160446         unsigned int i, j;
160447 @@ -261,7 +276,9 @@ int bench_futex_wake_parallel(int argc, const char **argv)
160448         if (!blocked_worker)
160449                 err(EXIT_FAILURE, "calloc");
160451 -       if (!fshared)
160452 +       if (futex2)
160453 +               futex_flag = FUTEX_32 | (fshared * FUTEX_SHARED_FLAG);
160454 +       else if (!fshared)
160455                 futex_flag = FUTEX_PRIVATE_FLAG;
160457         printf("Run summary [PID %d]: blocking on %d threads (at [%s] "
160458 @@ -321,4 +338,16 @@ int bench_futex_wake_parallel(int argc, const char **argv)
160459         free(blocked_worker);
160460         return ret;
160463 +int bench_futex_wake_parallel(int argc, const char **argv)
160465 +       return __bench_futex_wake_parallel(argc, argv);
160468 +int bench_futex2_wake_parallel(int argc, const char **argv)
160470 +       futex2 = true;
160471 +       return __bench_futex_wake_parallel(argc, argv);
160474  #endif /* HAVE_PTHREAD_BARRIER */
160475 diff --git a/tools/perf/bench/futex-wake.c b/tools/perf/bench/futex-wake.c
160476 index 6d217868f53c..546d2818eed8 100644
160477 --- a/tools/perf/bench/futex-wake.c
160478 +++ b/tools/perf/bench/futex-wake.c
160479 @@ -38,7 +38,7 @@ static u_int32_t futex1 = 0;
160480  static unsigned int nwakes = 1;
160482  pthread_t *worker;
160483 -static bool done = false, silent = false, fshared = false;
160484 +static bool done = false, silent = false, fshared = false, futex2 = false;
160485  static pthread_mutex_t thread_lock;
160486  static pthread_cond_t thread_parent, thread_worker;
160487  static struct stats waketime_stats, wakeup_stats;
160488 @@ -68,8 +68,13 @@ static void *workerfn(void *arg __maybe_unused)
160489         pthread_mutex_unlock(&thread_lock);
160491         while (1) {
160492 -               if (futex_wait(&futex1, 0, NULL, futex_flag) != EINTR)
160493 -                       break;
160494 +               if (!futex2) {
160495 +                       if (futex_wait(&futex1, 0, NULL, futex_flag) != EINTR)
160496 +                               break;
160497 +               } else {
160498 +                       if (futex2_wait(&futex1, 0, futex_flag, NULL) != EINTR)
160499 +                               break;
160500 +               }
160501         }
160503         pthread_exit(NULL);
160504 @@ -117,7 +122,7 @@ static void toggle_done(int sig __maybe_unused,
160505         done = true;
160508 -int bench_futex_wake(int argc, const char **argv)
160509 +static int __bench_futex_wake(int argc, const char **argv)
160511         int ret = 0;
160512         unsigned int i, j;
160513 @@ -147,7 +152,9 @@ int bench_futex_wake(int argc, const char **argv)
160514         if (!worker)
160515                 err(EXIT_FAILURE, "calloc");
160517 -       if (!fshared)
160518 +       if (futex2)
160519 +               futex_flag = FUTEX_32 | (fshared * FUTEX_SHARED_FLAG);
160520 +       else if (!fshared)
160521                 futex_flag = FUTEX_PRIVATE_FLAG;
160523         printf("Run summary [PID %d]: blocking on %d threads (at [%s] futex %p), "
160524 @@ -179,9 +186,14 @@ int bench_futex_wake(int argc, const char **argv)
160526                 /* Ok, all threads are patiently blocked, start waking folks up */
160527                 gettimeofday(&start, NULL);
160528 -               while (nwoken != nthreads)
160529 -                       nwoken += futex_wake(&futex1, nwakes, futex_flag);
160530 +               while (nwoken != nthreads) {
160531 +                       if (!futex2)
160532 +                               nwoken += futex_wake(&futex1, nwakes, futex_flag);
160533 +                       else
160534 +                               nwoken += futex2_wake(&futex1, nwakes, futex_flag);
160535 +               }
160536                 gettimeofday(&end, NULL);
160538                 timersub(&end, &start, &runtime);
160540                 update_stats(&wakeup_stats, nwoken);
160541 @@ -211,3 +223,14 @@ int bench_futex_wake(int argc, const char **argv)
160542         free(worker);
160543         return ret;
160546 +int bench_futex_wake(int argc, const char **argv)
160548 +       return __bench_futex_wake(argc, argv);
160551 +int bench_futex2_wake(int argc, const char **argv)
160553 +       futex2 = true;
160554 +       return __bench_futex_wake(argc, argv);
160556 diff --git a/tools/perf/bench/futex.h b/tools/perf/bench/futex.h
160557 index 31b53cc7d5bc..6b2213cf3f64 100644
160558 --- a/tools/perf/bench/futex.h
160559 +++ b/tools/perf/bench/futex.h
160560 @@ -86,4 +86,51 @@ futex_cmp_requeue(u_int32_t *uaddr, u_int32_t val, u_int32_t *uaddr2, int nr_wak
160561         return futex(uaddr, FUTEX_CMP_REQUEUE, nr_wake, nr_requeue, uaddr2,
160562                  val, opflags);
160566 + * futex2_wait - Wait at uaddr if *uaddr == val, until timo.
160567 + * @uaddr: User address to wait for
160568 + * @val:   Expected value at uaddr
160569 + * @flags: Operation options
160570 + * @timo:  Optional timeout
160572 + * Return: 0 on success, error code otherwise
160573 + */
160574 +static inline int futex2_wait(volatile void *uaddr, unsigned long val,
160575 +                             unsigned long flags, struct timespec *timo)
160577 +       return syscall(__NR_futex_wait, uaddr, val, flags, timo);
160581 + * futex2_wake - Wake a number of waiters waiting at uaddr
160582 + * @uaddr: Address to wake
160583 + * @nr:    Number of waiters to wake
160584 + * @flags: Operation options
160586 + * Return: number of woken futexes
160587 + */
160588 +static inline int futex2_wake(volatile void *uaddr, unsigned int nr, unsigned long flags)
160590 +       return syscall(__NR_futex_wake, uaddr, nr, flags);
160594 + * futex2_requeue - Requeue waiters from one address to another
160595 + * @uaddr1:     Address where waiters are currently waiting
160596 + * @uaddr2:     New address to wait at
160597 + * @nr_wake:    Number of waiters at uaddr1 to be woken
160598 + * @nr_requeue: After waking nr_wake, number of waiters to be requeued
160599 + * @cmpval:     Expected value at uaddr1
160600 + * @flags:      Operation options
160602 + * Return: number of woken futexes plus futexes requeued at uaddr1
160603 + */
160604 +static inline int futex2_requeue(volatile struct futex_requeue *uaddr1,
160605 +                                volatile struct futex_requeue *uaddr2,
160606 +                                unsigned int nr_wake, unsigned int nr_requeue,
160607 +                                unsigned int cmpval, unsigned long flags)
160609 +       return syscall(__NR_futex_requeue, uaddr1, uaddr2, nr_wake, nr_requeue, cmpval, flags);
160611  #endif /* _FUTEX_H */
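For orientation, a minimal sketch of how the futex2_wait()/futex2_wake() wrappers above would be exercised. This assumes a kernel carrying the futex2 series, whose uapi headers supply __NR_futex_wait, __NR_futex_wake and FUTEX_32; the wrapper definitions are the ones added above.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static volatile unsigned int fword = 0;

static void *waiter(void *arg)
{
        /* Sleep until fword is no longer 0 or a wake arrives. */
        if (futex2_wait(&fword, 0, FUTEX_32, NULL))
                perror("futex2_wait");
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, waiter, NULL);
        sleep(1);                          /* let the waiter block */
        futex2_wake(&fword, 1, FUTEX_32);  /* wake exactly one waiter */
        pthread_join(t, NULL);
        return 0;
}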
160612 diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
160613 index 62a7b7420a44..e41a95ad2db6 100644
160614 --- a/tools/perf/builtin-bench.c
160615 +++ b/tools/perf/builtin-bench.c
160616 @@ -12,10 +12,11 @@
160617   *
160618   *  sched ... scheduler and IPC performance
160619   *  syscall ... System call performance
160620 - *  mem   ... memory access performance
160621 - *  numa  ... NUMA scheduling and MM performance
160622 - *  futex ... Futex performance
160623 - *  epoll ... Event poll performance
160624 + *  mem    ... memory access performance
160625 + *  numa   ... NUMA scheduling and MM performance
160626 + *  futex  ... Futex performance
160627 + *  futex2 ... Futex2 performance
160628 + *  epoll  ... Event poll performance
160629   */
160630  #include <subcmd/parse-options.h>
160631  #include "builtin.h"
160632 @@ -75,6 +76,14 @@ static struct bench futex_benchmarks[] = {
160633         { NULL,         NULL,                                           NULL                    }
160636 +static struct bench futex2_benchmarks[] = {
160637 +       { "hash",          "Benchmark for futex2 hash table",            bench_futex2_hash      },
160638 +       { "wake",          "Benchmark for futex2 wake calls",            bench_futex2_wake      },
160639 +       { "wake-parallel", "Benchmark for parallel futex2 wake calls",   bench_futex2_wake_parallel },
160640 +       { "requeue",       "Benchmark for futex2 requeue calls",         bench_futex2_requeue   },
160641 +       { NULL,         NULL,                                           NULL                    }
160644  #ifdef HAVE_EVENTFD_SUPPORT
160645  static struct bench epoll_benchmarks[] = {
160646         { "wait",       "Benchmark epoll concurrent epoll_waits",       bench_epoll_wait        },
160647 @@ -105,6 +114,7 @@ static struct collection collections[] = {
160648         { "numa",       "NUMA scheduling and MM benchmarks",            numa_benchmarks         },
160649  #endif
160650         {"futex",       "Futex stressing benchmarks",                   futex_benchmarks        },
160651 +       {"futex2",      "Futex2 stressing benchmarks",                  futex2_benchmarks        },
160652  #ifdef HAVE_EVENTFD_SUPPORT
160653         {"epoll",       "Epoll stressing benchmarks",                   epoll_benchmarks        },
160654  #endif
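With the futex2 collection registered, the new entries become reachable through perf bench's table-driven dispatch; for example "perf bench futex2 wake" ends up in bench_futex2_wake(). A simplified sketch of that lookup, assuming NULL-terminated tables like the ones above (the real code also handles the "all" pseudo-benchmark and option parsing):

#include <string.h>

/* Sketch only: walk the collections, match the subsystem and benchmark
 * names, and invoke the benchmark's entry point. */
static int run_bench(struct collection *colls, const char *subsys,
                     const char *bench, int argc, const char **argv)
{
        for (struct collection *c = colls; c->name; c++) {
                if (strcmp(c->name, subsys))
                        continue;
                for (struct bench *b = c->benchmarks; b->name; b++)
                        if (!strcmp(b->name, bench))
                                return b->fn(argc, argv);
        }
        return -1;      /* unknown subsystem or benchmark */
}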
160655 diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/cache.json b/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
160656 index 4ea7ec4f496e..008f1683e540 100644
160657 --- a/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
160658 +++ b/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
160659 @@ -275,7 +275,7 @@
160660    {
160661      "EventName": "l2_pf_hit_l2",
160662      "EventCode": "0x70",
160663 -    "BriefDescription": "L2 prefetch hit in L2.",
160664 +    "BriefDescription": "L2 prefetch hit in L2. Use l2_cache_hits_from_l2_hwpf instead.",
160665      "UMask": "0xff"
160666    },
160667    {
160668 diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json b/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json
160669 index 2cfe2d2f3bfd..3c954543d1ae 100644
160670 --- a/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json
160671 +++ b/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json
160672 @@ -79,10 +79,10 @@
160673      "UMask": "0x70"
160674    },
160675    {
160676 -    "MetricName": "l2_cache_hits_from_l2_hwpf",
160677 +    "EventName": "l2_cache_hits_from_l2_hwpf",
160678 +    "EventCode": "0x70",
160679      "BriefDescription": "L2 Cache Hits from L2 HWPF",
160680 -    "MetricExpr": "l2_pf_hit_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
160681 -    "MetricGroup": "l2_cache"
160682 +    "UMask": "0xff"
160683    },
160684    {
160685      "EventName": "l3_accesses",
160686 diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/cache.json b/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
160687 index f61b982f83ca..8ba84a48188d 100644
160688 --- a/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
160689 +++ b/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
160690 @@ -205,7 +205,7 @@
160691    {
160692      "EventName": "l2_pf_hit_l2",
160693      "EventCode": "0x70",
160694 -    "BriefDescription": "L2 prefetch hit in L2.",
160695 +    "BriefDescription": "L2 prefetch hit in L2. Use l2_cache_hits_from_l2_hwpf instead.",
160696      "UMask": "0xff"
160697    },
160698    {
160699 diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json b/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json
160700 index 2ef91e25e661..1c624cee9ef4 100644
160701 --- a/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json
160702 +++ b/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json
160703 @@ -79,10 +79,10 @@
160704      "UMask": "0x70"
160705    },
160706    {
160707 -    "MetricName": "l2_cache_hits_from_l2_hwpf",
160708 +    "EventName": "l2_cache_hits_from_l2_hwpf",
160709 +    "EventCode": "0x70",
160710      "BriefDescription": "L2 Cache Hits from L2 HWPF",
160711 -    "MetricExpr": "l2_pf_hit_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
160712 -    "MetricGroup": "l2_cache"
160713 +    "UMask": "0xff"
160714    },
160715    {
160716      "EventName": "l3_accesses",
160717 diff --git a/tools/perf/trace/beauty/fsconfig.sh b/tools/perf/trace/beauty/fsconfig.sh
160718 index 83fb24df05c9..bc6ef7bb7a5f 100755
160719 --- a/tools/perf/trace/beauty/fsconfig.sh
160720 +++ b/tools/perf/trace/beauty/fsconfig.sh
160721 @@ -10,8 +10,7 @@ fi
160722  linux_mount=${linux_header_dir}/mount.h
160724  printf "static const char *fsconfig_cmds[] = {\n"
160725 -regex='^[[:space:]]*+FSCONFIG_([[:alnum:]_]+)[[:space:]]*=[[:space:]]*([[:digit:]]+)[[:space:]]*,[[:space:]]*.*'
160726 -egrep $regex ${linux_mount} | \
160727 -       sed -r "s/$regex/\2 \1/g"       | \
160728 -       xargs printf "\t[%s] = \"%s\",\n"
160729 +ms='[[:space:]]*'
160730 +sed -nr "s/^${ms}FSCONFIG_([[:alnum:]_]+)${ms}=${ms}([[:digit:]]+)${ms},.*/\t[\2] = \"\1\",/p" \
160731 +       ${linux_mount}
160732  printf "};\n"
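The rewrite replaces the egrep|sed|xargs pipeline with a single sed invocation that prints the array entries directly. As an illustration, with the FSCONFIG_* enum from the mainline <linux/mount.h> of this era, the generated table would look roughly like:

static const char *fsconfig_cmds[] = {
        [0] = "SET_FLAG",
        [1] = "SET_STRING",
        [2] = "SET_BINARY",
        [3] = "SET_PATH",
        [4] = "SET_PATH_EMPTY",
        [5] = "SET_FD",
        [6] = "CMD_CREATE",
        [7] = "CMD_RECONFIGURE",
};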
160733 diff --git a/tools/perf/util/Build b/tools/perf/util/Build
160734 index e3e12f9d4733..5a296ac69415 100644
160735 --- a/tools/perf/util/Build
160736 +++ b/tools/perf/util/Build
160737 @@ -141,7 +141,14 @@ perf-$(CONFIG_LIBELF) += symbol-elf.o
160738  perf-$(CONFIG_LIBELF) += probe-file.o
160739  perf-$(CONFIG_LIBELF) += probe-event.o
160741 +ifdef CONFIG_LIBBPF_DYNAMIC
160742 +  hashmap := 1
160743 +endif
160744  ifndef CONFIG_LIBBPF
160745 +  hashmap := 1
160746 +endif
160748 +ifdef hashmap
160749  perf-y += hashmap.o
160750  endif
160752 diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
160753 index 9760d8e7b386..917a9c707371 100644
160754 --- a/tools/perf/util/jitdump.c
160755 +++ b/tools/perf/util/jitdump.c
160756 @@ -396,21 +396,31 @@ static pid_t jr_entry_tid(struct jit_buf_desc *jd, union jr_entry *jr)
160758  static uint64_t convert_timestamp(struct jit_buf_desc *jd, uint64_t timestamp)
160760 -       struct perf_tsc_conversion tc;
160761 +       struct perf_tsc_conversion tc = { .time_shift = 0, };
160762 +       struct perf_record_time_conv *time_conv = &jd->session->time_conv;
160764         if (!jd->use_arch_timestamp)
160765                 return timestamp;
160767 -       tc.time_shift          = jd->session->time_conv.time_shift;
160768 -       tc.time_mult           = jd->session->time_conv.time_mult;
160769 -       tc.time_zero           = jd->session->time_conv.time_zero;
160770 -       tc.time_cycles         = jd->session->time_conv.time_cycles;
160771 -       tc.time_mask           = jd->session->time_conv.time_mask;
160772 -       tc.cap_user_time_zero  = jd->session->time_conv.cap_user_time_zero;
160773 -       tc.cap_user_time_short = jd->session->time_conv.cap_user_time_short;
160774 +       tc.time_shift = time_conv->time_shift;
160775 +       tc.time_mult  = time_conv->time_mult;
160776 +       tc.time_zero  = time_conv->time_zero;
160778 -       if (!tc.cap_user_time_zero)
160779 -               return 0;
160780 +       /*
160781 +        * The TIME_CONV event was extended with the fields starting at
160782 +        * "time_cycles" when cap_user_time_short support was added. For
160783 +        * backward compatibility, check the event size and assign these
160784 +        * extended fields only if they are contained in the event.
160785 +        */
160786 +       if (event_contains(*time_conv, time_cycles)) {
160787 +               tc.time_cycles         = time_conv->time_cycles;
160788 +               tc.time_mask           = time_conv->time_mask;
160789 +               tc.cap_user_time_zero  = time_conv->cap_user_time_zero;
160790 +               tc.cap_user_time_short = time_conv->cap_user_time_short;
160792 +               if (!tc.cap_user_time_zero)
160793 +                       return 0;
160794 +       }
160796         return tsc_to_perf_time(timestamp, &tc);
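The backward-compatibility check pivots on event_contains(). The idea is size-based detection: a PERF_RECORD_TIME_CONV written by an older perf is shorter, so a field is trusted only if the record's self-reported size covers it. A plausible sketch of such a macro (the actual perf definition may differ in detail):

#include <stddef.h>

/*
 * A field is present iff the record is large enough to contain it.
 * Hypothetical re-derivation of the event_contains() idea; perf's
 * real macro lives in its util headers.
 */
#define record_contains(rec, field) \
        ((rec).header.size >= \
         offsetof(typeof(rec), field) + sizeof((rec).field))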
160798 diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
160799 index 859832a82496..e9d4e6f4bdf3 100644
160800 --- a/tools/perf/util/session.c
160801 +++ b/tools/perf/util/session.c
160802 @@ -949,6 +949,19 @@ static void perf_event__stat_round_swap(union perf_event *event,
160803         event->stat_round.time = bswap_64(event->stat_round.time);
160806 +static void perf_event__time_conv_swap(union perf_event *event,
160807 +                                      bool sample_id_all __maybe_unused)
160809 +       event->time_conv.time_shift = bswap_64(event->time_conv.time_shift);
160810 +       event->time_conv.time_mult  = bswap_64(event->time_conv.time_mult);
160811 +       event->time_conv.time_zero  = bswap_64(event->time_conv.time_zero);
160813 +       if (event_contains(event->time_conv, time_cycles)) {
160814 +               event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles);
160815 +               event->time_conv.time_mask = bswap_64(event->time_conv.time_mask);
160816 +       }
160819  typedef void (*perf_event__swap_op)(union perf_event *event,
160820                                     bool sample_id_all);
160822 @@ -985,7 +998,7 @@ static perf_event__swap_op perf_event__swap_ops[] = {
160823         [PERF_RECORD_STAT]                = perf_event__stat_swap,
160824         [PERF_RECORD_STAT_ROUND]          = perf_event__stat_round_swap,
160825         [PERF_RECORD_EVENT_UPDATE]        = perf_event__event_update_swap,
160826 -       [PERF_RECORD_TIME_CONV]           = perf_event__all64_swap,
160827 +       [PERF_RECORD_TIME_CONV]           = perf_event__time_conv_swap,
160828         [PERF_RECORD_HEADER_MAX]          = NULL,
160831 diff --git a/tools/perf/util/symbol_fprintf.c b/tools/perf/util/symbol_fprintf.c
160832 index 35c936ce33ef..2664fb65e47a 100644
160833 --- a/tools/perf/util/symbol_fprintf.c
160834 +++ b/tools/perf/util/symbol_fprintf.c
160835 @@ -68,7 +68,7 @@ size_t dso__fprintf_symbols_by_name(struct dso *dso,
160837         for (nd = rb_first_cached(&dso->symbol_names); nd; nd = rb_next(nd)) {
160838                 pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
160839 -               fprintf(fp, "%s\n", pos->sym.name);
160840 +               ret += fprintf(fp, "%s\n", pos->sym.name);
160841         }
160843         return ret;
160844 diff --git a/tools/power/x86/intel-speed-select/isst-display.c b/tools/power/x86/intel-speed-select/isst-display.c
160845 index 8e54ce47648e..3bf1820c0da1 100644
160846 --- a/tools/power/x86/intel-speed-select/isst-display.c
160847 +++ b/tools/power/x86/intel-speed-select/isst-display.c
160848 @@ -25,10 +25,14 @@ static void printcpulist(int str_len, char *str, int mask_size,
160849                         index = snprintf(&str[curr_index],
160850                                          str_len - curr_index, ",");
160851                         curr_index += index;
160852 +                       if (curr_index >= str_len)
160853 +                               break;
160854                 }
160855                 index = snprintf(&str[curr_index], str_len - curr_index, "%d",
160856                                  i);
160857                 curr_index += index;
160858 +               if (curr_index >= str_len)
160859 +                       break;
160860                 first = 0;
160861         }
160863 @@ -64,10 +68,14 @@ static void printcpumask(int str_len, char *str, int mask_size,
160864                 index = snprintf(&str[curr_index], str_len - curr_index, "%08x",
160865                                  mask[i]);
160866                 curr_index += index;
160867 +               if (curr_index >= str_len)
160868 +                       break;
160869                 if (i) {
160870                         strncat(&str[curr_index], ",", str_len - curr_index);
160871                         curr_index++;
160872                 }
160873 +               if (curr_index >= str_len)
160874 +                       break;
160875         }
160877         free(mask);
160878 @@ -185,7 +193,7 @@ static void _isst_pbf_display_information(int cpu, FILE *outf, int level,
160879                                           int disp_level)
160881         char header[256];
160882 -       char value[256];
160883 +       char value[512];
160885         snprintf(header, sizeof(header), "speed-select-base-freq-properties");
160886         format_and_print(outf, disp_level, header, NULL);
160887 @@ -349,7 +357,7 @@ void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
160888                                    struct isst_pkg_ctdp *pkg_dev)
160890         char header[256];
160891 -       char value[256];
160892 +       char value[512];
160893         static int level;
160894         int i;
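The added curr_index >= str_len checks matter because snprintf() returns the number of characters that would have been written had the buffer been large enough; once truncation happens, curr_index can run past the buffer and the next str_len - curr_index underflows. A standalone sketch of the guarded-append pattern, with hypothetical data:

#include <stdio.h>

static void print_list(int str_len, char *str, const int *vals, int n)
{
        int curr_index = 0;

        for (int i = 0; i < n; i++) {
                int index = snprintf(&str[curr_index],
                                     str_len - curr_index,
                                     i ? ",%d" : "%d", vals[i]);
                curr_index += index;
                if (curr_index >= str_len)
                        break;  /* truncated: stop before underflowing */
        }
}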
160896 diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
160897 index a7c4f0772e53..002697021474 100644
160898 --- a/tools/power/x86/turbostat/turbostat.c
160899 +++ b/tools/power/x86/turbostat/turbostat.c
160900 @@ -291,13 +291,16 @@ struct msr_sum_array {
160901  /* The percpu MSR sum array.*/
160902  struct msr_sum_array *per_cpu_msr_sum;
160904 -int idx_to_offset(int idx)
160905 +off_t idx_to_offset(int idx)
160907 -       int offset;
160908 +       off_t offset;
160910         switch (idx) {
160911         case IDX_PKG_ENERGY:
160912 -               offset = MSR_PKG_ENERGY_STATUS;
160913 +               if (do_rapl & RAPL_AMD_F17H)
160914 +                       offset = MSR_PKG_ENERGY_STAT;
160915 +               else
160916 +                       offset = MSR_PKG_ENERGY_STATUS;
160917                 break;
160918         case IDX_DRAM_ENERGY:
160919                 offset = MSR_DRAM_ENERGY_STATUS;
160920 @@ -320,12 +323,13 @@ int idx_to_offset(int idx)
160921         return offset;
160924 -int offset_to_idx(int offset)
160925 +int offset_to_idx(off_t offset)
160927         int idx;
160929         switch (offset) {
160930         case MSR_PKG_ENERGY_STATUS:
160931 +       case MSR_PKG_ENERGY_STAT:
160932                 idx = IDX_PKG_ENERGY;
160933                 break;
160934         case MSR_DRAM_ENERGY_STATUS:
160935 @@ -353,7 +357,7 @@ int idx_valid(int idx)
160937         switch (idx) {
160938         case IDX_PKG_ENERGY:
160939 -               return do_rapl & RAPL_PKG;
160940 +               return do_rapl & (RAPL_PKG | RAPL_AMD_F17H);
160941         case IDX_DRAM_ENERGY:
160942                 return do_rapl & RAPL_DRAM;
160943         case IDX_PP0_ENERGY:
160944 @@ -3272,7 +3276,7 @@ static int update_msr_sum(struct thread_data *t, struct core_data *c, struct pkg
160946         for (i = IDX_PKG_ENERGY; i < IDX_COUNT; i++) {
160947                 unsigned long long msr_cur, msr_last;
160948 -               int offset;
160949 +               off_t offset;
160951                 if (!idx_valid(i))
160952                         continue;
160953 @@ -3281,7 +3285,8 @@ static int update_msr_sum(struct thread_data *t, struct core_data *c, struct pkg
160954                         continue;
160955                 ret = get_msr(cpu, offset, &msr_cur);
160956                 if (ret) {
160957 -                       fprintf(outf, "Can not update msr(0x%x)\n", offset);
160958 +                       fprintf(outf, "Cannot update msr(0x%llx)\n",
160959 +                               (unsigned long long)offset);
160960                         continue;
160961                 }
160963 @@ -4817,33 +4822,12 @@ double discover_bclk(unsigned int family, unsigned int model)
160964   * below this value, including the Digital Thermal Sensor (DTS),
160965   * Package Thermal Management Sensor (PTM), and thermal event thresholds.
160966   */
160967 -int read_tcc_activation_temp()
160968 +int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
160970         unsigned long long msr;
160971 -       unsigned int tcc, target_c, offset_c;
160973 -       /* Temperature Target MSR is Nehalem and newer only */
160974 -       if (!do_nhm_platform_info)
160975 -               return 0;
160977 -       if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
160978 -               return 0;
160980 -       target_c = (msr >> 16) & 0xFF;
160982 -       offset_c = (msr >> 24) & 0xF;
160984 -       tcc = target_c - offset_c;
160986 -       if (!quiet)
160987 -               fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C) (%d default - %d offset)\n",
160988 -                       base_cpu, msr, tcc, target_c, offset_c);
160990 -       return tcc;
160992 +       unsigned int target_c_local;
160993 +       int cpu;
160995 -int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
160997         /* tcc_activation_temp is used only for dts or ptm */
160998         if (!(do_dts || do_ptm))
160999                 return 0;
161000 @@ -4852,18 +4836,43 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk
161001         if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
161002                 return 0;
161004 +       cpu = t->cpu_id;
161005 +       if (cpu_migrate(cpu)) {
161006 +               fprintf(outf, "Could not migrate to CPU %d\n", cpu);
161007 +               return -1;
161008 +       }
161010         if (tcc_activation_temp_override != 0) {
161011                 tcc_activation_temp = tcc_activation_temp_override;
161012 -               fprintf(outf, "Using cmdline TCC Target (%d C)\n", tcc_activation_temp);
161013 +               fprintf(outf, "cpu%d: Using cmdline TCC Target (%d C)\n",
161014 +                       cpu, tcc_activation_temp);
161015                 return 0;
161016         }
161018 -       tcc_activation_temp = read_tcc_activation_temp();
161019 -       if (tcc_activation_temp)
161020 -               return 0;
161021 +       /* Temperature Target MSR is Nehalem and newer only */
161022 +       if (!do_nhm_platform_info)
161023 +               goto guess;
161025 +       if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
161026 +               goto guess;
161028 +       target_c_local = (msr >> 16) & 0xFF;
161030 +       if (!quiet)
161031 +               fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
161032 +                       cpu, msr, target_c_local);
161034 +       if (!target_c_local)
161035 +               goto guess;
161037 +       tcc_activation_temp = target_c_local;
161039 +       return 0;
161041 +guess:
161042         tcc_activation_temp = TJMAX_DEFAULT;
161043 -       fprintf(outf, "Guessing tjMax %d C, Please use -T to specify\n", tcc_activation_temp);
161044 +       fprintf(outf, "cpu%d: Guessing tjMax %d C, Please use -T to specify\n",
161045 +               cpu, tcc_activation_temp);
161047         return 0;
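The MSR decode folded into set_temperature_target() takes the TCC activation temperature from bits 23:16 of MSR_IA32_TEMPERATURE_TARGET, as the new target_c_local assignment shows. A tiny worked example of that extraction (the register image is hypothetical):

#include <stdio.h>

/* Bits 23:16 of MSR_IA32_TEMPERATURE_TARGET: TCC activation temp, C. */
static unsigned int tcc_from_msr(unsigned long long msr)
{
        return (msr >> 16) & 0xFF;
}

int main(void)
{
        printf("tjMax = %u C\n", tcc_from_msr(0x00640000ULL)); /* 100 C */
        return 0;
}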
161049 diff --git a/tools/spi/Makefile b/tools/spi/Makefile
161050 index ada881afb489..0aa6dbd31fb8 100644
161051 --- a/tools/spi/Makefile
161052 +++ b/tools/spi/Makefile
161053 @@ -25,11 +25,12 @@ include $(srctree)/tools/build/Makefile.include
161055  # We need the following to be outside of kernel tree
161057 -$(OUTPUT)include/linux/spi/spidev.h: ../../include/uapi/linux/spi/spidev.h
161058 +$(OUTPUT)include/linux/spi: ../../include/uapi/linux/spi
161059         mkdir -p $(OUTPUT)include/linux/spi 2>&1 || true
161060         ln -sf $(CURDIR)/../../include/uapi/linux/spi/spidev.h $@
161061 +       ln -sf $(CURDIR)/../../include/uapi/linux/spi/spi.h $@
161063 -prepare: $(OUTPUT)include/linux/spi/spidev.h
161064 +prepare: $(OUTPUT)include/linux/spi
161067  # spidev_test
161068 diff --git a/tools/testing/selftests/arm64/mte/Makefile b/tools/testing/selftests/arm64/mte/Makefile
161069 index 0b3af552632a..df15d44aeb8d 100644
161070 --- a/tools/testing/selftests/arm64/mte/Makefile
161071 +++ b/tools/testing/selftests/arm64/mte/Makefile
161072 @@ -6,9 +6,7 @@ SRCS := $(filter-out mte_common_util.c,$(wildcard *.c))
161073  PROGS := $(patsubst %.c,%,$(SRCS))
161075  #Add mte compiler option
161076 -ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep gcc),)
161077  CFLAGS += -march=armv8.5-a+memtag
161078 -endif
161080  #check if the compiler works well
161081  mte_cc_support := $(shell if ($(CC) $(CFLAGS) -E -x c /dev/null -o /dev/null 2>&1) then echo "1"; fi)
161082 diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.c b/tools/testing/selftests/arm64/mte/mte_common_util.c
161083 index 39f8908988ea..70665ba88cbb 100644
161084 --- a/tools/testing/selftests/arm64/mte/mte_common_util.c
161085 +++ b/tools/testing/selftests/arm64/mte/mte_common_util.c
161086 @@ -278,22 +278,13 @@ int mte_switch_mode(int mte_option, unsigned long incl_mask)
161087         return 0;
161090 -#define ID_AA64PFR1_MTE_SHIFT          8
161091 -#define ID_AA64PFR1_MTE                        2
161093  int mte_default_setup(void)
161095 -       unsigned long hwcaps = getauxval(AT_HWCAP);
161096 +       unsigned long hwcaps2 = getauxval(AT_HWCAP2);
161097         unsigned long en = 0;
161098         int ret;
161100 -       if (!(hwcaps & HWCAP_CPUID)) {
161101 -               ksft_print_msg("FAIL: CPUID registers unavailable\n");
161102 -               return KSFT_FAIL;
161103 -       }
161104 -       /* Read ID_AA64PFR1_EL1 register */
161105 -       asm volatile("mrs %0, id_aa64pfr1_el1" : "=r"(hwcaps) : : "memory");
161106 -       if (((hwcaps >> ID_AA64PFR1_MTE_SHIFT) & MT_TAG_MASK) != ID_AA64PFR1_MTE) {
161107 +       if (!(hwcaps2 & HWCAP2_MTE)) {
161108                 ksft_print_msg("FAIL: MTE features unavailable\n");
161109                 return KSFT_SKIP;
161110         }
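The test now detects MTE via the AT_HWCAP2 auxiliary vector instead of reading ID_AA64PFR1_EL1 with mrs, which needed the CPUID emulation hwcap. A minimal sketch of the same probe (arm64 only; assumes HWCAP2_MTE is visible through <asm/hwcap.h>):

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>  /* HWCAP2_MTE; arm64 headers */

int main(void)
{
        /* getauxval() works unprivileged, no trapped register read. */
        puts(getauxval(AT_HWCAP2) & HWCAP2_MTE ?
             "MTE supported" : "MTE not supported");
        return 0;
}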
161111 diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
161112 index 044bfdcf5b74..76a325862119 100644
161113 --- a/tools/testing/selftests/bpf/Makefile
161114 +++ b/tools/testing/selftests/bpf/Makefile
161115 @@ -221,7 +221,7 @@ $(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile)                \
161116                     DESTDIR=$(HOST_SCRATCH_DIR)/ prefix= all install_headers
161117  endif
161119 -$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) | $(BPFTOOL) $(INCLUDE_DIR)
161120 +$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) | $(INCLUDE_DIR)
161121  ifeq ($(VMLINUX_H),)
161122         $(call msg,GEN,,$@)
161123         $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
161124 @@ -346,7 +346,8 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o:                         \
161126  $(TRUNNER_BPF_SKELS): $(TRUNNER_OUTPUT)/%.skel.h:                      \
161127                       $(TRUNNER_OUTPUT)/%.o                             \
161128 -                     | $(BPFTOOL) $(TRUNNER_OUTPUT)
161129 +                     $(BPFTOOL)                                        \
161130 +                     | $(TRUNNER_OUTPUT)
161131         $$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
161132         $(Q)$$(BPFTOOL) gen skeleton $$< > $$@
161133  endif
161134 diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
161135 index 06eb956ff7bb..4b517d76257d 100644
161136 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
161137 +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
161138 @@ -210,11 +210,6 @@ static int duration = 0;
161139         .bpf_obj_file = "test_core_reloc_existence.o",                  \
161140         .btf_src_file = "btf__core_reloc_" #name ".o"                   \
161142 -#define FIELD_EXISTS_ERR_CASE(name) {                                  \
161143 -       FIELD_EXISTS_CASE_COMMON(name),                                 \
161144 -       .fails = true,                                                  \
161147  #define BITFIELDS_CASE_COMMON(objfile, test_name_prefix,  name)                \
161148         .case_name = test_name_prefix#name,                             \
161149         .bpf_obj_file = objfile,                                        \
161150 @@ -222,7 +217,7 @@ static int duration = 0;
161152  #define BITFIELDS_CASE(name, ...) {                                    \
161153         BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o",     \
161154 -                             "direct:", name),                         \
161155 +                             "probed:", name),                         \
161156         .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__,     \
161157         .input_len = sizeof(struct core_reloc_##name),                  \
161158         .output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output)       \
161159 @@ -230,7 +225,7 @@ static int duration = 0;
161160         .output_len = sizeof(struct core_reloc_bitfields_output),       \
161161  }, {                                                                   \
161162         BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o",     \
161163 -                             "probed:", name),                         \
161164 +                             "direct:", name),                         \
161165         .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__,     \
161166         .input_len = sizeof(struct core_reloc_##name),                  \
161167         .output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output)       \
161168 @@ -550,8 +545,7 @@ static struct core_reloc_test_case test_cases[] = {
161169         ARRAYS_ERR_CASE(arrays___err_too_small),
161170         ARRAYS_ERR_CASE(arrays___err_too_shallow),
161171         ARRAYS_ERR_CASE(arrays___err_non_array),
161172 -       ARRAYS_ERR_CASE(arrays___err_wrong_val_type1),
161173 -       ARRAYS_ERR_CASE(arrays___err_wrong_val_type2),
161174 +       ARRAYS_ERR_CASE(arrays___err_wrong_val_type),
161175         ARRAYS_ERR_CASE(arrays___err_bad_zero_sz_arr),
161177         /* enum/ptr/int handling scenarios */
161178 @@ -642,13 +636,25 @@ static struct core_reloc_test_case test_cases[] = {
161179                 },
161180                 .output_len = sizeof(struct core_reloc_existence_output),
161181         },
161183 -       FIELD_EXISTS_ERR_CASE(existence__err_int_sz),
161184 -       FIELD_EXISTS_ERR_CASE(existence__err_int_type),
161185 -       FIELD_EXISTS_ERR_CASE(existence__err_int_kind),
161186 -       FIELD_EXISTS_ERR_CASE(existence__err_arr_kind),
161187 -       FIELD_EXISTS_ERR_CASE(existence__err_arr_value_type),
161188 -       FIELD_EXISTS_ERR_CASE(existence__err_struct_type),
161189 +       {
161190 +               FIELD_EXISTS_CASE_COMMON(existence___wrong_field_defs),
161191 +               .input = STRUCT_TO_CHAR_PTR(core_reloc_existence___wrong_field_defs) {
161192 +               },
161193 +               .input_len = sizeof(struct core_reloc_existence___wrong_field_defs),
161194 +               .output = STRUCT_TO_CHAR_PTR(core_reloc_existence_output) {
161195 +                       .a_exists = 0,
161196 +                       .b_exists = 0,
161197 +                       .c_exists = 0,
161198 +                       .arr_exists = 0,
161199 +                       .s_exists = 0,
161200 +                       .a_value = 0xff000001u,
161201 +                       .b_value = 0xff000002u,
161202 +                       .c_value = 0xff000003u,
161203 +                       .arr_value = 0xff000004u,
161204 +                       .s_value = 0xff000005u,
161205 +               },
161206 +               .output_len = sizeof(struct core_reloc_existence_output),
161207 +       },
161209         /* bitfield relocation checks */
161210         BITFIELDS_CASE(bitfields, {
161211 @@ -857,13 +863,20 @@ void test_core_reloc(void)
161212                           "prog '%s' not found\n", probe_name))
161213                         goto cleanup;
161216 +               if (test_case->btf_src_file) {
161217 +                       err = access(test_case->btf_src_file, R_OK);
161218 +                       if (!ASSERT_OK(err, "btf_src_file"))
161219 +                               goto cleanup;
161220 +               }
161222                 load_attr.obj = obj;
161223                 load_attr.log_level = 0;
161224                 load_attr.target_btf_path = test_case->btf_src_file;
161225                 err = bpf_object__load_xattr(&load_attr);
161226                 if (err) {
161227                         if (!test_case->fails)
161228 -                               CHECK(false, "obj_load", "failed to load prog '%s': %d\n", probe_name, err);
161229 +                               ASSERT_OK(err, "obj_load");
161230                         goto cleanup;
161231                 }
161233 @@ -902,10 +915,8 @@ void test_core_reloc(void)
161234                         goto cleanup;
161235                 }
161237 -               if (test_case->fails) {
161238 -                       CHECK(false, "obj_load_fail", "should fail to load prog '%s'\n", probe_name);
161239 +               if (!ASSERT_FALSE(test_case->fails, "obj_load_should_fail"))
161240                         goto cleanup;
161241 -               }
161243                 equal = memcmp(data->out, test_case->output,
161244                                test_case->output_len) == 0;
161245 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c
161246 deleted file mode 100644
161247 index dd0ffa518f36..000000000000
161248 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c
161249 +++ /dev/null
161250 @@ -1,3 +0,0 @@
161251 -#include "core_reloc_types.h"
161253 -void f(struct core_reloc_existence___err_wrong_arr_kind x) {}
161254 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c
161255 deleted file mode 100644
161256 index bc83372088ad..000000000000
161257 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c
161258 +++ /dev/null
161259 @@ -1,3 +0,0 @@
161260 -#include "core_reloc_types.h"
161262 -void f(struct core_reloc_existence___err_wrong_arr_value_type x) {}
161263 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c
161264 deleted file mode 100644
161265 index 917bec41be08..000000000000
161266 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c
161267 +++ /dev/null
161268 @@ -1,3 +0,0 @@
161269 -#include "core_reloc_types.h"
161271 -void f(struct core_reloc_existence___err_wrong_int_kind x) {}
161272 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c
161273 deleted file mode 100644
161274 index 6ec7e6ec1c91..000000000000
161275 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c
161276 +++ /dev/null
161277 @@ -1,3 +0,0 @@
161278 -#include "core_reloc_types.h"
161280 -void f(struct core_reloc_existence___err_wrong_int_sz x) {}
161281 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c
161282 deleted file mode 100644
161283 index 7bbcacf2b0d1..000000000000
161284 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c
161285 +++ /dev/null
161286 @@ -1,3 +0,0 @@
161287 -#include "core_reloc_types.h"
161289 -void f(struct core_reloc_existence___err_wrong_int_type x) {}
161290 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c
161291 deleted file mode 100644
161292 index f384dd38ec70..000000000000
161293 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c
161294 +++ /dev/null
161295 @@ -1,3 +0,0 @@
161296 -#include "core_reloc_types.h"
161298 -void f(struct core_reloc_existence___err_wrong_struct_type x) {}
161299 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c
161300 new file mode 100644
161301 index 000000000000..d14b496190c3
161302 --- /dev/null
161303 +++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c
161304 @@ -0,0 +1,3 @@
161305 +#include "core_reloc_types.h"
161307 +void f(struct core_reloc_existence___wrong_field_defs x) {}
161308 diff --git a/tools/testing/selftests/bpf/progs/core_reloc_types.h b/tools/testing/selftests/bpf/progs/core_reloc_types.h
161309 index 9a2850850121..664eea1013aa 100644
161310 --- a/tools/testing/selftests/bpf/progs/core_reloc_types.h
161311 +++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h
161312 @@ -700,27 +700,11 @@ struct core_reloc_existence___minimal {
161313         int a;
161316 -struct core_reloc_existence___err_wrong_int_sz {
161317 -       short a;
161320 -struct core_reloc_existence___err_wrong_int_type {
161321 +struct core_reloc_existence___wrong_field_defs {
161322 +       void *a;
161323         int b[1];
161326 -struct core_reloc_existence___err_wrong_int_kind {
161327         struct{ int x; } c;
161330 -struct core_reloc_existence___err_wrong_arr_kind {
161331         int arr;
161334 -struct core_reloc_existence___err_wrong_arr_value_type {
161335 -       short arr[1];
161338 -struct core_reloc_existence___err_wrong_struct_type {
161339         int s;
161342 diff --git a/tools/testing/selftests/bpf/verifier/array_access.c b/tools/testing/selftests/bpf/verifier/array_access.c
161343 index 1b138cd2b187..1b1c798e9248 100644
161344 --- a/tools/testing/selftests/bpf/verifier/array_access.c
161345 +++ b/tools/testing/selftests/bpf/verifier/array_access.c
161346 @@ -186,7 +186,7 @@
161347         },
161348         .fixup_map_hash_48b = { 3 },
161349         .errstr_unpriv = "R0 leaks addr",
161350 -       .errstr = "invalid access to map value, value_size=48 off=44 size=8",
161351 +       .errstr = "R0 unbounded memory access",
161352         .result_unpriv = REJECT,
161353         .result = REJECT,
161354         .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
161355 diff --git a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
161356 index 6f3a70df63bc..e00435753008 100644
161357 --- a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
161358 +++ b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
161359 @@ -120,12 +120,13 @@ __mirror_gre_test()
161360         sleep 5
161362         for ((i = 0; i < count; ++i)); do
161363 +               local sip=$(mirror_gre_ipv6_addr 1 $i)::1
161364                 local dip=$(mirror_gre_ipv6_addr 1 $i)::2
161365                 local htun=h3-gt6-$i
161366                 local message
161368                 icmp6_capture_install $htun
161369 -               mirror_test v$h1 "" $dip $htun 100 10
161370 +               mirror_test v$h1 $sip $dip $htun 100 10
161371                 icmp6_capture_uninstall $htun
161372         done
161374 diff --git a/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh
161375 index f813ffefc07e..65f43a7ce9c9 100644
161376 --- a/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh
161377 +++ b/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh
161378 @@ -55,10 +55,6 @@ port_test()
161379               | jq '.[][][] | select(.name=="physical_ports") |.["occ"]')
161381         [[ $occ -eq $max_ports ]]
161382 -       if [[ $should_fail -eq 0 ]]; then
161383 -               check_err $? "Mismatch ports number: Expected $max_ports, got $occ."
161384 -       else
161385 -               check_err_fail $should_fail $? "Reached more ports than expected"
161386 -       fi
161387 +       check_err_fail $should_fail $? "Attempt to create $max_ports ports (actual result $occ)"
161390 diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
161391 index b0cb1aaffdda..33ddd01689be 100644
161392 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
161393 +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
161394 @@ -507,8 +507,8 @@ do_red_test()
161395         check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected == 0."
161396         local diff=$((limit - backlog))
161397         pct=$((100 * diff / limit))
161398 -       ((0 <= pct && pct <= 5))
161399 -       check_err $? "backlog $backlog / $limit expected <= 5% distance"
161400 +       ((0 <= pct && pct <= 10))
161401 +       check_err $? "backlog $backlog / $limit expected <= 10% distance"
161402         log_test "TC $((vlan - 10)): RED backlog > limit"
161404         stop_traffic
161405 diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
161406 index cc0f07e72cf2..aa74be9f47c8 100644
161407 --- a/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
161408 +++ b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
161409 @@ -98,11 +98,7 @@ __tc_flower_test()
161410                         jq -r '[ .[] | select(.kind == "flower") |
161411                         .options | .in_hw ]' | jq .[] | wc -l)
161412         [[ $((offload_count - 1)) -eq $count ]]
161413 -       if [[ $should_fail -eq 0 ]]; then
161414 -               check_err $? "Offload mismatch"
161415 -       else
161416 -               check_err_fail $should_fail $? "Offload more than expacted"
161417 -       fi
161418 +       check_err_fail $should_fail $? "Attempt to offload $count rules (actual result $((offload_count - 1)))"
161421  tc_flower_test()
161422 diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile
161423 index cf69b2fcce59..dd61118df66e 100644
161424 --- a/tools/testing/selftests/exec/Makefile
161425 +++ b/tools/testing/selftests/exec/Makefile
161426 @@ -28,8 +28,8 @@ $(OUTPUT)/execveat.denatured: $(OUTPUT)/execveat
161427         cp $< $@
161428         chmod -x $@
161429  $(OUTPUT)/load_address_4096: load_address.c
161430 -       $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000 -pie $< -o $@
161431 +       $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000 -pie -static $< -o $@
161432  $(OUTPUT)/load_address_2097152: load_address.c
161433 -       $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x200000 -pie $< -o $@
161434 +       $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x200000 -pie -static $< -o $@
161435  $(OUTPUT)/load_address_16777216: load_address.c
161436 -       $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000000 -pie $< -o $@
161437 +       $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000000 -pie -static $< -o $@
161438 diff --git a/tools/testing/selftests/futex/functional/.gitignore b/tools/testing/selftests/futex/functional/.gitignore
161439 index 0efcd494daab..af7557e821da 100644
161440 --- a/tools/testing/selftests/futex/functional/.gitignore
161441 +++ b/tools/testing/selftests/futex/functional/.gitignore
161442 @@ -6,3 +6,6 @@ futex_wait_private_mapped_file
161443  futex_wait_timeout
161444  futex_wait_uninitialized_heap
161445  futex_wait_wouldblock
161446 +futex2_wait
161447 +futex2_waitv
161448 +futex2_requeue
161449 diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
161450 index 23207829ec75..3ccb9ea58ddd 100644
161451 --- a/tools/testing/selftests/futex/functional/Makefile
161452 +++ b/tools/testing/selftests/futex/functional/Makefile
161453 @@ -1,10 +1,11 @@
161454  # SPDX-License-Identifier: GPL-2.0
161455 -INCLUDES := -I../include -I../../
161456 +INCLUDES := -I../include -I../../ -I../../../../../usr/include/
161457  CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE -pthread $(INCLUDES)
161458  LDLIBS := -lpthread -lrt
161460  HEADERS := \
161461         ../include/futextest.h \
161462 +       ../include/futex2test.h \
161463         ../include/atomic.h \
161464         ../include/logging.h
161465  TEST_GEN_FILES := \
161466 @@ -14,7 +15,10 @@ TEST_GEN_FILES := \
161467         futex_requeue_pi_signal_restart \
161468         futex_requeue_pi_mismatched_ops \
161469         futex_wait_uninitialized_heap \
161470 -       futex_wait_private_mapped_file
161471 +       futex_wait_private_mapped_file \
161472 +       futex2_wait \
161473 +       futex2_waitv \
161474 +       futex2_requeue
161476  TEST_PROGS := run.sh
161478 diff --git a/tools/testing/selftests/futex/functional/futex2_requeue.c b/tools/testing/selftests/futex/functional/futex2_requeue.c
161479 new file mode 100644
161480 index 000000000000..1bc3704dc8c2
161481 --- /dev/null
161482 +++ b/tools/testing/selftests/futex/functional/futex2_requeue.c
161483 @@ -0,0 +1,164 @@
161484 +// SPDX-License-Identifier: GPL-2.0-or-later
161485 +/******************************************************************************
161487 + *   Copyright Collabora Ltd., 2021
161489 + * DESCRIPTION
161490 + *     Test requeue mechanism of futex2, using 32bit sized futexes.
161492 + * AUTHOR
161493 + *     André Almeida <andrealmeid@collabora.com>
161495 + * HISTORY
161496 + *      2021-Feb-5: Initial version by André <andrealmeid@collabora.com>
161498 + *****************************************************************************/
161500 +#include <errno.h>
161501 +#include <error.h>
161502 +#include <getopt.h>
161503 +#include <stdio.h>
161504 +#include <stdlib.h>
161505 +#include <string.h>
161506 +#include <time.h>
161507 +#include <pthread.h>
161508 +#include <sys/shm.h>
161509 +#include <limits.h>
161510 +#include "futex2test.h"
161511 +#include "logging.h"
161513 +#define TEST_NAME "futex2-requeue"
161514 +#define timeout_ns  30000000
161515 +#define WAKE_WAIT_US 10000
161516 +volatile futex_t *f1;
161518 +void usage(char *prog)
161520 +       printf("Usage: %s\n", prog);
161521 +       printf("  -c    Use color\n");
161522 +       printf("  -h    Display this help message\n");
161523 +       printf("  -v L  Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
161524 +              VQUIET, VCRITICAL, VINFO);
161527 +void *waiterfn(void *arg)
161529 +       struct timespec64 to64;
161531 +       /* setting absolute timeout for futex2 */
161532 +       if (gettime64(CLOCK_MONOTONIC, &to64))
161533 +               error("gettime64 failed\n", errno);
161535 +       to64.tv_nsec += timeout_ns;
161537 +       if (to64.tv_nsec >= 1000000000) {
161538 +               to64.tv_sec++;
161539 +               to64.tv_nsec -= 1000000000;
161540 +       }
161542 +       if (futex2_wait(f1, *f1, FUTEX_32, &to64))
161543 +               printf("waiter failed errno %d\n", errno);
161545 +       return NULL;
161548 +int main(int argc, char *argv[])
161550 +       pthread_t waiter[10];
161551 +       int res, ret = RET_PASS;
161552 +       int c, i;
161553 +       volatile futex_t _f1 = 0;
161554 +       volatile futex_t f2 = 0;
161555 +       struct futex_requeue r1, r2;
161557 +       f1 = &_f1;
161559 +       r1.flags = FUTEX_32;
161560 +       r2.flags = FUTEX_32;
161562 +       r1.uaddr = f1;
161563 +       r2.uaddr = &f2;
161565 +       while ((c = getopt(argc, argv, "cht:v:")) != -1) {
161566 +               switch (c) {
161567 +               case 'c':
161568 +                       log_color(1);
161569 +                       break;
161570 +               case 'h':
161571 +                       usage(basename(argv[0]));
161572 +                       exit(0);
161573 +               case 'v':
161574 +                       log_verbosity(atoi(optarg));
161575 +                       break;
161576 +               default:
161577 +                       usage(basename(argv[0]));
161578 +                       exit(1);
161579 +               }
161580 +       }
161582 +       ksft_print_header();
161583 +       ksft_set_plan(2);
161584 +       ksft_print_msg("%s: Test FUTEX2_REQUEUE\n",
161585 +                      basename(argv[0]));
161587 +       /*
161588 +        * Requeue a waiter from f1 to f2, and wake f2.
161589 +        */
161590 +       if (pthread_create(&waiter[0], NULL, waiterfn, NULL))
161591 +               error("pthread_create failed\n", errno);
161593 +       usleep(WAKE_WAIT_US);
161595 +       res = futex2_requeue(&r1, &r2, 0, 1, 0, 0);
161596 +       if (res != 1) {
161597 +               ksft_test_result_fail("futex2_requeue private returned: %d %s\n",
161598 +                                     res ? errno : res,
161599 +                                     res ? strerror(errno) : "");
161600 +               ret = RET_FAIL;
161601 +       }
161604 +       info("Calling private futex2_wake on f2: %u @ %p with val=%u\n", f2, &f2, f2);
161605 +       res = futex2_wake(&f2, 1, FUTEX_32);
161606 +       if (res != 1) {
161607 +               ksft_test_result_fail("futex2_wake private returned: %d %s\n",
161608 +                                     res ? errno : res,
161609 +                                     res ? strerror(errno) : "");
161610 +               ret = RET_FAIL;
161611 +       } else {
161612 +               ksft_test_result_pass("futex2_requeue simple succeeds\n");
161613 +       }
161616 +       /*
161617 +        * Create 10 waiters at f1. At futex_requeue, wake 3 and requeue 7.
161618 +        * At futex_wake, wake INT_MAX (should be exactly 7).
161619 +        */
161620 +       for (i = 0; i < 10; i++) {
161621 +               if (pthread_create(&waiter[i], NULL, waiterfn, NULL))
161622 +                       error("pthread_create failed\n", errno);
161623 +       }
161625 +       usleep(WAKE_WAIT_US);
161627 +       res = futex2_requeue(&r1, &r2, 3, 7, 0, 0);
161628 +       if (res != 10) {
161629 +               ksft_test_result_fail("futex2_requeue private returned: %d %s\n",
161630 +                                     res ? errno : res,
161631 +                                     res ? strerror(errno) : "");
161632 +               ret = RET_FAIL;
161633 +       }
161635 +       res = futex2_wake(&f2, INT_MAX, FUTEX_32);
161636 +       if (res != 7) {
161637 +               ksft_test_result_fail("futex2_wake returned: %d %s\n",
161638 +                                     res ? errno : res,
161639 +                                     res ? strerror(errno) : "");
161640 +               ret = RET_FAIL;
161641 +       } else {
161642 +               ksft_test_result_pass("futex2_requeue succeeds\n");
161643 +       }
161645 +       ksft_print_cnts();
161646 +       return ret;
161648 diff --git a/tools/testing/selftests/futex/functional/futex2_wait.c b/tools/testing/selftests/futex/functional/futex2_wait.c
161649 new file mode 100644
161650 index 000000000000..4b5416585c79
161651 --- /dev/null
161652 +++ b/tools/testing/selftests/futex/functional/futex2_wait.c
161653 @@ -0,0 +1,209 @@
161654 +// SPDX-License-Identifier: GPL-2.0-or-later
161655 +/******************************************************************************
161657 + *   Copyright Collabora Ltd., 2021
161659 + * DESCRIPTION
161660 + *     Test wait/wake mechanism of futex2, using 32bit sized futexes.
161662 + * AUTHOR
161663 + *     André Almeida <andrealmeid@collabora.com>
161665 + * HISTORY
161666 + *      2021-Feb-5: Initial version by André <andrealmeid@collabora.com>
161668 + *****************************************************************************/
161670 +#include <errno.h>
161671 +#include <error.h>
161672 +#include <getopt.h>
161673 +#include <stdio.h>
161674 +#include <stdlib.h>
161675 +#include <string.h>
161676 +#include <time.h>
161677 +#include <pthread.h>
161678 +#include <sys/shm.h>
161679 +#include <sys/mman.h>
161680 +#include <fcntl.h>
161681 +#include <string.h>
161682 +#include "futex2test.h"
161683 +#include "logging.h"
161685 +#define TEST_NAME "futex2-wait"
161686 +#define timeout_ns  30000000
161687 +#define WAKE_WAIT_US 10000
161688 +#define SHM_PATH "futex2_shm_file"
161689 +futex_t *f1;
161691 +void usage(char *prog)
161693 +       printf("Usage: %s\n", prog);
161694 +       printf("  -c    Use color\n");
161695 +       printf("  -h    Display this help message\n");
161696 +       printf("  -v L  Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
161697 +              VQUIET, VCRITICAL, VINFO);
161700 +void *waiterfn(void *arg)
161702 +       struct timespec64 to64;
161703 +       unsigned int flags = 0;
161705 +       if (arg)
161706 +               flags = *((unsigned int *) arg);
161708 +       /* setting absolute timeout for futex2 */
161709 +       if (gettime64(CLOCK_MONOTONIC, &to64))
161710 +               error("gettime64 failed\n", errno);
161712 +       to64.tv_nsec += timeout_ns;
161714 +       if (to64.tv_nsec >= 1000000000) {
161715 +               to64.tv_sec++;
161716 +               to64.tv_nsec -= 1000000000;
161717 +       }
161719 +       if (futex2_wait(f1, *f1, FUTEX_32 | flags, &to64))
161720 +               printf("waiter failed errno %d\n", errno);
161722 +       return NULL;
161725 +void *waitershm(void *arg)
161727 +       futex2_wait(arg, 0, FUTEX_32 | FUTEX_SHARED_FLAG, NULL);
161729 +       return NULL;
161732 +int main(int argc, char *argv[])
161734 +       pthread_t waiter;
161735 +       unsigned int flags = FUTEX_SHARED_FLAG;
161736 +       int res, ret = RET_PASS;
161737 +       int c;
161738 +       futex_t f_private = 0;
161740 +       f1 = &f_private;
161742 +       while ((c = getopt(argc, argv, "cht:v:")) != -1) {
161743 +               switch (c) {
161744 +               case 'c':
161745 +                       log_color(1);
161746 +                       break;
161747 +               case 'h':
161748 +                       usage(basename(argv[0]));
161749 +                       exit(0);
161750 +               case 'v':
161751 +                       log_verbosity(atoi(optarg));
161752 +                       break;
161753 +               default:
161754 +                       usage(basename(argv[0]));
161755 +                       exit(1);
161756 +               }
161757 +       }
161759 +       ksft_print_header();
161760 +       ksft_set_plan(3);
161761 +       ksft_print_msg("%s: Test FUTEX2_WAIT\n",
161762 +                      basename(argv[0]));
161764 +       /* Testing a private futex */
161765 +       info("Calling private futex2_wait on f1: %u @ %p with val=%u\n", *f1, f1, *f1);
161767 +       if (pthread_create(&waiter, NULL, waiterfn, NULL))
161768 +               error("pthread_create failed\n", errno);
161770 +       usleep(WAKE_WAIT_US);
161772 +       info("Calling private futex2_wake on f1: %u @ %p with val=%u\n", *f1, f1, *f1);
161773 +       res = futex2_wake(f1, 1, FUTEX_32);
161774 +       if (res != 1) {
161775 +               ksft_test_result_fail("futex2_wake private returned: %d %s\n",
161776 +                                     res ? errno : res,
161777 +                                     res ? strerror(errno) : "");
161778 +               ret = RET_FAIL;
161779 +       } else {
161780 +               ksft_test_result_pass("futex2_wake private succeeds\n");
161781 +       }
161783 +       int shm_id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0666);
161785 +       if (shm_id < 0) {
161786 +               perror("shmget");
161787 +               exit(1);
161788 +       }
161790 +       /* Testing an anon page shared memory */
161791 +       unsigned int *shared_data = shmat(shm_id, NULL, 0);
161793 +       *shared_data = 0;
161794 +       f1 = shared_data;
161796 +       info("Calling shared futex2_wait on f1: %u @ %p with val=%u\n", *f1, f1, *f1);
161798 +       if (pthread_create(&waiter, NULL, waiterfn, &flags))
161799 +               error("pthread_create failed\n", errno);
161801 +       usleep(WAKE_WAIT_US);
161803 +       info("Calling shared futex2_wake on f1: %u @ %p with val=%u\n", *f1, f1, *f1);
161804 +       res = futex2_wake(f1, 1, FUTEX_32 | FUTEX_SHARED_FLAG);
161805 +       if (res != 1) {
161806 +               ksft_test_result_fail("futex2_wake shared (shmget) returned: %d %s\n",
161807 +                                     res ? errno : res,
161808 +                                     res ? strerror(errno) : "");
161809 +               ret = RET_FAIL;
161810 +       } else {
161811 +               ksft_test_result_pass("futex2_wake shared (shmget) succeeds\n");
161812 +       }
161814 +       shmdt(shared_data);
161816 +       /* Testing file-backed shared memory */
161817 +       void *shm;
161818 +       int fd, pid;
161820 +       f_private = 0;
161822 +       fd = open(SHM_PATH, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
161823 +       if (fd < 0) {
161824 +               perror("open");
161825 +               exit(1);
161826 +       }
161828 +       res = ftruncate(fd, sizeof(f_private));
161829 +       if (res) {
161830 +               perror("ftruncate");
161831 +               exit(1);
161832 +       }
161834 +       shm = mmap(NULL, sizeof(f_private), PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
161835 +       if (shm == MAP_FAILED) {
161836 +               perror("mmap");
161837 +               exit(1);
161838 +       }
161840 +       memcpy(shm, &f_private, sizeof(f_private));
161842 +       pthread_create(&waiter, NULL, waitershm, shm);
161844 +       usleep(WAKE_WAIT_US);
161846 +       res = futex2_wake(shm, 1, FUTEX_32 | FUTEX_SHARED_FLAG);
161847 +       if (res != 1) {
161848 +               ksft_test_result_fail("futex2_wake shared (mmap) returned: %d %s\n",
161849 +                                     res ? errno : res,
161850 +                                     res ? strerror(errno) : "");
161851 +               ret = RET_FAIL;
161852 +       } else {
161853 +               ksft_test_result_pass("futex2_wake shared (mmap) succeeds\n");
161854 +       }
161856 +       munmap(shm, sizeof(f_private));
161858 +       remove(SHM_PATH);
161860 +       ksft_print_cnts();
161861 +       return ret;
161862 +}
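Distilled, the private wait/wake handshake exercised above is: a waiter thread blocks in futex2_wait() while the futex still holds the expected value, and the main thread wakes it with futex2_wake(). A minimal sketch, assuming the out-of-tree futex2 syscalls and the futex2test.h wrappers from this patch are available (futex_t comes from futextest.h):

    /* Minimal private wait/wake pair; a sketch, not part of the patch. */
    #include <pthread.h>
    #include <unistd.h>
    #include "futex2test.h"

    static futex_t f;

    static void *waiter(void *arg)
    {
            /* Blocks while f == 0; a NULL timeout means wait forever. */
            futex2_wait(&f, 0, FUTEX_32, NULL);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, waiter, NULL);
            usleep(10000);                /* let the waiter reach the kernel */
            futex2_wake(&f, 1, FUTEX_32); /* wake exactly one waiter */
            pthread_join(t, NULL);
            return 0;
    }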
161863 diff --git a/tools/testing/selftests/futex/functional/futex2_waitv.c b/tools/testing/selftests/futex/functional/futex2_waitv.c
161864 new file mode 100644
161865 index 000000000000..2f81d296d95d
161866 --- /dev/null
161867 +++ b/tools/testing/selftests/futex/functional/futex2_waitv.c
161868 @@ -0,0 +1,157 @@
161869 +// SPDX-License-Identifier: GPL-2.0-or-later
161870 +/******************************************************************************
161872 + *   Copyright Collabora Ltd., 2021
161874 + * DESCRIPTION
161875 + *     Test the waitv/wake mechanism of futex2, using 32-bit futexes.
161877 + * AUTHOR
161878 + *     André Almeida <andrealmeid@collabora.com>
161880 + * HISTORY
161881 + *      2021-Feb-5: Initial version by André <andrealmeid@collabora.com>
161883 + *****************************************************************************/
161885 +#include <errno.h>
161886 +#include <error.h>
161887 +#include <getopt.h>
161888 +#include <stdio.h>
161889 +#include <stdlib.h>
161890 +#include <string.h>
161891 +#include <time.h>
161892 +#include <pthread.h>
161893 +#include <sys/shm.h>
161894 +#include "futex2test.h"
161895 +#include "logging.h"
161897 +#define TEST_NAME "futex2-waitv"
161898 +#define timeout_ns  1000000000
161899 +#define WAKE_WAIT_US 10000
161900 +#define NR_FUTEXES 30
161901 +struct futex_waitv waitv[NR_FUTEXES];
161902 +u_int32_t futexes[NR_FUTEXES] = {0};
161904 +void usage(char *prog)
161905 +{
161906 +       printf("Usage: %s\n", prog);
161907 +       printf("  -c    Use color\n");
161908 +       printf("  -h    Display this help message\n");
161909 +       printf("  -v L  Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
161910 +              VQUIET, VCRITICAL, VINFO);
161911 +}
161913 +void *waiterfn(void *arg)
161914 +{
161915 +       struct timespec64 to64;
161916 +       int res;
161918 +       /* setting absolute timeout for futex2 */
161919 +       if (gettime64(CLOCK_MONOTONIC, &to64))
161920 +               error("gettime64 failed\n", errno);
161922 +       to64.tv_sec++;
161924 +       res = futex2_waitv(waitv, NR_FUTEXES, 0, &to64);
161925 +       if (res < 0) {
161926 +               ksft_test_result_fail("futex2_waitv private returned: %d %s\n",
161927 +                                     res ? errno : res,
161928 +                                     res ? strerror(errno) : "");
161929 +       } else if (res != NR_FUTEXES - 1) {
161930 +               ksft_test_result_fail("futex2_waitv private returned: %d %s\n",
161931 +                                     res ? errno : res,
161932 +                                     res ? strerror(errno) : "");
161933 +       }
161935 +       return NULL;
161936 +}
161938 +int main(int argc, char *argv[])
161939 +{
161940 +       pthread_t waiter;
161941 +       int res, ret = RET_PASS;
161942 +       int c, i;
161944 +       while ((c = getopt(argc, argv, "cht:v:")) != -1) {
161945 +               switch (c) {
161946 +               case 'c':
161947 +                       log_color(1);
161948 +                       break;
161949 +               case 'h':
161950 +                       usage(basename(argv[0]));
161951 +                       exit(0);
161952 +               case 'v':
161953 +                       log_verbosity(atoi(optarg));
161954 +                       break;
161955 +               default:
161956 +                       usage(basename(argv[0]));
161957 +                       exit(1);
161958 +               }
161959 +       }
161961 +       ksft_print_header();
161962 +       ksft_set_plan(2);
161963 +       ksft_print_msg("%s: Test FUTEX2_WAITV\n",
161964 +                      basename(argv[0]));
161966 +       for (i = 0; i < NR_FUTEXES; i++) {
161967 +               waitv[i].uaddr = &futexes[i];
161968 +               waitv[i].flags = FUTEX_32;
161969 +               waitv[i].val = 0;
161970 +       }
161972 +       /* Private waitv */
161973 +       if (pthread_create(&waiter, NULL, waiterfn, NULL))
161974 +               error("pthread_create failed\n", errno);
161976 +       usleep(WAKE_WAIT_US);
161978 +       res = futex2_wake(waitv[NR_FUTEXES - 1].uaddr, 1, FUTEX_32);
161979 +       if (res != 1) {
161980 +               ksft_test_result_fail("futex2_waitv private returned: %d %s\n",
161981 +                                     res ? errno : res,
161982 +                                     res ? strerror(errno) : "");
161983 +               ret = RET_FAIL;
161984 +       } else {
161985 +               ksft_test_result_pass("futex2_waitv private succeeds\n");
161986 +       }
161988 +       /* Shared waitv */
161989 +       for (i = 0; i < NR_FUTEXES; i++) {
161990 +               int shm_id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0666);
161992 +               if (shm_id < 0) {
161993 +                       perror("shmget");
161994 +                       exit(1);
161995 +               }
161997 +               unsigned int *shared_data = shmat(shm_id, NULL, 0);
161999 +               *shared_data = 0;
162000 +               waitv[i].uaddr = shared_data;
162001 +               waitv[i].flags = FUTEX_32 | FUTEX_SHARED_FLAG;
162002 +               waitv[i].val = 0;
162003 +       }
162005 +       if (pthread_create(&waiter, NULL, waiterfn, NULL))
162006 +               error("pthread_create failed\n", errno);
162008 +       usleep(WAKE_WAIT_US);
162010 +       res = futex2_wake(waitv[NR_FUTEXES - 1].uaddr, 1, FUTEX_32 | FUTEX_SHARED_FLAG);
162011 +       if (res != 1) {
162012 +               ksft_test_result_fail("futex2_waitv shared returned: %d %s\n",
162013 +                                     res ? errno : res,
162014 +                                     res ? strerror(errno) : "");
162015 +               ret = RET_FAIL;
162016 +       } else {
162017 +               ksft_test_result_pass("futex2_waitv shared succeeds\n");
162018 +       }
162020 +       for (i = 0; i < NR_FUTEXES; i++)
162021 +               shmdt(waitv[i].uaddr);
162023 +       ksft_print_cnts();
162024 +       return ret;
162025 +}
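The waitv flow above reduces to three steps: fill a futex_waitv array, block on every entry with a single futex2_waitv() call, then check that the return value is the index of the entry that was woken. A hedged sketch under the same assumptions as the test (futex2 syscalls present, futex2test.h wrappers):

    /* Wait on NR futexes at once, wake the last one, expect its index back. */
    #include <pthread.h>
    #include <unistd.h>
    #include "futex2test.h"

    #define NR 8

    static struct futex_waitv wv[NR];
    static u_int32_t f[NR];

    static void *waiter(void *arg)
    {
            /* Returns the index of the woken entry, or -1 with errno set. */
            long idx = futex2_waitv(wv, NR, 0, NULL);
            return (void *)idx;
    }

    int main(void)
    {
            pthread_t t;
            void *idx;
            int i;

            for (i = 0; i < NR; i++) {
                    wv[i].uaddr = &f[i];
                    wv[i].flags = FUTEX_32;
                    wv[i].val = 0;  /* block while the futex still reads 0 */
            }

            pthread_create(&t, NULL, waiter, NULL);
            usleep(10000);
            futex2_wake(&f[NR - 1], 1, FUTEX_32);
            pthread_join(t, &idx);
            return (long)idx == NR - 1 ? 0 : 1;
    }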
162026 diff --git a/tools/testing/selftests/futex/functional/futex_wait_timeout.c b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
162027 index ee55e6d389a3..b4dffe9e3b44 100644
162028 --- a/tools/testing/selftests/futex/functional/futex_wait_timeout.c
162029 +++ b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
162030 @@ -11,6 +11,7 @@
162031   *
162032   * HISTORY
162033   *      2009-Nov-6: Initial version by Darren Hart <dvhart@linux.intel.com>
162034 + *      2021-Feb-5: Add futex2 test by André <andrealmeid@collabora.com>
162035   *
162036   *****************************************************************************/
162038 @@ -20,7 +21,7 @@
162039  #include <stdlib.h>
162040  #include <string.h>
162041  #include <time.h>
162042 -#include "futextest.h"
162043 +#include "futex2test.h"
162044  #include "logging.h"
162046  #define TEST_NAME "futex-wait-timeout"
162047 @@ -40,7 +41,8 @@ void usage(char *prog)
162048  int main(int argc, char *argv[])
162050         futex_t f1 = FUTEX_INITIALIZER;
162051 -       struct timespec to;
162052 +       struct timespec to = {.tv_sec = 0, .tv_nsec = timeout_ns};
162053 +       struct timespec64 to64;
162054         int res, ret = RET_PASS;
162055         int c;
162057 @@ -65,22 +67,60 @@ int main(int argc, char *argv[])
162058         }
162060         ksft_print_header();
162061 -       ksft_set_plan(1);
162062 +       ksft_set_plan(3);
162063         ksft_print_msg("%s: Block on a futex and wait for timeout\n",
162064                basename(argv[0]));
162065         ksft_print_msg("\tArguments: timeout=%ldns\n", timeout_ns);
162067 -       /* initialize timeout */
162068 -       to.tv_sec = 0;
162069 -       to.tv_nsec = timeout_ns;
162071         info("Calling futex_wait on f1: %u @ %p\n", f1, &f1);
162072         res = futex_wait(&f1, f1, &to, FUTEX_PRIVATE_FLAG);
162073         if (!res || errno != ETIMEDOUT) {
162074 -               fail("futex_wait returned %d\n", ret < 0 ? errno : ret);
162075 +               ksft_test_result_fail("futex_wait returned %d\n", res < 0 ? errno : res);
162076 +               ret = RET_FAIL;
162077 +       } else {
162078 +               ksft_test_result_pass("futex_wait timeout succeeds\n");
162079 +       }
162081 +       /* setting absolute monotonic timeout for futex2 */
162082 +       if (gettime64(CLOCK_MONOTONIC, &to64))
162083 +               error("gettime64 failed\n", errno);
162085 +       to64.tv_nsec += timeout_ns;
162087 +       if (to64.tv_nsec >= 1000000000) {
162088 +               to64.tv_sec++;
162089 +               to64.tv_nsec -= 1000000000;
162090 +       }
162092 +       info("Calling futex2_wait on f1: %u @ %p\n", f1, &f1);
162093 +       res = futex2_wait(&f1, f1, FUTEX_32, &to64);
162094 +       if (!res || errno != ETIMEDOUT) {
162095 +               ksft_test_result_fail("futex2_wait monotonic returned %d\n", res < 0 ? errno : res);
162096 +               ret = RET_FAIL;
162097 +       } else {
162098 +               ksft_test_result_pass("futex2_wait monotonic timeout succeeds\n");
162099 +       }
162101 +       /* setting absolute realtime timeout for futex2 */
162102 +       if (gettime64(CLOCK_REALTIME, &to64))
162103 +               error("gettime64 failed\n", errno);
162105 +       to64.tv_nsec += timeout_ns;
162107 +       if (to64.tv_nsec >= 1000000000) {
162108 +               to64.tv_sec++;
162109 +               to64.tv_nsec -= 1000000000;
162110 +       }
162112 +       info("Calling futex2_wait on f1: %u @ %p\n", f1, &f1);
162113 +       res = futex2_wait(&f1, f1, FUTEX_32 | FUTEX_CLOCK_REALTIME, &to64);
162114 +       if (!res || errno != ETIMEDOUT) {
162115 +               ksft_test_result_fail("futex2_wait realtime returned %d\n", res < 0 ? errno : res);
162116                 ret = RET_FAIL;
162117 +       } else {
162118 +               ksft_test_result_pass("futex2_wait realtime timeout succeeds\n");
162119         }
162121 -       print_result(TEST_NAME, ret);
162122 +       ksft_print_cnts();
162123         return ret;
162124  }
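The add-then-carry sequence above converts a relative timeout into the absolute timespec64 deadline that futex2_wait() expects, and the same few lines recur in the other futex2 tests. Factored out, the normalization looks like this (a sketch; gettime64() and struct timespec64 are the shims from futex2test.h, and timeout_ns is assumed to stay below one second, as in the tests):

    #include <time.h>
    #include "futex2test.h"   /* gettime64(), struct timespec64 */

    /* Turn "now + timeout_ns" into a normalized absolute deadline. */
    static int abs_deadline(clock_t clockid, long timeout_ns,
                            struct timespec64 *to64)
    {
            if (gettime64(clockid, to64))
                    return -1;

            to64->tv_nsec += timeout_ns;
            if (to64->tv_nsec >= 1000000000) {  /* carry into tv_sec */
                    to64->tv_sec++;
                    to64->tv_nsec -= 1000000000;
            }
            return 0;
    }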
162125 diff --git a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
162126 index 0ae390ff8164..ed3660090907 100644
162127 --- a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
162128 +++ b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
162129 @@ -12,6 +12,7 @@
162130   *
162131   * HISTORY
162132   *      2009-Nov-14: Initial version by Gowrishankar <gowrishankar.m@in.ibm.com>
162133 + *      2021-Feb-5: Add futex2 test by André <andrealmeid@collabora.com>
162134   *
162135   *****************************************************************************/
162137 @@ -21,7 +22,7 @@
162138  #include <stdlib.h>
162139  #include <string.h>
162140  #include <time.h>
162141 -#include "futextest.h"
162142 +#include "futex2test.h"
162143  #include "logging.h"
162145  #define TEST_NAME "futex-wait-wouldblock"
162146 @@ -39,6 +40,7 @@ void usage(char *prog)
162147  int main(int argc, char *argv[])
162149         struct timespec to = {.tv_sec = 0, .tv_nsec = timeout_ns};
162150 +       struct timespec64 to64;
162151         futex_t f1 = FUTEX_INITIALIZER;
162152         int res, ret = RET_PASS;
162153         int c;
162154 @@ -61,18 +63,41 @@ int main(int argc, char *argv[])
162155         }
162157         ksft_print_header();
162158 -       ksft_set_plan(1);
162159 +       ksft_set_plan(2);
162160         ksft_print_msg("%s: Test the unexpected futex value in FUTEX_WAIT\n",
162161                basename(argv[0]));
162163         info("Calling futex_wait on f1: %u @ %p with val=%u\n", f1, &f1, f1+1);
162164         res = futex_wait(&f1, f1+1, &to, FUTEX_PRIVATE_FLAG);
162165         if (!res || errno != EWOULDBLOCK) {
162166 -               fail("futex_wait returned: %d %s\n",
162167 +               ksft_test_result_fail("futex_wait returned: %d %s\n",
162168                      res ? errno : res, res ? strerror(errno) : "");
162169                 ret = RET_FAIL;
162170 +       } else {
162171 +               ksft_test_result_pass("futex_wait wouldblock succeeds\n");
162172         }
162174 -       print_result(TEST_NAME, ret);
162175 +       /* setting absolute timeout for futex2 */
162176 +       if (gettime64(CLOCK_MONOTONIC, &to64))
162177 +               error("gettime64 failed\n", errno);
162179 +       to64.tv_nsec += timeout_ns;
162181 +       if (to64.tv_nsec >= 1000000000) {
162182 +               to64.tv_sec++;
162183 +               to64.tv_nsec -= 1000000000;
162184 +       }
162186 +       info("Calling futex2_wait on f1: %u @ %p with val=%u\n", f1, &f1, f1+1);
162187 +       res = futex2_wait(&f1, f1+1, FUTEX_32, &to64);
162188 +       if (!res || errno != EWOULDBLOCK) {
162189 +               ksft_test_result_fail("futex2_wait returned: %d %s\n",
162190 +                    res ? errno : res, res ? strerror(errno) : "");
162191 +               ret = RET_FAIL;
162192 +       } else {
162193 +               ksft_test_result_pass("futex2_wait wouldblock succeeds\n");
162194 +       }
162196 +       ksft_print_cnts();
162197         return ret;
162198  }
162199 diff --git a/tools/testing/selftests/futex/functional/run.sh b/tools/testing/selftests/futex/functional/run.sh
162200 index 1acb6ace1680..18b3883d7236 100755
162201 --- a/tools/testing/selftests/futex/functional/run.sh
162202 +++ b/tools/testing/selftests/futex/functional/run.sh
162203 @@ -73,3 +73,9 @@ echo
162204  echo
162205  ./futex_wait_uninitialized_heap $COLOR
162206  ./futex_wait_private_mapped_file $COLOR
162208 +echo
162209 +./futex2_wait $COLOR
162211 +echo
162212 +./futex2_waitv $COLOR
162213 diff --git a/tools/testing/selftests/futex/include/futex2test.h b/tools/testing/selftests/futex/include/futex2test.h
162214 new file mode 100644
162215 index 000000000000..e2635006b1a9
162216 --- /dev/null
162217 +++ b/tools/testing/selftests/futex/include/futex2test.h
162218 @@ -0,0 +1,121 @@
162219 +/* SPDX-License-Identifier: GPL-2.0-or-later */
162220 +/******************************************************************************
162222 + *   Copyright Collabora Ltd., 2021
162224 + * DESCRIPTION
162225 + *     Futex2 library add-ons for the old futex library
162227 + * AUTHOR
162228 + *     André Almeida <andrealmeid@collabora.com>
162230 + * HISTORY
162231 + *      2021-Feb-5: Initial version by André <andrealmeid@collabora.com>
162233 + *****************************************************************************/
162234 +#include "futextest.h"
162235 +#include <stdio.h>
162237 +#define NSEC_PER_SEC   1000000000L
162239 +#ifndef FUTEX_8
162240 +# define FUTEX_8       0
162241 +#endif
162242 +#ifndef FUTEX_16
162243 +# define FUTEX_16      1
162244 +#endif
162245 +#ifndef FUTEX_32
162246 +# define FUTEX_32      2
162247 +#endif
162249 +#ifndef FUTEX_SHARED_FLAG
162250 +#define FUTEX_SHARED_FLAG 8
162251 +#endif
162253 +#ifndef FUTEX_WAITV_MAX
162254 +#define FUTEX_WAITV_MAX 128
162255 +struct futex_waitv {
162256 +       void *uaddr;
162257 +       unsigned int val;
162258 +       unsigned int flags;
162259 +};
162260 +#endif
162262 +/*
162263 + * - Y2038 section for 32-bit applications -
162265 + * Remove this when glibc is ready for y2038. Then, always compile with
162266 + * `-D_TIME_BITS=64` or `-D__USE_TIME_BITS64`. glibc will provide both
162267 + * timespec64 and clock_gettime64, so we won't need to define them here.
162268 + */
162269 +#if defined(__i386__) || __TIMESIZE == 32
162270 +# define NR_gettime __NR_clock_gettime64
162271 +#else
162272 +# define NR_gettime __NR_clock_gettime
162273 +#endif
162275 +struct timespec64 {
162276 +       long long tv_sec;       /* seconds */
162277 +       long long tv_nsec;      /* nanoseconds */
162278 +};
162280 +int gettime64(clock_t clockid, struct timespec64 *tv)
162281 +{
162282 +       return syscall(NR_gettime, clockid, tv);
162283 +}
162284 +/*
162285 + * - End of Y2038 section -
162286 + */
162288 +/*
162289 + * futex2_wait - If (*uaddr == val), wait at uaddr until timo
162290 + * @uaddr: User address to wait on
162291 + * @val:   Expected value at uaddr, return if it is not equal
162292 + * @flags: Operation flags
162293 + * @timo:  Optional timeout for operation
162294 + */
162295 +static inline int futex2_wait(volatile void *uaddr, unsigned long val,
162296 +                             unsigned long flags, struct timespec64 *timo)
162297 +{
162298 +       return syscall(__NR_futex_wait, uaddr, val, flags, timo);
162299 +}
162301 +/*
162302 + * futex2_wake - Wake a number of waiters at uaddr
162303 + * @uaddr: Address to wake
162304 + * @nr:    Number of waiters to wake
162305 + * @flags: Operation flags
162306 + */
162307 +static inline int futex2_wake(volatile void *uaddr, unsigned int nr, unsigned long flags)
162308 +{
162309 +       return syscall(__NR_futex_wake, uaddr, nr, flags);
162310 +}
162312 +/*
162313 + * futex2_waitv - Wait at multiple futexes, wake on any
162314 + * @waiters:    Array of waiters
162315 + * @nr_waiters: Length of waiters array
162316 + * @flags: Operation flags
162317 + * @timo:  Optional timeout for operation
162318 + */
162319 +static inline int futex2_waitv(volatile struct futex_waitv *waiters, unsigned long nr_waiters,
162320 +                             unsigned long flags, struct timespec64 *timo)
162321 +{
162322 +       return syscall(__NR_futex_waitv, waiters, nr_waiters, flags, timo);
162323 +}
162325 +/*
162326 + * futex2_requeue - Wake futexes at uaddr1 and requeue from uaddr1 to uaddr2
162327 + * @uaddr1:     Original address to wake and requeue from
162328 + * @uaddr2:     Address to requeue to
162329 + * @nr_wake:    Number of futexes to wake at uaddr1 before requeuing
162330 + * @nr_requeue: Number of futexes to requeue from uaddr1 to uaddr2
162331 + * @cmpval:     If (uaddr1->uaddr != cmpval), return immediately
162332 + * @flags:      Operation flags
162333 + */
162334 +static inline int futex2_requeue(struct futex_requeue *uaddr1, struct futex_requeue *uaddr2,
162335 +                                unsigned int nr_wake, unsigned int nr_requeue,
162336 +                                unsigned int cmpval, unsigned long flags)
162337 +{
162338 +       return syscall(__NR_futex_requeue, uaddr1, uaddr2, nr_wake, nr_requeue, cmpval, flags);
162339 +}
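Because FUTEX_SHARED_FLAG keys the futex by its backing page rather than by virtual address, the wrappers above also work across processes, not just across threads. A minimal cross-process sketch under the same assumptions (futex2 syscalls available; futex_t from futextest.h):

    #include <sys/mman.h>
    #include <sys/wait.h>
    #include <unistd.h>
    #include "futex2test.h"

    int main(void)
    {
            /* One shared anonymous page holds the futex word. */
            futex_t *f = mmap(NULL, sizeof(*f), PROT_READ | PROT_WRITE,
                              MAP_SHARED | MAP_ANONYMOUS, -1, 0);
            if (f == MAP_FAILED)
                    return 1;
            *f = 0;

            if (fork() == 0) {
                    /* child: block while *f == 0 */
                    futex2_wait(f, 0, FUTEX_32 | FUTEX_SHARED_FLAG, NULL);
                    _exit(0);
            }

            usleep(10000);
            futex2_wake(f, 1, FUTEX_32 | FUTEX_SHARED_FLAG);
            wait(NULL);
            munmap((void *)f, sizeof(*f));
            return 0;
    }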
162340 diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
162341 index bb2752d78fe3..81edbd23d371 100644
162342 --- a/tools/testing/selftests/kvm/dirty_log_test.c
162343 +++ b/tools/testing/selftests/kvm/dirty_log_test.c
162344 @@ -17,6 +17,7 @@
162345  #include <linux/bitmap.h>
162346  #include <linux/bitops.h>
162347  #include <asm/barrier.h>
162348 +#include <linux/atomic.h>
162350  #include "kvm_util.h"
162351  #include "test_util.h"
162352 @@ -137,12 +138,20 @@ static uint64_t host_clear_count;
162353  static uint64_t host_track_next_count;
162355  /* Whether dirty ring reset is requested, or finished */
162356 -static sem_t dirty_ring_vcpu_stop;
162357 -static sem_t dirty_ring_vcpu_cont;
162358 +static sem_t sem_vcpu_stop;
162359 +static sem_t sem_vcpu_cont;
162360 +/*
162361 + * This is only set by the main thread, and only cleared by the vcpu thread.
162362 + * It is used to request the vcpu thread to stop at the next GUEST_SYNC, since
162363 + * GUEST_SYNC is the only place where both the "dirty bit" and the "dirty data"
162364 + * are guaranteed to match.  E.g., SIG_IPI won't guarantee that if the vcpu is
162365 + * interrupted after setting the dirty bit but before the data is written.
162366 + */
162367 +static atomic_t vcpu_sync_stop_requested;
162368  /*
162369   * This is updated by the vcpu thread to tell the host whether it's a
162370   * ring-full event.  It should only be read until a sem_wait() of
162371 - * dirty_ring_vcpu_stop and before vcpu continues to run.
162372 + * sem_vcpu_stop and before vcpu continues to run.
162373   */
162374  static bool dirty_ring_vcpu_ring_full;
162376 @@ -234,6 +243,17 @@ static void clear_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
162377         kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages);
162380 +/* Should only be called after a GUEST_SYNC */
162381 +static void vcpu_handle_sync_stop(void)
162382 +{
162383 +       if (atomic_read(&vcpu_sync_stop_requested)) {
162384 +               /* The main thread is sleeping and waiting for us */
162385 +               atomic_set(&vcpu_sync_stop_requested, false);
162386 +               sem_post(&sem_vcpu_stop);
162387 +               sem_wait_until(&sem_vcpu_cont);
162388 +       }
162389 +}
162391  static void default_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
162393         struct kvm_run *run = vcpu_state(vm, VCPU_ID);
162394 @@ -244,6 +264,8 @@ static void default_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
162395         TEST_ASSERT(get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC,
162396                     "Invalid guest sync status: exit_reason=%s\n",
162397                     exit_reason_str(run->exit_reason));
162399 +       vcpu_handle_sync_stop();
162402  static bool dirty_ring_supported(void)
162403 @@ -301,13 +323,13 @@ static void dirty_ring_wait_vcpu(void)
162405         /* This makes sure that hardware PML cache flushed */
162406         vcpu_kick();
162407 -       sem_wait_until(&dirty_ring_vcpu_stop);
162408 +       sem_wait_until(&sem_vcpu_stop);
162411  static void dirty_ring_continue_vcpu(void)
162413         pr_info("Notifying vcpu to continue\n");
162414 -       sem_post(&dirty_ring_vcpu_cont);
162415 +       sem_post(&sem_vcpu_cont);
162418  static void dirty_ring_collect_dirty_pages(struct kvm_vm *vm, int slot,
162419 @@ -361,11 +383,11 @@ static void dirty_ring_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
162420                 /* Update the flag first before pause */
162421                 WRITE_ONCE(dirty_ring_vcpu_ring_full,
162422                            run->exit_reason == KVM_EXIT_DIRTY_RING_FULL);
162423 -               sem_post(&dirty_ring_vcpu_stop);
162424 +               sem_post(&sem_vcpu_stop);
162425                 pr_info("vcpu stops because %s...\n",
162426                         dirty_ring_vcpu_ring_full ?
162427                         "dirty ring is full" : "vcpu is kicked out");
162428 -               sem_wait_until(&dirty_ring_vcpu_cont);
162429 +               sem_wait_until(&sem_vcpu_cont);
162430                 pr_info("vcpu continues now.\n");
162431         } else {
162432                 TEST_ASSERT(false, "Invalid guest sync status: "
162433 @@ -377,7 +399,7 @@ static void dirty_ring_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
162434  static void dirty_ring_before_vcpu_join(void)
162436         /* Kick another round of vcpu just to make sure it will quit */
162437 -       sem_post(&dirty_ring_vcpu_cont);
162438 +       sem_post(&sem_vcpu_cont);
162441  struct log_mode {
162442 @@ -505,9 +527,8 @@ static void *vcpu_worker(void *data)
162443          */
162444         sigmask->len = 8;
162445         pthread_sigmask(0, NULL, sigset);
162446 +       sigdelset(sigset, SIG_IPI);
162447         vcpu_ioctl(vm, VCPU_ID, KVM_SET_SIGNAL_MASK, sigmask);
162448 -       sigaddset(sigset, SIG_IPI);
162449 -       pthread_sigmask(SIG_BLOCK, sigset, NULL);
162451         sigemptyset(sigset);
162452         sigaddset(sigset, SIG_IPI);
162453 @@ -768,7 +789,25 @@ static void run_test(enum vm_guest_mode mode, void *arg)
162454                 usleep(p->interval * 1000);
162455                 log_mode_collect_dirty_pages(vm, TEST_MEM_SLOT_INDEX,
162456                                              bmap, host_num_pages);
162458 +               /*
162459 +                * See the vcpu_sync_stop_requested definition for details on
162460 +                * why we need to stop the vcpu when verifying data.
162461 +                */
162462 +               atomic_set(&vcpu_sync_stop_requested, true);
162463 +               sem_wait_until(&sem_vcpu_stop);
162464 +               /*
162465 +                * NOTE: for the dirty ring, it's possible that we didn't stop
162466 +                * at GUEST_SYNC but instead stopped because the ring is full;
162467 +                * that's okay too, because a full ring means we're only missing
162468 +                * the flush of the last page, and since we handle the last
162469 +                * page specially, verification will succeed anyway.
162470 +                */
162471 +               assert(host_log_mode == LOG_MODE_DIRTY_RING ||
162472 +                      atomic_read(&vcpu_sync_stop_requested) == false);
162473                 vm_dirty_log_verify(mode, bmap);
162474 +               sem_post(&sem_vcpu_cont);
162476                 iteration++;
162477                 sync_global_to_guest(vm, iteration);
162478         }
162479 @@ -818,9 +857,10 @@ int main(int argc, char *argv[])
162480                 .interval = TEST_HOST_LOOP_INTERVAL,
162481         };
162482         int opt, i;
162483 +       sigset_t sigset;
162485 -       sem_init(&dirty_ring_vcpu_stop, 0, 0);
162486 -       sem_init(&dirty_ring_vcpu_cont, 0, 0);
162487 +       sem_init(&sem_vcpu_stop, 0, 0);
162488 +       sem_init(&sem_vcpu_cont, 0, 0);
162490         guest_modes_append_default();
162492 @@ -876,6 +916,11 @@ int main(int argc, char *argv[])
162494         srandom(time(0));
162496 +       /* Ensure that vCPU threads start with SIG_IPI blocked.  */
162497 +       sigemptyset(&sigset);
162498 +       sigaddset(&sigset, SIG_IPI);
162499 +       pthread_sigmask(SIG_BLOCK, &sigset, NULL);
162501         if (host_log_mode_option == LOG_MODE_ALL) {
162502                 /* Run each log mode */
162503                 for (i = 0; i < LOG_MODE_NUM; i++) {
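The rename plus the new atomic flag above implement a classic two-semaphore rendezvous: the main thread raises a request flag and sleeps on sem_vcpu_stop; the vcpu thread notices the flag at its next safe point (GUEST_SYNC), posts that semaphore, and parks on sem_vcpu_cont until the main thread releases it. The same pattern in generic C11/POSIX form (a sketch, not the kvm selftest helpers; both semaphores are assumed initialized with sem_init(..., 0, 0)):

    #include <semaphore.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static sem_t sem_stop, sem_cont;      /* sem_init(..., 0, 0) elsewhere */
    static atomic_bool stop_requested;

    /* Worker side: called at the safe point (the GUEST_SYNC analogue). */
    static void worker_handle_stop(void)
    {
            if (atomic_load(&stop_requested)) {
                    atomic_store(&stop_requested, false);
                    sem_post(&sem_stop);  /* tell main we are parked */
                    sem_wait(&sem_cont);  /* stay parked until released */
            }
    }

    /* Main side: park the worker, inspect shared state, release it. */
    static void main_checkpoint(void)
    {
            atomic_store(&stop_requested, true);
            sem_wait(&sem_stop);
            /* ... verify data while the worker is quiescent ... */
            sem_post(&sem_cont);
    }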
162504 diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
162505 index a5ce26d548e4..0af84ad48aa7 100644
162506 --- a/tools/testing/selftests/lib.mk
162507 +++ b/tools/testing/selftests/lib.mk
162508 @@ -1,6 +1,10 @@
162509  # This mimics the top-level Makefile. We do it explicitly here so that this
162510  # Makefile can operate with or without the kbuild infrastructure.
162511 +ifneq ($(LLVM),)
162512 +CC := clang
162513 +else
162514  CC := $(CROSS_COMPILE)gcc
162515 +endif
162517  ifeq (0,$(MAKELEVEL))
162518      ifeq ($(OUTPUT),)
162519 @@ -74,7 +78,8 @@ ifdef building_out_of_srctree
162520                 rsync -aq $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
162521         fi
162522         @if [ "X$(TEST_PROGS)" != "X" ]; then \
162523 -               $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(OUTPUT)/$(TEST_PROGS)) ; \
162524 +               $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) \
162525 +                                 $(addprefix $(OUTPUT)/,$(TEST_PROGS))) ; \
162526         else \
162527                 $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS)); \
162528         fi
162529 diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
162530 index c02291e9841e..880e3ab9d088 100755
162531 --- a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
162532 +++ b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
162533 @@ -271,7 +271,7 @@ test_span_gre_fdb_roaming()
162535         while ((RET == 0)); do
162536                 bridge fdb del dev $swp3 $h3mac vlan 555 master 2>/dev/null
162537 -               bridge fdb add dev $swp2 $h3mac vlan 555 master
162538 +               bridge fdb add dev $swp2 $h3mac vlan 555 master static
162539                 sleep 1
162540                 fail_test_span_gre_dir $tundev ingress
162542 diff --git a/tools/testing/selftests/net/forwarding/mirror_lib.sh b/tools/testing/selftests/net/forwarding/mirror_lib.sh
162543 index 13db1cb50e57..6406cd76a19d 100644
162544 --- a/tools/testing/selftests/net/forwarding/mirror_lib.sh
162545 +++ b/tools/testing/selftests/net/forwarding/mirror_lib.sh
162546 @@ -20,6 +20,13 @@ mirror_uninstall()
162547         tc filter del dev $swp1 $direction pref 1000
162550 +is_ipv6()
162551 +{
162552 +       local addr=$1; shift
162554 +       [[ -z ${addr//[0-9a-fA-F:]/} ]]
162555 +}
162557  mirror_test()
162559         local vrf_name=$1; shift
162560 @@ -29,9 +36,17 @@ mirror_test()
162561         local pref=$1; shift
162562         local expect=$1; shift
162564 +       if is_ipv6 $dip; then
162565 +               local proto=-6
162566 +               local type="icmp6 type=128" # Echo request.
162567 +       else
162568 +               local proto=
162569 +               local type="icmp echoreq"
162570 +       fi
162572         local t0=$(tc_rule_stats_get $dev $pref)
162573 -       $MZ $vrf_name ${sip:+-A $sip} -B $dip -a own -b bc -q \
162574 -           -c 10 -d 100msec -t icmp type=8
162575 +       $MZ $proto $vrf_name ${sip:+-A $sip} -B $dip -a own -b bc -q \
162576 +           -c 10 -d 100msec -t $type
162577         sleep 0.5
162578         local t1=$(tc_rule_stats_get $dev $pref)
162579         local delta=$((t1 - t0))
162580 diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
162581 index 39edce4f541c..2674ba20d524 100755
162582 --- a/tools/testing/selftests/net/mptcp/diag.sh
162583 +++ b/tools/testing/selftests/net/mptcp/diag.sh
162584 @@ -5,8 +5,9 @@ rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
162585  ns="ns1-$rndh"
162586  ksft_skip=4
162587  test_cnt=1
162588 +timeout_poll=100
162589 +timeout_test=$((timeout_poll * 2 + 1))
162590  ret=0
162591 -pids=()
162593  flush_pids()
162595 @@ -14,18 +15,14 @@ flush_pids()
162596         # give it some time
162597         sleep 1.1
162599 -       for pid in ${pids[@]}; do
162600 -               [ -d /proc/$pid ] && kill -SIGUSR1 $pid >/dev/null 2>&1
162601 -       done
162602 -       pids=()
162603 +       ip netns pids "${ns}" | xargs --no-run-if-empty kill -SIGUSR1 &>/dev/null
162606  cleanup()
162608 +       ip netns pids "${ns}" | xargs --no-run-if-empty kill -SIGKILL &>/dev/null
162610         ip netns del $ns
162611 -       for pid in ${pids[@]}; do
162612 -               [ -d /proc/$pid ] && kill -9 $pid >/dev/null 2>&1
162613 -       done
162616  ip -Version > /dev/null 2>&1
162617 @@ -79,39 +76,57 @@ trap cleanup EXIT
162618  ip netns add $ns
162619  ip -n $ns link set dev lo up
162621 -echo "a" | ip netns exec $ns ./mptcp_connect -p 10000 -l 0.0.0.0 -t 100 >/dev/null &
162622 +echo "a" | \
162623 +       timeout ${timeout_test} \
162624 +               ip netns exec $ns \
162625 +                       ./mptcp_connect -p 10000 -l -t ${timeout_poll} \
162626 +                               0.0.0.0 >/dev/null &
162627  sleep 0.1
162628 -pids[0]=$!
162629  chk_msk_nr 0 "no msk on netns creation"
162631 -echo "b" | ip netns exec $ns ./mptcp_connect -p 10000 127.0.0.1 -j -t 100 >/dev/null &
162632 +echo "b" | \
162633 +       timeout ${timeout_test} \
162634 +               ip netns exec $ns \
162635 +                       ./mptcp_connect -p 10000 -j -t ${timeout_poll} \
162636 +                               127.0.0.1 >/dev/null &
162637  sleep 0.1
162638 -pids[1]=$!
162639  chk_msk_nr 2 "after MPC handshake "
162640  chk_msk_remote_key_nr 2 "....chk remote_key"
162641  chk_msk_fallback_nr 0 "....chk no fallback"
162642  flush_pids
162645 -echo "a" | ip netns exec $ns ./mptcp_connect -p 10001 -s TCP -l 0.0.0.0 -t 100 >/dev/null &
162646 -pids[0]=$!
162647 +echo "a" | \
162648 +       timeout ${timeout_test} \
162649 +               ip netns exec $ns \
162650 +                       ./mptcp_connect -p 10001 -l -s TCP -t ${timeout_poll} \
162651 +                               0.0.0.0 >/dev/null &
162652  sleep 0.1
162653 -echo "b" | ip netns exec $ns ./mptcp_connect -p 10001 127.0.0.1 -j -t 100 >/dev/null &
162654 -pids[1]=$!
162655 +echo "b" | \
162656 +       timeout ${timeout_test} \
162657 +               ip netns exec $ns \
162658 +                       ./mptcp_connect -p 10001 -j -t ${timeout_poll} \
162659 +                               127.0.0.1 >/dev/null &
162660  sleep 0.1
162661  chk_msk_fallback_nr 1 "check fallback"
162662  flush_pids
162664  NR_CLIENTS=100
162665  for I in `seq 1 $NR_CLIENTS`; do
162666 -       echo "a" | ip netns exec $ns ./mptcp_connect -p $((I+10001)) -l 0.0.0.0 -t 100 -w 10 >/dev/null  &
162667 -       pids[$((I*2))]=$!
162668 +       echo "a" | \
162669 +               timeout ${timeout_test} \
162670 +                       ip netns exec $ns \
162671 +                               ./mptcp_connect -p $((I+10001)) -l -w 10 \
162672 +                                       -t ${timeout_poll} 0.0.0.0 >/dev/null &
162673  done
162674  sleep 0.1
162676  for I in `seq 1 $NR_CLIENTS`; do
162677 -       echo "b" | ip netns exec $ns ./mptcp_connect -p $((I+10001)) 127.0.0.1 -t 100 -w 10 >/dev/null &
162678 -       pids[$((I*2 + 1))]=$!
162679 +       echo "b" | \
162680 +               timeout ${timeout_test} \
162681 +                       ip netns exec $ns \
162682 +                               ./mptcp_connect -p $((I+10001)) -w 10 \
162683 +                                       -t ${timeout_poll} 127.0.0.1 >/dev/null &
162684  done
162685  sleep 1.5
162687 diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
162688 index 10a030b53b23..65b3b983efc2 100755
162689 --- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
162690 +++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
162691 @@ -11,7 +11,8 @@ cin=""
162692  cout=""
162693  ksft_skip=4
162694  capture=false
162695 -timeout=30
162696 +timeout_poll=30
162697 +timeout_test=$((timeout_poll * 2 + 1))
162698  ipv6=true
162699  ethtool_random_on=true
162700  tc_delay="$((RANDOM%50))"
162701 @@ -273,7 +274,7 @@ check_mptcp_disabled()
162702         ip netns exec ${disabled_ns} sysctl -q net.mptcp.enabled=0
162704         local err=0
162705 -       LANG=C ip netns exec ${disabled_ns} ./mptcp_connect -t $timeout -p 10000 -s MPTCP 127.0.0.1 < "$cin" 2>&1 | \
162706 +       LANG=C ip netns exec ${disabled_ns} ./mptcp_connect -p 10000 -s MPTCP 127.0.0.1 < "$cin" 2>&1 | \
162707                 grep -q "^socket: Protocol not available$" && err=1
162708         ip netns delete ${disabled_ns}
162710 @@ -430,14 +431,20 @@ do_transfer()
162711         local stat_cookietx_last=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesSent")
162712         local stat_cookierx_last=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesRecv")
162714 -       ip netns exec ${listener_ns} ./mptcp_connect -t $timeout -l -p $port -s ${srv_proto} $extra_args $local_addr < "$sin" > "$sout" &
162715 +       timeout ${timeout_test} \
162716 +               ip netns exec ${listener_ns} \
162717 +                       ./mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
162718 +                               $extra_args $local_addr < "$sin" > "$sout" &
162719         local spid=$!
162721         wait_local_port_listen "${listener_ns}" "${port}"
162723         local start
162724         start=$(date +%s%3N)
162725 -       ip netns exec ${connector_ns} ./mptcp_connect -t $timeout -p $port -s ${cl_proto} $extra_args $connect_addr < "$cin" > "$cout" &
162726 +       timeout ${timeout_test} \
162727 +               ip netns exec ${connector_ns} \
162728 +                       ./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
162729 +                               $extra_args $connect_addr < "$cin" > "$cout" &
162730         local cpid=$!
162732         wait $cpid
162733 diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
162734 index ad32240fbfda..43ed99de7734 100755
162735 --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
162736 +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
162737 @@ -8,7 +8,8 @@ cin=""
162738  cinsent=""
162739  cout=""
162740  ksft_skip=4
162741 -timeout=30
162742 +timeout_poll=30
162743 +timeout_test=$((timeout_poll * 2 + 1))
162744  mptcp_connect=""
162745  capture=0
162746  do_all_tests=1
162747 @@ -245,17 +246,26 @@ do_transfer()
162748                 local_addr="0.0.0.0"
162749         fi
162751 -       ip netns exec ${listener_ns} $mptcp_connect -t $timeout -l -p $port \
162752 -               -s ${srv_proto} ${local_addr} < "$sin" > "$sout" &
162753 +       timeout ${timeout_test} \
162754 +               ip netns exec ${listener_ns} \
162755 +                       $mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
162756 +                               ${local_addr} < "$sin" > "$sout" &
162757         spid=$!
162759         sleep 1
162761         if [ "$test_link_fail" -eq 0 ];then
162762 -               ip netns exec ${connector_ns} $mptcp_connect -t $timeout -p $port -s ${cl_proto} $connect_addr < "$cin" > "$cout" &
162763 +               timeout ${timeout_test} \
162764 +                       ip netns exec ${connector_ns} \
162765 +                               $mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
162766 +                                       $connect_addr < "$cin" > "$cout" &
162767         else
162768 -               ( cat "$cin" ; sleep 2; link_failure $listener_ns ; cat "$cin" ) | tee "$cinsent" | \
162769 -               ip netns exec ${connector_ns} $mptcp_connect -t $timeout -p $port -s ${cl_proto} $connect_addr > "$cout" &
162770 +               ( cat "$cin" ; sleep 2; link_failure $listener_ns ; cat "$cin" ) | \
162771 +                       tee "$cinsent" | \
162772 +                       timeout ${timeout_test} \
162773 +                               ip netns exec ${connector_ns} \
162774 +                                       $mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
162775 +                                               $connect_addr > "$cout" &
162776         fi
162777         cpid=$!
162779 diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
162780 index f039ee57eb3c..3aeef3bcb101 100755
162781 --- a/tools/testing/selftests/net/mptcp/simult_flows.sh
162782 +++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
162783 @@ -7,7 +7,8 @@ ns2="ns2-$rndh"
162784  ns3="ns3-$rndh"
162785  capture=false
162786  ksft_skip=4
162787 -timeout=30
162788 +timeout_poll=30
162789 +timeout_test=$((timeout_poll * 2 + 1))
162790  test_cnt=1
162791  ret=0
162792  bail=0
162793 @@ -157,14 +158,20 @@ do_transfer()
162794                 sleep 1
162795         fi
162797 -       ip netns exec ${ns3} ./mptcp_connect -jt $timeout -l -p $port 0.0.0.0 < "$sin" > "$sout" &
162798 +       timeout ${timeout_test} \
162799 +               ip netns exec ${ns3} \
162800 +                       ./mptcp_connect -jt ${timeout_poll} -l -p $port \
162801 +                               0.0.0.0 < "$sin" > "$sout" &
162802         local spid=$!
162804         wait_local_port_listen "${ns3}" "${port}"
162806         local start
162807         start=$(date +%s%3N)
162808 -       ip netns exec ${ns1} ./mptcp_connect -jt $timeout -p $port 10.0.3.3 < "$cin" > "$cout" &
162809 +       timeout ${timeout_test} \
162810 +               ip netns exec ${ns1} \
162811 +                       ./mptcp_connect -jt ${timeout_poll} -p $port \
162812 +                               10.0.3.3 < "$cin" > "$cout" &
162813         local cpid=$!
162815         wait $cpid
162816 diff --git a/tools/testing/selftests/powerpc/security/entry_flush.c b/tools/testing/selftests/powerpc/security/entry_flush.c
162817 index 78cf914fa321..68ce377b205e 100644
162818 --- a/tools/testing/selftests/powerpc/security/entry_flush.c
162819 +++ b/tools/testing/selftests/powerpc/security/entry_flush.c
162820 @@ -53,7 +53,7 @@ int entry_flush_test(void)
162822         entry_flush = entry_flush_orig;
162824 -       fd = perf_event_open_counter(PERF_TYPE_RAW, /* L1d miss */ 0x400f0, -1);
162825 +       fd = perf_event_open_counter(PERF_TYPE_HW_CACHE, PERF_L1D_READ_MISS_CONFIG, -1);
162826         FAIL_IF(fd < 0);
162828         p = (char *)memalign(zero_size, CACHELINE_SIZE);
162829 diff --git a/tools/testing/selftests/powerpc/security/flush_utils.h b/tools/testing/selftests/powerpc/security/flush_utils.h
162830 index 07a5eb301466..7a3d60292916 100644
162831 --- a/tools/testing/selftests/powerpc/security/flush_utils.h
162832 +++ b/tools/testing/selftests/powerpc/security/flush_utils.h
162833 @@ -9,6 +9,10 @@
162835  #define CACHELINE_SIZE 128
162837 +#define PERF_L1D_READ_MISS_CONFIG      ((PERF_COUNT_HW_CACHE_L1D) |            \
162838 +                                       (PERF_COUNT_HW_CACHE_OP_READ << 8) |    \
162839 +                                       (PERF_COUNT_HW_CACHE_RESULT_MISS << 16))
162841  void syscall_loop(char *p, unsigned long iterations,
162842                   unsigned long zero_size);
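The new macro follows the PERF_TYPE_HW_CACHE config encoding documented in perf_event_open(2): cache id in bits 0-7, operation in bits 8-15, and result in bits 16-23; the generic encoding replaces the CPU-specific raw event 0x400f0 used before. A quick check of the arithmetic (the enum values come from <linux/perf_event.h>):

    #include <linux/perf_event.h>
    #include <stdio.h>

    int main(void)
    {
            /* (cache) | (op << 8) | (result << 16) */
            unsigned long config = PERF_COUNT_HW_CACHE_L1D |
                                   (PERF_COUNT_HW_CACHE_OP_READ << 8) |
                                   (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);

            /* L1D = 0, OP_READ = 0, RESULT_MISS = 1, so this prints 0x10000 */
            printf("config = 0x%lx\n", config);
            return 0;
    }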
162844 diff --git a/tools/testing/selftests/powerpc/security/rfi_flush.c b/tools/testing/selftests/powerpc/security/rfi_flush.c
162845 index 7565fd786640..f73484a6470f 100644
162846 --- a/tools/testing/selftests/powerpc/security/rfi_flush.c
162847 +++ b/tools/testing/selftests/powerpc/security/rfi_flush.c
162848 @@ -54,7 +54,7 @@ int rfi_flush_test(void)
162850         rfi_flush = rfi_flush_orig;
162852 -       fd = perf_event_open_counter(PERF_TYPE_RAW, /* L1d miss */ 0x400f0, -1);
162853 +       fd = perf_event_open_counter(PERF_TYPE_HW_CACHE, PERF_L1D_READ_MISS_CONFIG, -1);
162854         FAIL_IF(fd < 0);
162856         p = (char *)memalign(zero_size, CACHELINE_SIZE);
162857 diff --git a/tools/testing/selftests/resctrl/Makefile b/tools/testing/selftests/resctrl/Makefile
162858 index d585cc1948cc..6bcee2ec91a9 100644
162859 --- a/tools/testing/selftests/resctrl/Makefile
162860 +++ b/tools/testing/selftests/resctrl/Makefile
162861 @@ -1,5 +1,5 @@
162862  CC = $(CROSS_COMPILE)gcc
162863 -CFLAGS = -g -Wall
162864 +CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2
162865  SRCS=$(wildcard *.c)
162866  OBJS=$(SRCS:.c=.o)
162868 diff --git a/tools/testing/selftests/resctrl/cache.c b/tools/testing/selftests/resctrl/cache.c
162869 index 38dbf4962e33..5922cc1b0386 100644
162870 --- a/tools/testing/selftests/resctrl/cache.c
162871 +++ b/tools/testing/selftests/resctrl/cache.c
162872 @@ -182,7 +182,7 @@ int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
162873         /*
162874          * Measure cache miss from perf.
162875          */
162876 -       if (!strcmp(param->resctrl_val, "cat")) {
162877 +       if (!strncmp(param->resctrl_val, CAT_STR, sizeof(CAT_STR))) {
162878                 ret = get_llc_perf(&llc_perf_miss);
162879                 if (ret < 0)
162880                         return ret;
162881 @@ -192,7 +192,7 @@ int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
162882         /*
162883          * Measure llc occupancy from resctrl.
162884          */
162885 -       if (!strcmp(param->resctrl_val, "cqm")) {
162886 +       if (!strncmp(param->resctrl_val, CQM_STR, sizeof(CQM_STR))) {
162887                 ret = get_llc_occu_resctrl(&llc_occu_resc);
162888                 if (ret < 0)
162889                         return ret;
162890 @@ -234,7 +234,7 @@ int cat_val(struct resctrl_val_param *param)
162891         if (ret)
162892                 return ret;
162894 -       if ((strcmp(resctrl_val, "cat") == 0)) {
162895 +       if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
162896                 ret = initialize_llc_perf();
162897                 if (ret)
162898                         return ret;
162899 @@ -242,7 +242,7 @@ int cat_val(struct resctrl_val_param *param)
162901         /* Test runs until the callback setup() tells the test to stop. */
162902         while (1) {
162903 -               if (strcmp(resctrl_val, "cat") == 0) {
162904 +               if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
162905                         ret = param->setup(1, param);
162906                         if (ret) {
162907                                 ret = 0;
162908 diff --git a/tools/testing/selftests/resctrl/cat_test.c b/tools/testing/selftests/resctrl/cat_test.c
162909 index 5da43767b973..20823725daca 100644
162910 --- a/tools/testing/selftests/resctrl/cat_test.c
162911 +++ b/tools/testing/selftests/resctrl/cat_test.c
162912 @@ -17,10 +17,10 @@
162913  #define MAX_DIFF_PERCENT       4
162914  #define MAX_DIFF               1000000
162916 -int count_of_bits;
162917 -char cbm_mask[256];
162918 -unsigned long long_mask;
162919 -unsigned long cache_size;
162920 +static int count_of_bits;
162921 +static char cbm_mask[256];
162922 +static unsigned long long_mask;
162923 +static unsigned long cache_size;
162926   * Change schemata. Write schemata to specified
162927 @@ -136,7 +136,7 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
162928                 return -1;
162930         /* Get default cbm mask for L3/L2 cache */
162931 -       ret = get_cbm_mask(cache_type);
162932 +       ret = get_cbm_mask(cache_type, cbm_mask);
162933         if (ret)
162934                 return ret;
162936 @@ -164,7 +164,7 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
162937                 return -1;
162939         struct resctrl_val_param param = {
162940 -               .resctrl_val    = "cat",
162941 +               .resctrl_val    = CAT_STR,
162942                 .cpu_no         = cpu_no,
162943                 .mum_resctrlfs  = 0,
162944                 .setup          = cat_setup,
162945 diff --git a/tools/testing/selftests/resctrl/cqm_test.c b/tools/testing/selftests/resctrl/cqm_test.c
162946 index c8756152bd61..271752e9ef5b 100644
162947 --- a/tools/testing/selftests/resctrl/cqm_test.c
162948 +++ b/tools/testing/selftests/resctrl/cqm_test.c
162949 @@ -16,10 +16,10 @@
162950  #define MAX_DIFF               2000000
162951  #define MAX_DIFF_PERCENT       15
162953 -int count_of_bits;
162954 -char cbm_mask[256];
162955 -unsigned long long_mask;
162956 -unsigned long cache_size;
162957 +static int count_of_bits;
162958 +static char cbm_mask[256];
162959 +static unsigned long long_mask;
162960 +static unsigned long cache_size;
162962  static int cqm_setup(int num, ...)
162964 @@ -86,7 +86,7 @@ static int check_results(struct resctrl_val_param *param, int no_of_bits)
162965                 return errno;
162966         }
162968 -       while (fgets(temp, 1024, fp)) {
162969 +       while (fgets(temp, sizeof(temp), fp)) {
162970                 char *token = strtok(temp, ":\t");
162971                 int fields = 0;
162973 @@ -125,7 +125,7 @@ int cqm_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
162974         if (!validate_resctrl_feature_request("cqm"))
162975                 return -1;
162977 -       ret = get_cbm_mask("L3");
162978 +       ret = get_cbm_mask("L3", cbm_mask);
162979         if (ret)
162980                 return ret;
162982 @@ -145,7 +145,7 @@ int cqm_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
162983         }
162985         struct resctrl_val_param param = {
162986 -               .resctrl_val    = "cqm",
162987 +               .resctrl_val    = CQM_STR,
162988                 .ctrlgrp        = "c1",
162989                 .mongrp         = "m1",
162990                 .cpu_no         = cpu_no,
162991 diff --git a/tools/testing/selftests/resctrl/fill_buf.c b/tools/testing/selftests/resctrl/fill_buf.c
162992 index 79c611c99a3d..51e5cf22632f 100644
162993 --- a/tools/testing/selftests/resctrl/fill_buf.c
162994 +++ b/tools/testing/selftests/resctrl/fill_buf.c
162995 @@ -115,7 +115,7 @@ static int fill_cache_read(unsigned char *start_ptr, unsigned char *end_ptr,
162997         while (1) {
162998                 ret = fill_one_span_read(start_ptr, end_ptr);
162999 -               if (!strcmp(resctrl_val, "cat"))
163000 +               if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)))
163001                         break;
163002         }
163004 @@ -134,7 +134,7 @@ static int fill_cache_write(unsigned char *start_ptr, unsigned char *end_ptr,
163006         while (1) {
163007                 fill_one_span_write(start_ptr, end_ptr);
163008 -               if (!strcmp(resctrl_val, "cat"))
163009 +               if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)))
163010                         break;
163011         }
163013 diff --git a/tools/testing/selftests/resctrl/mba_test.c b/tools/testing/selftests/resctrl/mba_test.c
163014 index 7bf8eaa6204b..6449fbd96096 100644
163015 --- a/tools/testing/selftests/resctrl/mba_test.c
163016 +++ b/tools/testing/selftests/resctrl/mba_test.c
163017 @@ -141,7 +141,7 @@ void mba_test_cleanup(void)
163018  int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd)
163020         struct resctrl_val_param param = {
163021 -               .resctrl_val    = "mba",
163022 +               .resctrl_val    = MBA_STR,
163023                 .ctrlgrp        = "c1",
163024                 .mongrp         = "m1",
163025                 .cpu_no         = cpu_no,
163026 diff --git a/tools/testing/selftests/resctrl/mbm_test.c b/tools/testing/selftests/resctrl/mbm_test.c
163027 index 4700f7453f81..ec6cfe01c9c2 100644
163028 --- a/tools/testing/selftests/resctrl/mbm_test.c
163029 +++ b/tools/testing/selftests/resctrl/mbm_test.c
163030 @@ -114,7 +114,7 @@ void mbm_test_cleanup(void)
163031  int mbm_bw_change(int span, int cpu_no, char *bw_report, char **benchmark_cmd)
163033         struct resctrl_val_param param = {
163034 -               .resctrl_val    = "mbm",
163035 +               .resctrl_val    = MBM_STR,
163036                 .ctrlgrp        = "c1",
163037                 .mongrp         = "m1",
163038                 .span           = span,
163039 diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
163040 index 39bf59c6b9c5..9dcc96e1ad3d 100644
163041 --- a/tools/testing/selftests/resctrl/resctrl.h
163042 +++ b/tools/testing/selftests/resctrl/resctrl.h
163043 @@ -28,6 +28,10 @@
163044  #define RESCTRL_PATH           "/sys/fs/resctrl"
163045  #define PHYS_ID_PATH           "/sys/devices/system/cpu/cpu"
163046  #define CBM_MASK_PATH          "/sys/fs/resctrl/info"
163047 +#define L3_PATH                        "/sys/fs/resctrl/info/L3"
163048 +#define MB_PATH                        "/sys/fs/resctrl/info/MB"
163049 +#define L3_MON_PATH            "/sys/fs/resctrl/info/L3_MON"
163050 +#define L3_MON_FEATURES_PATH   "/sys/fs/resctrl/info/L3_MON/mon_features"
163052  #define PARENT_EXIT(err_msg)                   \
163053         do {                                    \
163054 @@ -62,11 +66,16 @@ struct resctrl_val_param {
163055         int             (*setup)(int num, ...);
163058 -pid_t bm_pid, ppid;
163059 -int tests_run;
163060 +#define MBM_STR                        "mbm"
163061 +#define MBA_STR                        "mba"
163062 +#define CQM_STR                        "cqm"
163063 +#define CAT_STR                        "cat"
163065 -char llc_occup_path[1024];
163066 -bool is_amd;
163067 +extern pid_t bm_pid, ppid;
163068 +extern int tests_run;
163070 +extern char llc_occup_path[1024];
163071 +extern bool is_amd;
163073  bool check_resctrlfs_support(void);
163074  int filter_dmesg(void);
163075 @@ -74,7 +83,7 @@ int remount_resctrlfs(bool mum_resctrlfs);
163076  int get_resource_id(int cpu_no, int *resource_id);
163077  int umount_resctrlfs(void);
163078  int validate_bw_report_request(char *bw_report);
163079 -bool validate_resctrl_feature_request(char *resctrl_val);
163080 +bool validate_resctrl_feature_request(const char *resctrl_val);
163081  char *fgrep(FILE *inf, const char *str);
163082  int taskset_benchmark(pid_t bm_pid, int cpu_no);
163083  void run_benchmark(int signum, siginfo_t *info, void *ucontext);
163084 @@ -92,7 +101,7 @@ void tests_cleanup(void);
163085  void mbm_test_cleanup(void);
163086  int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd);
163087  void mba_test_cleanup(void);
163088 -int get_cbm_mask(char *cache_type);
163089 +int get_cbm_mask(char *cache_type, char *cbm_mask);
163090  int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size);
163091  void ctrlc_handler(int signum, siginfo_t *info, void *ptr);
163092  int cat_val(struct resctrl_val_param *param);
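One subtlety behind the strcmp()-to-strncmp() conversions in this series: the bound sizeof(CAT_STR) includes the terminating NUL, so the comparison also checks the terminator and therefore behaves as an exact match, not a prefix match. A tiny illustration:

    #include <stdio.h>
    #include <string.h>

    #define CAT_STR "cat"

    int main(void)
    {
            /* sizeof("cat") == 4: 'c', 'a', 't' and '\0' are all compared. */
            printf("%d\n", !strncmp("cat", CAT_STR, sizeof(CAT_STR)));    /* 1 */
            printf("%d\n", !strncmp("cattle", CAT_STR, sizeof(CAT_STR))); /* 0 */
            return 0;
    }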
163093 diff --git a/tools/testing/selftests/resctrl/resctrl_tests.c b/tools/testing/selftests/resctrl/resctrl_tests.c
163094 index 425cc85ac883..ac2269610aa9 100644
163095 --- a/tools/testing/selftests/resctrl/resctrl_tests.c
163096 +++ b/tools/testing/selftests/resctrl/resctrl_tests.c
163097 @@ -73,7 +73,7 @@ int main(int argc, char **argv)
163098                 }
163099         }
163101 -       while ((c = getopt(argc_new, argv, "ht:b:")) != -1) {
163102 +       while ((c = getopt(argc_new, argv, "ht:b:n:p:")) != -1) {
163103                 char *token;
163105                 switch (c) {
163106 @@ -85,13 +85,13 @@ int main(int argc, char **argv)
163107                         cqm_test = false;
163108                         cat_test = false;
163109                         while (token) {
163110 -                               if (!strcmp(token, "mbm")) {
163111 +                               if (!strncmp(token, MBM_STR, sizeof(MBM_STR))) {
163112                                         mbm_test = true;
163113 -                               } else if (!strcmp(token, "mba")) {
163114 +                               } else if (!strncmp(token, MBA_STR, sizeof(MBA_STR))) {
163115                                         mba_test = true;
163116 -                               } else if (!strcmp(token, "cqm")) {
163117 +                               } else if (!strncmp(token, CQM_STR, sizeof(CQM_STR))) {
163118                                         cqm_test = true;
163119 -                               } else if (!strcmp(token, "cat")) {
163120 +                               } else if (!strncmp(token, CAT_STR, sizeof(CAT_STR))) {
163121                                         cat_test = true;
163122                                 } else {
163123                                         printf("invalid argument\n");
163124 @@ -161,7 +161,7 @@ int main(int argc, char **argv)
163125         if (!is_amd && mbm_test) {
163126                 printf("# Starting MBM BW change ...\n");
163127                 if (!has_ben)
163128 -                       sprintf(benchmark_cmd[5], "%s", "mba");
163129 +                       sprintf(benchmark_cmd[5], "%s", MBA_STR);
163130                 res = mbm_bw_change(span, cpu_no, bw_report, benchmark_cmd);
163131                 printf("%sok MBM: bw change\n", res ? "not " : "");
163132                 mbm_test_cleanup();
163133 @@ -181,7 +181,7 @@ int main(int argc, char **argv)
163134         if (cqm_test) {
163135                 printf("# Starting CQM test ...\n");
163136                 if (!has_ben)
163137 -                       sprintf(benchmark_cmd[5], "%s", "cqm");
163138 +                       sprintf(benchmark_cmd[5], "%s", CQM_STR);
163139                 res = cqm_resctrl_val(cpu_no, no_of_bits, benchmark_cmd);
163140                 printf("%sok CQM: test\n", res ? "not " : "");
163141                 cqm_test_cleanup();
diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
index 520fea3606d1..8df557894059 100644
--- a/tools/testing/selftests/resctrl/resctrl_val.c
+++ b/tools/testing/selftests/resctrl/resctrl_val.c
@@ -221,8 +221,8 @@ static int read_from_imc_dir(char *imc_dir, int count)
  */
 static int num_of_imcs(void)
 {
+       char imc_dir[512], *temp;
        unsigned int count = 0;
-       char imc_dir[512];
        struct dirent *ep;
        int ret;
        DIR *dp;
@@ -230,7 +230,25 @@ static int num_of_imcs(void)
        dp = opendir(DYN_PMU_PATH);
        if (dp) {
                while ((ep = readdir(dp))) {
-                       if (strstr(ep->d_name, UNCORE_IMC)) {
+                       temp = strstr(ep->d_name, UNCORE_IMC);
+                       if (!temp)
+                               continue;
+
+                       /*
+                        * imc counters are named as "uncore_imc_<n>", hence
+                        * increment the pointer to point to <n>. Note that
+                        * sizeof(UNCORE_IMC) would count for null character as
+                        * well and hence the last underscore character in
+                        * uncore_imc'_' need not be counted.
+                        */
+                       temp = temp + sizeof(UNCORE_IMC);
+
+                       /*
+                        * Some directories under "DYN_PMU_PATH" could have
+                        * names like "uncore_imc_free_running", hence, check if
+                        * first character is a numerical digit or not.
+                        */
+                       if (temp[0] >= '0' && temp[0] <= '9') {
                                sprintf(imc_dir, "%s/%s/", DYN_PMU_PATH,
                                        ep->d_name);
                                ret = read_from_imc_dir(imc_dir, count);
@@ -282,9 +300,9 @@ static int initialize_mem_bw_imc(void)
  * Memory B/W utilized by a process on a socket can be calculated using
  * iMC counters. Perf events are used to read these counters.
  *
- * Return: >= 0 on success. < 0 on failure.
+ * Return: = 0 on success. < 0 on failure.
  */
-static float get_mem_bw_imc(int cpu_no, char *bw_report)
+static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
 {
        float reads, writes, of_mul_read, of_mul_write;
        int imc, j, ret;
@@ -355,13 +373,18 @@ static float get_mem_bw_imc(int cpu_no, char *bw_report)
                close(imc_counters_config[imc][WRITE].fd);
        }
 
-       if (strcmp(bw_report, "reads") == 0)
-               return reads;
+       if (strcmp(bw_report, "reads") == 0) {
+               *bw_imc = reads;
+               return 0;
+       }
 
-       if (strcmp(bw_report, "writes") == 0)
-               return writes;
+       if (strcmp(bw_report, "writes") == 0) {
+               *bw_imc = writes;
+               return 0;
+       }
 
-       return (reads + writes);
+       *bw_imc = reads + writes;
+       return 0;
 }
 
 void set_mbm_path(const char *ctrlgrp, const char *mongrp, int resource_id)
@@ -397,10 +420,10 @@ static void initialize_mem_bw_resctrl(const char *ctrlgrp, const char *mongrp,
                return;
        }
 
-       if (strcmp(resctrl_val, "mbm") == 0)
+       if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)))
                set_mbm_path(ctrlgrp, mongrp, resource_id);
 
-       if ((strcmp(resctrl_val, "mba") == 0)) {
+       if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
                if (ctrlgrp)
                        sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH,
                                RESCTRL_PATH, ctrlgrp, resource_id);
@@ -420,9 +443,8 @@ static void initialize_mem_bw_resctrl(const char *ctrlgrp, const char *mongrp,
  * 1. If con_mon grp is given, then read from it
  * 2. If con_mon grp is not given, then read from root con_mon grp
  */
-static unsigned long get_mem_bw_resctrl(void)
+static int get_mem_bw_resctrl(unsigned long *mbm_total)
 {
-       unsigned long mbm_total = 0;
        FILE *fp;
 
        fp = fopen(mbm_total_path, "r");
@@ -431,7 +453,7 @@ static unsigned long get_mem_bw_resctrl(void)
 
                return -1;
        }
-       if (fscanf(fp, "%lu", &mbm_total) <= 0) {
+       if (fscanf(fp, "%lu", mbm_total) <= 0) {
                perror("Could not get mbm local bytes");
                fclose(fp);
 
@@ -439,7 +461,7 @@ static unsigned long get_mem_bw_resctrl(void)
        }
        fclose(fp);
 
-       return mbm_total;
+       return 0;
 }
 
 pid_t bm_pid, ppid;
@@ -524,14 +546,15 @@ static void initialize_llc_occu_resctrl(const char *ctrlgrp, const char *mongrp,
                return;
        }
 
-       if (strcmp(resctrl_val, "cqm") == 0)
+       if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
                set_cqm_path(ctrlgrp, mongrp, resource_id);
 }
 
 static int
 measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
 {
-       unsigned long bw_imc, bw_resc, bw_resc_end;
+       unsigned long bw_resc, bw_resc_end;
+       float bw_imc;
        int ret;
 
        /*
@@ -541,13 +564,13 @@ measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
         * Compare the two values to validate resctrl value.
         * It takes 1sec to measure the data.
         */
-       bw_imc = get_mem_bw_imc(param->cpu_no, param->bw_report);
-       if (bw_imc <= 0)
-               return bw_imc;
+       ret = get_mem_bw_imc(param->cpu_no, param->bw_report, &bw_imc);
+       if (ret < 0)
+               return ret;
 
-       bw_resc_end = get_mem_bw_resctrl();
-       if (bw_resc_end <= 0)
-               return bw_resc_end;
+       ret = get_mem_bw_resctrl(&bw_resc_end);
+       if (ret < 0)
+               return ret;
 
        bw_resc = (bw_resc_end - *bw_resc_start) / MB;
        ret = print_results_bw(param->filename, bm_pid, bw_imc, bw_resc);
@@ -579,8 +602,8 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
        if (strcmp(param->filename, "") == 0)
                sprintf(param->filename, "stdio");
 
-       if ((strcmp(resctrl_val, "mba")) == 0 ||
-           (strcmp(resctrl_val, "mbm")) == 0) {
+       if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) ||
+           !strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
                ret = validate_bw_report_request(param->bw_report);
                if (ret)
                        return ret;
@@ -674,15 +697,15 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
        if (ret)
                goto out;
 
-       if ((strcmp(resctrl_val, "mbm") == 0) ||
-           (strcmp(resctrl_val, "mba") == 0)) {
+       if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
+           !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
                ret = initialize_mem_bw_imc();
                if (ret)
                        goto out;
 
                initialize_mem_bw_resctrl(param->ctrlgrp, param->mongrp,
                                          param->cpu_no, resctrl_val);
-       } else if (strcmp(resctrl_val, "cqm") == 0)
+       } else if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
                initialize_llc_occu_resctrl(param->ctrlgrp, param->mongrp,
                                            param->cpu_no, resctrl_val);
 
@@ -710,8 +733,8 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
 
        /* Test runs until the callback setup() tells the test to stop. */
        while (1) {
-               if ((strcmp(resctrl_val, "mbm") == 0) ||
-                   (strcmp(resctrl_val, "mba") == 0)) {
+               if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
+                   !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
                        ret = param->setup(1, param);
                        if (ret) {
                                ret = 0;
@@ -721,7 +744,7 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
                        ret = measure_vals(param, &bw_resc_start);
                        if (ret)
                                break;
-               } else if (strcmp(resctrl_val, "cqm") == 0) {
+               } else if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR))) {
                        ret = param->setup(1, param);
                        if (ret) {
                                ret = 0;
diff --git a/tools/testing/selftests/resctrl/resctrlfs.c b/tools/testing/selftests/resctrl/resctrlfs.c
index 19c0ec4045a4..b57170f53861 100644
--- a/tools/testing/selftests/resctrl/resctrlfs.c
+++ b/tools/testing/selftests/resctrl/resctrlfs.c
@@ -49,8 +49,6 @@ static int find_resctrl_mount(char *buffer)
        return -ENOENT;
 }
 
-char cbm_mask[256];
-
 /*
  * remount_resctrlfs - Remount resctrl FS at /sys/fs/resctrl
  * @mum_resctrlfs:     Should the resctrl FS be remounted?
@@ -205,16 +203,18 @@ int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size)
 /*
  * get_cbm_mask - Get cbm mask for given cache
  * @cache_type:        Cache level L2/L3
- *
- * Mask is stored in cbm_mask which is global variable.
+ * @cbm_mask:  cbm_mask returned as a string
  *
  * Return: = 0 on success, < 0 on failure.
  */
-int get_cbm_mask(char *cache_type)
+int get_cbm_mask(char *cache_type, char *cbm_mask)
 {
        char cbm_mask_path[1024];
        FILE *fp;
 
+       if (!cbm_mask)
+               return -1;
+
        sprintf(cbm_mask_path, "%s/%s/cbm_mask", CBM_MASK_PATH, cache_type);
 
        fp = fopen(cbm_mask_path, "r");
@@ -334,7 +334,7 @@ void run_benchmark(int signum, siginfo_t *info, void *ucontext)
                operation = atoi(benchmark_cmd[4]);
                sprintf(resctrl_val, "%s", benchmark_cmd[5]);
 
-               if (strcmp(resctrl_val, "cqm") != 0)
+               if (strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
                        buffer_span = span * MB;
                else
                        buffer_span = span;
@@ -459,8 +459,8 @@ int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
                goto out;
 
        /* Create mon grp and write pid into it for "mbm" and "cqm" test */
-       if ((strcmp(resctrl_val, "cqm") == 0) ||
-           (strcmp(resctrl_val, "mbm") == 0)) {
+       if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)) ||
+           !strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
                if (strlen(mongrp)) {
                        sprintf(monitorgroup_p, "%s/mon_groups", controlgroup);
                        sprintf(monitorgroup, "%s/%s", monitorgroup_p, mongrp);
@@ -505,9 +505,9 @@ int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val)
        int resource_id, ret = 0;
        FILE *fp;
 
-       if ((strcmp(resctrl_val, "mba") != 0) &&
-           (strcmp(resctrl_val, "cat") != 0) &&
-           (strcmp(resctrl_val, "cqm") != 0))
+       if (strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) &&
+           strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)) &&
+           strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
                return -ENOENT;
 
        if (!schemata) {
@@ -528,9 +528,10 @@ int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val)
        else
                sprintf(controlgroup, "%s/schemata", RESCTRL_PATH);
 
-       if (!strcmp(resctrl_val, "cat") || !strcmp(resctrl_val, "cqm"))
+       if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)) ||
+           !strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
                sprintf(schema, "%s%d%c%s", "L3:", resource_id, '=', schemata);
-       if (strcmp(resctrl_val, "mba") == 0)
+       if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)))
                sprintf(schema, "%s%d%c%s", "MB:", resource_id, '=', schemata);
 
        fp = fopen(controlgroup, "w");
@@ -615,26 +616,56 @@ char *fgrep(FILE *inf, const char *str)
  * validate_resctrl_feature_request - Check if requested feature is valid.
  * @resctrl_val:       Requested feature
  *
- * Return: 0 on success, non-zero on failure
+ * Return: True if the feature is supported, else false
  */
-bool validate_resctrl_feature_request(char *resctrl_val)
+bool validate_resctrl_feature_request(const char *resctrl_val)
 {
-       FILE *inf = fopen("/proc/cpuinfo", "r");
+       struct stat statbuf;
        bool found = false;
        char *res;
+       FILE *inf;
 
-       if (!inf)
+       if (!resctrl_val)
                return false;
 
-       res = fgrep(inf, "flags");
-
-       if (res) {
-               char *s = strchr(res, ':');
+       if (remount_resctrlfs(false))
+               return false;
 
-               found = s && !strstr(s, resctrl_val);
-               free(res);
+       if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
+               if (!stat(L3_PATH, &statbuf))
+                       return true;
+       } else if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
+               if (!stat(MB_PATH, &statbuf))
+                       return true;
+       } else if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
+                  !strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
+               if (!stat(L3_MON_PATH, &statbuf)) {
+                       inf = fopen(L3_MON_FEATURES_PATH, "r");
+                       if (!inf)
+                               return false;
+
+                       if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
+                               res = fgrep(inf, "llc_occupancy");
+                               if (res) {
+                                       found = true;
+                                       free(res);
+                               }
+                       }
+
+                       if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
+                               res = fgrep(inf, "mbm_total_bytes");
+                               if (res) {
+                                       free(res);
+                                       res = fgrep(inf, "mbm_local_bytes");
+                                       if (res) {
+                                               found = true;
+                                               free(res);
+                                       }
+                               }
+                       }
+                       fclose(inf);
+               }
        }
-       fclose(inf);
 
        return found;
 }
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 98c3b647f54d..e3d5c77a8612 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1753,16 +1753,25 @@ TEST_F(TRACE_poke, getpid_runs_normally)
 # define SYSCALL_RET_SET(_regs, _val)                          \
        do {                                                    \
                typeof(_val) _result = (_val);                  \
-               /*                                              \
-                * A syscall error is signaled by CR0 SO bit    \
-                * and the code is stored as a positive value.  \
-                */                                             \
-               if (_result < 0) {                              \
-                       SYSCALL_RET(_regs) = -_result;          \
-                       (_regs).ccr |= 0x10000000;              \
-               } else {                                        \
+               if ((_regs.trap & 0xfff0) == 0x3000) {          \
+                       /*                                      \
+                        * scv 0 system call uses -ve result    \
+                        * for error, so no need to adjust.     \
+                        */                                     \
                        SYSCALL_RET(_regs) = _result;           \
-                       (_regs).ccr &= ~0x10000000;             \
+               } else {                                        \
+                       /*                                      \
+                        * A syscall error is signaled by the   \
+                        * CR0 SO bit and the code is stored as \
+                        * a positive value.                    \
+                        */                                     \
+                       if (_result < 0) {                      \
+                               SYSCALL_RET(_regs) = -_result;  \
+                               (_regs).ccr |= 0x10000000;      \
+                       } else {                                \
+                               SYSCALL_RET(_regs) = _result;   \
+                               (_regs).ccr &= ~0x10000000;     \
+                       }                                       \
                }                                               \
        } while (0)
 # define SYSCALL_RET_SET_ON_PTRACE_EXIT
diff --git a/tools/testing/selftests/x86/thunks_32.S b/tools/testing/selftests/x86/thunks_32.S
index a71d92da8f46..f3f56e681e9f 100644
--- a/tools/testing/selftests/x86/thunks_32.S
+++ b/tools/testing/selftests/x86/thunks_32.S
@@ -45,3 +45,5 @@ call64_from_32:
        ret
 
 .size call64_from_32, .-call64_from_32
+
+.section .note.GNU-stack,"",%progbits
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 62bd908ecd58..f08f5e82460b 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -174,21 +174,36 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
                                           struct kvm_coalesced_mmio_zone *zone)
 {
        struct kvm_coalesced_mmio_dev *dev, *tmp;
+       int r;
 
        if (zone->pio != 1 && zone->pio != 0)
                return -EINVAL;
 
        mutex_lock(&kvm->slots_lock);
 
-       list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
+       list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) {
                if (zone->pio == dev->zone.pio &&
                    coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
-                       kvm_io_bus_unregister_dev(kvm,
+                       r = kvm_io_bus_unregister_dev(kvm,
                                zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
                        kvm_iodevice_destructor(&dev->dev);
+
+                       /*
+                        * On failure, unregister destroys all devices on the
+                        * bus _except_ the target device, i.e. coalesced_zones
+                        * has been modified.  No need to restart the walk as
+                        * there aren't any zones left.
+                        */
+                       if (r)
+                               break;
                }
+       }
 
        mutex_unlock(&kvm->slots_lock);
 
+       /*
+        * Ignore the result of kvm_io_bus_unregister_dev(), from userspace's
+        * perspective, the coalesced MMIO is most definitely unregistered.
+        */
        return 0;
 }
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 383df23514b9..5cabc6c748db 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2758,8 +2758,8 @@ static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
        if (val < grow_start)
                val = grow_start;
 
-       if (val > halt_poll_ns)
-               val = halt_poll_ns;
+       if (val > vcpu->kvm->max_halt_poll_ns)
+               val = vcpu->kvm->max_halt_poll_ns;
 
        vcpu->halt_poll_ns = val;
 out:
@@ -2838,7 +2838,8 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
                                goto out;
                        }
                        poll_end = cur = ktime_get();
-               } while (single_task_running() && ktime_before(cur, stop));
+               } while (single_task_running() && !need_resched() &&
+                        ktime_before(cur, stop));
        }
 
        prepare_to_rcuwait(&vcpu->wait);
@@ -4486,15 +4487,15 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 }
 
 /* Caller must hold slots_lock. */
-void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
-                              struct kvm_io_device *dev)
+int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+                             struct kvm_io_device *dev)
 {
        int i, j;
        struct kvm_io_bus *new_bus, *bus;
 
        bus = kvm_get_bus(kvm, bus_idx);
        if (!bus)
-               return;
+               return 0;
 
        for (i = 0; i < bus->dev_count; i++)
                if (bus->range[i].dev == dev) {
@@ -4502,7 +4503,7 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                }
 
        if (i == bus->dev_count)
-               return;
+               return 0;
 
        new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
                          GFP_KERNEL_ACCOUNT);
@@ -4511,7 +4512,13 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                new_bus->dev_count--;
                memcpy(new_bus->range + i, bus->range + i + 1,
                                flex_array_size(new_bus, range, new_bus->dev_count - i));
-       } else {
+       }
+
+       rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
+       synchronize_srcu_expedited(&kvm->srcu);
+
+       /* Destroy the old bus _after_ installing the (null) bus. */
+       if (!new_bus) {
                pr_err("kvm: failed to shrink bus, removing it completely\n");
                for (j = 0; j < bus->dev_count; j++) {
                        if (j == i)
@@ -4520,10 +4527,8 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                }
        }
 
-       rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
-       synchronize_srcu_expedited(&kvm->srcu);
        kfree(bus);
-       return;
+       return new_bus ? 0 : -ENOMEM;
 }
 
 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,